Merge branch 'master' into get-vllogs-tenants

commit f9568661ea
Dmytro Kozlov, 2024-11-11 10:41:47 +01:00, committed by GitHub
252 changed files with 14261 additions and 2827 deletions


@@ -60,8 +60,8 @@ body:
 For VictoriaMetrics health-state issues please provide full-length screenshots
 of Grafana dashboards if possible:
-* [Grafana dashboard for single-node VictoriaMetrics](https://grafana.com/grafana/dashboards/10229/)
-* [Grafana dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176/)
+* [Grafana dashboard for single-node VictoriaMetrics](https://grafana.com/grafana/dashboards/10229)
+* [Grafana dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176)
 See how to setup monitoring here:
 * [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/#monitoring)


@@ -527,8 +527,8 @@ test-full:
 test-full-386:
 	DISABLE_FSYNC_FOR_TESTING=1 GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

-integration-test: all
-	go test ./apptest/...
+integration-test: victoria-metrics vmagent vmalert vmauth
+	go test ./apptest/... -skip="^TestCluster.*"

 benchmark:
 	go test -bench=. ./lib/...


@@ -2,10 +2,10 @@
 "name": "subquery-aggregation",
 "issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/184",
 "data": [
-	"forms_daily_count;item=x 1 {TIME_S-1m}",
-	"forms_daily_count;item=x 2 {TIME_S-2m}",
-	"forms_daily_count;item=y 3 {TIME_S-1m}",
-	"forms_daily_count;item=y 4 {TIME_S-2m}"],
+	"forms_daily_count;item=x 1 {TIME_S-59s}",
+	"forms_daily_count;item=x 2 {TIME_S-1m59s}",
+	"forms_daily_count;item=y 3 {TIME_S-59s}",
+	"forms_daily_count;item=y 4 {TIME_S-1m59s}"],
 "query": ["/api/v1/query?query=min%20by%20(item)%20(min_over_time(forms_daily_count[10m:1m]))&time={TIME_S-1m}&latency_offset=1ms"],
 "result_query": {
 "status":"success",


@@ -0,0 +1,185 @@
package datadog
import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/valyala/fastjson"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
)
var parserPool fastjson.ParserPool
// RequestHandler processes Datadog insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
switch path {
case "/api/v1/validate":
fmt.Fprintf(w, `{}`)
return true
case "/api/v2/logs":
return datadogLogsIngestion(w, r)
default:
return false
}
}
func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
w.Header().Add("Content-Type", "application/json")
startTime := time.Now()
v2LogsRequestsTotal.Inc()
reader := r.Body
var ts int64
if tsValue := r.Header.Get("dd-message-timestamp"); tsValue != "" && tsValue != "0" {
var err error
ts, err = strconv.ParseInt(tsValue, 10, 64)
if err != nil {
httpserver.Errorf(w, r, "could not parse dd-message-timestamp header value: %s", err)
return true
}
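// the dd-message-timestamp header value is in milliseconds; convert it to the nanosecond precision used for ingestion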
ts *= 1e6
} else {
ts = startTime.UnixNano()
}
if r.Header.Get("Content-Encoding") == "gzip" {
zr, err := common.GetGzipReader(reader)
if err != nil {
httpserver.Errorf(w, r, "cannot read gzipped logs request: %s", err)
return true
}
defer common.PutGzipReader(zr)
reader = zr
}
wcr := writeconcurrencylimiter.GetReader(reader)
data, err := io.ReadAll(wcr)
writeconcurrencylimiter.PutReader(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return true
}
cp, err := insertutils.GetCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
}
if err := vlstorage.CanWriteData(); err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
}
lmp := cp.NewLogMessageProcessor()
n, err := readLogsRequest(ts, data, lmp.AddRow)
lmp.MustClose()
if n > 0 {
rowsIngestedTotal.Add(n)
}
if err != nil {
logger.Warnf("cannot decode log message in /api/v2/logs request: %s, stream fields: %s", err, cp.StreamFields)
return true
}
// update v2LogsRequestDuration only for successfully parsed requests.
// There is no need to update v2LogsRequestDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
v2LogsRequestDuration.UpdateDuration(startTime)
fmt.Fprintf(w, `{}`)
return true
}
var (
v2LogsRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/datadog/api/v2/logs"}`)
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="datadog"}`)
v2LogsRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/datadog/api/v2/logs"}`)
)
// readLogsRequest parses data according to DataDog logs format
// https://docs.datadoghq.com/api/latest/logs/#send-logs
func readLogsRequest(ts int64, data []byte, processLogMessage func(int64, []logstorage.Field)) (int, error) {
p := parserPool.Get()
defer parserPool.Put(p)
v, err := p.ParseBytes(data)
if err != nil {
return 0, fmt.Errorf("cannot parse JSON request body: %w", err)
}
records, err := v.Array()
if err != nil {
return 0, fmt.Errorf("cannot extract array from parsed JSON: %w", err)
}
var fields []logstorage.Field
for m, r := range records {
o, err := r.Object()
if err != nil {
return m + 1, fmt.Errorf("could not extract log record: %w", err)
}
o.Visit(func(k []byte, v *fastjson.Value) {
if err != nil {
return
}
val, e := v.StringBytes()
if e != nil {
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
return
}
switch string(k) {
case "message":
fields = append(fields, logstorage.Field{
Name: "_msg",
Value: bytesutil.ToUnsafeString(val),
})
case "ddtags":
// https://docs.datadoghq.com/getting_started/tagging/
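// e.g. "env:prod,version:1.2,debug" yields the fields env=prod, version=1.2 and debug=no_label_value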
var pair []byte
idx := 0
for idx >= 0 {
idx = bytes.IndexByte(val, ',')
if idx < 0 {
pair = val
} else {
pair = val[:idx]
val = val[idx+1:]
}
if len(pair) > 0 {
n := bytes.IndexByte(pair, ':')
if n < 0 {
// No tag value.
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(pair),
Value: "no_label_value",
})
// skip the key:value split below, since there is no ':' in this pair
continue
}
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(pair[:n]),
Value: bytesutil.ToUnsafeString(pair[n+1:]),
})
}
}
default:
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(k),
Value: bytesutil.ToUnsafeString(val),
})
}
})
processLogMessage(ts, fields)
fields = fields[:0]
}
return len(records), nil
}
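For manual verification, a minimal client sketch in Go (the localhost:9428 address and the payload values are illustrative assumptions, not part of this commit):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// one Datadog-style log record, in the JSON array format accepted by readLogsRequest above
	body := `[{"ddsource":"nginx","ddtags":"env:test","hostname":"127.0.0.1","message":"hello","service":"test"}]`
	resp, err := http.Post("http://localhost:9428/insert/datadog/api/v2/logs", "application/json", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // the handler replies with `{}` on success
}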


@@ -0,0 +1,117 @@
package datadog
import (
"fmt"
"strings"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)
func TestReadLogsRequestFailure(t *testing.T) {
f := func(data string) {
t.Helper()
ts := time.Now().UnixNano()
processLogMessage := func(timestamp int64, fields []logstorage.Field) {
t.Fatalf("unexpected call to processLogMessage with timestamp=%d, fields=%s", timestamp, fields)
}
rows, err := readLogsRequest(ts, []byte(data), processLogMessage)
if err == nil {
t.Fatalf("expecting non-empty error")
}
if rows != 0 {
t.Fatalf("unexpected non-zero rows=%d", rows)
}
}
f("foobar")
f(`{}`)
f(`["create":{}]`)
f(`{"create":{}}
foobar`)
}
func TestReadLogsRequestSuccess(t *testing.T) {
f := func(data string, rowsExpected int, resultExpected string) {
t.Helper()
ts := time.Now().UnixNano()
var result string
processLogMessage := func(_ int64, fields []logstorage.Field) {
a := make([]string, len(fields))
for i, f := range fields {
a[i] = fmt.Sprintf("%q:%q", f.Name, f.Value)
}
if len(result) > 0 {
result = result + "\n"
}
s := "{" + strings.Join(a, ",") + "}"
result += s
}
// Read the request without compression
rows, err := readLogsRequest(ts, []byte(data), processLogMessage)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if rows != rowsExpected {
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
}
if result != resultExpected {
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
}
}
// Verify non-empty data
data := `[
{
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"bar",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"foobar",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"baz",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"xyz",
"service":"test"
}, {
"ddsource": "nginx",
"ddtags":"tag1:value1,tag2:value2,",
"hostname":"127.0.0.1",
"message":"xyz",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":",tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"xyz",
"service":"test"
}
]`
rowsExpected := 6
resultExpected := `{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"bar","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"foobar","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"baz","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}`
f(data, rowsExpected, resultExpected)
}


@@ -2,7 +2,9 @@ package insertutils
 import (
 	"flag"
+	"fmt"
 	"net/http"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -31,6 +33,7 @@ type CommonParams struct {
 	MsgFields    []string
 	StreamFields []string
 	IgnoreFields []string
+	ExtraFields  []logstorage.Field

 	Debug           bool
 	DebugRequestURI string
@@ -45,48 +48,25 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
 		return nil, err
 	}

-	// Extract time field name from _time_field query arg or header
 	timeField := "_time"
-	if tf := r.FormValue("_time_field"); tf != "" {
-		timeField = tf
-	} else if tf = r.Header.Get("VL-Time-Field"); tf != "" {
+	if tf := httputils.GetRequestValue(r, "_time_field", "VL-Time-Field"); tf != "" {
 		timeField = tf
 	}

-	// Extract message field name from _msg_field query arg or header
-	msgField := ""
-	if msgf := r.FormValue("_msg_field"); msgf != "" {
-		msgField = msgf
-	} else if msgf = r.Header.Get("VL-Msg-Field"); msgf != "" {
-		msgField = msgf
-	}
-	var msgFields []string
-	if msgField != "" {
-		msgFields = strings.Split(msgField, ",")
-	}
-
-	streamFields := httputils.GetArray(r, "_stream_fields")
-	if len(streamFields) == 0 {
-		if sf := r.Header.Get("VL-Stream-Fields"); len(sf) > 0 {
-			streamFields = strings.Split(sf, ",")
-		}
-	}
-	ignoreFields := httputils.GetArray(r, "ignore_fields")
-	if len(ignoreFields) == 0 {
-		if f := r.Header.Get("VL-Ignore-Fields"); len(f) > 0 {
-			ignoreFields = strings.Split(f, ",")
-		}
-	}
-
-	debug := httputils.GetBool(r, "debug")
-	if !debug {
-		if dh := r.Header.Get("VL-Debug"); len(dh) > 0 {
-			hv := strings.ToLower(dh)
-			switch hv {
-			case "", "0", "f", "false", "no":
-			default:
-				debug = true
-			}
-		}
+	msgFields := httputils.GetArray(r, "_msg_field", "VL-Msg-Field")
+	streamFields := httputils.GetArray(r, "_stream_fields", "VL-Stream-Fields")
+	ignoreFields := httputils.GetArray(r, "ignore_fields", "VL-Ignore-Fields")
+
+	extraFields, err := getExtraFields(r)
+	if err != nil {
+		return nil, err
+	}
+
+	debug := false
+	if dv := httputils.GetRequestValue(r, "debug", "VL-Debug"); dv != "" {
+		debug, err = strconv.ParseBool(dv)
+		if err != nil {
+			return nil, fmt.Errorf("cannot parse debug=%q: %w", dv, err)
+		}
 	}
 	debugRequestURI := ""
@@ -102,6 +82,7 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
 		MsgFields:       msgFields,
 		StreamFields:    streamFields,
 		IgnoreFields:    ignoreFields,
+		ExtraFields:     extraFields,
 		Debug:           debug,
 		DebugRequestURI: debugRequestURI,
 		DebugRemoteAddr: debugRemoteAddr,
@@ -110,20 +91,45 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
 	return cp, nil
 }

+func getExtraFields(r *http.Request) ([]logstorage.Field, error) {
+	efs := httputils.GetArray(r, "extra_fields", "VL-Extra-Fields")
+	if len(efs) == 0 {
+		return nil, nil
+	}
+
+	extraFields := make([]logstorage.Field, len(efs))
+	for i, ef := range efs {
+		n := strings.Index(ef, "=")
+		if n <= 0 || n == len(ef)-1 {
+			return nil, fmt.Errorf(`invalid extra_field format: %q; must be in the form "field=value"`, ef)
+		}
+		extraFields[i] = logstorage.Field{
+			Name:  ef[:n],
+			Value: ef[n+1:],
+		}
+	}
+	return extraFields, nil
+}
+
 // GetCommonParamsForSyslog returns common params needed for parsing syslog messages and storing them to the given tenantID.
-func GetCommonParamsForSyslog(tenantID logstorage.TenantID) *CommonParams {
+func GetCommonParamsForSyslog(tenantID logstorage.TenantID, streamFields, ignoreFields []string, extraFields []logstorage.Field) *CommonParams {
 	// See https://docs.victoriametrics.com/victorialogs/logsql/#unpack_syslog-pipe
+	if streamFields == nil {
+		streamFields = []string{
+			"hostname",
+			"app_name",
+			"proc_id",
+		}
+	}
 	cp := &CommonParams{
 		TenantID:  tenantID,
 		TimeField: "timestamp",
 		MsgFields: []string{
 			"message",
 		},
-		StreamFields: []string{
-			"hostname",
-			"app_name",
-			"proc_id",
-		},
+		StreamFields: streamFields,
+		IgnoreFields: ignoreFields,
+		ExtraFields:  extraFields,
 	}

 	return cp
@@ -146,8 +152,6 @@ type logMessageProcessor struct {
 	stopCh        chan struct{}
 	lastFlushTime time.Time

-	tmpFields []logstorage.Field
-
 	cp *CommonParams
 	lr *logstorage.LogRows
 }
@@ -190,17 +194,6 @@ func (lmp *logMessageProcessor) AddRow(timestamp int64, fields []logstorage.Fiel
 		return
 	}

-	if *defaultMsgValue != "" && !hasMsgField(fields) {
-		// The log entry doesn't contain mandatory _msg field. Add _msg field with default value then
-		// according to https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field .
-		lmp.tmpFields = append(lmp.tmpFields[:0], fields...)
-		lmp.tmpFields = append(lmp.tmpFields, logstorage.Field{
-			Name:  "_msg",
-			Value: *defaultMsgValue,
-		})
-		fields = lmp.tmpFields
-	}
-
 	lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields)
 	if lmp.cp.Debug {
 		s := lmp.lr.GetRowString(0)
@@ -214,15 +207,6 @@ func (lmp *logMessageProcessor) AddRow(timestamp int64, fields []logstorage.Fiel
 	}
 }

-func hasMsgField(fields []logstorage.Field) bool {
-	for _, f := range fields {
-		if f.Name == "_msg" {
-			return len(f.Value) > 0
-		}
-	}
-	return false
-}
-
 // flushLocked must be called under locked lmp.mu.
 func (lmp *logMessageProcessor) flushLocked() {
 	lmp.lastFlushTime = time.Now()
@@ -244,7 +228,7 @@ func (lmp *logMessageProcessor) MustClose() {
 //
 // MustClose() must be called on the returned LogMessageProcessor when it is no longer needed.
 func (cp *CommonParams) NewLogMessageProcessor() LogMessageProcessor {
-	lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
+	lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields, cp.ExtraFields, *defaultMsgValue)
 	lmp := &logMessageProcessor{
 		cp: cp,
 		lr: lr,
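As a usage sketch for the extra_fields parsing added above (names and values illustrative): a request carrying ?extra_fields=env=prod&extra_fields=datacenter=dc1 (or the same values in the VL-Extra-Fields header) makes getExtraFields return

	logstorage.Field{Name: "env", Value: "prod"}
	logstorage.Field{Name: "datacenter", Value: "dc1"}

Only the first '=' splits each pair, so extra_fields=note=a=b yields {Name: "note", Value: "a=b"}; an empty name or empty value is rejected with the error above.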


@@ -8,6 +8,9 @@ import (
 	"strconv"
 	"time"

+	"github.com/VictoriaMetrics/metrics"
+	"github.com/valyala/fastjson"
+
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
@@ -15,8 +18,6 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
-	"github.com/VictoriaMetrics/metrics"
-	"github.com/valyala/fastjson"
 )

 var parserPool fastjson.ParserPool
@@ -56,7 +57,7 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
 	n, err := parseJSONRequest(data, lmp)
 	lmp.MustClose()
 	if err != nil {
-		httpserver.Errorf(w, r, "cannot parse Loki json request: %s", err)
+		httpserver.Errorf(w, r, "cannot parse Loki json request: %s; data=%s", err, data)
 		return
 	}
@@ -84,7 +85,7 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
 	streamsV := v.Get("streams")
 	if streamsV == nil {
-		return 0, fmt.Errorf("missing `streams` item in the parsed JSON: %q", v)
+		return 0, fmt.Errorf("missing `streams` item in the parsed JSON")
 	}
 	streams, err := streamsV.Array()
 	if err != nil {
@@ -107,9 +108,6 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
 			labels = o
 		}
 		labels.Visit(func(k []byte, v *fastjson.Value) {
-			if err != nil {
-				return
-			}
 			vStr, errLocal := v.StringBytes()
 			if errLocal != nil {
 				err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
@@ -127,7 +125,7 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
 		// populate messages from `values` array
 		linesV := stream.Get("values")
 		if linesV == nil {
-			return rowsIngested, fmt.Errorf("missing `values` item in the parsed JSON %q", stream)
+			return rowsIngested, fmt.Errorf("missing `values` item in the parsed `stream` object %q", stream)
 		}
 		lines, err := linesV.Array()
 		if err != nil {
@@ -140,8 +138,8 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
 			if err != nil {
 				return rowsIngested, fmt.Errorf("unexpected contents of `values` item; want array; got %q", line)
 			}
-			if len(lineA) != 2 {
-				return rowsIngested, fmt.Errorf("unexpected number of values in `values` item array %q; got %d want 2", line, len(lineA))
+			if len(lineA) < 2 || len(lineA) > 3 {
+				return rowsIngested, fmt.Errorf("unexpected number of values in `values` item array %q; got %d want 2 or 3", line, len(lineA))
 			}

 			// parse timestamp
@@ -167,6 +165,30 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
 				Name:  "_msg",
 				Value: bytesutil.ToUnsafeString(msg),
 			})
+
+			// parse structured metadata - see https://grafana.com/docs/loki/latest/reference/loki-http-api/#ingest-logs
+			if len(lineA) > 2 {
+				structuredMetadata, err := lineA[2].Object()
+				if err != nil {
+					return rowsIngested, fmt.Errorf("unexpected structured metadata type for %q; want JSON object", lineA[2])
+				}
+
+				structuredMetadata.Visit(func(k []byte, v *fastjson.Value) {
+					vStr, errLocal := v.StringBytes()
+					if errLocal != nil {
+						err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
+						return
+					}
+
+					fields = append(fields, logstorage.Field{
+						Name:  bytesutil.ToUnsafeString(k),
+						Value: bytesutil.ToUnsafeString(vStr),
+					})
+				})
+				if err != nil {
+					return rowsIngested, fmt.Errorf("error when parsing `structuredMetadata` object: %w", err)
+				}
+			}
 			lmp.AddRow(ts, fields)
 		}
 		rowsIngested += len(lines)
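For reference, a Loki JSON push payload whose values entries carry the optional structured-metadata object as the third element looks like this (label and field names illustrative):

{"streams":[{"stream":{"app":"nginx"},"values":[["1577836800000000001","GET /index.html 200",{"trace_id":"abc123"}]]}]}

The metadata keys are appended as regular log fields next to _msg, as the tests below demonstrate.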


@@ -45,13 +45,19 @@ func TestParseJSONRequest_Failure(t *testing.T) {
 	// Invalid length of `values` individual item
 	f(`{"streams":[{"values":[[]]}]}`)
 	f(`{"streams":[{"values":[["123"]]}]}`)
-	f(`{"streams":[{"values":[["123","456","789"]]}]}`)
+	f(`{"streams":[{"values":[["123","456","789","8123"]]}]}`)

 	// Invalid type for timestamp inside `values` individual item
 	f(`{"streams":[{"values":[[123,"456"]}]}`)

 	// Invalid type for log message
 	f(`{"streams":[{"values":[["123",1234]]}]}`)
+
+	// invalid structured metadata type
+	f(`{"streams":[{"values":[["1577836800000000001", "foo bar", ["metadata_1", "md_value"]]]}]}`)
+
+	// structured metadata with unexpected value type
+	f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {"metadata_1": 1}]] }]}`)
 }

 func TestParseJSONRequest_Success(t *testing.T) {
@@ -116,4 +122,8 @@ func TestParseJSONRequest_Success(t *testing.T) {
 	}`, []int64{1577836800000000001, 1577836900005000002, 1877836900005000002}, `{"foo":"bar","a":"b","_msg":"foo bar"}
{"foo":"bar","a":"b","_msg":"abc"}
{"x":"y","_msg":"yx"}`)
+
+	// values with metadata
+	f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {"metadata_1": "md_value"}]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar","metadata_1":"md_value"}`)
+	f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {}]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
 }


@@ -17,7 +17,7 @@ var mp easyproto.MarshalerPool

 // PushRequest represents Loki PushRequest
 //
-// See https://github.com/grafana/loki/blob/4220737a52da7ab6c9346b12d5a5d7bedbcd641d/pkg/push/push.proto#L14C1-L14C20
+// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L14
 type PushRequest struct {
 	Streams []Stream
@@ -87,7 +87,7 @@ func (pr *PushRequest) unmarshalProtobuf(entriesBuf []Entry, labelPairBuf []Labe
 // Stream represents Loki stream.
 //
-// See https://github.com/grafana/loki/blob/4220737a52da7ab6c9346b12d5a5d7bedbcd641d/pkg/push/push.proto#L23
+// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L23
 type Stream struct {
 	Labels  string
 	Entries []Entry
@@ -139,7 +139,7 @@ func (s *Stream) unmarshalProtobuf(entriesBuf []Entry, labelPairBuf []LabelPair,
 // Entry represents Loki entry.
 //
-// See https://github.com/grafana/loki/blob/4220737a52da7ab6c9346b12d5a5d7bedbcd641d/pkg/push/push.proto#L38
+// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L38
 type Entry struct {
 	Timestamp time.Time
 	Line      string
@@ -203,7 +203,7 @@ func (e *Entry) unmarshalProtobuf(labelPairBuf []LabelPair, src []byte) ([]Label
 // LabelPair represents Loki label pair.
 //
-// See https://github.com/grafana/loki/blob/4220737a52da7ab6c9346b12d5a5d7bedbcd641d/pkg/push/push.proto#L33
+// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L33
 type LabelPair struct {
 	Name  string
 	Value string


@@ -4,6 +4,7 @@ import (
 	"net/http"
 	"strings"

+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/datadog"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/journald"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/jsonline"
@@ -25,6 +26,7 @@ func Stop() {
 // RequestHandler handles insert requests for VictoriaLogs
 func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	path := r.URL.Path
+
 	if !strings.HasPrefix(path, "/insert/") {
 		// Skip requests, which do not start with /insert/, since these aren't our requests.
 		return false
@@ -49,6 +51,9 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	case strings.HasPrefix(path, "/journald/"):
 		path = strings.TrimPrefix(path, "/journald")
 		return journald.RequestHandler(path, w, r)
+	case strings.HasPrefix(path, "/datadog/"):
+		path = strings.TrimPrefix(path, "/datadog")
+		return datadog.RequestHandler(path, w, r)
 	default:
 		return false
 	}
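Routing sketch for the new case (assuming the /insert prefix is trimmed earlier in this handler, which the /datadog/ prefix check implies): a request to /insert/datadog/api/v2/logs arrives here as /datadog/api/v2/logs, the /datadog prefix is trimmed, and datadog.RequestHandler receives /api/v2/logs, one of the two paths handled by the new package above.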


@@ -3,11 +3,13 @@ package syslog
 import (
 	"bufio"
 	"crypto/tls"
+	"encoding/json"
 	"errors"
 	"flag"
 	"fmt"
 	"io"
 	"net"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -35,10 +37,25 @@ var (
 	syslogTimezone = flag.String("syslog.timezone", "Local", "Timezone to use when parsing timestamps in RFC3164 syslog messages. Timezone must be a valid IANA Time Zone. "+
 		"For example: America/New_York, Europe/Berlin, Etc/GMT+3 . See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")

-	syslogTenantIDTCP = flagutil.NewArrayString("syslog.tenantID.tcp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.tcp. "+
-		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
-	syslogTenantIDUDP = flagutil.NewArrayString("syslog.tenantID.udp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.udp. "+
-		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
+	streamFieldsTCP = flagutil.NewArrayString("syslog.streamFields.tcp", "Fields to use as log stream labels for logs ingested via the corresponding -syslog.listenAddr.tcp. "+
+		`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#stream-fields`)
+	streamFieldsUDP = flagutil.NewArrayString("syslog.streamFields.udp", "Fields to use as log stream labels for logs ingested via the corresponding -syslog.listenAddr.udp. "+
+		`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#stream-fields`)
+	ignoreFieldsTCP = flagutil.NewArrayString("syslog.ignoreFields.tcp", "Fields to ignore at logs ingested via the corresponding -syslog.listenAddr.tcp. "+
+		`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields`)
+	ignoreFieldsUDP = flagutil.NewArrayString("syslog.ignoreFields.udp", "Fields to ignore at logs ingested via the corresponding -syslog.listenAddr.udp. "+
+		`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields`)
+	extraFieldsTCP = flagutil.NewArrayString("syslog.extraFields.tcp", "Fields to add to logs ingested via the corresponding -syslog.listenAddr.tcp. "+
+		`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields`)
+	extraFieldsUDP = flagutil.NewArrayString("syslog.extraFields.udp", "Fields to add to logs ingested via the corresponding -syslog.listenAddr.udp. "+
+		`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields`)
+
+	tenantIDTCP = flagutil.NewArrayString("syslog.tenantID.tcp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.tcp. "+
+		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#multitenancy")
+	tenantIDUDP = flagutil.NewArrayString("syslog.tenantID.udp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.udp. "+
+		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#multitenancy")

 	listenAddrTCP = flagutil.NewArrayString("syslog.listenAddr.tcp", "Comma-separated list of TCP addresses to listen to for Syslog messages. "+
 		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
@@ -150,7 +167,7 @@ func runUDPListener(addr string, argIdx int) {
 		logger.Fatalf("cannot start UDP syslog server at %q: %s", addr, err)
 	}

-	tenantIDStr := syslogTenantIDUDP.GetOptionalArg(argIdx)
+	tenantIDStr := tenantIDUDP.GetOptionalArg(argIdx)
 	tenantID, err := logstorage.ParseTenantID(tenantIDStr)
 	if err != nil {
 		logger.Fatalf("cannot parse -syslog.tenantID.udp=%q for -syslog.listenAddr.udp=%q: %s", tenantIDStr, addr, err)
@@ -161,9 +178,27 @@ func runUDPListener(addr string, argIdx int) {

 	useLocalTimestamp := useLocalTimestampUDP.GetOptionalArg(argIdx)

+	streamFieldsStr := streamFieldsUDP.GetOptionalArg(argIdx)
+	streamFields, err := parseFieldsList(streamFieldsStr)
+	if err != nil {
+		logger.Fatalf("cannot parse -syslog.streamFields.udp=%q for -syslog.listenAddr.udp=%q: %s", streamFieldsStr, addr, err)
+	}
+
+	ignoreFieldsStr := ignoreFieldsUDP.GetOptionalArg(argIdx)
+	ignoreFields, err := parseFieldsList(ignoreFieldsStr)
+	if err != nil {
+		logger.Fatalf("cannot parse -syslog.ignoreFields.udp=%q for -syslog.listenAddr.udp=%q: %s", ignoreFieldsStr, addr, err)
+	}
+
+	extraFieldsStr := extraFieldsUDP.GetOptionalArg(argIdx)
+	extraFields, err := parseExtraFields(extraFieldsStr)
+	if err != nil {
+		logger.Fatalf("cannot parse -syslog.extraFields.udp=%q for -syslog.listenAddr.udp=%q: %s", extraFieldsStr, addr, err)
+	}
+
 	doneCh := make(chan struct{})
 	go func() {
-		serveUDP(ln, tenantID, compressMethod, useLocalTimestamp)
+		serveUDP(ln, tenantID, compressMethod, useLocalTimestamp, streamFields, ignoreFields, extraFields)
 		close(doneCh)
 	}()
@@ -193,7 +228,7 @@ func runTCPListener(addr string, argIdx int) {
 		logger.Fatalf("syslog: cannot start TCP listener at %s: %s", addr, err)
 	}

-	tenantIDStr := syslogTenantIDTCP.GetOptionalArg(argIdx)
+	tenantIDStr := tenantIDTCP.GetOptionalArg(argIdx)
 	tenantID, err := logstorage.ParseTenantID(tenantIDStr)
 	if err != nil {
 		logger.Fatalf("cannot parse -syslog.tenantID.tcp=%q for -syslog.listenAddr.tcp=%q: %s", tenantIDStr, addr, err)
@@ -204,9 +239,27 @@ func runTCPListener(addr string, argIdx int) {

 	useLocalTimestamp := useLocalTimestampTCP.GetOptionalArg(argIdx)

+	streamFieldsStr := streamFieldsTCP.GetOptionalArg(argIdx)
+	streamFields, err := parseFieldsList(streamFieldsStr)
+	if err != nil {
+		logger.Fatalf("cannot parse -syslog.streamFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", streamFieldsStr, addr, err)
+	}
+
+	ignoreFieldsStr := ignoreFieldsTCP.GetOptionalArg(argIdx)
+	ignoreFields, err := parseFieldsList(ignoreFieldsStr)
+	if err != nil {
+		logger.Fatalf("cannot parse -syslog.ignoreFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", ignoreFieldsStr, addr, err)
+	}
+
+	extraFieldsStr := extraFieldsTCP.GetOptionalArg(argIdx)
+	extraFields, err := parseExtraFields(extraFieldsStr)
+	if err != nil {
+		logger.Fatalf("cannot parse -syslog.extraFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", extraFieldsStr, addr, err)
+	}
+
 	doneCh := make(chan struct{})
 	go func() {
-		serveTCP(ln, tenantID, compressMethod, useLocalTimestamp)
+		serveTCP(ln, tenantID, compressMethod, useLocalTimestamp, streamFields, ignoreFields, extraFields)
 		close(doneCh)
 	}()
@@ -228,7 +281,7 @@ func checkCompressMethod(compressMethod, addr, protocol string) {
 	}
 }

-func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool) {
+func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool, streamFields, ignoreFields []string, extraFields []logstorage.Field) {
 	gomaxprocs := cgroup.AvailableCPUs()
 	var wg sync.WaitGroup
 	localAddr := ln.LocalAddr()
@@ -236,7 +289,7 @@ func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod st
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			cp := insertutils.GetCommonParamsForSyslog(tenantID)
+			cp := insertutils.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, extraFields)
 			var bb bytesutil.ByteBuffer
 			bb.B = bytesutil.ResizeNoCopyNoOverallocate(bb.B, 64*1024)
 			for {
@@ -270,7 +323,7 @@ func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod st
 	wg.Wait()
 }

-func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool) {
+func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool, streamFields, ignoreFields []string, extraFields []logstorage.Field) {
 	var cm ingestserver.ConnsMap
 	cm.Init("syslog")
@@ -300,7 +353,7 @@ func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod stri
 		wg.Add(1)
 		go func() {
-			cp := insertutils.GetCommonParamsForSyslog(tenantID)
+			cp := insertutils.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, extraFields)
 			if err := processStream(c, compressMethod, useLocalTimestamp, cp); err != nil {
 				logger.Errorf("syslog: cannot process TCP data at %q: %s", addr, err)
 			}
@@ -531,3 +584,35 @@ var (
 	udpRequestsTotal = metrics.NewCounter(`vl_udp_reqests_total{type="syslog"}`)
 	udpErrorsTotal   = metrics.NewCounter(`vl_udp_errors_total{type="syslog"}`)
 )
+
+func parseFieldsList(s string) ([]string, error) {
+	if s == "" {
+		return nil, nil
+	}
+	var a []string
+	err := json.Unmarshal([]byte(s), &a)
+	return a, err
+}
+
+func parseExtraFields(s string) ([]logstorage.Field, error) {
+	if s == "" {
+		return nil, nil
+	}
+	var m map[string]string
+	if err := json.Unmarshal([]byte(s), &m); err != nil {
+		return nil, err
+	}
+	fields := make([]logstorage.Field, 0, len(m))
+	for k, v := range m {
+		fields = append(fields, logstorage.Field{
+			Name:  k,
+			Value: v,
+		})
+	}
+	sort.Slice(fields, func(i, j int) bool {
+		return fields[i].Name < fields[j].Name
+	})
+	return fields, nil
+}
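A configuration sketch for the new per-listener flags (all values illustrative): parseFieldsList expects a JSON array of strings, parseExtraFields a JSON object, e.g.

/path/to/victoria-logs \
  -syslog.listenAddr.tcp=:514 \
  -syslog.streamFields.tcp='["hostname","app_name"]' \
  -syslog.ignoreFields.tcp='["proc_id"]' \
  -syslog.extraFields.tcp='{"datacenter":"dc1","env":"prod"}'

Note that parseExtraFields sorts the resulting fields by name, so ingestion order is deterministic regardless of JSON key order.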


@@ -17,7 +17,7 @@ func isTerminal() bool {
 	return isatty.IsTerminal(os.Stdout.Fd()) && isatty.IsTerminal(os.Stderr.Fd())
 }

-func readWithLess(r io.Reader) error {
+func readWithLess(r io.Reader, wrapLongLines bool) error {
 	if !isTerminal() {
 		// Just write everything to stdout if no terminal is available.
 		_, err := io.Copy(os.Stdout, r)
@@ -48,7 +48,11 @@ func readWithLess(r io.Reader) error {
 	if err != nil {
 		return fmt.Errorf("cannot find 'less' command: %w", err)
 	}
-	p, err := os.StartProcess(path, []string{"less", "-F", "-X"}, &os.ProcAttr{
+	opts := []string{"less", "-F", "-X"}
+	if !wrapLongLines {
+		opts = append(opts, "-S")
+	}
+	p, err := os.StartProcess(path, opts, &os.ProcAttr{
 		Env:   append(os.Environ(), "LESSCHARSET=utf-8"),
 		Files: []*os.File{pr, os.Stdout, os.Stderr},
 	})
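For context on the less options used here: -F quits automatically when the output fits on one screen, -X skips the terminal init/deinit sequences so the output stays visible after exit, and -S chops long lines instead of wrapping them. Wrapping is therefore opt-in via the new wrapLongLines parameter.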


@@ -91,6 +91,7 @@ func runReadlineLoop(rl *readline.Instance, incompleteLine *string) {
 	}

 	outputMode := outputModeJSONMultiline
+	wrapLongLines := false
 	s := ""
 	for {
 		line, err := rl.ReadLine()
@@ -99,7 +100,7 @@ func runReadlineLoop(rl *readline.Instance, incompleteLine *string) {
 		case io.EOF:
 			if s != "" {
 				// This is non-interactive query execution.
-				executeQuery(context.Background(), rl, s, outputMode)
+				executeQuery(context.Background(), rl, s, outputMode, wrapLongLines)
 			}
 			return
 		case readline.ErrInterrupt:
@@ -163,6 +164,18 @@ func runReadlineLoop(rl *readline.Instance, incompleteLine *string) {
 			s = ""
 			continue
 		}
+		if s == `\wrap_long_lines` {
+			if wrapLongLines {
+				wrapLongLines = false
+				fmt.Fprintf(rl, "wrapping of long lines is disabled\n")
+			} else {
+				wrapLongLines = true
+				fmt.Fprintf(rl, "wrapping of long lines is enabled\n")
+			}
+			historyLines = pushToHistory(rl, historyLines, s)
+			s = ""
+			continue
+		}
 		if line != "" && !strings.HasSuffix(line, ";") {
 			// Assume the query is incomplete and allow the user finishing the query on the next line
 			s += "\n"
@@ -172,7 +185,7 @@ func runReadlineLoop(rl *readline.Instance, incompleteLine *string) {
 		// Execute the query
 		ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
-		executeQuery(ctx, rl, s, outputMode)
+		executeQuery(ctx, rl, s, outputMode, wrapLongLines)
 		cancel()

 		historyLines = pushToHistory(rl, historyLines, s)
@@ -259,13 +272,14 @@ func printCommandsHelp(w io.Writer) {
 \m - multiline json output mode
 \c - compact output
 \logfmt - logfmt output mode
+\wrap_long_lines - toggles wrapping long lines
 \tail <query> - live tail <query> results

 See https://docs.victoriametrics.com/victorialogs/querying/vlogscli/ for more details
 `)
 }

-func executeQuery(ctx context.Context, output io.Writer, qStr string, outputMode outputMode) {
+func executeQuery(ctx context.Context, output io.Writer, qStr string, outputMode outputMode, wrapLongLines bool) {
 	if strings.HasPrefix(qStr, `\tail `) {
 		tailQuery(ctx, output, qStr, outputMode)
 		return
@@ -279,7 +293,7 @@ func executeQuery(ctx context.Context, output io.Writer, qStr string, outputMode
 		_ = respBody.Close()
 	}()

-	if err := readWithLess(respBody); err != nil {
+	if err := readWithLess(respBody, wrapLongLines); err != nil {
 		fmt.Fprintf(output, "error when reading query response: %s\n", err)
 		return
 	}


@@ -73,7 +73,6 @@ func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Requ
 	}

 	// Prepare the query for hits count.
-	q.Optimize()
 	q.DropAllPipes()
 	q.AddCountByTimePipe(int64(step), int64(offset), fields)
@@ -204,7 +203,6 @@ func ProcessFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *htt
 	}

 	// Obtain field names for the given query
-	q.Optimize()
 	fieldNames, err := vlstorage.GetFieldNames(ctx, tenantIDs, q)
 	if err != nil {
 		httpserver.Errorf(w, r, "cannot obtain field names: %s", err)
@@ -244,7 +242,6 @@ func ProcessFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *ht
 	}

 	// Obtain unique values for the given field
-	q.Optimize()
 	values, err := vlstorage.GetFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
 	if err != nil {
 		httpserver.Errorf(w, r, "cannot obtain values for field %q: %s", fieldName, err)
@@ -267,7 +264,6 @@ func ProcessStreamFieldNamesRequest(ctx context.Context, w http.ResponseWriter,
 	}

 	// Obtain stream field names for the given query
-	q.Optimize()
 	names, err := vlstorage.GetStreamFieldNames(ctx, tenantIDs, q)
 	if err != nil {
 		httpserver.Errorf(w, r, "cannot obtain stream field names: %s", err)
@@ -306,7 +302,6 @@ func ProcessStreamFieldValuesRequest(ctx context.Context, w http.ResponseWriter,
 	}

 	// Obtain stream field values for the given query and the given fieldName
-	q.Optimize()
 	values, err := vlstorage.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
 	if err != nil {
 		httpserver.Errorf(w, r, "cannot obtain stream field values: %s", err)
@@ -338,7 +333,6 @@ func ProcessStreamIDsRequest(ctx context.Context, w http.ResponseWriter, r *http
 	}

 	// Obtain streamIDs for the given query
-	q.Optimize()
 	streamIDs, err := vlstorage.GetStreamIDs(ctx, tenantIDs, q, uint64(limit))
 	if err != nil {
 		httpserver.Errorf(w, r, "cannot obtain stream_ids: %s", err)
@@ -370,7 +364,6 @@ func ProcessStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.R
 	}

 	// Obtain streams for the given query
-	q.Optimize()
 	streams, err := vlstorage.GetStreams(ctx, tenantIDs, q, uint64(limit))
 	if err != nil {
 		httpserver.Errorf(w, r, "cannot obtain streams: %s", err)
@@ -398,7 +391,6 @@ func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.
 			"see https://docs.victoriametrics.com/victorialogs/querying/#live-tailing for details", q)
 		return
 	}
-	q.Optimize()

 	refreshIntervalMsecs, err := httputils.GetDuration(r, "refresh_interval", 1000)
 	if err != nil {
@@ -407,13 +399,28 @@ func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.
 	}
 	refreshInterval := time.Millisecond * time.Duration(refreshIntervalMsecs)

+	startOffsetMsecs, err := httputils.GetDuration(r, "start_offset", 5*1000)
+	if err != nil {
+		httpserver.Errorf(w, r, "%s", err)
+		return
+	}
+	startOffset := startOffsetMsecs * 1e6
+
+	offsetMsecs, err := httputils.GetDuration(r, "offset", 1000)
+	if err != nil {
+		httpserver.Errorf(w, r, "%s", err)
+		return
+	}
+	offset := offsetMsecs * 1e6
+
 	ctxWithCancel, cancel := context.WithCancel(ctx)
 	tp := newTailProcessor(cancel)

 	ticker := time.NewTicker(refreshInterval)
 	defer ticker.Stop()

-	end := time.Now().UnixNano()
+	end := time.Now().UnixNano() - offset
+	start := end - startOffset
 	doneCh := ctxWithCancel.Done()
 	flusher, ok := w.(http.Flusher)
 	if !ok {
@@ -421,14 +428,7 @@ func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.
 	}

 	qOrig := q
 	for {
-		start := end - tailOffsetNsecs
-		end = time.Now().UnixNano()
-		q = qOrig.Clone(end)
-		q.AddTimeFilter(start, end)
-		// q.Optimize() call is needed for converting '*' into filterNoop.
-		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6785#issuecomment-2358547733
-		q.Optimize()
+		q = qOrig.CloneWithTimeFilter(end, start, end)
 		if err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, tp.writeBlock); err != nil {
 			httpserver.Errorf(w, r, "cannot execute tail query [%s]: %s", q, err)
 			return
@@ -447,6 +447,8 @@ func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.
 		case <-doneCh:
 			return
 		case <-ticker.C:
+			start = end - tailOffsetNsecs
+			end = time.Now().UnixNano() - offset
 		}
 	}
 }
@@ -605,8 +607,6 @@ func ProcessStatsQueryRangeRequest(ctx context.Context, w http.ResponseWriter, r
 		return
 	}

-	q.Optimize()
-
 	m := make(map[string]*statsSeries)
 	var mLock sync.Mutex
@@ -717,8 +717,6 @@ func ProcessStatsQueryRequest(ctx context.Context, w http.ResponseWriter, r *htt
 		return
 	}

-	q.Optimize()
-
 	var rows []statsRow
 	var rowsLock sync.Mutex
@@ -818,7 +816,6 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
 		q.AddPipeLimit(uint64(limit))
 	}
-	q.Optimize()

 	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
 		if len(columns) == 0 || len(columns[0].Values) == 0 {
@@ -888,7 +885,6 @@ type row struct {
 func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
 	limitUpper := 2 * limit
 	q.AddPipeLimit(uint64(limitUpper))
-	q.Optimize()

 	rows, err := getQueryResultsWithLimit(ctx, tenantIDs, q, limitUpper)
 	if err != nil {
@@ -908,11 +904,7 @@ func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID,
 	qOrig := q
 	for {
 		timestamp := qOrig.GetTimestamp()
-		q = qOrig.Clone(timestamp)
-		q.AddTimeFilter(start, end)
-		// q.Optimize() call is needed for converting '*' into filterNoop.
-		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6785#issuecomment-2358547733
-		q.Optimize()
+		q = qOrig.CloneWithTimeFilter(timestamp, start, end)
 		rows, err := getQueryResultsWithLimit(ctx, tenantIDs, q, limitUpper)
 		if err != nil {
 			return nil, err
@@ -1016,14 +1008,29 @@ func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID,
 	}
 	tenantIDs := []logstorage.TenantID{tenantID}

+	// Parse optional start and end args
+	start, okStart, err := getTimeNsec(r, "start")
+	if err != nil {
+		return nil, nil, err
+	}
+	end, okEnd, err := getTimeNsec(r, "end")
+	if err != nil {
+		return nil, nil, err
+	}
+
 	// Parse optional time arg
 	timestamp, okTime, err := getTimeNsec(r, "time")
 	if err != nil {
 		return nil, nil, err
 	}
 	if !okTime {
-		// If time arg is missing, then evaluate query at the current timestamp
-		timestamp = time.Now().UnixNano()
+		// If time arg is missing, then evaluate query either at the end timestamp (if it is set)
+		// or at the current timestamp (if the end query arg isn't set)
+		if okEnd {
+			timestamp = end
+		} else {
+			timestamp = time.Now().UnixNano()
+		}
 	}

 	// decrease timestamp by one nanosecond in order to avoid capturing logs belonging
@@ -1037,16 +1044,8 @@ func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID,
 		return nil, nil, fmt.Errorf("cannot parse query [%s]: %s", qStr, err)
 	}

-	// Parse optional start and end args
-	start, okStart, err := getTimeNsec(r, "start")
-	if err != nil {
-		return nil, nil, err
-	}
-	end, okEnd, err := getTimeNsec(r, "end")
-	if err != nil {
-		return nil, nil, err
-	}
 	if okStart || okEnd {
+		// Add _time:[start, end] filter if start or end args were set.
 		if !okStart {
 			start = math.MinInt64
 		}
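A hypothetical live-tail request using the new knobs (path and values illustrative): GET /select/logsql/tail?query=error&start_offset=10000&offset=2000 starts the tail window 10 seconds in the past and keeps it 2 seconds behind the current time. Like refresh_interval, both new args are parsed as millisecond durations and then converted to nanoseconds via the *1e6 multiplication above.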


@@ -1,13 +1,13 @@
 {
   "files": {
     "main.css": "./static/css/main.faf86aa5.css",
-    "main.js": "./static/js/main.64aea685.js",
+    "main.js": "./static/js/main.b204330a.js",
     "static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
     "static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
     "index.html": "./index.html"
   },
   "entrypoints": [
     "static/css/main.faf86aa5.css",
-    "static/js/main.64aea685.js"
+    "static/js/main.b204330a.js"
   ]
 }


@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.64aea685.js"></script><link href="./static/css/main.faf86aa5.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.b204330a.js"></script><link href="./static/css/main.faf86aa5.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -270,6 +270,9 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 	// tear down vmstorage and clean the data dir
 	defer tearDown()

+	if tg.Interval == nil {
+		tg.Interval = promutils.NewDuration(evalInterval)
+	}
 	err := writeInputSeries(tg.InputSeries, tg.Interval, testStartTime, testPromWriteHTTPPath)
 	if err != nil {
 		return []error{err}


@@ -71,7 +71,7 @@ func (t *Type) ValidateExpr(expr string) error {
 			return fmt.Errorf("bad prometheus expr: %q, err: %w", expr, err)
 		}
 	case "vlogs":
-		if _, err := logstorage.ParseStatsQuery(expr); err != nil {
+		if _, err := logstorage.ParseStatsQuery(expr, 0); err != nil {
 			return fmt.Errorf("bad LogsQL expr: %q, err: %w", expr, err)
 		}
 	default:

View file

@ -10,6 +10,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
) )
@ -221,6 +222,9 @@ func setIntervalAsTimeFilter(dType, expr string) bool {
if dType != "vlogs" { if dType != "vlogs" {
return false return false
} }
q, _ := logstorage.ParseStatsQuery(expr) q, err := logstorage.ParseStatsQuery(expr, 0)
return !q.ContainAnyTimeFilter() if err != nil {
logger.Panicf("BUG: the LogsQL query must be valid here; got error: %s; query=[%s]", err, expr)
}
return !q.HasGlobalTimeFilter()
} }
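The parse-or-panic pattern above relies on expressions having been validated by ValidateExpr at config-load time, so a parse failure during evaluation indicates a bug rather than bad user input. A minimal sketch of that contract, reusing the ParseStatsQuery call from the hunk above (the helper name and the *logstorage.Query return type are assumptions):
func mustParseStatsQuery(expr string) *logstorage.Query {
	// the expression was validated when the config was loaded,
	// so a failure here is a programming error
	q, err := logstorage.ParseStatsQuery(expr, 0)
	if err != nil {
		logger.Panicf("BUG: expr must be valid at this point; got error: %s; expr=[%s]", err, expr)
	}
	return q
}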

View file

@ -462,13 +462,17 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *
// Slow path - select other backend urls. // Slow path - select other backend urls.
n := atomicCounter.Add(1) - 1 n := atomicCounter.Add(1) - 1
buMin := bus[n%uint32(len(bus))]
for i := uint32(0); i < uint32(len(bus)); i++ { for i := uint32(0); i < uint32(len(bus)); i++ {
idx := (n + i) % uint32(len(bus)) idx := (n + i) % uint32(len(bus))
bu := bus[idx] bu := bus[idx]
if bu.isBroken() { if bu.isBroken() {
continue continue
} }
if buMin.isBroken() {
// verify that buMin isn't set as broken
buMin = bu
}
if bu.concurrentRequests.Load() == 0 { if bu.concurrentRequests.Load() == 0 {
// Fast path - return the backend with zero concurrently executed requests. // Fast path - return the backend with zero concurrently executed requests.
// Do not use CompareAndSwap() instead of Load(), since it is much slower on systems with many CPU cores. // Do not use CompareAndSwap() instead of Load(), since it is much slower on systems with many CPU cores.
@ -478,7 +482,6 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *
} }
// Slow path - return the backend with the minimum number of concurrently executed requests. // Slow path - return the backend with the minimum number of concurrently executed requests.
buMin := bus[n%uint32(len(bus))]
minRequests := buMin.concurrentRequests.Load() minRequests := buMin.concurrentRequests.Load()
for _, bu := range bus { for _, bu := range bus {
if bu.isBroken() { if bu.isBroken() {
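To make the fix concrete: a self-contained sketch of least-loaded selection with broken-backend handling, using simplified stand-in types rather than the actual vmauth structures. The seed candidate is re-assigned to the first healthy backend encountered, so a broken seed can never be returned while a healthy backend exists:
package main

import "sync/atomic"

type backend struct {
	broken             atomic.Bool
	concurrentRequests atomic.Int64
}

// pickLeastLoaded returns the healthy backend with the fewest concurrent
// requests, scanning the slice starting from offset n.
func pickLeastLoaded(bus []*backend, n uint32) *backend {
	buMin := bus[n%uint32(len(bus))]
	for i := uint32(0); i < uint32(len(bus)); i++ {
		bu := bus[(n+i)%uint32(len(bus))]
		if bu.broken.Load() {
			continue
		}
		if buMin.broken.Load() {
			// replace the broken seed with the first healthy backend
			buMin = bu
		}
		if bu.concurrentRequests.Load() < buMin.concurrentRequests.Load() {
			buMin = bu
		}
	}
	return buMin
}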

View file

@ -777,6 +777,28 @@ func TestGetLeastLoadedBackendURL(t *testing.T) {
fn(7, 7, 7) fn(7, 7, 7)
} }
func TestBrokenBackend(t *testing.T) {
up := mustParseURLs([]string{
"http://node1:343",
"http://node2:343",
"http://node3:343",
})
up.loadBalancingPolicy = "least_loaded"
pbus := up.bus.Load()
bus := *pbus
// explicitly mark one of the backends as broken
bus[1].setBroken()
// a broken backend should never be returned while there are healthy backends
for i := 0; i < 1e3; i++ {
b := up.getBackendURL()
if b.isBroken() {
t.Fatalf("unexpected broken backend %q", b.url)
}
}
}
func getRegexs(paths []string) []*Regex { func getRegexs(paths []string) []*Regex {
var sps []*Regex var sps []*Regex
for _, path := range paths { for _, path := range paths {

View file

@ -51,30 +51,31 @@ type Series struct {
Measurement string Measurement string
Field string Field string
LabelPairs []LabelPair LabelPairs []LabelPair
// EmptyTags contains tags in measurement whose value must be empty.
EmptyTags []string
} }
var valueEscaper = strings.NewReplacer(`\`, `\\`, `'`, `\'`) var valueEscaper = strings.NewReplacer(`\`, `\\`, `'`, `\'`)
func (s Series) fetchQuery(timeFilter string) string { func (s Series) fetchQuery(timeFilter string) string {
f := &strings.Builder{} conditions := make([]string, 0, len(s.LabelPairs)+len(s.EmptyTags))
fmt.Fprintf(f, "select %q from %q", s.Field, s.Measurement) for _, pair := range s.LabelPairs {
if len(s.LabelPairs) > 0 || len(timeFilter) > 0 { conditions = append(conditions, fmt.Sprintf("%q::tag='%s'", pair.Name, valueEscaper.Replace(pair.Value)))
f.WriteString(" where")
} }
for i, pair := range s.LabelPairs { for _, label := range s.EmptyTags {
pairV := valueEscaper.Replace(pair.Value) conditions = append(conditions, fmt.Sprintf("%q::tag=''", label))
fmt.Fprintf(f, " %q::tag='%s'", pair.Name, pairV)
if i != len(s.LabelPairs)-1 {
f.WriteString(" and")
}
} }
if len(timeFilter) > 0 { if len(timeFilter) > 0 {
if len(s.LabelPairs) > 0 { conditions = append(conditions, timeFilter)
f.WriteString(" and")
}
fmt.Fprintf(f, " %s", timeFilter)
} }
return f.String()
q := fmt.Sprintf("select %q from %q", s.Field, s.Measurement)
if len(conditions) > 0 {
q += fmt.Sprintf(" where %s", strings.Join(conditions, " and "))
}
return q
} }
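A worked example of the rewritten builder, assuming the Series and LabelPair definitions above (the tag names and time filter are illustrative):
s := Series{
	Measurement: "cpu",
	Field:       "value",
	LabelPairs:  []LabelPair{{Name: "host", Value: "h1"}},
	EmptyTags:   []string{"dc"},
}
// all conditions, including the time filter, are joined with " and":
//   select "value" from "cpu" where "host"::tag='h1' and "dc"::tag='' and time >= now() - 1h
q := s.fetchQuery("time >= now() - 1h")
_ = q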
// LabelPair is the key-value record // LabelPair is the key-value record
@ -118,7 +119,7 @@ func NewClient(cfg Config) (*Client, error) {
} }
// Database returns database name // Database returns database name
func (c Client) Database() string { func (c *Client) Database() string {
return c.database return c.database
} }
@ -140,7 +141,7 @@ func timeFilter(start, end string) string {
} }
// Explore checks the existing data schema in influx // Explore checks the existing data schema in influx
// by checking available fields and series, // by checking available (non-empty) tags, fields and measurements
// which unique combination represents all possible // which unique combination represents all possible
// time series existing in database. // time series existing in database.
// The explore step is required to reduce the load on influx // The explore step is required to reduce the load on influx
@ -150,6 +151,8 @@ func timeFilter(start, end string) string {
// May contain non-existing time series. // May contain non-existing time series.
func (c *Client) Explore() ([]*Series, error) { func (c *Client) Explore() ([]*Series, error) {
log.Printf("Exploring scheme for database %q", c.database) log.Printf("Exploring scheme for database %q", c.database)
// {"measurement1": ["value1", "value2"]}
mFields, err := c.fieldsByMeasurement() mFields, err := c.fieldsByMeasurement()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get field keys: %s", err) return nil, fmt.Errorf("failed to get field keys: %s", err)
@ -159,6 +162,12 @@ func (c *Client) Explore() ([]*Series, error) {
return nil, fmt.Errorf("found no numeric fields for import in database %q", c.database) return nil, fmt.Errorf("found no numeric fields for import in database %q", c.database)
} }
// {"measurement1": {"tag1", "tag2"}}
measurementTags, err := c.getMeasurementTags()
if err != nil {
return nil, fmt.Errorf("failed to get tags of measurements: %s", err)
}
series, err := c.getSeries() series, err := c.getSeries()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get series: %s", err) return nil, fmt.Errorf("failed to get series: %s", err)
@ -171,11 +180,17 @@ func (c *Client) Explore() ([]*Series, error) {
log.Printf("skip measurement %q since it has no fields", s.Measurement) log.Printf("skip measurement %q since it has no fields", s.Measurement)
continue continue
} }
tags, ok := measurementTags[s.Measurement]
if !ok {
return nil, fmt.Errorf("failed to find tags of measurement %s", s.Measurement)
}
emptyTags := getEmptyTags(tags, s.LabelPairs)
for _, field := range fields { for _, field := range fields {
is := &Series{ is := &Series{
Measurement: s.Measurement, Measurement: s.Measurement,
Field: field, Field: field,
LabelPairs: s.LabelPairs, LabelPairs: s.LabelPairs,
EmptyTags: emptyTags,
} }
iSeries = append(iSeries, is) iSeries = append(iSeries, is)
} }
@ -183,6 +198,22 @@ func (c *Client) Explore() ([]*Series, error) {
return iSeries, nil return iSeries, nil
} }
// getEmptyTags returns tags of a measurement that are missing in a specific series.
// Tags represent all tags of a measurement. LabelPairs represent tags of a specific series.
func getEmptyTags(tags map[string]struct{}, labelPairs []LabelPair) []string {
labelMap := make(map[string]struct{})
for _, pair := range labelPairs {
labelMap[pair.Name] = struct{}{}
}
// at most len(tags) tags can be missing from the series
result := make([]string, 0, len(tags))
for tag := range tags {
if _, ok := labelMap[tag]; !ok {
result = append(result, tag)
}
}
return result
}
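For illustration, a small worked example of getEmptyTags (tag names are hypothetical):
tags := map[string]struct{}{"host": {}, "dc": {}, "rack": {}}
pairs := []LabelPair{{Name: "host", Value: "h1"}}
// the series sets only "host", so "dc" and "rack" are reported as empty;
// map iteration order is not guaranteed
emptyTags := getEmptyTags(tags, pairs) // e.g. ["dc" "rack"]
_ = emptyTags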
// ChunkedResponse is a wrapper over influx.ChunkedResponse. // ChunkedResponse is a wrapper over influx.ChunkedResponse.
// Used for better memory usage control while iterating // Used for better memory usage control while iterating
// over huge time series. // over huge time series.
@ -357,6 +388,57 @@ func (c *Client) getSeries() ([]*Series, error) {
return result, nil return result, nil
} }
// getMeasurementTags get the tags for each measurement.
// tags are placed in a map without values (similar to a set) for quick lookups:
// {"measurement1": {"tag1", "tag2"}, "measurement2": {"tag3", "tag4"}}
func (c *Client) getMeasurementTags() (map[string]map[string]struct{}, error) {
com := "show tag keys"
q := influx.Query{
Command: com,
Database: c.database,
RetentionPolicy: c.retention,
Chunked: true,
ChunkSize: c.chunkSize,
}
log.Printf("fetching tag keys: %s", stringify(q))
cr, err := c.QueryAsChunk(q)
if err != nil {
return nil, fmt.Errorf("error while executing query %q: %s", q.Command, err)
}
const tagKey = "tagKey"
var tagsCount int
result := make(map[string]map[string]struct{})
for {
resp, err := cr.NextResponse()
if err != nil {
if err == io.EOF {
break
}
return nil, err
}
if resp.Error() != nil {
return nil, fmt.Errorf("response error for query %q: %s", q.Command, resp.Error())
}
qValues, err := parseResult(resp.Results[0])
if err != nil {
return nil, err
}
for _, qv := range qValues {
if result[qv.name] == nil {
result[qv.name] = make(map[string]struct{}, len(qv.values[tagKey]))
}
for _, tk := range qv.values[tagKey] {
result[qv.name][tk.(string)] = struct{}{}
tagsCount++
}
}
}
log.Printf("found %d tag(s) for %d measurements", tagsCount, len(result))
return result, nil
}
func (c *Client) do(q influx.Query) ([]queryValues, error) { func (c *Client) do(q influx.Query) ([]queryValues, error) {
res, err := c.Query(q) res, err := c.Query(q)
if err != nil { if err != nil {

View file

@ -73,6 +73,12 @@ func TestFetchQuery(t *testing.T) {
Measurement: "cpu", Measurement: "cpu",
Field: "value", Field: "value",
}, "", `select "value" from "cpu"`) }, "", `select "value" from "cpu"`)
f(&Series{
Measurement: "cpu",
Field: "value1",
EmptyTags: []string{"e1", "e2", "e3"},
}, "", `select "value1" from "cpu" where "e1"::tag='' and "e2"::tag='' and "e3"::tag=''`)
} }
func TestTimeFilter(t *testing.T) { func TestTimeFilter(t *testing.T) {

View file

@ -2137,6 +2137,25 @@ func TestExecExprSuccess(t *testing.T) {
}, },
}) })
f(`removeEmptySeries(removeBelowValue(time('a'),150),1)`, []*series{}) f(`removeEmptySeries(removeBelowValue(time('a'),150),1)`, []*series{})
// if xFilesFactor is set, a single value in the series needs to be non-null for it to be
// considered non-empty
f(`removeEmptySeries(removeBelowValue(time('a'),150),0)`, []*series{
{
Timestamps: []int64{120000, 180000},
Values: []float64{nan, 180},
Name: "removeBelowValue(a,150)",
Tags: map[string]string{"name": "a"},
},
})
f(`removeEmptySeries(removeBelowValue(time('a'),150),-1)`, []*series{
{
Timestamps: []int64{120000, 180000},
Values: []float64{nan, 180},
Name: "removeBelowValue(a,150)",
Tags: map[string]string{"name": "a"},
},
})
f(`round(time('a',17),-1)`, []*series{ f(`round(time('a',17),-1)`, []*series{
{ {
Timestamps: []int64{120000, 137000, 154000, 171000, 188000, 205000}, Timestamps: []int64{120000, 137000, 154000, 171000, 188000, 205000},

View file

@ -3151,7 +3151,7 @@ func transformRemoveEmptySeries(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSe
xff = xFilesFactor xff = xFilesFactor
} }
n := aggrCount(s.Values) n := aggrCount(s.Values)
if n/float64(len(s.Values)) < xff { if n/float64(len(s.Values)) <= xff {
return nil, nil return nil, nil
} }
s.expr = fe s.expr = fe
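A worked example of the boundary change: removeBelowValue(time('a'),150) leaves the series with values [nan, 180], i.e. n = 1 non-null point out of len = 2, so n/len = 0.5. With xFilesFactor = 0 the check 0.5 <= 0 is false and the series is kept, as the new tests above expect. For an all-null series n/len = 0: the old strict < kept such a series at xFilesFactor = 0, while <= now drops it, so at least one non-null value is required for the series to count as non-empty.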

View file

@ -108,7 +108,7 @@ func maySortResults(e metricsql.Expr) bool {
switch v := e.(type) { switch v := e.(type) {
case *metricsql.FuncExpr: case *metricsql.FuncExpr:
switch strings.ToLower(v.Name) { switch strings.ToLower(v.Name) {
case "sort", "sort_desc", case "sort", "sort_desc", "limit_offset",
"sort_by_label", "sort_by_label_desc", "sort_by_label", "sort_by_label_desc",
"sort_by_label_numeric", "sort_by_label_numeric_desc": "sort_by_label_numeric", "sort_by_label_numeric_desc":
// Results already sorted // Results already sorted

View file

@ -9274,6 +9274,75 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r1, r2} resultExpected := []netstorage.Result{r1, r2}
f(q, resultExpected) f(q, resultExpected)
}) })
t.Run(`limit_offset(5, 0, sort_by_label_numeric_desc(multiple_labels_numbers_special_chars, "foo"))`, func(t *testing.T) {
t.Parallel()
q := `limit_offset(5, 0, sort_by_label_numeric_desc((
label_set(3, "foo", "1:0:3"),
label_set(4, "foo", "5:0:15"),
label_set(1, "foo", "1:0:2"),
label_set(5, "foo", "7:0:15"),
label_set(7, "foo", "3:0:1"),
label_set(6, "foo", "1:0:2"),
label_set(8, "foo", "9:0:15")
), "foo"))`
r1 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{8, 8, 8, 8, 8, 8},
Timestamps: timestampsExpected,
}
r1.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("9:0:15"),
},
}
r2 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{5, 5, 5, 5, 5, 5},
Timestamps: timestampsExpected,
}
r2.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("7:0:15"),
},
}
r3 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{4, 4, 4, 4, 4, 4},
Timestamps: timestampsExpected,
}
r3.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("5:0:15"),
},
}
r4 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{7, 7, 7, 7, 7, 7},
Timestamps: timestampsExpected,
}
r4.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("3:0:1"),
},
}
r5 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{3, 3, 3, 3, 3, 3},
Timestamps: timestampsExpected,
}
r5.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("1:0:3"),
},
}
resultExpected := []netstorage.Result{r1, r2, r3, r4, r5}
f(q, resultExpected)
})
t.Run(`sort_by_label_numeric(alias_numbers_with_special_chars)`, func(t *testing.T) { t.Run(`sort_by_label_numeric(alias_numbers_with_special_chars)`, func(t *testing.T) {
t.Parallel() t.Parallel()
q := `sort_by_label_numeric(( q := `sort_by_label_numeric((

View file

@ -1,13 +1,13 @@
{ {
"files": { "files": {
"main.css": "./static/css/main.d781989c.css", "main.css": "./static/css/main.d781989c.css",
"main.js": "./static/js/main.68e2aae8.js", "main.js": "./static/js/main.7ec4e6eb.js",
"static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js", "static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
"static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md", "static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
"index.html": "./index.html" "index.html": "./index.html"
}, },
"entrypoints": [ "entrypoints": [
"static/css/main.d781989c.css", "static/css/main.d781989c.css",
"static/js/main.68e2aae8.js" "static/js/main.7ec4e6eb.js"
] ]
} }

View file

@ -0,0 +1,5 @@
{
"license": {
"type": "opensource"
}
}

View file

@ -1 +1 @@
index.html (vmui, single minified line): the only change is the bundled script reference, ./static/js/main.68e2aae8.js → ./static/js/main.7ec4e6eb.js.

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -67,8 +67,6 @@ var (
"See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning") "See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning")
cacheSizeIndexDBDataBlocks = flagutil.NewBytes("storage.cacheSizeIndexDBDataBlocks", 0, "Overrides max size for indexdb/dataBlocks cache. "+ cacheSizeIndexDBDataBlocks = flagutil.NewBytes("storage.cacheSizeIndexDBDataBlocks", 0, "Overrides max size for indexdb/dataBlocks cache. "+
"See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning") "See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning")
cacheSizeIndexDBDataBlocksSparse = flagutil.NewBytes("storage.cacheSizeIndexDBDataBlocksSparse", 0, "Overrides max size for indexdb/dataBlocksSparse cache. "+
"See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning")
cacheSizeIndexDBTagFilters = flagutil.NewBytes("storage.cacheSizeIndexDBTagFilters", 0, "Overrides max size for indexdb/tagFiltersToMetricIDs cache. "+ cacheSizeIndexDBTagFilters = flagutil.NewBytes("storage.cacheSizeIndexDBTagFilters", 0, "Overrides max size for indexdb/tagFiltersToMetricIDs cache. "+
"See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning") "See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning")
) )
@ -102,7 +100,6 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
storage.SetTagFiltersCacheSize(cacheSizeIndexDBTagFilters.IntN()) storage.SetTagFiltersCacheSize(cacheSizeIndexDBTagFilters.IntN())
mergeset.SetIndexBlocksCacheSize(cacheSizeIndexDBIndexBlocks.IntN()) mergeset.SetIndexBlocksCacheSize(cacheSizeIndexDBIndexBlocks.IntN())
mergeset.SetDataBlocksCacheSize(cacheSizeIndexDBDataBlocks.IntN()) mergeset.SetDataBlocksCacheSize(cacheSizeIndexDBDataBlocks.IntN())
mergeset.SetDataBlocksSparseCacheSize(cacheSizeIndexDBDataBlocksSparse.IntN())
if retentionPeriod.Duration() < 24*time.Hour { if retentionPeriod.Duration() < 24*time.Hour {
logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod) logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod)
@ -584,7 +581,6 @@ func writeStorageMetrics(w io.Writer, strg *storage.Storage) {
metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/next_day_metric_ids"}`, m.NextDayMetricIDCacheSize) metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/next_day_metric_ids"}`, m.NextDayMetricIDCacheSize)
metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/indexBlocks"}`, tm.IndexBlocksCacheSize) metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/indexBlocks"}`, tm.IndexBlocksCacheSize)
metrics.WriteGaugeUint64(w, `vm_cache_entries{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheSize) metrics.WriteGaugeUint64(w, `vm_cache_entries{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheSize)
metrics.WriteGaugeUint64(w, `vm_cache_entries{type="indexdb/dataBlocksSparse"}`, idbm.DataBlocksSparseCacheSize)
metrics.WriteGaugeUint64(w, `vm_cache_entries{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheSize) metrics.WriteGaugeUint64(w, `vm_cache_entries{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheSize)
metrics.WriteGaugeUint64(w, `vm_cache_entries{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheSize) metrics.WriteGaugeUint64(w, `vm_cache_entries{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheSize)
metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/regexps"}`, uint64(storage.RegexpCacheSize())) metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/regexps"}`, uint64(storage.RegexpCacheSize()))
@ -596,7 +592,6 @@ func writeStorageMetrics(w io.Writer, strg *storage.Storage) {
metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/metricName"}`, m.MetricNameCacheSizeBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/metricName"}`, m.MetricNameCacheSizeBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/indexBlocks"}`, tm.IndexBlocksCacheSizeBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/indexBlocks"}`, tm.IndexBlocksCacheSizeBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheSizeBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheSizeBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="indexdb/dataBlocksSparse"}`, idbm.DataBlocksSparseCacheSizeBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheSizeBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheSizeBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/date_metricID"}`, m.DateMetricIDCacheSizeBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/date_metricID"}`, m.DateMetricIDCacheSizeBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/hour_metric_ids"}`, m.HourMetricIDCacheSizeBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/hour_metric_ids"}`, m.HourMetricIDCacheSizeBytes)
@ -611,7 +606,6 @@ func writeStorageMetrics(w io.Writer, strg *storage.Storage) {
metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/metricName"}`, m.MetricNameCacheSizeMaxBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/metricName"}`, m.MetricNameCacheSizeMaxBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/indexBlocks"}`, tm.IndexBlocksCacheSizeMaxBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/indexBlocks"}`, tm.IndexBlocksCacheSizeMaxBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheSizeMaxBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheSizeMaxBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="indexdb/dataBlocksSparse"}`, idbm.DataBlocksSparseCacheSizeMaxBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheSizeMaxBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheSizeMaxBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheSizeMaxBytes) metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheSizeMaxBytes)
metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/regexps"}`, uint64(storage.RegexpCacheMaxSizeBytes())) metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/regexps"}`, uint64(storage.RegexpCacheMaxSizeBytes()))
@ -622,7 +616,6 @@ func writeStorageMetrics(w io.Writer, strg *storage.Storage) {
metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/metricName"}`, m.MetricNameCacheRequests) metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/metricName"}`, m.MetricNameCacheRequests)
metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/indexBlocks"}`, tm.IndexBlocksCacheRequests) metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/indexBlocks"}`, tm.IndexBlocksCacheRequests)
metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheRequests) metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheRequests)
metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="indexdb/dataBlocksSparse"}`, idbm.DataBlocksSparseCacheRequests)
metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheRequests) metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheRequests)
metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheRequests) metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheRequests)
metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/regexps"}`, storage.RegexpCacheRequests()) metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/regexps"}`, storage.RegexpCacheRequests())
@ -633,7 +626,6 @@ func writeStorageMetrics(w io.Writer, strg *storage.Storage) {
metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/metricName"}`, m.MetricNameCacheMisses) metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/metricName"}`, m.MetricNameCacheMisses)
metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/indexBlocks"}`, tm.IndexBlocksCacheMisses) metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/indexBlocks"}`, tm.IndexBlocksCacheMisses)
metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheMisses) metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheMisses)
metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="indexdb/dataBlocksSparse"}`, idbm.DataBlocksSparseCacheMisses)
metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheMisses) metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheMisses)
metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheMisses) metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheMisses)
metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/regexps"}`, storage.RegexpCacheMisses()) metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/regexps"}`, storage.RegexpCacheMisses())

View file

@ -1,4 +1,4 @@
FROM golang:1.23.1 AS build-web-stage FROM golang:1.23.3 AS build-web-stage
COPY build /build COPY build /build
WORKDIR /build WORKDIR /build

View file

@ -1,7 +1,6 @@
import { useAppDispatch, useAppState } from "../state/common/StateContext"; import { useAppDispatch, useAppState } from "../state/common/StateContext";
import { useEffect, useState } from "preact/compat"; import { useEffect, useState } from "preact/compat";
import { ErrorTypes } from "../types"; import { ErrorTypes } from "../types";
import { getUrlWithoutTenant } from "../utils/tenants";
const useFetchFlags = () => { const useFetchFlags = () => {
const { serverUrl } = useAppState(); const { serverUrl } = useAppState();
@ -17,7 +16,7 @@ const useFetchFlags = () => {
setIsLoading(true); setIsLoading(true);
try { try {
const url = getUrlWithoutTenant(serverUrl); const url = new URL(serverUrl).origin;
const response = await fetch(`${url}/flags`); const response = await fetch(`${url}/flags`);
const data = await response.text(); const data = await response.text();
const flags = data.split("\n").filter(flag => flag.trim() !== "") const flags = data.split("\n").filter(flag => flag.trim() !== "")

View file

@ -64,11 +64,15 @@ const GroupLogs: FC<TableLogsProps> = ({ logs, settingsRef }) => {
return groupByMultipleKeys(logs, [groupBy]).map((item) => { return groupByMultipleKeys(logs, [groupBy]).map((item) => {
const streamValue = item.values[0]?.[groupBy] || ""; const streamValue = item.values[0]?.[groupBy] || "";
const pairs = getStreamPairs(streamValue); const pairs = getStreamPairs(streamValue);
// sort values by time, newest first
const values = item.values.sort((a,b) => new Date(b._time).getTime() - new Date(a._time).getTime());
return { return {
...item, keys: item.keys,
keysString: item.keys.join(""),
values,
pairs, pairs,
}; };
}); }).sort((a, b) => a.keysString.localeCompare(b.keysString)); // sort groups by their keys
}, [logs, groupBy]); }, [logs, groupBy]);
const handleClickByPair = (value: string) => async (e: MouseEvent<HTMLDivElement>) => { const handleClickByPair = (value: string) => async (e: MouseEvent<HTMLDivElement>) => {
@ -117,7 +121,7 @@ const GroupLogs: FC<TableLogsProps> = ({ logs, settingsRef }) => {
{groupData.map((item, i) => ( {groupData.map((item, i) => (
<div <div
className="vm-group-logs-section" className="vm-group-logs-section"
key={item.keys.join("")} key={item.keysString}
> >
<Accordion <Accordion
key={String(expandGroups[i])} key={String(expandGroups[i])}
@ -129,7 +133,7 @@ const GroupLogs: FC<TableLogsProps> = ({ logs, settingsRef }) => {
{item.pairs.map((pair) => ( {item.pairs.map((pair) => (
<Tooltip <Tooltip
title={copied === pair ? "Copied" : "Copy to clipboard"} title={copied === pair ? "Copied" : "Copy to clipboard"}
key={`${item.keys.join("")}_${pair}`} key={`${item.keysString}_${pair}`}
placement={"top-center"} placement={"top-center"}
> >
<div <div

View file

@ -1,4 +1,5 @@
import router, { routerOptions } from "./index"; import router, { routerOptions } from "./index";
import { getTenantIdFromUrl } from "../utils/tenants";
export enum NavigationItemType { export enum NavigationItemType {
internalLink, internalLink,
@ -24,10 +25,12 @@ interface NavigationConfig {
* Special case for alert link * Special case for alert link
*/ */
const getAlertLink = (url: string, showAlertLink: boolean) => { const getAlertLink = (url: string, showAlertLink: boolean) => {
// see more https://docs.victoriametrics.com/cluster-victoriametrics/?highlight=vmalertproxyurl#vmalert // see more https://docs.victoriametrics.com/cluster-victoriametrics/#vmalert
const isCluster = !!getTenantIdFromUrl(url);
const value = isCluster ? `${url}/vmalert` : url.replace(/\/prometheus$/, "/vmalert");
return { return {
label: "Alerts", label: "Alerts",
value: `${url}/vmalert`, value,
type: NavigationItemType.externalLink, type: NavigationItemType.externalLink,
hide: !showAlertLink, hide: !showAlertLink,
}; };

View file

@ -26,7 +26,7 @@ queries to them:
- `client.go` - provides helper functions for sending HTTP requests to - `client.go` - provides helper functions for sending HTTP requests to
applications. applications.
The integration tests themselves reside in `*_test.go` files. Apart from having The integration tests themselves reside in `tests/*_test.go` files. Apart from having
the `_test` suffix, there are no strict rules of how to name a file, but the the `_test` suffix, there are no strict rules of how to name a file, but the
name should reflect the prevailing purpose of the tests located in that file. name should reflect the prevailing purpose of the tests located in that file.
For example, `sharding_test.go` aims at testing data sharding. For example, `sharding_test.go` aims at testing data sharding.
@ -38,3 +38,10 @@ accounts for that, it builds all application binaries before running the tests.
But if you want to run the tests without `make`, i.e. by executing But if you want to run the tests without `make`, i.e. by executing
`go test ./app/apptest`, you will need to build the binaries first (for example, `go test ./app/apptest`, you will need to build the binaries first (for example,
by executing `make all`). by executing `make all`).
Not all binaries can be built from the `master` branch: cluster binaries can be built
only from the `cluster` branch. Hence, not all test cases are suitable for running in both branches:
- If a test uses binaries built from the `cluster` branch, its name should be prefixed
with `TestCluster`.
- If a test uses binaries built from the `master` branch, its name should be prefixed
with `TestVmsingle`.
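A minimal naming sketch under these conventions (the test names and bodies are hypothetical):
package tests

import "testing"

// runs only where the cluster binaries (vmstorage, vminsert, vmselect) are available
func TestClusterSharding(t *testing.T) { /* ... */ }

// runs only where the single-node victoria-metrics binary is available
func TestVmsingleIngestion(t *testing.T) { /* ... */ }
Since the prefix is part of the test name, each branch can exclude the other branch's tests with the standard `go test -run`/`-skip` name filters.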

apptest/model.go (new file, 121 lines)
View file

@ -0,0 +1,121 @@
package apptest
import (
"encoding/json"
"fmt"
"strconv"
"testing"
"time"
)
// PrometheusQuerier contains methods available to Prometheus-like HTTP API for Querying
type PrometheusQuerier interface {
PrometheusAPIV1Query(t *testing.T, query, time, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse
PrometheusAPIV1QueryRange(t *testing.T, query, start, end, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse
PrometheusAPIV1Series(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1SeriesResponse
}
// PrometheusWriter contains methods available to Prometheus-like HTTP API for Writing new data
type PrometheusWriter interface {
PrometheusAPIV1ImportPrometheus(t *testing.T, records []string, opts QueryOpts)
}
// QueryOpts contains various params used for querying or ingesting data
type QueryOpts struct {
Tenant string
Timeout string
}
// PrometheusAPIV1QueryResponse is an in-memory representation of the
// /prometheus/api/v1/query or /prometheus/api/v1/query_range response.
type PrometheusAPIV1QueryResponse struct {
Status string
Data *QueryData
}
// NewPrometheusAPIV1QueryResponse is a test helper function that creates a new
// instance of PrometheusAPIV1QueryResponse by unmarshalling a json string.
func NewPrometheusAPIV1QueryResponse(t *testing.T, s string) *PrometheusAPIV1QueryResponse {
t.Helper()
res := &PrometheusAPIV1QueryResponse{}
if err := json.Unmarshal([]byte(s), res); err != nil {
t.Fatalf("could not unmarshal query response: %v", err)
}
return res
}
// QueryData holds the query result along with its type.
type QueryData struct {
ResultType string
Result []*QueryResult
}
// QueryResult holds the metric name (in the form of label name-value
// collection) and its samples.
//
// Sample or Samples field is set for /prometheus/api/v1/query or
// /prometheus/api/v1/query_range response respectively.
type QueryResult struct {
Metric map[string]string
Sample *Sample `json:"value"`
Samples []*Sample `json:"values"`
}
// Sample is a timeseries value at a given timestamp.
type Sample struct {
Timestamp int64
Value float64
}
// NewSample is a test helper function that creates a new sample out of time in
// RFC3339 format and a value.
func NewSample(t *testing.T, timeStr string, value float64) *Sample {
parsedTime, err := time.Parse(time.RFC3339, timeStr)
if err != nil {
t.Fatalf("could not parse RFC3339 time %q: %v", timeStr, err)
}
return &Sample{parsedTime.Unix(), value}
}
// UnmarshalJSON populates the sample fields from a JSON string.
func (s *Sample) UnmarshalJSON(b []byte) error {
var (
ts int64
v string
)
raw := []any{&ts, &v}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if got, want := len(raw), 2; got != want {
return fmt.Errorf("unexpected number of fields: got %d, want %d (raw sample: %s)", got, want, string(b))
}
s.Timestamp = ts
var err error
s.Value, err = strconv.ParseFloat(v, 64)
if err != nil {
return fmt.Errorf("could not parse sample value %q: %w", v, err)
}
return nil
}
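A short usage sketch of the custom unmarshalling above, fed a value in the Prometheus response format (numeric timestamp plus stringified value):
var s Sample
if err := json.Unmarshal([]byte(`[1652169780, "3"]`), &s); err != nil {
	panic(err)
}
// s.Timestamp == 1652169780, s.Value == 3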
// PrometheusAPIV1SeriesResponse is an in-memory representation of the
// /prometheus/api/v1/series response.
type PrometheusAPIV1SeriesResponse struct {
Status string
IsPartial bool
Data []map[string]string
}
// NewPrometheusAPIV1SeriesResponse is a test helper function that creates a new
// instance of PrometheusAPIV1SeriesResponse by unmarshalling a json string.
func NewPrometheusAPIV1SeriesResponse(t *testing.T, s string) *PrometheusAPIV1SeriesResponse {
t.Helper()
res := &PrometheusAPIV1SeriesResponse{}
if err := json.Unmarshal([]byte(s), res); err != nil {
t.Fatalf("could not unmarshal series response: %v", err)
}
return res
}

View file

@ -11,11 +11,18 @@ import (
type TestCase struct { type TestCase struct {
t *testing.T t *testing.T
cli *Client cli *Client
startedApps []Stopper
}
// Stopper is an interface for objects that need to be stopped via a Stop() call
type Stopper interface {
Stop()
} }
// NewTestCase creates a new test case. // NewTestCase creates a new test case.
func NewTestCase(t *testing.T) *TestCase { func NewTestCase(t *testing.T) *TestCase {
return &TestCase{t, NewClient()} return &TestCase{t, NewClient(), nil}
} }
// Dir returns the directory name that should be used as the -storageDataDir. // Dir returns the directory name that should be used as the -storageDataDir.
@ -29,14 +36,73 @@ func (tc *TestCase) Client() *Client {
return tc.cli return tc.cli
} }
// Close performs the test case clean up, such as closing all client connections // Stop performs the test case clean up, such as closing all client connections
// and removing the -storageDataDir directory. // and removing the -storageDataDir directory.
// //
// Note that the -storageDataDir is not removed in case of test case failure to // Note that the -storageDataDir is not removed in case of test case failure to
// allow for furher manual debugging. // allow for further manual debugging.
func (tc *TestCase) Close() { func (tc *TestCase) Stop() {
tc.cli.CloseConnections() tc.cli.CloseConnections()
for _, app := range tc.startedApps {
app.Stop()
}
if !tc.t.Failed() { if !tc.t.Failed() {
fs.MustRemoveAll(tc.Dir()) fs.MustRemoveAll(tc.Dir())
} }
} }
// MustStartVmsingle is a test helper function that starts an instance of
// vmsingle and fails the test if the app fails to start.
func (tc *TestCase) MustStartVmsingle(instance string, flags []string) *Vmsingle {
tc.t.Helper()
app, err := StartVmsingle(instance, flags, tc.cli)
if err != nil {
tc.t.Fatalf("Could not start %s: %v", instance, err)
}
tc.addApp(app)
return app
}
// MustStartVmstorage is a test helper function that starts an instance of
// vmstorage and fails the test if the app fails to start.
func (tc *TestCase) MustStartVmstorage(instance string, flags []string) *Vmstorage {
tc.t.Helper()
app, err := StartVmstorage(instance, flags, tc.cli)
if err != nil {
tc.t.Fatalf("Could not start %s: %v", instance, err)
}
tc.addApp(app)
return app
}
// MustStartVmselect is a test helper function that starts an instance of
// vmselect and fails the test if the app fails to start.
func (tc *TestCase) MustStartVmselect(instance string, flags []string) *Vmselect {
tc.t.Helper()
app, err := StartVmselect(instance, flags, tc.cli)
if err != nil {
tc.t.Fatalf("Could not start %s: %v", instance, err)
}
tc.addApp(app)
return app
}
// MustStartVminsert is a test helper function that starts an instance of
// vminsert and fails the test if the app fails to start.
func (tc *TestCase) MustStartVminsert(instance string, flags []string) *Vminsert {
tc.t.Helper()
app, err := StartVminsert(instance, flags, tc.cli)
if err != nil {
tc.t.Fatalf("Could not start %s: %v", instance, err)
}
tc.addApp(app)
return app
}
func (tc *TestCase) addApp(app Stopper) {
tc.startedApps = append(tc.startedApps, app)
}

View file

@ -0,0 +1,154 @@
package tests
import (
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"testing"
"time"
)
// Data used in examples in
// https://docs.victoriametrics.com/keyconcepts/#instant-query and
// https://docs.victoriametrics.com/keyconcepts/#range-query
var docData = []string{
"foo_bar 1.00 1652169600000", // 2022-05-10T08:00:00Z
"foo_bar 2.00 1652169660000", // 2022-05-10T08:01:00Z
"foo_bar 3.00 1652169720000", // 2022-05-10T08:02:00Z
"foo_bar 5.00 1652169840000", // 2022-05-10T08:04:00Z, one point missed
"foo_bar 5.50 1652169960000", // 2022-05-10T08:06:00Z, one point missed
"foo_bar 5.50 1652170020000", // 2022-05-10T08:07:00Z
"foo_bar 4.00 1652170080000", // 2022-05-10T08:08:00Z
"foo_bar 3.50 1652170260000", // 2022-05-10T08:11:00Z, two points missed
"foo_bar 3.25 1652170320000", // 2022-05-10T08:12:00Z
"foo_bar 3.00 1652170380000", // 2022-05-10T08:13:00Z
"foo_bar 2.00 1652170440000", // 2022-05-10T08:14:00Z
"foo_bar 1.00 1652170500000", // 2022-05-10T08:15:00Z
"foo_bar 4.00 1652170560000", // 2022-05-10T08:16:00Z
}
// TestSingleKeyConceptsQuery verifies cases from https://docs.victoriametrics.com/keyconcepts/#query-data
func TestSingleKeyConceptsQuery(t *testing.T) {
tc := apptest.NewTestCase(t)
defer tc.Stop()
vmsingle := tc.MustStartVmsingle("vmsingle", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage",
"-retentionPeriod=100y",
})
opts := apptest.QueryOpts{Timeout: "5s"}
// Insert example data from documentation.
vmsingle.PrometheusAPIV1ImportPrometheus(t, docData, opts)
vmsingle.ForceFlush(t)
testInstantQuery(t, vmsingle, opts)
testRangeQuery(t, vmsingle, opts)
}
// TestClusterKeyConceptsQuery verifies cases from https://docs.victoriametrics.com/keyconcepts/#query-data
func TestClusterKeyConceptsQuery(t *testing.T) {
tc := apptest.NewTestCase(t)
defer tc.Stop()
// Set up the following cluster configuration:
//
// - two vmstorage instances
// - vminsert points to the two vmstorages, its replication setting
// is off which means it will only shard the incoming data across the two
// vmstorages.
// - vmselect points to the two vmstorages and is expected to query both
// vmstorages and build the full result out of the two partial results.
vmstorage1 := tc.MustStartVmstorage("vmstorage-1", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage-1",
"-retentionPeriod=100y",
})
vmstorage2 := tc.MustStartVmstorage("vmstorage-2", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage-2",
"-retentionPeriod=100y",
})
vminsert := tc.MustStartVminsert("vminsert", []string{
"-storageNode=" + vmstorage1.VminsertAddr() + "," + vmstorage2.VminsertAddr(),
})
vmselect := tc.MustStartVmselect("vmselect", []string{
"-storageNode=" + vmstorage1.VmselectAddr() + "," + vmstorage2.VmselectAddr(),
})
opts := apptest.QueryOpts{Timeout: "5s", Tenant: "0"}
// Insert example data from documentation.
vminsert.PrometheusAPIV1ImportPrometheus(t, docData, opts)
time.Sleep(2 * time.Second)
vmstorage1.ForceFlush(t)
vmstorage2.ForceFlush(t)
testInstantQuery(t, vmselect, opts)
testRangeQuery(t, vmselect, opts)
}
// testInstantQuery verifies the statements made in the
// `Instant query` section of the VictoriaMetrics documentation. See:
// https://docs.victoriametrics.com/keyconcepts/#instant-query
func testInstantQuery(t *testing.T, q apptest.PrometheusQuerier, opts apptest.QueryOpts) {
// Get the value of the foo_bar time series at 2022-05-10Z08:03:00Z with the
// step of 5m and timeout 5s. There is no sample at exactly this timestamp.
// Therefore, VictoriaMetrics will search for the nearest sample within the
// [time-5m..time] interval.
got := q.PrometheusAPIV1Query(t, "foo_bar", "2022-05-10T08:03:00.000Z", "5m", opts)
want := apptest.NewPrometheusAPIV1QueryResponse(t, `{"data":{"result":[{"metric":{"__name__":"foo_bar"},"value":[1652169780,"3"]}]}}`)
opt := cmpopts.IgnoreFields(apptest.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType")
if diff := cmp.Diff(want, got, opt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// Get the value of the foo_bar time series at 2022-05-10Z08:18:00Z with the
// step of 1m and timeout 5s. There is no sample at this timestamp.
// Therefore, VictoriaMetrics will search for the nearest sample within the
// [time-1m..time] interval. Since the nearest sample is 2m away and the
// step is 1m, VictoriaMetrics must return an empty response.
got = q.PrometheusAPIV1Query(t, "foo_bar", "2022-05-10T08:18:00.000Z", "1m", opts)
if len(got.Data.Result) > 0 {
t.Errorf("unexpected response: got non-empty result, want empty result:\n%v", got)
}
}
// testRangeQuery verifies the statements made in the
// `Range query` section of the VictoriaMetrics documentation. See:
// https://docs.victoriametrics.com/keyconcepts/#range-query
func testRangeQuery(t *testing.T, q apptest.PrometheusQuerier, opts apptest.QueryOpts) {
// Get the values of the foo_bar time series for
// [2022-05-10Z07:59:00Z..2022-05-10Z08:17:00Z] time interval with the step
// of 1m and timeout 5s.
got := q.PrometheusAPIV1QueryRange(t, "foo_bar", "2022-05-10T07:59:00.000Z", "2022-05-10T08:17:00.000Z", "1m", opts)
want := apptest.NewPrometheusAPIV1QueryResponse(t, `{"data": {"result": [{"metric": {"__name__": "foo_bar"}, "values": []}]}}`)
s := make([]*apptest.Sample, 17)
// Sample for 2022-05-10T07:59:00Z is missing because the time series has
// samples only starting from 8:00.
s[0] = apptest.NewSample(t, "2022-05-10T08:00:00Z", 1)
s[1] = apptest.NewSample(t, "2022-05-10T08:01:00Z", 2)
s[2] = apptest.NewSample(t, "2022-05-10T08:02:00Z", 3)
s[3] = apptest.NewSample(t, "2022-05-10T08:03:00Z", 3)
s[4] = apptest.NewSample(t, "2022-05-10T08:04:00Z", 5)
s[5] = apptest.NewSample(t, "2022-05-10T08:05:00Z", 5)
s[6] = apptest.NewSample(t, "2022-05-10T08:06:00Z", 5.5)
s[7] = apptest.NewSample(t, "2022-05-10T08:07:00Z", 5.5)
s[8] = apptest.NewSample(t, "2022-05-10T08:08:00Z", 4)
s[9] = apptest.NewSample(t, "2022-05-10T08:09:00Z", 4)
// Sample for 2022-05-10T08:10:00Z is missing because there is no sample
// within the [8:10 - 1m .. 8:10] interval.
s[10] = apptest.NewSample(t, "2022-05-10T08:11:00Z", 3.5)
s[11] = apptest.NewSample(t, "2022-05-10T08:12:00Z", 3.25)
s[12] = apptest.NewSample(t, "2022-05-10T08:13:00Z", 3)
s[13] = apptest.NewSample(t, "2022-05-10T08:14:00Z", 2)
s[14] = apptest.NewSample(t, "2022-05-10T08:15:00Z", 1)
s[15] = apptest.NewSample(t, "2022-05-10T08:16:00Z", 4)
s[16] = apptest.NewSample(t, "2022-05-10T08:17:00Z", 4)
want.Data.Result[0].Samples = s
opt := cmpopts.IgnoreFields(apptest.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType")
if diff := cmp.Diff(want, got, opt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
}

View file

@ -9,9 +9,9 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/apptest" "github.com/VictoriaMetrics/VictoriaMetrics/apptest"
) )
func TestMultilevelSelect(t *testing.T) { func TestClusterMultilevelSelect(t *testing.T) {
tc := apptest.NewTestCase(t) tc := apptest.NewTestCase(t)
defer tc.Close() defer tc.Stop()
// Set up the following multi-level cluster configuration: // Set up the following multi-level cluster configuration:
// //
@ -20,24 +20,18 @@ func TestMultilevelSelect(t *testing.T) {
// vminsert writes data into vmstorage. // vminsert writes data into vmstorage.
// vmselect (L2) reads that data via vmselect (L1). // vmselect (L2) reads that data via vmselect (L1).
cli := tc.Client() vmstorage := tc.MustStartVmstorage("vmstorage", []string{
vmstorage := apptest.MustStartVmstorage(t, "vmstorage", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage", "-storageDataPath=" + tc.Dir() + "/vmstorage",
}, cli) })
defer vmstorage.Stop() vminsert := tc.MustStartVminsert("vminsert", []string{
vminsert := apptest.MustStartVminsert(t, "vminsert", []string{
"-storageNode=" + vmstorage.VminsertAddr(), "-storageNode=" + vmstorage.VminsertAddr(),
}, cli) })
defer vminsert.Stop() vmselectL1 := tc.MustStartVmselect("vmselect-level1", []string{
vmselectL1 := apptest.MustStartVmselect(t, "vmselect-level1", []string{
"-storageNode=" + vmstorage.VmselectAddr(), "-storageNode=" + vmstorage.VmselectAddr(),
}, cli) })
defer vmselectL1.Stop() vmselectL2 := tc.MustStartVmselect("vmselect-level2", []string{
vmselectL2 := apptest.MustStartVmselect(t, "vmselect-level2", []string{
"-storageNode=" + vmselectL1.ClusternativeListenAddr(), "-storageNode=" + vmselectL1.ClusternativeListenAddr(),
}, cli) })
defer vmselectL2.Stop()
// Insert 1000 unique time series. Wait for 2 seconds to let vmstorage // Insert 1000 unique time series. Wait for 2 seconds to let vmstorage
// flush pending items so they become searchable. // flush pending items so they become searchable.
@ -47,13 +41,13 @@ func TestMultilevelSelect(t *testing.T) {
for i := range numMetrics { for i := range numMetrics {
records[i] = fmt.Sprintf("metric_%d %d", i, rand.IntN(1000)) records[i] = fmt.Sprintf("metric_%d %d", i, rand.IntN(1000))
} }
vminsert.PrometheusAPIV1ImportPrometheus(t, "0", records) vminsert.PrometheusAPIV1ImportPrometheus(t, records, apptest.QueryOpts{Tenant: "0"})
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
// Retrieve all time series and verify that vmselect (L1) serves the complete // Retrieve all time series and verify that vmselect (L1) serves the complete
// set of time series. // set of time series.
seriesL1 := vmselectL1.PrometheusAPIV1Series(t, "0", `{__name__=~".*"}`) seriesL1 := vmselectL1.PrometheusAPIV1Series(t, `{__name__=~".*"}`, apptest.QueryOpts{Tenant: "0"})
if got, want := len(seriesL1.Data), numMetrics; got != want { if got, want := len(seriesL1.Data), numMetrics; got != want {
t.Fatalf("unexpected level-1 series count: got %d, want %d", got, want) t.Fatalf("unexpected level-1 series count: got %d, want %d", got, want)
} }
@ -61,7 +55,7 @@ func TestMultilevelSelect(t *testing.T) {
// Retrieve all time series and verify that vmselect (L2) serves the complete // Retrieve all time series and verify that vmselect (L2) serves the complete
// set of time series. // set of time series.
seriesL2 := vmselectL2.PrometheusAPIV1Series(t, "0", `{__name__=~".*"}`) seriesL2 := vmselectL2.PrometheusAPIV1Series(t, `{__name__=~".*"}`, apptest.QueryOpts{Tenant: "0"})
if got, want := len(seriesL2.Data), numMetrics; got != want { if got, want := len(seriesL2.Data), numMetrics; got != want {
t.Fatalf("unexpected level-2 series count: got %d, want %d", got, want) t.Fatalf("unexpected level-2 series count: got %d, want %d", got, want)
} }

View file

@ -9,9 +9,9 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/apptest" "github.com/VictoriaMetrics/VictoriaMetrics/apptest"
) )
func TestVminsertShardsDataVmselectBuildsFullResultFromShards(t *testing.T) { func TestClusterVminsertShardsDataVmselectBuildsFullResultFromShards(t *testing.T) {
tc := apptest.NewTestCase(t) tc := apptest.NewTestCase(t)
defer tc.Close() defer tc.Stop()
// Set up the following cluster configuration: // Set up the following cluster configuration:
// //
@ -22,24 +22,18 @@ func TestVminsertShardsDataVmselectBuildsFullResultFromShards(t *testing.T) {
// - vmselect points to the two vmstorages and is expected to query both // - vmselect points to the two vmstorages and is expected to query both
// vmstorages and build the full result out of the two partial results. // vmstorages and build the full result out of the two partial results.
cli := tc.Client() vmstorage1 := tc.MustStartVmstorage("vmstorage-1", []string{
vmstorage1 := apptest.MustStartVmstorage(t, "vmstorage-1", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage-1", "-storageDataPath=" + tc.Dir() + "/vmstorage-1",
}, cli) })
defer vmstorage1.Stop() vmstorage2 := tc.MustStartVmstorage("vmstorage-2", []string{
vmstorage2 := apptest.MustStartVmstorage(t, "vmstorage-2", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage-2", "-storageDataPath=" + tc.Dir() + "/vmstorage-2",
}, cli) })
defer vmstorage2.Stop() vminsert := tc.MustStartVminsert("vminsert", []string{
vminsert := apptest.MustStartVminsert(t, "vminsert", []string{
"-storageNode=" + vmstorage1.VminsertAddr() + "," + vmstorage2.VminsertAddr(), "-storageNode=" + vmstorage1.VminsertAddr() + "," + vmstorage2.VminsertAddr(),
}, cli) })
defer vminsert.Stop() vmselect := tc.MustStartVmselect("vmselect", []string{
vmselect := apptest.MustStartVmselect(t, "vmselect", []string{
"-storageNode=" + vmstorage1.VmselectAddr() + "," + vmstorage2.VmselectAddr(), "-storageNode=" + vmstorage1.VmselectAddr() + "," + vmstorage2.VmselectAddr(),
}, cli) })
defer vmselect.Stop()
// Insert 1000 unique time series and verify that the inserted data has been // Insert 1000 unique time series and verify that the inserted data has been
// indeed sharded by checking various metrics exposed by vminsert and // indeed sharded by checking various metrics exposed by vminsert and
@ -53,7 +47,7 @@ func TestVminsertShardsDataVmselectBuildsFullResultFromShards(t *testing.T) {
for i := range numMetrics { for i := range numMetrics {
records[i] = fmt.Sprintf("metric_%d %d", i, rand.IntN(1000)) records[i] = fmt.Sprintf("metric_%d %d", i, rand.IntN(1000))
} }
vminsert.PrometheusAPIV1ImportPrometheus(t, "0", records) vminsert.PrometheusAPIV1ImportPrometheus(t, records, apptest.QueryOpts{Tenant: "0"})
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
numMetrics1 := vmstorage1.GetIntMetric(t, "vm_vminsert_metrics_read_total") numMetrics1 := vmstorage1.GetIntMetric(t, "vm_vminsert_metrics_read_total")
@ -71,7 +65,7 @@ func TestVminsertShardsDataVmselectBuildsFullResultFromShards(t *testing.T) {
// Retrieve all time series and verify that vmselect serves the complete set // Retrieve all time series and verify that vmselect serves the complete set
// of time series. // of time series.
series := vmselect.PrometheusAPIV1Series(t, "0", `{__name__=~".*"}`) series := vmselect.PrometheusAPIV1Series(t, `{__name__=~".*"}`, apptest.QueryOpts{Tenant: "0"})
if got, want := series.Status, "success"; got != want { if got, want := series.Status, "success"; got != want {
t.Fatalf("unexpected /ap1/v1/series response status: got %s, want %s", got, want) t.Fatalf("unexpected /ap1/v1/series response status: got %s, want %s", got, want)
} }

View file

@ -18,19 +18,6 @@ type Vminsert struct {
cli *Client cli *Client
} }
// MustStartVminsert is a test helper function that starts an instance of
// vminsert and fails the test if the app fails to start.
func MustStartVminsert(t *testing.T, instance string, flags []string, cli *Client) *Vminsert {
t.Helper()
app, err := StartVminsert(instance, flags, cli)
if err != nil {
t.Fatalf("Could not start %s: %v", instance, err)
}
return app
}
// StartVminsert starts an instance of vminsert with the given flags. It also // StartVminsert starts an instance of vminsert with the given flags. It also
// sets the default flags and populates the app instance state with runtime // sets the default flags and populates the app instance state with runtime
// values extracted from the application log (such as httpListenAddr) // values extracted from the application log (such as httpListenAddr)
@ -64,10 +51,10 @@ func StartVminsert(instance string, flags []string, cli *Client) (*Vminsert, err
// /prometheus/api/v1/import/prometheus vminsert endpoint. // /prometheus/api/v1/import/prometheus vminsert endpoint.
// //
// See https://docs.victoriametrics.com/url-examples/#apiv1importprometheus // See https://docs.victoriametrics.com/url-examples/#apiv1importprometheus
func (app *Vminsert) PrometheusAPIV1ImportPrometheus(t *testing.T, tenant string, records []string) { func (app *Vminsert) PrometheusAPIV1ImportPrometheus(t *testing.T, records []string, opts QueryOpts) {
t.Helper() t.Helper()
url := fmt.Sprintf("http://%s/insert/%s/prometheus/api/v1/import/prometheus", app.httpListenAddr, tenant) url := fmt.Sprintf("http://%s/insert/%s/prometheus/api/v1/import/prometheus", app.httpListenAddr, opts.Tenant)
app.cli.Post(t, url, "text/plain", strings.Join(records, "\n"), http.StatusNoContent) app.cli.Post(t, url, "text/plain", strings.Join(records, "\n"), http.StatusNoContent)
} }
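For reference, a minimal sketch of a call site after this signature change, mirroring the usage shown in the cluster test hunk above (the tenant moves from a positional argument into `QueryOpts`):

```go
// Hypothetical test fragment illustrating the new QueryOpts-based call.
records := []string{"metric_1 10", "metric_2 20"}
vminsert.PrometheusAPIV1ImportPrometheus(t, records, apptest.QueryOpts{Tenant: "0"})
```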

View file

@ -1,7 +1,6 @@
package apptest package apptest
import ( import (
"encoding/json"
"fmt" "fmt"
"net/http" "net/http"
"net/url" "net/url"
@ -20,19 +19,6 @@ type Vmselect struct {
cli *Client cli *Client
} }
// MustStartVmselect is a test helper function that starts an instance of
// vmselect and fails the test if the app fails to start.
func MustStartVmselect(t *testing.T, instance string, flags []string, cli *Client) *Vmselect {
t.Helper()
app, err := StartVmselect(instance, flags, cli)
if err != nil {
t.Fatalf("Could not start %s: %v", instance, err)
}
return app
}
// StartVmselect starts an instance of vmselect with the given flags. It also // StartVmselect starts an instance of vmselect with the given flags. It also
// sets the default flags and populates the app instance state with runtime // sets the default flags and populates the app instance state with runtime
// values extracted from the application log (such as httpListenAddr) // values extracted from the application log (such as httpListenAddr)
@ -69,30 +55,55 @@ func (app *Vmselect) ClusternativeListenAddr() string {
return app.clusternativeListenAddr return app.clusternativeListenAddr
} }
// PrometheusAPIV1SeriesResponse is an inmemory representation of the // PrometheusAPIV1Query is a test helper function that performs PromQL/MetricsQL
// /prometheus/api/v1/series response. // instant query by sending a HTTP POST request to /prometheus/api/v1/query
type PrometheusAPIV1SeriesResponse struct { // vmsingle endpoint.
Status string //
IsPartial bool // See https://docs.victoriametrics.com/url-examples/#apiv1query
Data []map[string]string func (app *Vmselect) PrometheusAPIV1Query(t *testing.T, query, time, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
t.Helper()
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/query", app.httpListenAddr, opts.Tenant)
values := url.Values{}
values.Add("query", query)
values.Add("time", time)
values.Add("step", step)
values.Add("timeout", opts.Timeout)
res := app.cli.PostForm(t, queryURL, values, http.StatusOK)
return NewPrometheusAPIV1QueryResponse(t, res)
}
// PrometheusAPIV1QueryRange is a test helper function that performs
// PromQL/MetricsQL range query by sending a HTTP POST request to
// /prometheus/api/v1/query_range vmsingle endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1query_range
func (app *Vmselect) PrometheusAPIV1QueryRange(t *testing.T, query, start, end, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
t.Helper()
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/query_range", app.httpListenAddr, opts.Tenant)
values := url.Values{}
values.Add("query", query)
values.Add("start", start)
values.Add("end", end)
values.Add("step", step)
values.Add("timeout", opts.Timeout)
res := app.cli.PostForm(t, queryURL, values, http.StatusOK)
return NewPrometheusAPIV1QueryResponse(t, res)
} }
// PrometheusAPIV1Series sends a query to a /prometheus/api/v1/series endpoint // PrometheusAPIV1Series sends a query to a /prometheus/api/v1/series endpoint
// and returns the list of time series that match the query. // and returns the list of time series that match the query.
// //
// See https://docs.victoriametrics.com/url-examples/#apiv1series // See https://docs.victoriametrics.com/url-examples/#apiv1series
func (app *Vmselect) PrometheusAPIV1Series(t *testing.T, tenant, matchQuery string) *PrometheusAPIV1SeriesResponse { func (app *Vmselect) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1SeriesResponse {
t.Helper() t.Helper()
seriesURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/series", app.httpListenAddr, tenant) seriesURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/series", app.httpListenAddr, opts.Tenant)
values := url.Values{} values := url.Values{}
values.Add("match[]", matchQuery) values.Add("match[]", matchQuery)
jsonRes := app.cli.PostForm(t, seriesURL, values, http.StatusOK) res := app.cli.PostForm(t, seriesURL, values, http.StatusOK)
var res PrometheusAPIV1SeriesResponse return NewPrometheusAPIV1SeriesResponse(t, res)
if err := json.Unmarshal([]byte(jsonRes), &res); err != nil {
t.Fatalf("could not unmarshal /api/v1/series response: %v", err)
}
return &res
} }
// String returns the string representation of the vmselect app state. // String returns the string representation of the vmselect app state.
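A hedged usage sketch for the new vmselect helpers above (tenant and timeout values are illustrative; the `Status` field on the query response is assumed by analogy with the series response):

```go
// Hypothetical fragment: instant query for tenant "0" through vmselect.
got := vmselect.PrometheusAPIV1Query(t, `up`, "2024-11-08T00:00:00Z", "5m", apptest.QueryOpts{
	Tenant:  "0",
	Timeout: "5s",
})
if got.Status != "success" { // Status field assumed, as in the series response
	t.Fatalf("unexpected query status: %s", got.Status)
}
```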

apptest/vmsingle.go (new file, 136 lines)
View file

@ -0,0 +1,136 @@
package apptest
import (
"fmt"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"testing"
"time"
)
// Vmsingle holds the state of a vmsingle app and provides vmsingle-specific
// functions.
type Vmsingle struct {
*app
*ServesMetrics
storageDataPath string
httpListenAddr string
forceFlushURL string
prometheusAPIV1ImportPrometheusURL string
prometheusAPIV1QueryURL string
prometheusAPIV1QueryRangeURL string
prometheusAPIV1SeriesURL string
}
// StartVmsingle starts an instance of vmsingle with the given flags. It also
// sets the default flags and populates the app instance state with runtime
// values extracted from the application log (such as httpListenAddr).
func StartVmsingle(instance string, flags []string, cli *Client) (*Vmsingle, error) {
app, stderrExtracts, err := startApp(instance, "../../bin/victoria-metrics", flags, &appOptions{
defaultFlags: map[string]string{
"-storageDataPath": fmt.Sprintf("%s/%s-%d", os.TempDir(), instance, time.Now().UnixNano()),
"-httpListenAddr": "127.0.0.1:0",
},
extractREs: []*regexp.Regexp{
storageDataPathRE,
httpListenAddrRE,
},
})
if err != nil {
return nil, err
}
return &Vmsingle{
app: app,
ServesMetrics: &ServesMetrics{
metricsURL: fmt.Sprintf("http://%s/metrics", stderrExtracts[1]),
cli: cli,
},
storageDataPath: stderrExtracts[0],
httpListenAddr: stderrExtracts[1],
forceFlushURL: fmt.Sprintf("http://%s/internal/force_flush", stderrExtracts[1]),
prometheusAPIV1ImportPrometheusURL: fmt.Sprintf("http://%s/prometheus/api/v1/import/prometheus", stderrExtracts[1]),
prometheusAPIV1QueryURL: fmt.Sprintf("http://%s/prometheus/api/v1/query", stderrExtracts[1]),
prometheusAPIV1QueryRangeURL: fmt.Sprintf("http://%s/prometheus/api/v1/query_range", stderrExtracts[1]),
prometheusAPIV1SeriesURL: fmt.Sprintf("http://%s/prometheus/api/v1/series", stderrExtracts[1]),
}, nil
}
// ForceFlush is a test helper function that forces the flushing of inserted
// data, so it becomes available for searching immediately.
func (app *Vmsingle) ForceFlush(t *testing.T) {
t.Helper()
app.cli.Get(t, app.forceFlushURL, http.StatusOK)
}
// PrometheusAPIV1ImportPrometheus is a test helper function that inserts a
// collection of records in Prometheus text exposition format by sending a HTTP
// POST request to /prometheus/api/v1/import/prometheus vmsingle endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1importprometheus
func (app *Vmsingle) PrometheusAPIV1ImportPrometheus(t *testing.T, records []string, _ QueryOpts) {
t.Helper()
app.cli.Post(t, app.prometheusAPIV1ImportPrometheusURL, "text/plain", strings.Join(records, "\n"), http.StatusNoContent)
}
// PrometheusAPIV1Query is a test helper function that performs PromQL/MetricsQL
// instant query by sending a HTTP POST request to /prometheus/api/v1/query
// vmsingle endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1query
func (app *Vmsingle) PrometheusAPIV1Query(t *testing.T, query, time, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
t.Helper()
values := url.Values{}
values.Add("query", query)
values.Add("time", time)
values.Add("step", step)
values.Add("timeout", opts.Timeout)
res := app.cli.PostForm(t, app.prometheusAPIV1QueryURL, values, http.StatusOK)
return NewPrometheusAPIV1QueryResponse(t, res)
}
// PrometheusAPIV1QueryRange is a test helper function that performs
// PromQL/MetricsQL range query by sending a HTTP POST request to
// /prometheus/api/v1/query_range vmsingle endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1query_range
func (app *Vmsingle) PrometheusAPIV1QueryRange(t *testing.T, query, start, end, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
t.Helper()
values := url.Values{}
values.Add("query", query)
values.Add("start", start)
values.Add("end", end)
values.Add("step", step)
values.Add("timeout", opts.Timeout)
res := app.cli.PostForm(t, app.prometheusAPIV1QueryRangeURL, values, http.StatusOK)
return NewPrometheusAPIV1QueryResponse(t, res)
}
// PrometheusAPIV1Series sends a query to a /prometheus/api/v1/series endpoint
// and returns the list of time series that match the query.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1series
func (app *Vmsingle) PrometheusAPIV1Series(t *testing.T, matchQuery string, _ QueryOpts) *PrometheusAPIV1SeriesResponse {
t.Helper()
values := url.Values{}
values.Add("match[]", matchQuery)
res := app.cli.PostForm(t, app.prometheusAPIV1SeriesURL, values, http.StatusOK)
return NewPrometheusAPIV1SeriesResponse(t, res)
}
// String returns the string representation of the vmsingle app state.
func (app *Vmsingle) String() string {
return fmt.Sprintf("{app: %s storageDataPath: %q httpListenAddr: %q}", []any{
app.app, app.storageDataPath, app.httpListenAddr}...)
}
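Putting the helpers of this new file together, a minimal usage sketch (a hypothetical test fragment; `NewClient` is a placeholder for however the suite constructs its test HTTP client):

```go
// Hypothetical external test fragment exercising the vmsingle helpers.
func TestVmsingleSmoke(t *testing.T) {
	cli := apptest.NewClient() // placeholder: construct the suite's test HTTP client
	app, err := apptest.StartVmsingle("vmsingle", nil, cli)
	if err != nil {
		t.Fatalf("could not start vmsingle: %v", err)
	}
	app.PrometheusAPIV1ImportPrometheus(t, []string{"metric_1 1"}, apptest.QueryOpts{})
	app.ForceFlush(t) // make freshly inserted data searchable immediately
	series := app.PrometheusAPIV1Series(t, `{__name__="metric_1"}`, apptest.QueryOpts{})
	if got, want := series.Status, "success"; got != want {
		t.Fatalf("unexpected /api/v1/series response status: got %s, want %s", got, want)
	}
}
```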

View file

@ -2,6 +2,7 @@ package apptest
import ( import (
"fmt" "fmt"
"net/http"
"os" "os"
"regexp" "regexp"
"testing" "testing"
@ -18,19 +19,8 @@ type Vmstorage struct {
httpListenAddr string httpListenAddr string
vminsertAddr string vminsertAddr string
vmselectAddr string vmselectAddr string
}
// MustStartVmstorage is a test helper function that starts an instance of forceFlushURL string
// vmstorage and fails the test if the app fails to start.
func MustStartVmstorage(t *testing.T, instance string, flags []string, cli *Client) *Vmstorage {
t.Helper()
app, err := StartVmstorage(instance, flags, cli)
if err != nil {
t.Fatalf("Could not start %s: %v", instance, err)
}
return app
} }
// StartVmstorage starts an instance of vmstorage with the given flags. It also // StartVmstorage starts an instance of vmstorage with the given flags. It also
@ -65,6 +55,8 @@ func StartVmstorage(instance string, flags []string, cli *Client) (*Vmstorage, e
httpListenAddr: stderrExtracts[1], httpListenAddr: stderrExtracts[1],
vminsertAddr: stderrExtracts[2], vminsertAddr: stderrExtracts[2],
vmselectAddr: stderrExtracts[3], vmselectAddr: stderrExtracts[3],
forceFlushURL: fmt.Sprintf("http://%s/internal/force_flush", stderrExtracts[1]),
}, nil }, nil
} }
@ -80,6 +72,14 @@ func (app *Vmstorage) VmselectAddr() string {
return app.vmselectAddr return app.vmselectAddr
} }
// ForceFlush is a test helper function that forces the flushing of inserted
// data, so it becomes available for searching immediately.
func (app *Vmstorage) ForceFlush(t *testing.T) {
t.Helper()
app.cli.Get(t, app.forceFlushURL, http.StatusOK)
}
// String returns the string representation of the vmstorage app state. // String returns the string representation of the vmstorage app state.
func (app *Vmstorage) String() string { func (app *Vmstorage) String() string {
return fmt.Sprintf("{app: %s storageDataPath: %q httpListenAddr: %q vminsertAddr: %q vmselectAddr: %q}", []any{ return fmt.Sprintf("{app: %s storageDataPath: %q httpListenAddr: %q vminsertAddr: %q vmselectAddr: %q}", []any{

View file

@ -16,3 +16,6 @@ dashboards-sync:
SRC=vmalert.json D_UID=LzldHAVnz TITLE="VictoriaMetrics - vmalert" $(MAKE) dashboard-copy SRC=vmalert.json D_UID=LzldHAVnz TITLE="VictoriaMetrics - vmalert" $(MAKE) dashboard-copy
SRC=vmauth.json D_UID=nbuo5Mr4k TITLE="VictoriaMetrics - vmauth" $(MAKE) dashboard-copy SRC=vmauth.json D_UID=nbuo5Mr4k TITLE="VictoriaMetrics - vmauth" $(MAKE) dashboard-copy
SRC=operator.json D_UID=1H179hunk TITLE="VictoriaMetrics - operator" $(MAKE) dashboard-copy SRC=operator.json D_UID=1H179hunk TITLE="VictoriaMetrics - operator" $(MAKE) dashboard-copy
SRC=backupmanager.json D_UID=gF-lxRdVz TITLE="VictoriaMetrics - backupmanager" $(MAKE) dashboard-copy
SRC=clusterbytenant.json D_UID=IZFqd3lMz TITLE="VictoriaMetrics Cluster Per Tenant Statistic" $(MAKE) dashboard-copy
SRC=victorialogs.json D_UID=OqPIZTX4z TITLE="VictoriaLogs" $(MAKE) dashboard-copy

View file

@ -2816,8 +2816,8 @@
}, },
"definition": "label_values(vm_app_version{job=~\"$job\"}, instance)", "definition": "label_values(vm_app_version{job=~\"$job\"}, instance)",
"hide": 0, "hide": 0,
"includeAll": false, "includeAll": true,
"multi": false, "multi": true,
"name": "instance", "name": "instance",
"options": [], "options": [],
"query": { "query": {

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -6,7 +6,7 @@ ROOT_IMAGE ?= alpine:3.20.3
ROOT_IMAGE_SCRATCH ?= scratch ROOT_IMAGE_SCRATCH ?= scratch
CERTS_IMAGE := alpine:3.20.3 CERTS_IMAGE := alpine:3.20.3
GO_BUILDER_IMAGE := golang:1.23.1-alpine GO_BUILDER_IMAGE := golang:1.23.3-alpine
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1 BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
BASE_IMAGE := local/base:1.1.4-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __) BASE_IMAGE := local/base:1.1.4-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
DOCKER ?= docker DOCKER ?= docker

View file

@ -1,6 +1,6 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image=non-existing ARG certs_image=non-existing
ARG root_image==non-existing ARG root_image=non-existing
FROM $certs_image AS certs FROM $certs_image AS certs
RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates

View file

@ -4,7 +4,7 @@ services:
# And forward them to --remoteWrite.url # And forward them to --remoteWrite.url
vmagent: vmagent:
container_name: vmagent container_name: vmagent
image: victoriametrics/vmagent:v1.105.0 image: victoriametrics/vmagent:v1.106.0
depends_on: depends_on:
- "vminsert" - "vminsert"
ports: ports:
@ -39,7 +39,7 @@ services:
# where N is number of vmstorages (2 in this case). # where N is number of vmstorages (2 in this case).
vmstorage-1: vmstorage-1:
container_name: vmstorage-1 container_name: vmstorage-1
image: victoriametrics/vmstorage:v1.105.0-cluster image: victoriametrics/vmstorage:v1.106.0-cluster
ports: ports:
- 8482 - 8482
- 8400 - 8400
@ -51,7 +51,7 @@ services:
restart: always restart: always
vmstorage-2: vmstorage-2:
container_name: vmstorage-2 container_name: vmstorage-2
image: victoriametrics/vmstorage:v1.105.0-cluster image: victoriametrics/vmstorage:v1.106.0-cluster
ports: ports:
- 8482 - 8482
- 8400 - 8400
@ -66,7 +66,7 @@ services:
# pre-process them and distributes across configured vmstorage shards. # pre-process them and distributes across configured vmstorage shards.
vminsert: vminsert:
container_name: vminsert container_name: vminsert
image: victoriametrics/vminsert:v1.105.0-cluster image: victoriametrics/vminsert:v1.106.0-cluster
depends_on: depends_on:
- "vmstorage-1" - "vmstorage-1"
- "vmstorage-2" - "vmstorage-2"
@ -81,7 +81,7 @@ services:
# vmselect collects results from configured `--storageNode` shards. # vmselect collects results from configured `--storageNode` shards.
vmselect-1: vmselect-1:
container_name: vmselect-1 container_name: vmselect-1
image: victoriametrics/vmselect:v1.105.0-cluster image: victoriametrics/vmselect:v1.106.0-cluster
depends_on: depends_on:
- "vmstorage-1" - "vmstorage-1"
- "vmstorage-2" - "vmstorage-2"
@ -94,7 +94,7 @@ services:
restart: always restart: always
vmselect-2: vmselect-2:
container_name: vmselect-2 container_name: vmselect-2
image: victoriametrics/vmselect:v1.105.0-cluster image: victoriametrics/vmselect:v1.106.0-cluster
depends_on: depends_on:
- "vmstorage-1" - "vmstorage-1"
- "vmstorage-2" - "vmstorage-2"
@ -112,7 +112,7 @@ services:
# It can be used as an authentication proxy. # It can be used as an authentication proxy.
vmauth: vmauth:
container_name: vmauth container_name: vmauth
image: victoriametrics/vmauth:v1.105.0 image: victoriametrics/vmauth:v1.106.0
depends_on: depends_on:
- "vmselect-1" - "vmselect-1"
- "vmselect-2" - "vmselect-2"
@ -127,7 +127,7 @@ services:
# vmalert executes alerting and recording rules # vmalert executes alerting and recording rules
vmalert: vmalert:
container_name: vmalert container_name: vmalert
image: victoriametrics/vmalert:v1.105.0 image: victoriametrics/vmalert:v1.106.0
depends_on: depends_on:
- "vmauth" - "vmauth"
ports: ports:

View file

@ -16,7 +16,7 @@ services:
- ./../../dashboards/victoriametrics.json:/var/lib/grafana/dashboards/vm.json - ./../../dashboards/victoriametrics.json:/var/lib/grafana/dashboards/vm.json
- ./../../dashboards/victorialogs.json:/var/lib/grafana/dashboards/vl.json - ./../../dashboards/victorialogs.json:/var/lib/grafana/dashboards/vl.json
environment: environment:
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.5.0/victorialogs-datasource-v0.5.0.zip;victorialogs-datasource" - "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.6.2/victorialogs-datasource-v0.6.2.zip;victorialogs-datasource"
- "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victorialogs-datasource" - "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victorialogs-datasource"
networks: networks:
- vm_net - vm_net
@ -40,7 +40,7 @@ services:
# storing logs and serving read queries. # storing logs and serving read queries.
victorialogs: victorialogs:
container_name: victorialogs container_name: victorialogs
image: victoriametrics/victoria-logs:v0.39.0-victorialogs image: victoriametrics/victoria-logs:v0.42.0-victorialogs
command: command:
- "--storageDataPath=/vlogs" - "--storageDataPath=/vlogs"
- "--httpListenAddr=:9428" - "--httpListenAddr=:9428"
@ -55,7 +55,7 @@ services:
# scraping, storing metrics and serve read requests. # scraping, storing metrics and serve read requests.
victoriametrics: victoriametrics:
container_name: victoriametrics container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.105.0 image: victoriametrics/victoria-metrics:v1.106.0
ports: ports:
- 8428:8428 - 8428:8428
volumes: volumes:
@ -74,7 +74,7 @@ services:
# depending on the requested path. # depending on the requested path.
vmauth: vmauth:
container_name: vmauth container_name: vmauth
image: victoriametrics/vmauth:v1.105.0 image: victoriametrics/vmauth:v1.106.0
depends_on: depends_on:
- "victoriametrics" - "victoriametrics"
- "victorialogs" - "victorialogs"
@ -91,7 +91,7 @@ services:
# vmalert executes alerting and recording rules according to given rule type. # vmalert executes alerting and recording rules according to given rule type.
vmalert: vmalert:
container_name: vmalert container_name: vmalert
image: victoriametrics/vmalert:v1.105.0 image: victoriametrics/vmalert:v1.106.0
depends_on: depends_on:
- "vmauth" - "vmauth"
- "alertmanager" - "alertmanager"

View file

@ -4,7 +4,7 @@ services:
# And forward them to --remoteWrite.url # And forward them to --remoteWrite.url
vmagent: vmagent:
container_name: vmagent container_name: vmagent
image: victoriametrics/vmagent:v1.105.0 image: victoriametrics/vmagent:v1.106.0
depends_on: depends_on:
- "victoriametrics" - "victoriametrics"
ports: ports:
@ -22,7 +22,7 @@ services:
# storing metrics and serve read requests. # storing metrics and serve read requests.
victoriametrics: victoriametrics:
container_name: victoriametrics container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.105.0 image: victoriametrics/victoria-metrics:v1.106.0
ports: ports:
- 8428:8428 - 8428:8428
- 8089:8089 - 8089:8089
@ -65,7 +65,7 @@ services:
# vmalert executes alerting and recording rules # vmalert executes alerting and recording rules
vmalert: vmalert:
container_name: vmalert container_name: vmalert
image: victoriametrics/vmalert:v1.105.0 image: victoriametrics/vmalert:v1.106.0
depends_on: depends_on:
- "victoriametrics" - "victoriametrics"
- "alertmanager" - "alertmanager"

View file

@ -74,6 +74,17 @@ groups:
description: "vmalert instance {{ $labels.instance }} is failing to push metrics generated via alerting description: "vmalert instance {{ $labels.instance }} is failing to push metrics generated via alerting
or recording rules to the configured remote write URL. Check vmalert's logs for detailed error message." or recording rules to the configured remote write URL. Check vmalert's logs for detailed error message."
- alert: RemoteWriteDroppingData
expr: increase(vmalert_remotewrite_dropped_rows_total[5m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "vmalert instance {{ $labels.instance }} is dropping data sent to remote write URL"
description: "vmalert instance {{ $labels.instance }} is failing to send results of alerting or recording rules
to the configured remote write URL. This may result into gaps in recording rules or alerts state.
Check vmalert's logs for detailed error message."
- alert: AlertmanagerErrors - alert: AlertmanagerErrors
expr: increase(vmalert_alerts_send_errors_total[5m]) > 0 expr: increase(vmalert_alerts_send_errors_total[5m]) > 0
for: 15m for: 15m

View file

@ -1,7 +1,7 @@
services: services:
# meta service will be ignored by compose # meta service will be ignored by compose
.victorialogs: .victorialogs:
image: docker.io/victoriametrics/victoria-logs:v0.39.0-victorialogs image: docker.io/victoriametrics/victoria-logs:v0.42.0-victorialogs
command: command:
- -storageDataPath=/vlogs - -storageDataPath=/vlogs
- -loggerFormat=json - -loggerFormat=json
@ -17,6 +17,13 @@ services:
timeout: 1s timeout: 1s
retries: 10 retries: 10
dd-logs:
image: docker.io/victoriametrics/vmauth:v1.106.0
restart: on-failure
volumes:
- ./:/etc/vmauth
command: -auth.config=/etc/vmauth/vmauth.yaml
victorialogs: victorialogs:
extends: .victorialogs extends: .victorialogs
ports: ports:

View file

@ -0,0 +1 @@
**/logs

View file

@ -0,0 +1,29 @@
# Docker compose DataDog Agent integration with VictoriaLogs
The folder contains examples of [DataDog agent](https://docs.datadoghq.com/agent) integration with VictoriaLogs using the following protocols:
* [datadog](./datadog)
To spin up the environment, `cd` to any of the directories listed above and run the following command:
```
docker compose up -d
```
To shut down the docker compose environment, run the following commands:
```
docker compose down
docker compose rm -f
```
The docker compose file contains the following components:
* datadog - Datadog logs collection agent, which is configured to collect and write data to `victorialogs`
* victorialogs - VictoriaLogs log database, which accepts the data from `datadog`
* victoriametrics - VictoriaMetrics metrics database, which collects metrics from `victorialogs` and `datadog`
Querying the data
* [vmui](https://docs.victoriametrics.com/victorialogs/querying/#vmui) - a web UI accessible at `http://localhost:9428/select/vmui`
* for querying the data via the command line, please check [these docs](https://docs.victoriametrics.com/victorialogs/querying/#command-line)
Please note that the `_stream_fields` parameter must follow the recommended [best practices](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to achieve better performance.
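To smoke-test the pipeline by hand, one could push a single DataDog-format log record through the `dd-logs` vmauth proxy. This is a hedged sketch: it assumes port 8427 is reachable from where the program runs (e.g. published to the host), and the `dd-protocol` and `VL-Stream-Fields` headers follow the Vector example later in this change set:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// One log record in the DataDog /api/v2/logs JSON format.
	body := []byte(`[{"ddsource":"test","service":"demo","hostname":"localhost","message":"hello from the datadog pipeline"}]`)
	req, err := http.NewRequest("POST", "http://localhost:8427/api/v2/logs", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("DD-API-KEY", "test")                  // value is arbitrary here
	req.Header.Set("dd-protocol", "test")                 // mirrors the Vector example below
	req.Header.Set("VL-Stream-Fields", "service,hostname") // assumed stream-field override
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```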

View file

@ -0,0 +1,26 @@
include:
- ../compose-base.yml
services:
agent:
image: docker.io/datadog/agent:7.57.2
restart: on-failure
volumes:
- /var/lib/docker/containers:/var/lib/docker/containers
- /var/run/docker.sock:/var/run/docker.sock:ro
- /proc/:/host/proc/:ro
- /sys/fs/cgroup/:/host/sys/fs/cgroup:ro
environment:
DD_API_KEY: test
DD_URL: http://victoriametrics:8428/datadog
DD_LOGS_CONFIG_LOGS_DD_URL: http://dd-logs:8427
DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL: true
DD_LOGS_ENABLED: true
DD_LOGS_CONFIG_USE_HTTP: true
DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED: false
DD_PROCESS_CONFIG_CONTAINER_COLLECTION_ENABLED: false
DD_PROCESS_CONFIG_PROCESS_DISCOVERY_ENABLED: false
depends_on:
victorialogs:
condition: service_healthy
victoriametrics:
condition: service_healthy

View file

@ -0,0 +1,3 @@
include:
- ../compose-base.yml
name: agent-datadog

View file

@ -2,6 +2,7 @@
The folder contains examples of [FluentBit](https://docs.fluentbit.io/manual) integration with VictoriaLogs using protocols: The folder contains examples of [FluentBit](https://docs.fluentbit.io/manual) integration with VictoriaLogs using protocols:
* [datadog](./datadog)
* [loki](./loki) * [loki](./loki)
* [jsonline single node](./jsonline) * [jsonline single node](./jsonline)
* [jsonline HA setup](./jsonline-ha) * [jsonline HA setup](./jsonline-ha)
@ -30,6 +31,7 @@ Querying the data
* for querying the data via command-line please check [these docs](https://docs.victoriametrics.com/victorialogs/querying/#command-line) * for querying the data via command-line please check [these docs](https://docs.victoriametrics.com/victorialogs/querying/#command-line)
FluentBit configuration example can be found below: FluentBit configuration example can be found below:
* [datadog](./datadog/fluent-bit.conf)
* [loki](./loki/fluent-bit.conf) * [loki](./loki/fluent-bit.conf)
* [jsonline single node](./jsonline/fluent-bit.conf) * [jsonline single node](./jsonline/fluent-bit.conf)
* [jsonline HA setup](./jsonline-ha/fluent-bit.conf) * [jsonline HA setup](./jsonline-ha/fluent-bit.conf)

View file

@ -0,0 +1,3 @@
include:
- ../compose-base.yml
name: fluentbit-datadog

View file

@ -0,0 +1,31 @@
[INPUT]
name tail
path /var/lib/docker/containers/**/*.log
path_key path
multiline.parser docker, cri
Parser docker
Docker_Mode On
[INPUT]
Name syslog
Listen 0.0.0.0
Port 5140
Parser syslog-rfc3164
Mode tcp
[SERVICE]
Flush 1
Parsers_File parsers.conf
[OUTPUT]
Name datadog
Match *
Host dd-logs
Port 8427
TLS off
compress gzip
apikey test
dd_service test
dd_source data
dd_message_key log
dd_tags env:dev

View file

@ -4,6 +4,7 @@ The folder contains examples of [Fluentd](https://www.fluentd.org/) integration
* [loki](./loki) * [loki](./loki)
* [jsonline](./jsonline) * [jsonline](./jsonline)
* [datadog](./datadog)
* [elasticsearch](./elasticsearch) * [elasticsearch](./elasticsearch)
All required plugins, that should be installed in order to support protocols listed above can be found in a [Dockerfile](./Dockerfile) All required plugins, that should be installed in order to support protocols listed above can be found in a [Dockerfile](./Dockerfile)

View file

@ -0,0 +1,3 @@
include:
- ../compose-base.yml
name: fluentd-datadog

View file

@ -0,0 +1,27 @@
<source>
@type tail
format none
tag docker.testlog
path /var/lib/docker/containers/**/*.log
</source>
<source>
@type forward
port 24224
bind 0.0.0.0
</source>
<match **>
@type datadog
api_key test
# Optional
port 8427
use_ssl false
host dd-logs
include_tag_key true
tag_key 'tag'
# Optional parameters
dd_source 'fluentd'
dd_tags 'key1:value1,key2:value2'
dd_sourcecategory 'test'
</match>

View file

@ -6,6 +6,7 @@ The folder contains examples of [Vector](https://vector.dev/docs/) integration w
* [loki](./loki) * [loki](./loki)
* [jsonline single node](./jsonline) * [jsonline single node](./jsonline)
* [jsonline HA setup](./jsonline-ha) * [jsonline HA setup](./jsonline-ha)
* [datadog](./datadog)
To spin-up environment `cd` to any of listed above directories run the following command: To spin-up environment `cd` to any of listed above directories run the following command:
``` ```
@ -34,5 +35,6 @@ Vector configuration example can be found below:
* [loki](./loki/vector.yaml) * [loki](./loki/vector.yaml)
* [jsonline single node](./jsonline/vector.yaml) * [jsonline single node](./jsonline/vector.yaml)
* [jsonline HA setup](./jsonline-ha/vector.yaml) * [jsonline HA setup](./jsonline-ha/vector.yaml)
* [datadog](./datadog/vector.yaml)
Please, note that `_stream_fields` parameter must follow recommended [best practices](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to achieve better performance. Please, note that `_stream_fields` parameter must follow recommended [best practices](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to achieve better performance.

View file

@ -0,0 +1,3 @@
include:
- ../compose-base.yml
name: vector-datadog

View file

@ -0,0 +1,28 @@
api:
enabled: true
address: 0.0.0.0:8686
sources:
vector_metrics:
type: internal_metrics
demo:
type: demo_logs
format: json
sinks:
datadog:
type: datadog_logs
inputs: [demo]
default_api_key: test
endpoint: http://dd-logs:8427
compression: gzip
request:
headers:
dd-protocol: test # required by VictoriaLogs
AccountID: "0"
ProjectID: "0"
VL-Stream-Fields: "service,host"
victoriametrics:
type: prometheus_remote_write
endpoint: http://victoriametrics:8428/api/v1/write
inputs: [vector_metrics]
healthcheck:
enabled: false

View file

@ -0,0 +1,6 @@
unauthorized_user:
url_map:
- src_paths:
- "/api/v2/logs"
- "/api/v1/validate"
url_prefix: "http://victorialogs:9428/insert/datadog/"

View file

@ -16,6 +16,6 @@ services:
- ./../../dashboards/vm/vmalert.json:/var/lib/grafana/dashboards/vmalert.json - ./../../dashboards/vm/vmalert.json:/var/lib/grafana/dashboards/vmalert.json
- ./../../dashboards/vm/vmauth.json:/var/lib/grafana/dashboards/vmauth.json - ./../../dashboards/vm/vmauth.json:/var/lib/grafana/dashboards/vmauth.json
environment: environment:
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.9.1/victoriametrics-datasource-v0.9.1.zip;victoriametrics-datasource" - "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.10.1/victoriametrics-datasource-v0.10.1.zip;victoriametrics-datasource"
- "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-datasource" - "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-datasource"
restart: always restart: always

View file

@ -15,7 +15,7 @@ services:
- ./../../dashboards/vm/vmagent.json:/var/lib/grafana/dashboards/vmagent.json - ./../../dashboards/vm/vmagent.json:/var/lib/grafana/dashboards/vmagent.json
- ./../../dashboards/vm/vmalert.json:/var/lib/grafana/dashboards/vmalert.json - ./../../dashboards/vm/vmalert.json:/var/lib/grafana/dashboards/vmalert.json
environment: environment:
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.9.1/victoriametrics-datasource-v0.9.1.zip;victoriametrics-datasource" - "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.10.1/victoriametrics-datasource-v0.10.1.zip;victoriametrics-datasource"
- "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-datasource" - "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-datasource"
networks: networks:
- vm_net - vm_net

View file

@ -1,7 +1,7 @@
services: services:
vmagent: vmagent:
container_name: vmagent container_name: vmagent
image: victoriametrics/vmagent:v1.105.0 image: victoriametrics/vmagent:v1.106.0
depends_on: depends_on:
- "victoriametrics" - "victoriametrics"
ports: ports:
@ -18,7 +18,7 @@ services:
victoriametrics: victoriametrics:
container_name: victoriametrics container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.105.0 image: victoriametrics/victoria-metrics:v1.106.0
ports: ports:
- 8428:8428 - 8428:8428
volumes: volumes:
@ -50,7 +50,7 @@ services:
vmalert: vmalert:
container_name: vmalert container_name: vmalert
image: victoriametrics/vmalert:v1.105.0 image: victoriametrics/vmalert:v1.106.0
depends_on: depends_on:
- "victoriametrics" - "victoriametrics"
ports: ports:

View file

@ -18,7 +18,7 @@ services:
- vlogs - vlogs
generator: generator:
image: golang:1.23.1-alpine image: golang:1.23.3-alpine
restart: always restart: always
working_dir: /go/src/app working_dir: /go/src/app
volumes: volumes:

View file

@ -2,7 +2,7 @@ version: '3'
services: services:
generator: generator:
image: golang:1.23.1-alpine image: golang:1.23.3-alpine
restart: always restart: always
working_dir: /go/src/app working_dir: /go/src/app
volumes: volumes:

View file

@ -3,7 +3,7 @@ version: "3"
services: services:
# Run `make package-victoria-logs` to build victoria-logs image # Run `make package-victoria-logs` to build victoria-logs image
vlogs: vlogs:
image: docker.io/victoriametrics/victoria-logs:v0.39.0-victorialogs image: docker.io/victoriametrics/victoria-logs:v0.42.0-victorialogs
volumes: volumes:
- vlogs:/vlogs - vlogs:/vlogs
ports: ports:
@ -46,7 +46,7 @@ services:
- "--config=/config.yml" - "--config=/config.yml"
vmsingle: vmsingle:
image: victoriametrics/victoria-metrics:v1.105.0 image: victoriametrics/victoria-metrics:v1.106.0
ports: ports:
- "8428:8428" - "8428:8428"
command: command:

View file

@ -4,7 +4,7 @@
1. To build the snapshot in DigitalOcean account you will need API Token and [packer](https://learn.hashicorp.com/tutorials/packer/get-started-install-cli). 1. To build the snapshot in DigitalOcean account you will need API Token and [packer](https://learn.hashicorp.com/tutorials/packer/get-started-install-cli).
2. API Token can be generated on [https://cloud.digitalocean.com/account/api/tokens](https://cloud.digitalocean.com/account/api/tokens) or use already generated from OnePassword. 2. API Token can be generated on [https://cloud.digitalocean.com/account/api/tokens](https://cloud.digitalocean.com/account/api/tokens) or use already generated from OnePassword.
3. Choose prefered version of VictoriaMetrics on [Github releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) page. 3. Choose preferred version of VictoriaMetrics on [Github releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) page.
4. Set variables `DIGITALOCEAN_API_TOKEN` with `VM_VERSION` for `packer` environment and run make from example below: 4. Set variables `DIGITALOCEAN_API_TOKEN` with `VM_VERSION` for `packer` environment and run make from example below:
```console ```console
@ -21,7 +21,7 @@ make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOK
3. Send a PR to https://github.com/digitalocean/marketplace-kubernetes. 3. Send a PR to https://github.com/digitalocean/marketplace-kubernetes.
4. Add changes to product page at [https://cloud.digitalocean.com/vendorportal/61de9e7fbbd94c7e4b9b80be/15/edit](https://cloud.digitalocean.com/vendorportal/61de9e7fbbd94c7e4b9b80be/15/edit): 4. Add changes to product page at [https://cloud.digitalocean.com/vendorportal/61de9e7fbbd94c7e4b9b80be/15/edit](https://cloud.digitalocean.com/vendorportal/61de9e7fbbd94c7e4b9b80be/15/edit):
* update App Version; * update App Version;
* (onfly if PR was submittedm apprived and merged) add select a checkbox "I made a change, submitted a pull request, and the pull request was approved and merged." * (only if PR was submitted, approved and merged) select the checkbox "I made a change, submitted a pull request, and the pull request was approved and merged."
* update version of packages and links to changelogs in `Software Included` section; * update version of packages and links to changelogs in `Software Included` section;
* describe your updates in `Reason for update` section. * describe your updates in `Reason for update` section.
* submit your changes. * submit your changes.

View file

@ -85,6 +85,7 @@ See also [case studies](https://docs.victoriametrics.com/casestudies/).
* [Persistent Data Structures in VictoriaMetrics (Part 1): vmagent](https://medium.com/devops-dev/persistent-data-structures-in-victoriametrics-part-1-vmagent-2e9c7681a6f0) * [Persistent Data Structures in VictoriaMetrics (Part 1): vmagent](https://medium.com/devops-dev/persistent-data-structures-in-victoriametrics-part-1-vmagent-2e9c7681a6f0)
* [Persistent Data Structures in VictoriaMetrics (Part 2): vmselect](https://medium.com/@jiekun/persistent-data-structures-in-victoriametrics-part-2-vmselect-9e3de39a4d20) * [Persistent Data Structures in VictoriaMetrics (Part 2): vmselect](https://medium.com/@jiekun/persistent-data-structures-in-victoriametrics-part-2-vmselect-9e3de39a4d20)
* [Migrating to VictoriaMetrics (by Zomato): A Complete Overhaul for Enhanced Observability](https://blog.zomato.com/migrating-to-victoriametrics-a-complete-overhaul-for-enhanced-observability) * [Migrating to VictoriaMetrics (by Zomato): A Complete Overhaul for Enhanced Observability](https://blog.zomato.com/migrating-to-victoriametrics-a-complete-overhaul-for-enhanced-observability)
* [Harness the Power of VictoriaMetrics and Grafana Operators for Metrics Management](https://blog.ogenki.io/post/series/observability/metrics/)
## Our articles ## Our articles

View file

@ -63,7 +63,8 @@ A pull request should contain the following attributes:
To run tests and code checks locally execute commands `make tests-full` and `make check-all`. To run tests and code checks locally execute commands `make tests-full` and `make check-all`.
1. Try to not extend the scope of the pull requests outside the issue, do not make unrelated changes. 1. Try to not extend the scope of the pull requests outside the issue, do not make unrelated changes.
1. Documentation update, if needed. For example, adding a new flag or changing behavior of existing flags or features 1. Documentation update, if needed. For example, adding a new flag or changing behavior of existing flags or features
requires reflecting these changes in the documentation. requires reflecting these changes in the documentation. For new features add `{{%/* available_from "#" */%}}` shortcode
to the documentation. It will later be automatically replaced with the actual release version.
1. A line in the [changelog](https://docs.victoriametrics.com/changelog/#tip) mentioning the change and related issue in a way 1. A line in the [changelog](https://docs.victoriametrics.com/changelog/#tip) mentioning the change and related issue in a way
that would be clear to other readers even if they don't have the full context. that would be clear to other readers even if they don't have the full context.
1. Reviewers who you think have the best expertise on the matter. 1. Reviewers who you think have the best expertise on the matter.

View file

@ -461,20 +461,20 @@ VictoriaMetrics has allowed us to extend data retention for our metrics effortle
Across our production VictoriaMetrics clusters, in a 12 months period we go beyond the following figures. Across our production VictoriaMetrics clusters, in a 12 months period we go beyond the following figures.
- Active time series: 10M - Active time series: 20M
- Ingestion rate: 300k samples per second - Ingestion rate: 1M samples per second (2M before stream aggregation)
- Total number of datapoints: 4400G - Total number of datapoints: 15T+
- Data size on disk: 3600 GiB - Data size on disk: 12+ TiB
- Available memory: 320 GiB as seen by kubernetes (160 GiB physical memory for the hosts) - Available memory: 1.2 TiB as seen by kubernetes (640 GiB physical memory for the hosts)
- CPU: 20 cores (AMD EPYC 7763), about 70 % idle - CPU: 80 cores (AMD EPYC 7763), about 50 % idle
- Retention period: ~1 year - Retention period: ~1 year
- Churn rate: 6M new time series per day (monthly average) - Churn rate: 16M new time series per day (monthly average)
- Query rate: - Query rate:
- `/api/v1/query_range`: 10 queries per second - `/api/v1/query_range`: 15 queries per second
- `/api/v1/query`: 10 queries per second - `/api/v1/query`: 10 queries per second
- Query duration for `/api/v1/query_range` (weekly mean): - Query duration for `/api/v1/query_range` (weekly mean):
- 99th percentile: 700 ms - 99th percentile: 1.5 s
- median: 10 ms - median: 15 ms
## Roblox ## Roblox

View file

@ -114,10 +114,9 @@ such as [Graphite](https://docs.victoriametrics.com/#how-to-send-data-from-graph
**Reads** **Reads**
_Available from [v1.104.0](https://docs.victoriametrics.com/changelog/#v11040)._
_For better performance prefer specifying [tenants in read URL](https://docs.victoriametrics.com/cluster-victoriametrics/#url-format)._ _For better performance prefer specifying [tenants in read URL](https://docs.victoriametrics.com/cluster-victoriametrics/#url-format)._
`vmselect` can execute queries over multiple [tenants](#multitenancy) via special `multitenant` endpoints `http://vmselect:8481/select/multitenant/<suffix>`. `vmselect` can execute {{% available_from "v1.104.0" %}} queries over multiple [tenants](#multitenancy) via special `multitenant` endpoints `http://vmselect:8481/select/multitenant/<suffix>`.
Currently supported endpoints for `<suffix>` are: Currently supported endpoints for `<suffix>` are:
- `/prometheus/api/v1/query` - `/prometheus/api/v1/query`
- `/prometheus/api/v1/query_range` - `/prometheus/api/v1/query_range`
@ -1910,9 +1909,6 @@ Below is the output for `/path/to/vmstorage -help`:
-storage.cacheSizeIndexDBDataBlocks size -storage.cacheSizeIndexDBDataBlocks size
Overrides max size for indexdb/dataBlocks cache. See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning Overrides max size for indexdb/dataBlocks cache. See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0) Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)
-storage.cacheSizeIndexDBDataBlocksSparse size
Overrides max size for indexdb/dataBlocksSparse cache. See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)
-storage.cacheSizeIndexDBIndexBlocks size -storage.cacheSizeIndexDBIndexBlocks size
Overrides max size for indexdb/indexBlocks cache. See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning Overrides max size for indexdb/indexBlocks cache. See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0) Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)

View file

@ -21,6 +21,18 @@ docs-debug: docs docs-image
-p 1313:1313 \ -p 1313:1313 \
-v ./docs:/opt/docs/content vmdocs-docker-package -v ./docs:/opt/docs/content vmdocs-docker-package
docs-update-version: docs-image
$(if $(filter v%,$(PKG_TAG)), \
docker run \
--rm \
--entrypoint /usr/bin/find \
--name vmdocs-docker-container \
-v ./docs:/opt/docs/content vmdocs-docker-package \
content \
-regex ".*\.md" \
-exec sed -i 's/{{% available_from "#" %}}/{{% available_from "$(PKG_TAG)" %}}/g' {} \;, \
$(info "Skipping docs version update, invalid $$PKG_TAG: $(PKG_TAG)"))
docs-images-to-webp: docs-image docs-images-to-webp: docs-image
docker run \ docker run \
--rm \ --rm \

View file

@ -960,6 +960,13 @@ VictoriaMetrics accepts `limit` query arg at [/api/v1/series](https://docs.victo
for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest of series. for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest of series.
If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used.
VictoriaMetrics returns an extra `stats` object in the JSON response for [`/api/v1/query`](https://docs.victoriametrics.com/keyconcepts/#instant-query)
and [`/api/v1/query_range`](https://docs.victoriametrics.com/keyconcepts/#range-query) APIs. This object contains two
fields: `executionTimeMsec` with the number of milliseconds the request took, and `seriesFetched` with the number of series that
were fetched from the database before filtering. The `seriesFetched` field is effectively used by vmalert for detecting
[misconfigured rule expressions](https://docs.victoriametrics.com/vmalert/#never-firing-alerts). Please note that `seriesFetched`
provides an approximate number of series; it is not recommended to rely on it in tests.
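For illustration, a minimal sketch of decoding these fields from a response (a hedged example; the exact JSON types, such as `seriesFetched` being encoded as a string, are assumptions):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// queryStats mirrors the documented "stats" object; field types are assumptions.
type queryStats struct {
	SeriesFetched     string `json:"seriesFetched"`
	ExecutionTimeMsec int    `json:"executionTimeMsec"`
}

func main() {
	raw := []byte(`{"status":"success","stats":{"seriesFetched":"42","executionTimeMsec":3}}`)
	var resp struct {
		Status string     `json:"status"`
		Stats  queryStats `json:"stats"`
	}
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Stats.SeriesFetched, resp.Stats.ExecutionTimeMsec)
}
```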
Additionally, VictoriaMetrics provides the following handlers: Additionally, VictoriaMetrics provides the following handlers:
* `/vmui` - Basic Web UI. See [these docs](#vmui). * `/vmui` - Basic Web UI. See [these docs](#vmui).
@ -1590,7 +1597,7 @@ The format follows [JSON streaming concept](https://jsonlines.org/), e.g. each l
```json ```json
{ {
// metric contans metric name plus labels for a particular time series // metric contains metric name plus labels for a particular time series
"metric":{ "metric":{
"__name__": "metric_name", // <- this is metric name "__name__": "metric_name", // <- this is metric name
@ -2042,7 +2049,7 @@ Important notes:
So the IndexDB size can grow big under [high churn rate](https://docs.victoriametrics.com/faq/#what-is-high-churn-rate) So the IndexDB size can grow big under [high churn rate](https://docs.victoriametrics.com/faq/#what-is-high-churn-rate)
even for small retentions configured via `-retentionFilter`. even for small retentions configured via `-retentionFilter`.
Retention filters configuration can be tested in enterprise version of vmui on the page `Tools.Retnetion filters debug`. Retention filters configuration can be tested in enterprise version of vmui on the page `Tools.Retention filters debug`.
It is safe updating `-retentionFilter` during VictoriaMetrics restarts - the updated retention filters are applied eventually It is safe updating `-retentionFilter` during VictoriaMetrics restarts - the updated retention filters are applied eventually
to historical data. to historical data.
@ -2064,7 +2071,7 @@ The `-downsampling.period` command-line flag can be specified multiple times in
For example, `-downsampling.period=30d:5m,180d:1h` instructs leaving the last sample per each 5-minute interval for samples older than 30 days, For example, `-downsampling.period=30d:5m,180d:1h` instructs leaving the last sample per each 5-minute interval for samples older than 30 days,
while leaving the last sample per each 1-hour interval for samples older than 180 days. while leaving the last sample per each 1-hour interval for samples older than 180 days.
VictoriaMetrics supports configuring independent downsampling per different sets of [time series](https://docs.victoriametrics.com/keyconcepts/#time-series) VictoriaMetrics supports{{% available_from "v1.100.0" %}} configuring independent downsampling per different sets of [time series](https://docs.victoriametrics.com/keyconcepts/#time-series)
via `-downsampling.period=filter:offset:interval` syntax. In this case the given `offset:interval` downsampling is applied only to time series matching the given `filter`. via `-downsampling.period=filter:offset:interval` syntax. In this case the given `offset:interval` downsampling is applied only to time series matching the given `filter`.
The `filter` can contain arbitrary [series filter](https://docs.victoriametrics.com/keyconcepts/#filtering). The `filter` can contain arbitrary [series filter](https://docs.victoriametrics.com/keyconcepts/#filtering).
For example, `-downsampling.period='{__name__=~"(node|process)_.*"}:1d:1m` instructs VictoriaMetrics to deduplicate samples older than one day with one minute interval For example, `-downsampling.period='{__name__=~"(node|process)_.*"}:1d:1m` instructs VictoriaMetrics to deduplicate samples older than one day with one minute interval
@ -3260,9 +3267,6 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-storage.cacheSizeIndexDBDataBlocks size -storage.cacheSizeIndexDBDataBlocks size
Overrides max size for indexdb/dataBlocks cache. See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning Overrides max size for indexdb/dataBlocks cache. See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0) Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)
-storage.cacheSizeIndexDBDataBlocksSparse size
Overrides max size for indexdb/dataBlocksSparse cache. See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)
-storage.cacheSizeIndexDBIndexBlocks size -storage.cacheSizeIndexDBIndexBlocks size
Overrides max size for indexdb/indexBlocks cache. See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning Overrides max size for indexdb/indexBlocks cache. See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0) Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)

View file

@ -47,7 +47,7 @@ Bumping the limits may significantly improve build speed.
Changes in these branches must be synced immediately after they are committed in at least a single branch. Changes in these branches must be synced immediately after they are committed in at least a single branch.
1. Make sure that the release branches have no security issues. 1. Make sure that the release branches have no security issues.
1. Update release versions if needed in [SECURITY.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/SECURITY.md). 1. Update release versions if needed in [SECURITY.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/SECURITY.md).
1. Add `(available starting from v1.xx.y)` line to feature docs introduced in the upcoming release. 1. Run `PKG_TAG=v1.xx.y make docs-update-version` command to update version help tooltips.
1. Cut new version in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md) 1. Cut new version in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md)
and make it merged. See example in this [commit](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/b771152039d23b5ccd637a23ea748bc44a9511a7). and make it merged. See example in this [commit](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/b771152039d23b5ccd637a23ea748bc44a9511a7).
1. Cherry-pick bug fixes relevant for [LTS releases](https://docs.victoriametrics.com/lts-releases/). 1. Cherry-pick bug fixes relevant for [LTS releases](https://docs.victoriametrics.com/lts-releases/).

View file

@ -15,8 +15,47 @@ according to [these docs](https://docs.victoriametrics.com/victorialogs/quicksta
## tip ## tip
## [v0.42.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.42.0-victorialogs)
Released at 2024-11-08
* FEATURE: [`join` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#join-pipe): add an ability to add prefix to all the log field names from the joined query, by using `| join by (<by_fields>) (<query>) prefix "some_prefix"` syntax.
* FEATURE: [`_time` filter](https://docs.victoriametrics.com/victorialogs/logsql/#time-filter): allow specifying offset without time range. For example, `_time:offset 1d` matches all the logs until `now-1d` in the [`_time` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field). This is useful when building graphs for time ranges with some offset in the past.
* FEATURE: [`/select/logsql/tail` HTTP endpoint](): support for `offset` query arg, which can be used for delayed emission of matching logs during live tailing. Thanks to @Fusl for the initial idea and implementation in [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7428).
* FEATURE: [vlogscli](https://docs.victoriametrics.com/victorialogs/querying/vlogscli/): allow enabling and disabling wrapping of long lines, which do not fit screen width, with `\wrap_long_lines` command.
* FEATURE: [syslog data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/): allow overriding default [stream fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) with the given [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) during data ingestion. See [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#stream-fields). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7480).
* FEATURE: [syslog data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/): allow adding arbitrary [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) via `[label1=value1 ... labelN=valueN]` syntax inside Syslog messages. For example, `<165>1 2024-06-03T17:42:00.000Z example.com appname 12345 ID47 [field1=value1 field2=value2] some message`.
* FEATURE: [syslog data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/): allow dropping the specified [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) during data ingestion. See [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields).
* FEATURE: [syslog data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/): allow adding the specified [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) during data ingestion. See [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7354).
* FEATURE: [Loki data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/#loki-json-api): show the original request body on parse errors. This should simplify debugging. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7490).
* BUGFIX: [`values` stats function](https://docs.victoriametrics.com/victorialogs/logsql/#values-stats): fix a bug, which could lead to corrupted results. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7458).
* BUGFIX: [`uniq_values`](https://docs.victoriametrics.com/victorialogs/logsql/#uniq_values-stats), [`min`](https://docs.victoriametrics.com/victorialogs/logsql/#min-stats), [`max`](https://docs.victoriametrics.com/victorialogs/logsql/#max-stats), [`row_min`](https://docs.victoriametrics.com/victorialogs/logsql/#row_min-stats) and [`row_max`](https://docs.victoriametrics.com/victorialogs/logsql/#row_max-stats) stats functions: fix a bug, which could return non-matching field values for these functions. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7458).
* BUGFIX: [HTTP querying APIs](https://docs.victoriametrics.com/victorialogs/querying/#http-api): properly take into account the `end` query arg when calculating time range for [`_time:duration` filter](https://docs.victoriametrics.com/victorialogs/logsql/#time-filter). Previously the `_time:duration` filter was treated as `_time:[now-duration, now)`, while it should be treated as `_time:[end-duration, end)`.
## [v0.41.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.41.0-victorialogs)
Released at 2024-11-06
* FEATURE: support [structured metadata](https://grafana.com/docs/loki/latest/get-started/labels/structured-metadata/) when ingesting logs with [Grafana Loki ingestion protocol](https://docs.victoriametrics.com/victorialogs/data-ingestion/promtail/). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7431).
* FEATURE: add [`join` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#join-pipe), which can be used for performing SQL-like joins.
* FEATURE: support returning historical logs from [live tailing API](https://docs.victoriametrics.com/victorialogs/querying/#live-tailing) via `start_offset` query arg. For example, a request to `/select/logsql/tail?query=*&start_offset=5m` returns logs for the last 5 minutes before it starts returning live tailing logs for the given `query`.
* FEATURE: add an ability to specify extra fields for logs ingested via [HTTP-based data ingestion protocols](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-apis). See `extra_fields` query arg and `VL-Extra-Fields` HTTP header in [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters).
* FEATURE: add [`block_stats` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#block_stats-pipe) for returning various per-block stats. This pipe is useful for debugging.
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add sorting of logs by groups and within each group by time in descending order. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7184) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7045).
* FEATURE: add support for receiving DataDog logs over the network. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6632).
* BUGFIX: properly sort fields with floating-point numbers by [`sort` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#sort-pipe). Previously floating-point numbers could be improperly sorted because they were treated as strings, and [natural sorting](https://en.wikipedia.org/wiki/Natural_sort_order) was incorrectly applied to them. For example, `0.123` was treated as bigger than `0.9`.
## [v0.40.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.40.0-victorialogs)
Released at 2024-10-31
* FEATURE: add support for extra filters across all the [HTTP querying APIs](https://docs.victoriametrics.com/victorialogs/querying/#http-api). See [these docs](https://docs.victoriametrics.com/victorialogs/querying/#extra-filters) for details. This is needed for implementing quick filtering on field values requested in [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7365).
* BUGFIX: properly apply [`replace`](https://docs.victoriametrics.com/victorialogs/logsql/#replace-pipe) and [`replace_regexp`](https://docs.victoriametrics.com/victorialogs/logsql/#replace_regexp-pipe) pipes to identical values in adjacent log entries. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7162).
* BUGFIX: properly apply [`extract`](https://docs.victoriametrics.com/victorialogs/logsql/#extract-pipe) and [`extract_regexp`](https://docs.victoriametrics.com/victorialogs/logsql/#extract_regexp-pipe) pipes with additional `if (...)` filters (aka [conditional extract](https://docs.victoriametrics.com/victorialogs/logsql/#conditional-extract) and [conditional extract_regexp](https://docs.victoriametrics.com/victorialogs/logsql/#conditional-extract_regexp)).
## [v0.39.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.39.0-victorialogs)
Released at 2024-10-30
@@ -180,7 +219,7 @@ Released at 2024-07-05
Released at 2024-07-02
* FEATURE: add `-syslog.useLocalTimestamp.tcp` and `-syslog.useLocalTimestamp.udp` command-line flags, which allow using the local timestamp as the [`_time` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field) for the logs ingested via the corresponding `-syslog.listenAddr.tcp` / `-syslog.listenAddr.udp`. By default the timestamp from the syslog message is used as the [`_time` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field). See [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/).
* BUGFIX: make slowly ingested logs visible for search as soon as they are ingested into VictoriaLogs. Previously slowly ingested logs could remain invisible for search for a long time.
@@ -312,7 +351,7 @@ Released at 2024-05-30
* FEATURE: add [`row_any`](https://docs.victoriametrics.com/victorialogs/logsql/#row_any-stats) function for [`stats` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe). This function returns a sample log entry per every calculated [group of results](https://docs.victoriametrics.com/victorialogs/logsql/#stats-by-fields).
* FEATURE: add `default` operator to [`math` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#math-pipe). It allows overriding `NaN` results with the given default value.
* FEATURE: add `exp()` and `ln()` functions to [`math` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#math-pipe).
* FEATURE: allow omitting result name in [`math` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#math-pipe) expressions. In this case the result name is automatically set to the string representation of the corresponding math expression. For example, `_time:5m | math duration / 1000` is equivalent to `_time:5m | math (duration / 1000) as "duration / 1000"`.
* FEATURE: allow omitting result name in [`stats` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe). In this case the result name is automatically set to the string representation of the corresponding [stats function expression](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe-functions). For example, `_time:5m | count(*)` is a valid [LogsQL query](https://docs.victoriametrics.com/victorialogs/logsql/) now. It is equivalent to `_time:5m | stats count(*) as "count(*)"`.
* BUGFIX: properly calculate the number of matching rows in `* | field_values x | stats count() rows` and in `* | unroll (x) | stats count() rows` queries.
@@ -466,7 +505,7 @@ Released at 2024-05-12
* BUGFIX: prevent from possible corruption of short [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) during data ingestion.
* BUGFIX: prevent from additional CPU usage for up to a few seconds after canceling the query.
* BUGFIX: prevent from returning log entries with empty `_stream` field in the form `"_stream":""` in [search query results](https://docs.victoriametrics.com/victorialogs/querying/). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6042).
## [v0.5.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.5.2-victorialogs)
View file
@@ -131,3 +131,42 @@ Just send the query with the needed [filters](https://docs.victoriametrics.com/v
to [`/select/logsql/query`](https://docs.victoriametrics.com/victorialogs/querying/#querying-logs) - VictoriaLogs will return
the requested logs as a [stream of JSON lines](https://jsonlines.org/). It is recommended specifying [time filter](https://docs.victoriametrics.com/victorialogs/logsql/#time-filter)
for limiting the amount of exported logs.
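For example, a minimal export could look like the following sketch (the `localhost:9428` address is an assumption; substitute it with the real VictoriaLogs address):
```sh
# Export error logs for the last day as a stream of JSON lines.
curl http://localhost:9428/select/logsql/query -d 'query=_time:1d error'
```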
## I want to ingest logs without a message field, is that possible?
Starting from version `v0.30.0`, VictoriaLogs blocks the ingestion of logs **without a message field**, since the field is required by the [VictoriaLogs data model](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field).
However, some logs do not have a message field and only contain other fields, such as the logs in [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7056#issuecomment-2434189718) and [this Slack thread](https://victoriametrics.slack.com/archives/C05UNTPAEDN/p1730982146818249). Therefore, starting from version `v0.39.0`, logs without a message field are **allowed to be ingested**,
and their message field will be recorded as:
```json
{"_msg": "missing _msg field; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field"}
```
The default message field value can be changed using the `-defaultMsgValue` flag, for example, `-defaultMsgValue=foo`.
Please note that the message field is **crucial** for VictoriaLogs, so it is important to fill it with meaningful content.
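This behavior can be checked with the following sketch against the JSON lines ingestion endpoint (the `localhost:9428` address is an assumption):
```sh
# Ingest a log entry without a message field (localhost:9428 is an assumed address).
curl -X POST http://localhost:9428/insert/jsonline -d '{"level":"info","app":"demo"}'
# The stored entry receives the default _msg value described above,
# unless the -defaultMsgValue command-line flag overrides it.
```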
## What if my logs have multiple message field candidates?
When ingesting logs into VictoriaLogs, the message field is specified via the `_msg_field` param, which can accept **multiple fields**; the **first non-empty field** from the list is used as the message field.
Here is an example URL when pushing logs to VictoriaLogs with Promtail:
```yaml
clients:
- url: http://localhost:9428/insert/loki/api/v1/push?_stream_fields=instance,job,host,app&_msg_field=message,body
```
For the following log, its `_msg` will be `foo bar in message`:
```json
{
"message": "foo bar in message",
"body": "foo bar in body"
}
```
And for the following log, its `_msg` will be `foo bar in body`:
```json
{
"message": "",
"body": "foo bar in body"
}
```
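The same behavior can be reproduced without Promtail via the `_msg_field` query arg on the JSON lines endpoint; the following is a sketch with an assumed `localhost:9428` address:
```sh
# "message" is empty, so "body" becomes the _msg field of the stored entry.
curl -X POST 'http://localhost:9428/insert/jsonline?_msg_field=message,body' \
  -d '{"message":"","body":"foo bar in body"}'
```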
View file
@@ -181,7 +181,7 @@ _time:5m log.level:error {app!~"buggy_app|foobar"}
```
This query skips scanning for [log messages](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field) from `buggy_app` and `foobar` apps.
It inspects only `log.level` and [`_stream`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) labels.
This significantly reduces disk read IO and CPU time needed for performing the query.
LogsQL also provides [functions for statistics calculation](#stats-pipe) over the selected logs. For example, the following query returns the number of logs
@@ -316,6 +316,7 @@ For example, `_time:2023-10-20` matches all the logs for `2023-10-20` day accord
It is possible to specify generic offset for the selected time range by appending `offset` after the `_time` filter. Examples:
- `_time:offset 1h` matches logs until `now-1h`.
- `_time:5m offset 1h` matches logs on the time range `(now-1h5m, now-1h]`.
- `_time:2023-07Z offset 5h30m` matches logs on July, 2023 by UTC with offset 5h30m.
- `_time:[2023-02-01Z, 2023-03-01Z) offset 1w` matches logs the week before the time range `[2023-02-01Z, 2023-03-01Z)` by UTC.
@@ -381,7 +382,7 @@ See also:
- `Mon` or `Monday`
- `Tue` or `Tuesday`
- `Wed` or `Wednesday`
- `Thu` or `Thursday`
- `Fri` or `Friday`
- `Sat` or `Saturday`
@@ -1293,6 +1294,7 @@ _time:5m | stats by (_stream) count() per_stream_logs | sort by (per_stream_logs
LogsQL supports the following pipes:
- [`block_stats`](#block_stats-pipe) returns various stats for the selected blocks with logs.
- [`blocks_count`](#blocks_count-pipe) counts the number of blocks with logs processed by the query.
- [`copy`](#copy-pipe) copies [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`delete`](#delete-pipe) deletes [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
@@ -1304,6 +1306,7 @@ LogsQL supports the following pipes:
- [`fields`](#fields-pipe) selects the given set of [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`filter`](#filter-pipe) applies additional [filters](#filters) to results.
- [`format`](#format-pipe) formats output field from input [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`join`](#join-pipe) joins query results by the given [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`len`](#len-pipe) calculates byte length of the given [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) value.
- [`limit`](#limit-pipe) limits the number of selected logs.
- [`math`](#math-pipe) performs mathematical calculations over [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
@@ -1324,10 +1327,34 @@ LogsQL supports the following pipes:
- [`unpack_syslog`](#unpack_syslog-pipe) unpacks [syslog](https://en.wikipedia.org/wiki/Syslog) messages from [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`unroll`](#unroll-pipe) unrolls JSON arrays from [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
### block_stats pipe
`<q> | block_stats` [pipe](#pipes) returns the following stats per each block processed by `<q>`. This pipe is needed mostly for debugging.
The returned per-block stats:
- `field` - [field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) name
- `rows` - the number of rows at the given `field`
- `type` - internal storage type for the given `field`
- `values_bytes` - on-disk size of the data for the given `field`
- `bloom_bytes` - on-disk size of bloom filter data for the given `field`
- `dict_bytes` - on-disk size of the dictionary data for the given `field`
- `dict_items` - the number of unique values in the dictionary for the given `field`
See also:
- [`blocks_count` pipe](#blocks_count-pipe)
- [`len` pipe](#len-pipe)
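For example, per-block stats for the logs over the last 5 minutes can be inspected with the following sketch (the `localhost:9428` address is an assumption):
```sh
# Return storage stats for every block scanned by the query.
curl http://localhost:9428/select/logsql/query -d 'query=_time:5m | block_stats'
```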
### blocks_count pipe
`<q> | blocks_count` [pipe](#pipes) counts the number of blocks with logs processed by `<q>`. This pipe is needed mostly for debugging.
See also:
- [`block_stats` pipe](#block_stats-pipe)
- [`len` pipe](#len-pipe)
### copy pipe
If some [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) must be copied, then `| copy src1 as dst1, ..., srcN as dstN` [pipe](#pipes) can be used.
@@ -1342,7 +1369,7 @@ Multiple fields can be copied with a single `| copy ...` pipe. For example, the
is copied to `message`:
```logsql
_time:5m | copy _time as timestamp, _msg as message
```
The `as` keyword is optional.
@@ -1759,9 +1786,64 @@ only if `ip` and `host` [fields](https://docs.victoriametrics.com/victorialogs/k
_time:5m | format if (ip:* and host:*) "request from <ip>:<host>" as message
```
### join pipe
The `| join by (<fields>) (<query>)` [pipe](#pipes) joins the current results with the `<query>` results by the given set of comma-separated `<fields>`.
This pipe works in the following way:
1. It executes the `<query>` and remembers its results. The `<query>` may be an arbitrary [LogsQL query](https://docs.victoriametrics.com/victorialogs/logsql/).
1. For each input row it searches for matching rows in the `<query>` results by the given `<fields>`.
1. If the `<query>` results have no matching rows, then the input row is sent to the output as is.
1. If the `<query>` results have matching rows, then for each matching row the input row is extended
with new fields seen at the matching row, and the result is sent to the output.
This logic is similar to `LEFT JOIN` in SQL. For example, the following query returns the number of per-user logs across two applications - `app1` and `app2` (
see [stream filters](https://docs.victoriametrics.com/victorialogs/logsql/#stream-filter) for details on `{...}` filter):
```logsql
_time:1d {app="app1"} | stats by (user) count() app1_hits
| join by (user) (
_time:1d {app="app2"} | stats by (user) count() app2_hits
)
```
If you need results similar to `JOIN` in SQL, then apply [`filter` pipe](#filter-pipe) with [`*` filter](https://docs.victoriametrics.com/victorialogs/logsql/#any-value-filter)
on fields, which must be non-empty after the join. For example, the following query returns stats only for users, which exist in both applications `app1` and `app2`:
```logsql
_time:1d {app="app1"} | stats by (user) count() app1_hits
| join by (user) (
_time:1d {app="app2"} | stats by (user) count() app2_hits
)
| filter app2_hits:*
```
It is possible to add a prefix to all the field names returned by the `<query>` by specifying the needed prefix after the `<query>`.
For example, the following query adds `app2.` prefix to all `<query>` log fields:
```logsql
_time:1d {app="app1"} | stats by (user) count() app1_hits
| join by (user) (
_time:1d {app="app2"} | stats by (user) count() app2_hits
) prefix "app2."
```
**Performance tips**:
- Make sure that the `<query>` in the `join` pipe returns a relatively small number of results, since they are kept in RAM during the execution of the `join` pipe.
- [Conditional `stats`](https://docs.victoriametrics.com/victorialogs/logsql/#stats-with-additional-filters) are usually faster to execute
and usually require less RAM than the equivalent `join` pipe.
See also:
- [`stats` pipe](#stats-pipe)
- [conditional `stats`](https://docs.victoriametrics.com/victorialogs/logsql/#stats-with-additional-filters)
- [`filter` pipe](#filter-pipe)
### len pipe
The `| len(field) as result` [pipe](#pipes) stores byte length of the given `field` value into the `result` field.
For example, the following query shows top 5 log entries with the maximum byte length of `_msg` field across
logs for the last 5 minutes:
@@ -1771,9 +1853,10 @@ _time:5m | len(_msg) as msg_len | sort by (msg_len desc) | limit 1
See also:
- [`sum_len` stats function](#sum_len-stats)
- [`sort` pipe](#sort-pipe)
- [`limit` pipe](#limit-pipe)
- [`block_stats` pipe](#block_stats-pipe)
### limit pipe
@@ -2230,6 +2313,7 @@ See also:
- [`sort` pipe](#sort-pipe)
- [`uniq` pipe](#uniq-pipe)
- [`top` pipe](#top-pipe)
- [`join` pipe](#join-pipe)
#### Stats by fields
@@ -2347,6 +2431,13 @@ _time:5m | stats
count() total
```
If zero input rows match the given `if (...)` filter, then zero result is returned for the given stats function.
See also:
- [`join` pipe](#join-pipe)
- [`stats` pipe functions](#stats-pipe-functions)
### stream_context pipe
`| stream_context ...` [pipe](#pipes) allows selecting surrounding logs for the matching logs in [logs stream](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields)
@@ -2761,9 +2852,9 @@ LogsQL supports the following functions for [`stats` pipe](#stats-pipe):
- [`count`](#count-stats) returns the number of log entries.
- [`count_empty`](#count_empty-stats) returns the number of logs with empty [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`count_uniq`](#count_uniq-stats) returns the number of unique non-empty values for the given [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`max`](#max-stats) returns the maximum value over the given [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`median`](#median-stats) returns the [median](https://en.wikipedia.org/wiki/Median) value over the given numeric [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`min`](#min-stats) returns the minimum value over the given [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`quantile`](#quantile-stats) returns the given quantile for the given numeric [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`row_any`](#row_any-stats) returns a sample [log entry](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) per each selected [stats group](#stats-by-fields).
- [`row_max`](#row_max-stats) returns the [log entry](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) with the maximum value at the given field.
@@ -3109,7 +3200,7 @@ See also:
### values stats
`values(field1, ..., fieldN)` [stats pipe function](#stats-pipe-functions) returns all the values (including empty values)
for the mentioned [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
The returned values are encoded in a JSON array.
View file
@@ -33,8 +33,8 @@ Just download archive for the needed Operating system and architecture, unpack i
For example, the following commands download VictoriaLogs archive for Linux/amd64, unpack and run it:
```sh
curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v0.42.0-victorialogs/victoria-logs-linux-amd64-v0.42.0-victorialogs.tar.gz
tar xzf victoria-logs-linux-amd64-v0.42.0-victorialogs.tar.gz
./victoria-logs-prod
```
@@ -58,7 +58,7 @@ Here is the command to run VictoriaLogs in a Docker container:
```sh
docker run --rm -it -p 9428:9428 -v ./victoria-logs-data:/victoria-logs-data \
  docker.io/victoriametrics/victoria-logs:v0.42.0-victorialogs
```
See also:
View file
@@ -24,6 +24,7 @@ VictoriaLogs provides the following features:
- It supports selecting surrounding logs in front and after the selected logs. See [these docs](https://docs.victoriametrics.com/victorialogs/logsql/#stream_context-pipe).
- It provides web UI for querying logs - see [these docs](https://docs.victoriametrics.com/victorialogs/querying/#web-ui).
- It provides [Grafana plugin for querying logs](https://docs.victoriametrics.com/victorialogs/victorialogs-datasource/).
- It supports alerting - see [these docs](https://docs.victoriametrics.com/victorialogs/vmalert/).
If you have questions about VictoriaLogs, then read [this FAQ](https://docs.victoriametrics.com/victorialogs/faq/).
Also feel free to ask any questions at [VictoriaMetrics community Slack chat](https://victoriametrics.slack.com/),
@@ -52,6 +53,8 @@ It is recommended to set up monitoring of these metrics via VictoriaMetrics
(see [these docs](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)),
vmagent (see [these docs](https://docs.victoriametrics.com/vmagent/#how-to-collect-metrics-in-prometheus-format)) or via Prometheus.
We recommend installing [Grafana dashboard for VictoriaLogs](https://grafana.com/grafana/dashboards/22084).
We recommend setting up [alerts](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts-vlogs.yml)
via [vmalert](https://docs.victoriametrics.com/vmalert/) or via Prometheus.
@@ -194,7 +197,7 @@ Therefore, for a complete data **backup**, you need to run the `rsync` command *
```sh
# example of rsync to remote host
rsync -avh --progress --delete <path-to-victorialogs-data> <username>@<host>:<path-to-victorialogs-backup>
```
The first `rsync` will sync the majority of the data, which can be time-consuming.
@@ -224,7 +227,7 @@ VictoriaLogs will automatically load this data upon startup.
```sh
# example of rsync from remote backup to local
rsync -avh --progress --delete <username>@<host>:<path-to-victorialogs-backup> <path-to-victorialogs-data>
```
It is also possible to use **the disk snapshot** in order to perform a backup. This feature could be provided by your operating system,
@@ -242,6 +245,13 @@ VictoriaLogs has very low overhead for per-tenant management, so it is OK to hav
VictoriaLogs doesn't perform per-tenant authorization. Use [vmauth](https://docs.victoriametrics.com/vmauth/) or similar tools for per-tenant authorization.
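A minimal multitenancy sketch using the `AccountID` and `ProjectID` HTTP headers (the `localhost:9428` address is an assumption):
```sh
# Ingest a log entry into the (AccountID=12, ProjectID=34) tenant ...
curl -X POST http://localhost:9428/insert/jsonline \
  -H 'AccountID: 12' -H 'ProjectID: 34' \
  -d '{"_msg":"hello from tenant 12:34"}'

# ... and query the same tenant back.
curl http://localhost:9428/select/logsql/query \
  -H 'AccountID: 12' -H 'ProjectID: 34' \
  -d 'query=_time:5m'
```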
## Security
It is expected that VictoriaLogs runs in a protected environment, which is unreachable from the Internet without proper authorization.
It is recommended to provide access to VictoriaLogs [data ingestion APIs](https://docs.victoriametrics.com/victorialogs/data-ingestion/)
and [querying APIs](https://docs.victoriametrics.com/victorialogs/querying/#http-api) via [vmauth](https://docs.victoriametrics.com/vmauth/)
or similar authorization proxies.
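For illustration, here is a minimal [vmauth](https://docs.victoriametrics.com/vmauth/) sketch guarding VictoriaLogs with basic auth; the file path, credentials and addresses are illustrative assumptions:
```sh
# Write a minimal vmauth config proxying authorized requests to VictoriaLogs
# (credentials and addresses are illustrative assumptions).
cat > vmauth.yml <<'EOF'
users:
  - username: "logs-user"
    password: "secret"
    url_prefix: "http://localhost:9428"
EOF

# Start vmauth with the config; clients then authenticate against vmauth
# instead of reaching VictoriaLogs directly.
./vmauth-prod -auth.config=vmauth.yml
```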
## Benchmarks
Here is a [benchmark suite](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/logs-benchmark) for comparing data ingestion performance
@@ -421,6 +431,22 @@ Pass `-help` to VictoriaLogs in order to see the list of supported command-line
Compression method for syslog messages received at the corresponding -syslog.listenAddr.udp. Supported values: none, gzip, deflate. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#compression
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-syslog.extraFields.tcp array
Fields to add to logs ingested via the corresponding -syslog.listenAddr.tcp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-syslog.extraFields.udp array
Fields to add to logs ingested via the corresponding -syslog.listenAddr.udp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-syslog.ignoreFields.tcp array
Fields to ignore at logs ingested via the corresponding -syslog.listenAddr.tcp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-syslog.ignoreFields.udp array
Fields to ignore at logs ingested via the corresponding -syslog.listenAddr.udp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-syslog.listenAddr.tcp array
Comma-separated list of TCP addresses to listen to for Syslog messages. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/
Supports an array of values separated by comma or specified via multiple flags.
@@ -429,12 +455,20 @@ Pass `-help` to VictoriaLogs in order to see the list of supported command-line
Comma-separated list of UDP addresses to listen to for Syslog messages. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-syslog.streamFields.tcp array
Fields to use as log stream labels for logs ingested via the corresponding -syslog.listenAddr.tcp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#stream-fields
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-syslog.streamFields.udp array
Fields to use as log stream labels for logs ingested via the corresponding -syslog.listenAddr.udp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#stream-fields
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-syslog.tenantID.tcp array
TenantID for logs ingested via the corresponding -syslog.listenAddr.tcp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#multitenancy
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-syslog.tenantID.udp array
TenantID for logs ingested via the corresponding -syslog.listenAddr.udp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#multitenancy
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-syslog.timezone string
View file
@@ -20,11 +20,7 @@ See [these docs](https://docs.victoriametrics.com/victorialogs/) for details.
The following functionality is planned in the future versions of VictoriaLogs:
- Support for [data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/) from popular log collectors and formats:
- [ ] [Datadog protocol for logs](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6632)
- [ ] Integration with Grafana. Partially done, check the [documentation](https://docs.victoriametrics.com/victorialogs/victorialogs-datasource/) and [datasource repository](https://github.com/VictoriaMetrics/victorialogs-datasource).
- [ ] Ability to make instant snapshots and backups in the way [similar to VictoriaMetrics](https://docs.victoriametrics.com/#how-to-work-with-snapshots).
- [ ] Cluster version of VictoriaLogs.
- [ ] Ability to store data to object storage (such as S3, GCS, Minio).
- [x] [Alerting on LogsQL queries](https://docs.victoriametrics.com/victorialogs/vmalert/).
- [ ] Data migration tool from Grafana Loki to VictoriaLogs (similar to [vmctl](https://docs.victoriametrics.com/vmctl/)).
View file
@@ -0,0 +1,52 @@
---
weight: 5
title: DataDog Agent setup
disableToc: true
menu:
  docs:
    parent: "victorialogs-data-ingestion"
    weight: 5
url: /victorialogs/data-ingestion/datadog-agent/
aliases:
- /VictoriaLogs/data-ingestion/DataDogAgent.html
---
The Datadog Agent doesn't support a custom path prefix, so it's required to use [VMAuth](https://docs.victoriametrics.com/vmauth/) or any other
reverse proxy to append the `/insert/datadog` path prefix to all Datadog API log requests.
In the case of [VMAuth](https://docs.victoriametrics.com/vmauth/), your config should look like:
```yaml
unauthorized_user:
  url_map:
    - src_paths:
        - "/api/v2/logs"
      url_prefix: "<victoria-logs-base-url>/insert/datadog/"
```
To start ingesting logs from the DataDog Agent, please specify a custom URL instead of the default one for sending collected logs to [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/):
```yaml
logs_enabled: true
logs_config:
  logs_dd_url: <vmauth-base-url>
  use_http: true
```
When using the [Serverless DataDog plugin](https://github.com/DataDog/serverless-plugin-datadog), please set the VictoriaLogs endpoint via the `LOGS_DD_URL` environment variable:
```yaml
custom:
  datadog:
    apiKey: fakekey # Set any key, otherwise the plugin fails
provider:
  environment:
    LOGS_DD_URL: <vmauth-base-url>/ # VictoriaLogs endpoint for DataDog
```
Substitute the `<vmauth-base-url>` address with the real address of VMAuth proxy.
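The proxying setup can then be verified by sending a DataDog-style log entry through VMAuth; the following is a sketch where the payload fields are a minimal assumed example of the DataDog v2 logs format:
```sh
# Send a single log entry in DataDog v2 logs format through the proxy
# (the payload fields are an assumed minimal example).
curl -X POST '<vmauth-base-url>/api/v2/logs' \
  -H 'Content-Type: application/json' \
  -d '[{"message":"hello from datadog format","ddsource":"test","service":"demo","hostname":"host1"}]'
```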
See also:
- [Data ingestion troubleshooting](https://docs.victoriametrics.com/victorialogs/data-ingestion/#troubleshooting).
- [How to query VictoriaLogs](https://docs.victoriametrics.com/victorialogs/querying/).
- [Docker-compose demo for Datadog integration with VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/datadog-agent).
View file
@@ -11,15 +11,16 @@ aliases:
- /victorialogs/data-ingestion/Promtail.html
- /victorialogs/data-ingestion/promtail.html
---
[Promtail](https://grafana.com/docs/loki/latest/clients/promtail/), [Grafana Agent](https://grafana.com/docs/agent/latest/)
and [Grafana Alloy](https://grafana.com/docs/alloy/latest/) are default log collectors for Grafana Loki.
They can be configured to send the collected logs to VictoriaLogs according to the following docs.
Specify [`clients`](https://grafana.com/docs/loki/latest/clients/promtail/configuration/#clients) section in the configuration file
for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):
```yaml
clients:
- url: "http://localhost:9428/insert/loki/api/v1/push?_stream_fields=instance,job,host,app"
```
Substitute `localhost:9428` address inside `clients` with the real TCP address of VictoriaLogs.
@@ -40,7 +41,7 @@ and inspecting VictoriaLogs logs then:
```yaml
clients:
- url: "http://localhost:9428/insert/loki/api/v1/push?_stream_fields=instance,job,host,app&debug=1"
```
If some [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) must be skipped
@@ -49,7 +50,7 @@ For example, the following config instructs VictoriaLogs to ignore `filename` an
```yaml
clients:
- url: 'http://localhost:9428/insert/loki/api/v1/push?_stream_fields=instance,job,host,app&ignore_fields=filename,stream'
```
By default the ingested logs are stored in the `(AccountID=0, ProjectID=0)` [tenant](https://docs.victoriametrics.com/victorialogs/#multitenancy).
@@ -60,7 +61,7 @@ For example, the following config instructs VictoriaLogs to store logs in the `(
```yaml
clients:
- url: "http://localhost:9428/insert/loki/api/v1/push?_stream_fields=instance,job,host,app&debug=1"
  tenant_id: "12:34"
```
View file
@@ -6,10 +6,11 @@
- Fluentd - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/fluentd/).
- Logstash - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/logstash/).
- Vector - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/vector/).
- Promtail (aka Grafana Loki, Grafana Agent or Grafana Alloy) - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/promtail/).
- Telegraf - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/telegraf/).
- OpenTelemetry Collector - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/opentelemetry/).
- Journald - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/journald/).
- DataDog - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/datadog-agent/).
The ingested logs can be queried according to [these docs](https://docs.victoriametrics.com/victorialogs/querying/).
@@ -189,66 +190,78 @@ HTTP query string parameters have priority over HTTP Headers.
#### HTTP Query string parameters
All the [HTTP-based data ingestion protocols](#http-apis) support the following [HTTP query string](https://en.wikipedia.org/wiki/Query_string) args:
- `_msg_field` - the name of the [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
containing the [log message](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field).
This is usually the `message` field for Filebeat and Logstash.
The `_msg_field` arg may contain a comma-separated list of field names. In this case the first non-empty field from the list
is treated as the [log message](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field).
If the `_msg_field` arg isn't set, then VictoriaLogs reads the log message from the `_msg` field. If the `_msg` field is empty,
then it is set to the `-defaultMsgValue` command-line flag value.
- `_time_field` - the name of the [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
containing the [log timestamp](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field).
This is usually the `@timestamp` field for Filebeat and Logstash.
If the `_time_field` arg isn't set, then VictoriaLogs reads the timestamp from the `_time` field. If this field doesn't exist, then the current timestamp is used.
- `_stream_fields` - comma-separated list of [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) names,
which uniquely identify every [log stream](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields).
If the `_stream_fields` arg isn't set, then all the ingested logs are written to the default log stream - `{}`.
- `ignore_fields` - an optional comma-separated list of [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) names,
which must be ignored during data ingestion.
- `extra_fields` - an optional comma-separated list of [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
which must be added to all the ingested logs. The format of every `extra_fields` entry is `field_name=field_value`.
If the log entry contains fields from the `extra_fields`, then they are overwritten by the values specified in `extra_fields`.
- `debug` - if this arg is set to `1`, then the ingested logs aren't stored in VictoriaLogs. Instead,
the ingested data is logged by VictoriaLogs, so it can be investigated later.
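For example, here is a sketch combining several of these args on the JSON lines ingestion endpoint (the `localhost:9428` address is an assumption; `debug=1` keeps the entry out of storage):
```sh
# _msg_field/_time_field map source fields, _stream_fields defines the stream,
# filename is dropped, env=prod is added, and debug=1 only logs the entry.
curl -X POST 'http://localhost:9428/insert/jsonline?_msg_field=message&_time_field=@timestamp&_stream_fields=host,app&ignore_fields=filename&extra_fields=env=prod&debug=1' \
  -d '{"message":"test","@timestamp":"2024-11-06T10:00:00Z","host":"h1","app":"a1","filename":"/var/log/x"}'
```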
See also [HTTP headers](#http-headers).
#### HTTP headers #### HTTP headers
List of supported [HTTP Headers](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields) parameters: All the [HTTP-based data ingestion protocols](#http-apis) support the following [HTTP Headers](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields)
additionally to [HTTP query args](#http-query-string-parameters):
- `AccountID` - may contain the needed accountID of tenant to ingest data to. See [multitenancy docs](https://docs.victoriametrics.com/victorialogs/#multitenancy) for details. - `AccountID` - accountID of the tenant to ingest data to. See [multitenancy docs](https://docs.victoriametrics.com/victorialogs/#multitenancy) for details.
- `ProjectID`- may contain the projectID needed of tenant to ingest data to. See [multitenancy docs](https://docs.victoriametrics.com/victorialogs/#multitenancy) for details. - `ProjectID`- projectID of the tenant to ingest data to. See [multitenancy docs](https://docs.victoriametrics.com/victorialogs/#multitenancy) for details.
VictoriaLogs accepts optional `AccountID` and `ProjectID` headers at [data ingestion HTTP APIs](#http-apis).
- `VL-Msg-Field` - it must contain the name of the [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) - `VL-Msg-Field` - the name of the [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
with the [log message](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field) generated by the log shipper. containing [log message](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field).
This is usually the `message` field for Filebeat and Logstash. This is usually the `message` field for Filebeat and Logstash.
The `VL-Msg-Field` header may contain comma-separated list of field names. In this case the first non-empty field from the list The `VL-Msg-Field` header may contain comma-separated list of field names. In this case the first non-empty field from the list
is treated as [log message](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field). is treated as [log message](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field).
If the `VL-Msg-Field` header isn't set, then VictoriaLogs reads the log message from the `_msg` field. If the `VL-Msg-Field` header isn't set, then VictoriaLogs reads log message from the `_msg` field. If the `_msg` field is empty,
then it is set to `-defaultMsgValue` command-line flag value.
- `VL-Time-Field` - the name of the [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
  containing the [log timestamp](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field).
  This is usually the `@timestamp` field for Filebeat and Logstash.
  If the `VL-Time-Field` header isn't set, then VictoriaLogs reads the timestamp from the `_time` field. If this field doesn't exist, then the current timestamp is used.
- `VL-Stream-Fields` - a comma-separated list of [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) names,
  which uniquely identify every [log stream](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields).
  If the `VL-Stream-Fields` header isn't set, then all the ingested logs are written to the default log stream - `{}`.
- `VL-Ignore-Fields` - an optional comma-separated list of [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) names,
  which must be ignored during data ingestion.
- `VL-Extra-Field` - an optional comma-separated list of [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
  which must be added to all the ingested logs. The format of every `extra_fields` entry is `field_name=field_value`.
  If the log entry contains fields from the `extra_fields`, then they are overwritten by the values specified in `extra_fields`.
- `VL-Debug` - if this parameter is set to `1`, then the ingested logs aren't stored in VictoriaLogs. Instead,
  the ingested data is logged by VictoriaLogs, so it can be investigated later. A usage example for these headers is given below.
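
For example, the following `curl` command ingests a single log entry via the JSON stream API at `/insert/jsonline` while demonstrating these headers. This is a minimal sketch: it assumes a VictoriaLogs instance listening on the default `localhost:9428`, and the tenant, stream and extra-field values are arbitrary illustrations:

```sh
# Ingest one log entry into the tenant (AccountID=12, ProjectID=34).
# The log message is read from the `message` field, the timestamp from `@timestamp`,
# and the (host, app) field pair identifies the log stream.
curl -X POST http://localhost:9428/insert/jsonline \
  -H 'AccountID: 12' \
  -H 'ProjectID: 34' \
  -H 'VL-Msg-Field: message' \
  -H 'VL-Time-Field: @timestamp' \
  -H 'VL-Stream-Fields: host,app' \
  -H 'VL-Extra-Field: datacenter=dc-1' \
  -d '{"@timestamp":"2024-11-11T10:41:47Z","host":"host-1","app":"nginx","message":"connection accepted"}'
```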
@ -297,17 +310,18 @@ VictoriaLogs exposes various [metrics](https://docs.victoriametrics.com/victoria
Here is the list of log collectors and their ingestion formats supported by VictoriaLogs:
| How to setup the collector | Format: Elasticsearch | Format: JSON Stream | Format: Loki | Format: syslog | Format: OpenTelemetry | Format: Journald | Format: DataDog |
|----------------------------|-----------------------|---------------------|--------------|----------------|-----------------------|------------------|-----------------|
| [Rsyslog](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/) | [Yes](https://www.rsyslog.com/doc/configuration/modules/omelasticsearch.html) | No | No | [Yes](https://www.rsyslog.com/doc/configuration/modules/omfwd.html) | No | No | No |
| [Syslog-ng](https://docs.victoriametrics.com/victorialogs/data-ingestion/filebeat/) | Yes, [v1](https://support.oneidentity.com/technical-documents/syslog-ng-open-source-edition/3.16/administration-guide/28#TOPIC-956489), [v2](https://support.oneidentity.com/technical-documents/doc/syslog-ng-open-source-edition/3.16/administration-guide/29#TOPIC-956494) | No | No | [Yes](https://support.oneidentity.com/technical-documents/doc/syslog-ng-open-source-edition/3.16/administration-guide/44#TOPIC-956553) | No | No | No |
| [Filebeat](https://docs.victoriametrics.com/victorialogs/data-ingestion/filebeat/) | [Yes](https://www.elastic.co/guide/en/beats/filebeat/current/elasticsearch-output.html) | No | No | No | No | No | No |
| [Fluentbit](https://docs.victoriametrics.com/victorialogs/data-ingestion/fluentbit/) | No | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/http) | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/loki) | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/syslog) | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/opentelemetry) | No | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/datadog) |
| [Logstash](https://docs.victoriametrics.com/victorialogs/data-ingestion/logstash/) | [Yes](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) | No | No | [Yes](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-syslog.html) | [Yes](https://github.com/paulgrav/logstash-output-opentelemetry) | No | [Yes](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-datadog.html) |
| [Vector](https://docs.victoriametrics.com/victorialogs/data-ingestion/vector/) | [Yes](https://vector.dev/docs/reference/configuration/sinks/elasticsearch/) | [Yes](https://vector.dev/docs/reference/configuration/sinks/http/) | [Yes](https://vector.dev/docs/reference/configuration/sinks/loki/) | No | [Yes](https://vector.dev/docs/reference/configuration/sources/opentelemetry/) | No | [Yes](https://vector.dev/docs/reference/configuration/sinks/datadog_logs/) |
| [Promtail](https://docs.victoriametrics.com/victorialogs/data-ingestion/promtail/) | No | No | [Yes](https://grafana.com/docs/loki/latest/clients/promtail/configuration/#clients) | No | No | No | No |
| [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/) | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter) | No | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/lokiexporter) | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/syslogexporter) | [Yes](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter) | No | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/datadogexporter) |
| [Telegraf](https://docs.victoriametrics.com/victorialogs/data-ingestion/telegraf/) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/elasticsearch) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/http) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) | [Yes](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/syslog) | Yes | No | No |
| [Fluentd](https://docs.victoriametrics.com/victorialogs/data-ingestion/fluentd/) | [Yes](https://github.com/uken/fluent-plugin-elasticsearch) | [Yes](https://docs.fluentd.org/output/http) | [Yes](https://grafana.com/docs/loki/latest/send-data/fluentd/) | [Yes](https://github.com/fluent-plugins-nursery/fluent-plugin-remote_syslog) | No | No | No |
| [Journald](https://docs.victoriametrics.com/victorialogs/data-ingestion/journald/) | No | No | No | No | No | Yes | No |
| [DataDog Agent](https://docs.victoriametrics.com/VictoriaLogs/data-ingestion/datadog-agent) | No | No | No | No | No | No | Yes |
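
As an illustration of the DataDog-compatible ingestion path, the following `curl` command emulates the request a DataDog Agent sends. This is a sketch under assumptions: VictoriaLogs on the default `localhost:9428`, the DataDog logs endpoint exposed at `/insert/datadog/api/v2/logs` (the exact path may differ), and arbitrary example field values:

```sh
# Emulate a DataDog Agent: send one log entry in the DataDog v2 logs JSON format.
curl -X POST http://localhost:9428/insert/datadog/api/v2/logs \
  -H 'Content-Type: application/json' \
  -d '[{"message":"connection accepted","ddsource":"nginx","ddtags":"env:prod","hostname":"host-1","service":"web"}]'
```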