Merge branch 'master' into vmui/logs/add-autocomplete

Aliaksandr Valialkin 2024-09-30 14:22:23 +02:00 committed by GitHub
commit c98e4f4660
135 changed files with 915 additions and 355 deletions

@@ -6,9 +6,7 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"math"
 	"net/http"
-	"strconv"
 	"strings"
 	"time"

@@ -251,19 +249,8 @@ func parseElasticsearchTimestamp(s string) (int64, error) {
 		return 0, nil
 	}
 	if len(s) < len("YYYY-MM-DD") || s[len("YYYY")] != '-' {
-		// Try parsing timestamp in milliseconds
-		n, err := strconv.ParseInt(s, 10, 64)
-		if err != nil {
-			return 0, fmt.Errorf("cannot parse timestamp in milliseconds from %q: %w", s, err)
-		}
-		if n > int64(math.MaxInt64)/1e6 {
-			return 0, fmt.Errorf("too big timestamp in milliseconds: %d; mustn't exceed %d", n, int64(math.MaxInt64)/1e6)
-		}
-		if n < int64(math.MinInt64)/1e6 {
-			return 0, fmt.Errorf("too small timestamp in milliseconds: %d; must be bigger than %d", n, int64(math.MinInt64)/1e6)
-		}
-		n *= 1e6
-		return n, nil
+		// Try parsing timestamp in seconds or milliseconds
+		return insertutils.ParseUnixTimestamp(s)
 	}
 	if len(s) == len("YYYY-MM-DD") {
 		t, err := time.Parse("2006-01-02", s)

@@ -78,15 +78,18 @@ func TestReadBulkRequest_Success(t *testing.T) {
 {"create":{"_index":"filebeat-8.8.0"}}
 {"@timestamp":"2023-06-06 04:48:12.735+01:00","message":"baz"}
 {"index":{"_index":"filebeat-8.8.0"}}
-{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
+{"message":"xyz","@timestamp":"1686026893735","x":"y"}
+{"create":{"_index":"filebeat-8.8.0"}}
+{"message":"qwe rty","@timestamp":"1686026893"}
 `
 	timeField := "@timestamp"
 	msgField := "message"
-	rowsExpected := 3
-	timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000}
+	rowsExpected := 4
+	timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000, 1686026893000000000}
 	resultExpected := `{"@timestamp":"","log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
 {"@timestamp":"","_msg":"baz"}
-{"_msg":"xyz","@timestamp":"","x":"y"}`
+{"_msg":"xyz","@timestamp":"","x":"y"}
+{"_msg":"qwe rty","@timestamp":""}`
 	f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
 }

@@ -2,6 +2,8 @@ package insertutils

 import (
 	"fmt"
+	"math"
+	"strconv"
 	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
@@ -19,15 +21,49 @@ func ExtractTimestampRFC3339NanoFromFields(timeField string, fields []logstorage
 		if f.Name != timeField {
 			continue
 		}
-		if f.Value == "" || f.Value == "0" {
-			return time.Now().UnixNano(), nil
-		}
-		nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(f.Value)
-		if !ok {
-			return 0, fmt.Errorf("cannot unmarshal rfc3339 timestamp from %s=%q", timeField, f.Value)
+		nsecs, err := parseTimestamp(f.Value)
+		if err != nil {
+			return 0, fmt.Errorf("cannot parse timestamp from field %q: %s", timeField, err)
 		}
 		f.Value = ""
+		if nsecs == 0 {
+			nsecs = time.Now().UnixNano()
+		}
 		return nsecs, nil
 	}
 	return time.Now().UnixNano(), nil
 }
+
+func parseTimestamp(s string) (int64, error) {
+	if s == "" || s == "0" {
+		return time.Now().UnixNano(), nil
+	}
+	if len(s) <= len("YYYY") || s[len("YYYY")] != '-' {
+		return ParseUnixTimestamp(s)
+	}
+	nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(s)
+	if !ok {
+		return 0, fmt.Errorf("cannot unmarshal rfc3339 timestamp %q", s)
+	}
+	return nsecs, nil
+}
+
+// ParseUnixTimestamp parses s as unix timestamp in either seconds or milliseconds and returns the parsed timestamp in nanoseconds.
+func ParseUnixTimestamp(s string) (int64, error) {
+	n, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("cannot parse unix timestamp from %q: %w", s, err)
+	}
+	if n < (1<<31) && n >= (-1<<31) {
+		// The timestamp is in seconds. Convert it to milliseconds
+		n *= 1e3
+	}
+	if n > int64(math.MaxInt64)/1e6 {
+		return 0, fmt.Errorf("too big timestamp in milliseconds: %d; mustn't exceed %d", n, int64(math.MaxInt64)/1e6)
+	}
+	if n < int64(math.MinInt64)/1e6 {
+		return 0, fmt.Errorf("too small timestamp in milliseconds: %d; must be bigger than %d", n, int64(math.MinInt64)/1e6)
+	}
+	n *= 1e6
+	return n, nil
+}
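
The seconds-vs-milliseconds heuristic above hinges on the 2^31 cutoff: 2^31 seconds lands in the year 2038, while 2^31 milliseconds is less than 25 days after the unix epoch, so any realistic millisecond timestamp exceeds the cutoff. A self-contained sketch of just that branch (overflow checks omitted for brevity):

```go
package main

import "fmt"

// toNanos mirrors the seconds-vs-milliseconds heuristic from ParseUnixTimestamp.
func toNanos(n int64) int64 {
	if n < (1<<31) && n >= (-1<<31) {
		// Values below 2^31 (~2.1e9) are treated as seconds: 2^31 seconds is
		// the year 2038, while 2^31 milliseconds is under 25 days after the
		// unix epoch, so realistic millisecond timestamps are always bigger.
		n *= 1e3
	}
	return n * 1e6 // milliseconds -> nanoseconds
}

func main() {
	fmt.Println(toNanos(1686026893))    // seconds -> 1686026893000000000
	fmt.Println(toNanos(1686026893735)) // milliseconds -> 1686026893735000000
}
```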

@@ -27,25 +27,41 @@ func TestExtractTimestampRFC3339NanoFromFields_Success(t *testing.T) {
 		}
 	}

+	// UTC time
 	f("time", []logstorage.Field{
 		{Name: "foo", Value: "bar"},
 		{Name: "time", Value: "2024-06-18T23:37:20Z"},
 	}, 1718753840000000000)

+	// Time with timezone
 	f("time", []logstorage.Field{
 		{Name: "foo", Value: "bar"},
 		{Name: "time", Value: "2024-06-18T23:37:20+08:00"},
 	}, 1718725040000000000)

+	// SQL datetime format
 	f("time", []logstorage.Field{
 		{Name: "foo", Value: "bar"},
-		{Name: "time", Value: "2024-06-18T23:37:20.123-05:30"},
+		{Name: "time", Value: "2024-06-18 23:37:20.123-05:30"},
 	}, 1718773640123000000)

+	// Time with nanosecond precision
 	f("time", []logstorage.Field{
 		{Name: "time", Value: "2024-06-18T23:37:20.123456789-05:30"},
 		{Name: "foo", Value: "bar"},
 	}, 1718773640123456789)
+
+	// Unix timestamp in milliseconds
+	f("time", []logstorage.Field{
+		{Name: "foo", Value: "bar"},
+		{Name: "time", Value: "1718773640123"},
+	}, 1718773640123000000)
+
+	// Unix timestamp in seconds
+	f("time", []logstorage.Field{
+		{Name: "foo", Value: "bar"},
+		{Name: "time", Value: "1718773640"},
+	}, 1718773640000000000)
 }

 func TestExtractTimestampRFC3339NanoFromFields_Error(t *testing.T) {
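
The expected nanosecond values in these cases can be cross-checked with the standard library. A sketch (the production code goes through logstorage.TryParseTimestampRFC3339Nano rather than time.Parse):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// UTC time from the first test case above.
	t1, _ := time.Parse(time.RFC3339, "2024-06-18T23:37:20Z")
	fmt.Println(t1.UnixNano()) // 1718753840000000000

	// SQL-style datetime with a space separator instead of 'T'.
	t2, _ := time.Parse("2006-01-02 15:04:05.999-07:00", "2024-06-18 23:37:20.123-05:30")
	fmt.Println(t2.UnixNano()) // 1718773640123000000
}
```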

@@ -1,13 +1,13 @@
 {
   "files": {
-    "main.css": "./static/css/main.c9cc37dd.css",
-    "main.js": "./static/js/main.867f457f.js",
+    "main.css": "./static/css/main.cbbca000.css",
+    "main.js": "./static/js/main.3d2eb957.js",
     "static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
     "static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
     "index.html": "./index.html"
   },
   "entrypoints": [
-    "static/css/main.c9cc37dd.css",
-    "static/js/main.867f457f.js"
+    "static/css/main.cbbca000.css",
+    "static/js/main.3d2eb957.js"
   ]
 }

@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.867f457f.js"></script><link href="./static/css/main.c9cc37dd.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.3d2eb957.js"></script><link href="./static/css/main.cbbca000.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@@ -76,7 +76,7 @@ absolute path to all .tpl files in root.
 		`Link to VMUI: -external.alert.source='vmui/#/?g0.expr={{.Expr|queryEscape}}'. `+
 		`If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.`)
 	externalLabels = flagutil.NewArrayString("external.label", "Optional label in the form 'Name=value' to add to all generated recording rules and alerts. "+
-		"Pass multiple -label flags in order to add multiple label sets.")
+		"In case of conflicts, original labels are kept with prefix `exported_`.")
 	remoteReadIgnoreRestoreErrors = flag.Bool("remoteRead.ignoreRestoreErrors", true, "Whether to ignore errors from remote storage when restoring alerts state on startup. DEPRECATED - this flag has no effect and will be removed in the next releases.")

@@ -4,7 +4,6 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
 	"log"
 	"net/http"
 	"os"
@@ -12,6 +11,7 @@ import (
 	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
 	remote_read_integration "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/testdata/servers_integration_test"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
@@ -251,7 +251,7 @@ func deleteSeries(name, value string) (int, error) {
 	if err := tfs.Add([]byte(name), []byte(value), false, true); err != nil {
 		return 0, fmt.Errorf("unexpected error in TagFilters.Add: %w", err)
 	}
-	return vmstorage.DeleteSeries(nil, []*storage.TagFilters{tfs})
+	return vmstorage.DeleteSeries(nil, []*storage.TagFilters{tfs}, 1e3)
 }

 func TestBuildMatchWithFilter_Failure(t *testing.T) {

@@ -44,10 +44,7 @@ var (
 var slowQueries = metrics.NewCounter(`vm_slow_queries_total`)

 func getDefaultMaxConcurrentRequests() int {
-	n := cgroup.AvailableCPUs()
-	if n <= 4 {
-		n *= 2
-	}
+	n := cgroup.AvailableCPUs() * 2
 	if n > 16 {
 		// A single request can saturate all the CPU cores, so there is no sense
 		// in allowing higher number of concurrent requests - they will just contend
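
In effect the new default doubles the CPU count unconditionally instead of only on small machines, while keeping the upper cap: e.g. 8 CPUs previously yielded 8 concurrent requests and now yield 16. A sketch of the resulting logic, with the CPU count passed in explicitly and the clamp to 16 assumed from the truncated hunk:

```go
package main

import "fmt"

// defaultMaxConcurrentRequests reproduces the new heuristic; the real code
// reads the CPU count via cgroup.AvailableCPUs.
func defaultMaxConcurrentRequests(cpus int) int {
	n := cpus * 2
	if n > 16 {
		// A single request can saturate all the CPU cores, so there is no
		// sense in allowing a higher number of concurrent requests.
		n = 16
	}
	return n
}

func main() {
	for _, cpus := range []int{2, 4, 8, 16} {
		fmt.Printf("%2d CPUs -> %2d concurrent requests\n", cpus, defaultMaxConcurrentRequests(cpus))
	}
}
```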

@@ -765,7 +765,7 @@ func putSortBlocksHeap(sbh *sortBlocksHeap) {
 var sbhPool sync.Pool

-// DeleteSeries deletes time series matching the given tagFilterss.
+// DeleteSeries deletes time series matching the given search query.
 func DeleteSeries(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline searchutils.Deadline) (int, error) {
 	qt = qt.NewChild("delete series: %s", sq)
 	defer qt.Done()
@@ -774,7 +774,7 @@ func DeleteSeries(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
 	if err != nil {
 		return 0, err
 	}
-	return vmstorage.DeleteSeries(qt, tfss)
+	return vmstorage.DeleteSeries(qt, tfss, sq.MaxMetrics)
 }

 // LabelNames returns label names matching the given sq until the given deadline.

@@ -3,7 +3,6 @@ package prometheus
 import (
 	"flag"
 	"fmt"
-	"github.com/VictoriaMetrics/metricsql"
 	"math"
 	"net/http"
 	"runtime"
@@ -27,6 +26,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 	"github.com/VictoriaMetrics/metrics"
+	"github.com/VictoriaMetrics/metricsql"
 	"github.com/valyala/fastjson/fastfloat"
 )
@@ -52,6 +52,7 @@ var (
 	maxExportSeries = flag.Int("search.maxExportSeries", 10e6, "The maximum number of time series, which can be returned from /api/v1/export* APIs. This option allows limiting memory usage")
 	maxTSDBStatusSeries = flag.Int("search.maxTSDBStatusSeries", 10e6, "The maximum number of time series, which can be processed during the call to /api/v1/status/tsdb. This option allows limiting memory usage")
 	maxSeriesLimit = flag.Int("search.maxSeries", 30e3, "The maximum number of time series, which can be returned from /api/v1/series. This option allows limiting memory usage")
+	maxDeleteSeries = flag.Int("search.maxDeleteSeries", 1e6, "The maximum number of time series, which can be deleted using /api/v1/admin/tsdb/delete_series. This option allows limiting memory usage")
 	maxLabelsAPISeries = flag.Int("search.maxLabelsAPISeries", 1e6, "The maximum number of time series, which could be scanned when searching for the matching time series "+
 		"at /api/v1/labels and /api/v1/label/.../values. This option allows limiting memory usage and CPU usage. See also -search.maxLabelsAPIDuration, "+
 		"-search.maxTagKeys, -search.maxTagValues and -search.ignoreExtraFiltersAtLabelsAPI")
@@ -479,7 +480,7 @@ func DeleteHandler(startTime time.Time, r *http.Request) error {
 	if !cp.IsDefaultTimeRange() {
 		return fmt.Errorf("start=%d and end=%d args aren't supported. Remove these args from the query in order to delete all the matching metrics", cp.start, cp.end)
 	}
-	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, 0)
+	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxDeleteSeries)
 	deletedCount, err := netstorage.DeleteSeries(nil, sq, cp.deadline)
 	if err != nil {
 		return fmt.Errorf("cannot delete time series: %w", err)

@@ -1,13 +1,13 @@
 {
   "files": {
-    "main.css": "./static/css/main.81c6ec3a.css",
-    "main.js": "./static/js/main.0e69b7a9.js",
+    "main.css": "./static/css/main.d871147a.css",
+    "main.js": "./static/js/main.621c4b4d.js",
     "static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
     "static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
     "index.html": "./index.html"
   },
   "entrypoints": [
-    "static/css/main.81c6ec3a.css",
-    "static/js/main.0e69b7a9.js"
+    "static/css/main.d871147a.css",
+    "static/js/main.621c4b4d.js"
   ]
 }

@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore and troubleshoot your VictoriaMetrics data"/><link rel="manifest" href="./manifest.json"/><title>vmui</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:site" content="@https://victoriametrics.com/"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaMetrics"><meta property="og:url" content="https://victoriametrics.com/"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><script defer="defer" src="./static/js/main.0e69b7a9.js"></script><link href="./static/css/main.81c6ec3a.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore and troubleshoot your VictoriaMetrics data"/><link rel="manifest" href="./manifest.json"/><title>vmui</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:site" content="@https://victoriametrics.com/"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaMetrics"><meta property="og:url" content="https://victoriametrics.com/"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><script defer="defer" src="./static/js/main.621c4b4d.js"></script><link href="./static/css/main.d871147a.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@@ -171,9 +171,9 @@ func RegisterMetricNames(qt *querytracer.Tracer, mrs []storage.MetricRow) {
 // DeleteSeries deletes series matching tfss.
 //
 // Returns the number of deleted series.
-func DeleteSeries(qt *querytracer.Tracer, tfss []*storage.TagFilters) (int, error) {
+func DeleteSeries(qt *querytracer.Tracer, tfss []*storage.TagFilters, maxMetrics int) (int, error) {
 	WG.Add(1)
-	n, err := Storage.DeleteSeries(qt, tfss)
+	n, err := Storage.DeleteSeries(qt, tfss, maxMetrics)
 	WG.Done()
 	return n, err
 }

@@ -28,7 +28,7 @@ services:
     restart: always
     volumes:
       - grafanadata:/var/lib/grafana
-      - ./provisioning/datasources/prometheus-datasource:/etc/grafana/provisioning/datasources
+      - ./provisioning/datasources/prometheus-datasource/cluster.yml:/etc/grafana/provisioning/datasources/cluster.yml
      - ./provisioning/dashboards:/etc/grafana/provisioning/dashboards
       - ./../../dashboards/victoriametrics-cluster.json:/var/lib/grafana/dashboards/vm.json
       - ./../../dashboards/vmagent.json:/var/lib/grafana/dashboards/vmagent.json

@@ -40,7 +40,7 @@ services:
   # storing logs and serving read queries.
   victorialogs:
     container_name: victorialogs
-    image: docker.io/victoriametrics/victoria-logs:v0.30.1-victorialogs
+    image: docker.io/victoriametrics/victoria-logs:v0.32.0-victorialogs
     command:
       - "--storageDataPath=/vlogs"
       - "--httpListenAddr=:9428"

@@ -53,7 +53,7 @@ services:
       - 3000:3000
     volumes:
       - grafanadata:/var/lib/grafana
-      - ./provisioning/datasources/prometheus-datasource:/etc/grafana/provisioning/datasources
+      - ./provisioning/datasources/prometheus-datasource/single.yml:/etc/grafana/provisioning/datasources/single.yml
       - ./provisioning/dashboards:/etc/grafana/provisioning/dashboards
       - ./../../dashboards/victoriametrics.json:/var/lib/grafana/dashboards/vm.json
       - ./../../dashboards/vmagent.json:/var/lib/grafana/dashboards/vmagent.json

@@ -1,20 +1,11 @@
 apiVersion: 1

 datasources:
-  - name: VictoriaMetrics
-    type: prometheus
-    access: proxy
-    url: http://victoriametrics:8428
-    isDefault: true
-    jsonData:
-      prometheusType: Prometheus
-      prometheusVersion: 2.24.0
-
   - name: VictoriaMetrics - cluster
     type: prometheus
     access: proxy
     url: http://vmauth:8427/select/0/prometheus
-    isDefault: false
+    isDefault: true
     jsonData:
       prometheusType: Prometheus
       prometheusVersion: 2.24.0

@@ -0,0 +1,11 @@
+apiVersion: 1
+
+datasources:
+  - name: VictoriaMetrics
+    type: prometheus
+    access: proxy
+    url: http://victoriametrics:8428
+    isDefault: true
+    jsonData:
+      prometheusType: Prometheus
+      prometheusVersion: 2.24.0

@@ -0,0 +1,13 @@
+apiVersion: 1
+
+datasources:
+  - name: VictoriaMetrics - cluster
+    type: victoriametrics-datasource
+    access: proxy
+    url: http://vmauth:8427/select/0/prometheus
+    isDefault: true
+    # see https://grafana.com/docs/grafana/latest/administration/provisioning/#json-data
+    jsonData:
+      # vmuiUrl will auto resolve into http://vmauth:8427 and won't work at user's machine
+      # so we override it
+      vmuiUrl: http://localhost:8427/select/0/vmui

@@ -0,0 +1,13 @@
+apiVersion: 1
+
+datasources:
+  - name: VictoriaMetrics
+    type: victoriametrics-datasource
+    access: proxy
+    url: http://victoriametrics:8428
+    isDefault: true
+    # see https://grafana.com/docs/grafana/latest/administration/provisioning/#json-data
+    jsonData:
+      # vmuiUrl will auto resolve into http://victoriametrics:8428 and won't work at user's machine
+      # so we override it
+      vmuiUrl: http://localhost:8428/vmui

@@ -1,46 +0,0 @@
-apiVersion: 1
-
-# List of data sources to insert/update depending on what's
-# available in the database.
-datasources:
-  # <string, required> Name of the VictoriaMetrics datasource
-  # displayed in Grafana panels and queries.
-  - name: VictoriaMetrics
-    # <string, required> Sets the data source type.
-    type: victoriametrics-datasource
-    # <string, required> Sets the access mode, either
-    # proxy or direct (Server or Browser in the UI).
-    # Some data sources are incompatible with any setting
-    # but proxy (Server).
-    access: proxy
-    # <string> Sets default URL of the single node version of VictoriaMetrics
-    url: http://victoriametrics:8428
-    # <string> Sets the pre-selected datasource for new panels.
-    # You can set only one default data source per organization.
-    isDefault: true
-    # see https://grafana.com/docs/grafana/latest/administration/provisioning/#json-data
-    jsonData:
-      # vmuiUrl will auto resolve into http://victoriametrics:8428 and won't work at user's machine
-      # so we override it
-      vmuiUrl: http://localhost:8428/vmui
-  # <string, required> Name of the VictoriaMetrics datasource
-  # displayed in Grafana panels and queries.
-  - name: VictoriaMetrics - cluster
-    # <string, required> Sets the data source type.
-    type: victoriametrics-datasource
-    # <string, required> Sets the access mode, either
-    # proxy or direct (Server or Browser in the UI).
-    # Some data sources are incompatible with any setting
-    # but proxy (Server).
-    access: proxy
-    # <string> Sets default URL of the cluster version of VictoriaMetrics
-    url: http://vmauth:8427/select/0/prometheus
-    # <string> Sets the pre-selected datasource for new panels.
-    # You can set only one default data source per organization.
-    isDefault: false
-    # see https://grafana.com/docs/grafana/latest/administration/provisioning/#json-data
-    jsonData:
-      # vmuiUrl will auto resolve into http://vmauth:8427 and won't work at user's machine
-      # so we override it
-      vmuiUrl: http://localhost:8427/select/0/vmui

@@ -1,7 +1,7 @@
 services:
   # meta service will be ignored by compose
   .victorialogs:
-    image: docker.io/victoriametrics/victoria-logs:v0.30.1-victorialogs
+    image: docker.io/victoriametrics/victoria-logs:v0.32.0-victorialogs
     command:
       - -storageDataPath=/vlogs
       - -loggerFormat=json

@@ -1,10 +1,10 @@
-# Docker compose Filebeat integration with VictoriaLogs using listed below protocols:
+# Docker compose Filebeat integration with VictoriaLogs
+
+The folder contains examples of [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html) integration with VictoriaLogs using protocols:

 * [syslog](./syslog)
 * [elasticsearch](./elasticsearch)

-The folder contains the example of integration of [filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html) with Victorialogs
-
 To spin-up environment `cd` to any of listed above directories run the following command:
 ```
 docker compose up -d
@@ -18,9 +18,9 @@ docker compose rm -f

 The docker compose file contains the following components:

-* filebeat - fileabeat is configured to collect logs from the `docker`, you can find configuration in the `filebeat.yml`. It writes data in VictoriaLogs
-* VictoriaLogs - the log database, it accepts the data from `filebeat` by elastic protocol
-* VictoriaMetrics - collects metrics from `filebeat` via `filebeat-exporter`, `VictoriaLogs` and `VictoriaMetrics`
+* filebeat - logs collection agent configured to collect and write data to `victorialogs`
+* victorialogs - logs database, receives data from `filebeat` agent
+* victoriametrics - metrics database, which collects metrics from `victorialogs` and `filebeat` for observability purposes

 Querying the data

@@ -1,8 +1,8 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 services:
-  filebeat-victorialogs:
-    image: docker.elastic.co/beats/filebeat:8.15.0
+  filebeat:
+    image: docker.elastic.co/beats/filebeat:8.15.1
     restart: on-failure
     volumes:
       - type: bind

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: filebeat-elasticsearch

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: filebeat-syslog

@@ -1,11 +1,11 @@
-# Docker compose Fluentbit integration with VictoriaLogs using given below protocols:
+# Docker compose FluentBit integration with VictoriaLogs
+
+The folder contains examples of [FluentBit](https://docs.fluentbit.io/manual) integration with VictoriaLogs using protocols:

 * [loki](./loki)
 * [jsonline single node](./jsonline)
 * [jsonline HA setup](./jsonline-ha)

-The folder contains the example of integration of [fluentbit](https://docs.fluentbit.io/manual) with Victorialogs
-
 To spin-up environment `cd` to any of listed above directories run the following command:
 ```
 docker compose up -d
@@ -19,8 +19,9 @@ docker compose rm -f

 The docker compose file contains the following components:

-* fluentbit - fluentbit is configured to collect logs from the `docker`, you can find configuration in the `fluent-bit.conf`. It writes data in VictoriaLogs
-* VictoriaLogs - the log database, it accepts the data from `fluentbit` by json line protocol
+* fluentbit - logs collection agent configured to collect and write data to `victorialogs`
+* victorialogs - logs database, receives data from `fluentbit` agent
+* victoriametrics - metrics database, which collects metrics from `victorialogs` and `fluentbit` for observability purposes

 Querying the data

@@ -1,5 +1,5 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 services:
   fluentbit:
     image: cr.fluentbit.io/fluent/fluent-bit:3.1.7

@@ -1,5 +1,5 @@
 include:
   - path:
-      - ../compose.yml
+      - ../compose-base.yml
       - ../../compose-ha.yml
 name: fluentbit-jsonline-ha

@@ -13,11 +13,23 @@
     Parser syslog-rfc3164
     Mode tcp

+[INPUT]
+    name fluentbit_metrics
+    tag internal_metrics
+    scrape_interval 2
+
 [SERVICE]
     Flush 1
     Parsers_File parsers.conf

-[Output]
+[OUTPUT]
+    Name prometheus_remote_write
+    Match internal_metrics
+    Host victoriametrics
+    Port 8428
+    Uri /api/v1/write
+
+[OUTPUT]
     Name http
     Match *
     host victorialogs
@@ -29,7 +41,7 @@
     header AccountID 0
     header ProjectID 0

-[Output]
+[OUTPUT]
     Name http
     Match *
     host victorialogs-2

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: fluentbit-jsonline

@@ -13,11 +13,23 @@
     Parser syslog-rfc3164
     Mode tcp

+[INPUT]
+    name fluentbit_metrics
+    tag internal_metrics
+    scrape_interval 2
+
 [SERVICE]
     Flush 1
     Parsers_File parsers.conf

-[Output]
+[OUTPUT]
+    Name prometheus_remote_write
+    Match internal_metrics
+    Host victoriametrics
+    Port 8428
+    Uri /api/v1/write
+
+[OUTPUT]
     Name http
     Match *
     host victorialogs

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: fluentbit-loki

@@ -13,10 +13,22 @@
     Parser syslog-rfc3164
     Mode tcp

+[INPUT]
+    name fluentbit_metrics
+    tag internal_metrics
+    scrape_interval 2
+
 [SERVICE]
     Flush 1
     Parsers_File parsers.conf

+[OUTPUT]
+    Name prometheus_remote_write
+    Match internal_metrics
+    Host victoriametrics
+    Port 8428
+    Uri /api/v1/write
+
 [OUTPUT]
     name loki
     match *

@@ -1,8 +1,11 @@
-FROM fluent/fluentd:v1.17
+FROM fluent/fluentd:v1.17-debian-1
 USER root
 RUN \
+    apt update && \
+    apt install -y netcat-traditional && \
     gem install \
         fluent-plugin-datadog \
         fluent-plugin-grafana-loki \
-        fluent-plugin-elasticsearch
+        fluent-plugin-elasticsearch \
+        fluent-plugin-remote_syslog
 USER fluent

@@ -1,10 +1,12 @@
-# Docker compose Fluentd integration with VictoriaLogs using given below protocols:
+# Docker compose Fluentd integration with VictoriaLogs
+
+The folder contains examples of [Fluentd](https://www.fluentd.org/) integration with VictoriaLogs using protocols:

 * [loki](./loki)
 * [jsonline](./jsonline)
 * [elasticsearch](./elasticsearch)

-The folder contains the example of integration of [fluentd](https://www.fluentd.org/) with Victorialogs
+All required plugins, that should be installed in order to support protocols listed above can be found in a [Dockerfile](./Dockerfile)

 To spin-up environment `cd` to any of listed above directories run the following command:
 ```
@@ -19,8 +21,9 @@ docker compose rm -f

 The docker compose file contains the following components:

-* fluentd - fluentd is configured to collect logs from the `docker`, you can find configuration in the `fluent-bit.conf`. It writes data in VictoriaLogs
-* VictoriaLogs - the log database, it accepts the data from `fluentd` by json line protocol
+* fluentd - logs collection agent configured to collect and write data to `victorialogs`
+* victorialogs - logs database, receives data from `fluentd` agent
+* victoriametrics - metrics database, which collects metrics from `victorialogs` and `fluentd` for observability purposes

 Querying the data

@@ -0,0 +1,30 @@
+include:
+  - ../compose-base.yml
+services:
+  fluentd:
+    build: .
+    volumes:
+      - /var/lib/docker/containers:/var/lib/docker/containers:ro
+      - ${PWD}/fluent.conf:/fluentd/etc/fluent.conf
+    depends_on: [victorialogs]
+    ports:
+      - "5140:5140"
+      - "24224:24224"
+    healthcheck:
+      test: ["CMD", "nc", "-z", "-n", "-v", "127.0.0.1", "24224"]
+      start_period: 3s
+  nginx:
+    image: nginx:1.27
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost"]
+      start_period: 3s
+    ports:
+      - "8080:80"
+    logging:
+      driver: fluentd
+      options:
+        fluentd-address: 127.0.0.1:24224
+        labels: com.docker.compose.service
+    depends_on:
+      fluentd:
+        condition: service_healthy

@@ -1,11 +0,0 @@
-include:
-  - ../compose.yml
-services:
-  fluentd:
-    build: .
-    volumes:
-      - /var/lib/docker/containers:/var/lib/docker/containers:ro
-      - ${PWD}/fluent.conf:/fluentd/etc/fluent.conf
-    depends_on: [victorialogs]
-    ports:
-      - "5140:5140"

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: fluentd-elasticsearch

@@ -1,13 +1,13 @@
 <source>
-  @type tail
-  format none
-  tag docker.testlog
-  path /var/lib/docker/containers/**/*.log
+  @type forward
+  port 24224
+  bind 0.0.0.0
 </source>

 <match **>
   @type elasticsearch
   host victorialogs
   path /insert/elasticsearch
+  custom_headers {"VL-Msg-Field": "log", "VL-Stream-Fields": "com.docker.compose.service"}
   port 9428
 </match>

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: fluentd-jsonline

@@ -1,12 +1,11 @@
 <source>
-  @type tail
-  format none
-  tag docker.testlog
-  path /var/lib/docker/containers/**/*.log
+  @type forward
+  port 24224
+  bind 0.0.0.0
 </source>

 <match **>
   @type http
   endpoint "http://victorialogs:9428/insert/jsonline"
-  headers {"VL-Msg-Field": "log", "VL-Time-Field": "time", "VL-Stream-Fields": "path"}
+  headers {"VL-Msg-Field": "log", "VL-Stream-Fields": "com.docker.compose.service"}
 </match>

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: fluentbit-loki

@@ -1,10 +1,17 @@
 <source>
-  @type tail
-  format none
-  tag docker.testlog
-  path /var/lib/docker/containers/**/*.log
+  @type forward
+  port 24224
+  bind 0.0.0.0
 </source>

+<filter **>
+  @type record_transformer
+  enable_ruby true
+  <record>
+    service ${record['com.docker.compose.service']}
+  </record>
+</filter>
+
 <match **>
   @type loki
   url "http://victorialogs:9428/insert"
@@ -12,6 +19,8 @@
     flush_interval 10s
     flush_at_shutdown true
   </buffer>
-  custom_headers {"VL-Msg-Field": "log", "VL-Time-Field": "time", "VL-Stream-Fields": "path"}
+  <label>
+    service
+  </label>
   buffer_chunk_limit 1m
 </match>

@@ -0,0 +1,3 @@
+include:
+  - ../compose-base.yml
+name: fluentbit-syslog

@@ -0,0 +1,19 @@
+<source>
+  @type tail
+  format none
+  tag docker.testlog
+  path /var/lib/docker/containers/**/*.log
+</source>
+
+<match **>
+  @type remote_syslog
+  host victorialogs
+  port 8094
+  severity debug
+  program fluentd
+  protocol tcp
+  <format>
+    @type single_value
+    message_key message
+  </format>
+</match>

@@ -1,16 +1,13 @@
-# Docker compose Logstash integration with VictoriaLogs for given below protocols:
+# Docker compose Logstash integration with VictoriaLogs
+
+The folder contains examples of [Logstash](https://www.elastic.co/logstash) integration with VictoriaLogs using protocols:

 * [loki](./loki)
 * [jsonline single node](./jsonline)
 * [jsonline HA setup](./jsonline-ha)
 * [elasticsearch](./elasticsearch)

-It is required to use [OpenSearch plugin](https://github.com/opensearch-project/logstash-output-opensearch) for output configuration.
-Plugin can be installed by using the following command:
-```
-bin/logstash-plugin install logstash-output-opensearch
-```
-OpenSearch plugin is required because elasticsearch output plugin performs various checks for Elasticsearch version and license which are not applicable for VictoriaLogs.
+All required plugins, that should be installed in order to support protocols listed above can be found in a [Dockerfile](./Dockerfile)

 To spin-up environment `cd` to any of listed above directories run the following command:
 ```
@@ -25,8 +22,9 @@ docker compose rm -f

 The docker compose file contains the following components:

-* logstash - logstash is configured to accept `syslog` on `5140` port, you can find configuration in the `pipeline.conf`. It writes data in VictoriaLogs
-* VictoriaLogs - the log database, it accepts the data from `logstash` by elastic protocol
+* logstash - logs collection agent configured to collect and write data to `victorialogs`
+* victorialogs - logs database, receives data from `logstash` agent
+* victoriametrics - metrics database, which collects metrics from `victorialogs` and `logstash` for observability purposes

 Querying the data

@@ -1,5 +1,5 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 services:
   logstash:
     build:

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: logstash-elasticsearch

@@ -1,5 +1,5 @@
 include:
   - path:
-      - ../compose.yml
+      - ../compose-base.yml
       - ../../compose-ha.yml
 name: logstash-jsonline-ha

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: logstash-jsonline

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: logstash-loki

@@ -1,4 +1,6 @@
-# Docker compose OpenTelemetry integration with VictoriaLogs using protocols:
+# Docker compose OpenTelemetry collector integration with VictoriaLogs
+
+The folder contains examples of [OpenTelemetry collector](https://opentelemetry.io/docs/collector/) integration with VictoriaLogs using protocols:

 * [loki](./loki)
 * [otlp](./otlp)
@@ -6,8 +8,6 @@
 * [elasticsearch single node](./elasticsearch)
 * [elasticsearch HA mode](./elasticsearch-ha/)

-The folder contains the example of integration of [OpenTelemetry collector](https://opentelemetry.io/docs/collector/) with Victorialogs
-
 To spin-up environment `cd` to any of listed above directories run the following command:
 ```
 docker compose up -d
@@ -21,9 +21,9 @@ docker compose rm -f

 The docker compose file contains the following components:

-* collector - vector is configured to collect logs from the `docker`, you can find configuration in the `config.yaml`. It writes data in VictoriaLogs. It pushes metrics to VictoriaMetrics.
-* VictoriaLogs - the log database, it accepts the data from `collector` by elastic protocol
-* VictoriaMetrics - collects metrics from `VictoriaLogs` and `VictoriaMetrics`
+* collector - logs collection agent configured to collect and write data to `victorialogs`
+* victorialogs - logs database, receives data from `collector` agent
+* victoriametrics - metrics database, which collects metrics from `victorialogs` and `collector` for observability purposes

 Querying the data

@@ -1,5 +1,5 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 services:
   collector:
     image: docker.io/otel/opentelemetry-collector-contrib:0.108.0

@@ -1,5 +1,5 @@
 include:
   - path:
-      - ../compose.yml
+      - ../compose-base.yml
       - ../../compose-ha.yml
 name: collector-elasticsearch-ha

@@ -9,6 +9,15 @@ receivers:
   resource:
     region: us-east-1
 service:
+  telemetry:
+    metrics:
+      readers:
+        - periodic:
+            interval: 5000
+            exporter:
+              otlp:
+                protocol: http/protobuf
+                endpoint: http://victoriametrics:8428/opentelemetry/api/v1/push
   pipelines:
     logs:
       receivers: [filelog]

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: collector-elasticsearch

@@ -8,6 +8,15 @@ receivers:
   resource:
     region: us-east-1
 service:
+  telemetry:
+    metrics:
+      readers:
+        - periodic:
+            interval: 5000
+            exporter:
+              otlp:
+                protocol: http/protobuf
+                endpoint: http://victoriametrics:8428/opentelemetry/api/v1/push
   pipelines:
     logs:
       receivers: [filelog]

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: collector-loki

@@ -7,6 +7,15 @@ receivers:
   resource:
     region: us-east-1
 service:
+  telemetry:
+    metrics:
+      readers:
+        - periodic:
+            interval: 5000
+            exporter:
+              otlp:
+                protocol: http/protobuf
+                endpoint: http://victoriametrics:8428/opentelemetry/api/v1/push
   pipelines:
     logs:
       receivers: [filelog]

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: collector-otlp

@@ -9,6 +9,15 @@ receivers:
   resource:
     region: us-east-1
 service:
+  telemetry:
+    metrics:
+      readers:
+        - periodic:
+            interval: 5000
+            exporter:
+              otlp:
+                protocol: http/protobuf
+                endpoint: http://victoriametrics:8428/opentelemetry/api/v1/push
   pipelines:
     logs:
       receivers: [filelog]

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: collector-syslog

@@ -17,6 +17,15 @@ receivers:
   filelog:
     include: [/tmp/logs/*.log]
 service:
+  telemetry:
+    metrics:
+      readers:
+        - periodic:
+            interval: 5000
+            exporter:
+              otlp:
+                protocol: http/protobuf
+                endpoint: http://victoriametrics:8428/opentelemetry/api/v1/push
   pipelines:
     logs:
       receivers: [filelog]

@@ -0,0 +1,32 @@
+# Docker compose Promtail integration with VictoriaLogs
+
+The folder contains the example of integration of [Promtail agent](https://grafana.com/docs/loki/latest/send-data/promtail/) with VictoriaLogs using protocols:
+
+* [loki](./loki)
+
+To spin-up environment `cd` to any of listed above directories run the following command:
+```
+docker compose up -d
+```
+
+To shut down the docker-compose environment run the following command:
+```
+docker compose down
+docker compose rm -f
+```
+
+The docker compose file contains the following components:
+
+* promtail - logs collection agent configured to collect and write data to `victorialogs`
+* victorialogs - logs database, receives data from `promtail` agent
+* victoriametrics - metrics database, which collects metrics from `victorialogs` and `promtail` for observability purposes
+
+Querying the data
+
+* [vmui](https://docs.victoriametrics.com/victorialogs/querying/#vmui) - a web UI is accessible by `http://localhost:9428/select/vmui`
+* for querying the data via command-line please check [these docs](https://docs.victoriametrics.com/victorialogs/querying/#command-line)
+
+Promtail agent configuration example can be found below:
+
+* [loki](./loki/config.yml)
+
+Please, note that `_stream_fields` parameter must follow recommended [best practices](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to achieve better performance.

@@ -1,5 +1,5 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 services:
   promtail:
     image: grafana/promtail:3.0.1

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: promtail-elasticsearch

@@ -1,6 +1,6 @@
-# Docker compose Telegraf integration with VictoriaLogs for docker
+# Docker compose Telegraf integration with VictoriaLogs

-The folder contains the examples of integration of [telegraf](https://www.influxdata.com/time-series-platform/telegraf/) with VictoriaLogs using:
+The folder contains examples of [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) integration with VictoriaLogs using protocols:

 * [elasticsearch](./elasticsearch)
 * [loki](./loki)
@@ -20,9 +20,9 @@ docker compose rm -f

 The docker compose file contains the following components:

-* telegraf - telegraf is configured to collect logs from the `docker`, you can find configuration in the `telegraf.conf`. It writes data in VictoriaLogs. It pushes metrics to VictoriaMetrics.
-* VictoriaLogs - the log database, it accepts the data from `telegraf` by elastic protocol
-* VictoriaMetrics - collects metrics from `VictoriaLogs` and `VictoriaMetrics`
+* telegraf - logs collection agent configured to collect and write data to `victorialogs`
+* victorialogs - logs database, receives data from `telegraf` agent
+* victoriametrics - metrics database, which collects metrics from `victorialogs` and `telegraf` for observability purposes

 Querying the data

@@ -1,5 +1,5 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 services:
   telegraf:
     image: bitnami/telegraf:1.31.3

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: telegraf-elasticsearch

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: telegraf-jsonline

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: telegraf-loki

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: telegraf-syslog

@@ -1,12 +1,12 @@
-# Docker compose Vector integration with VictoriaLogs using given below protocols:
+# Docker compose Vector integration with VictoriaLogs
+
+The folder contains examples of [Vector](https://vector.dev/docs/) integration with VictoriaLogs using protocols:

 * [elasticsearch](./elasticsearch)
 * [loki](./loki)
 * [jsonline single node](./jsonline)
 * [jsonline HA setup](./jsonline-ha)

-The folder contains the example of integration of [vector](https://vector.dev/docs/) with Victorialogs
-
 To spin-up environment `cd` to any of listed above directories run the following command:
 ```
 docker compose up -d
@@ -20,9 +20,9 @@ docker compose rm -f

 The docker compose file contains the following components:

-* vector - vector is configured to collect logs from the `docker`, you can find configuration in the `vector.yaml`. It writes data in VictoriaLogs. It pushes metrics to VictoriaMetrics.
-* VictoriaLogs - the log database, it accepts the data from `vector` by DataDog protocol
-* VictoriaMetrics - collects metrics from `VictoriaLogs` and `VictoriaMetrics`
+* vector - logs collection agent configured to collect and write data to `victorialogs`
+* victorialogs - logs database, receives data from `vector` agent
+* victoriametrics - metrics database, which collects metrics from `victorialogs` and `vector` for observability purposes

 Querying the data

@@ -1,5 +1,5 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 services:
   vector:
     image: docker.io/timberio/vector:0.40.0-distroless-static

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: vector-elasticsearch

@@ -1,5 +1,5 @@
 include:
   - path:
-      - ../compose.yml
+      - ../compose-base.yml
       - ../../compose-ha.yml
 name: vector-jsonline-ha

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: vector-jsonline

@@ -1,3 +1,3 @@
 include:
-  - ../compose.yml
+  - ../compose-base.yml
 name: vector-loki

@@ -8,7 +8,7 @@ services:
       - 3000:3000
     volumes:
       - grafanadata:/var/lib/grafana
-      - ./provisioning/datasources/victoriametrics-datasource:/etc/grafana/provisioning/datasources
+      - ./provisioning/datasources/victoriametrics-datasource/cluster.yml:/etc/grafana/provisioning/datasources/cluster.yml
       - ./provisioning/dashboards:/etc/grafana/provisioning/dashboards
       - ./provisioning/plugins/:/var/lib/grafana/plugins
       - ./../../dashboards/vm/victoriametrics-cluster.json:/var/lib/grafana/dashboards/vm.json

@@ -8,7 +8,7 @@ services:
       - 3000:3000
     volumes:
       - grafanadata:/var/lib/grafana
-      - ./provisioning/datasources/victoriametrics-datasource:/etc/grafana/provisioning/datasources
+      - ./provisioning/datasources/victoriametrics-datasource/single.yml:/etc/grafana/provisioning/datasources/single.yml
       - ./provisioning/dashboards:/etc/grafana/provisioning/dashboards
       - ./provisioning/plugins/:/var/lib/grafana/plugins
       - ./../../dashboards/vm/victoriametrics.json:/var/lib/grafana/dashboards/vm.json

@@ -3,7 +3,7 @@ version: '3'
 services:
   # Run `make package-victoria-logs` to build victoria-logs image
   vlogs:
-    image: docker.io/victoriametrics/victoria-logs:v0.30.1-victorialogs
+    image: docker.io/victoriametrics/victoria-logs:v0.32.0-victorialogs
     volumes:
      - vlogs:/vlogs
     ports:

View file

@ -1184,7 +1184,10 @@ In this case [forced merge](#forced-merge) may help freeing up storage space.
It is recommended verifying which metrics will be deleted with the call to `http://<victoria-metrics-addr>:8428/api/v1/series?match[]=<timeseries_selector_for_delete>` It is recommended verifying which metrics will be deleted with the call to `http://<victoria-metrics-addr>:8428/api/v1/series?match[]=<timeseries_selector_for_delete>`
before actually deleting the metrics. By default, this query will only scan series in the past 5 minutes, so you may need to before actually deleting the metrics. By default, this query will only scan series in the past 5 minutes, so you may need to
adjust `start` and `end` to a suitable range to achieve match hits. adjust `start` and `end` to a suitable range to achieve match hits. Also, if the
number of returned time series is rather big you will need to set
`-search.maxDeleteSeries` flag (see
[Resource usage limits](#resource-usage-limits)).
The `/api/v1/admin/tsdb/delete_series` handler may be protected with `authKey` if `-deleteAuthKey` command-line flag is set. The `/api/v1/admin/tsdb/delete_series` handler may be protected with `authKey` if `-deleteAuthKey` command-line flag is set.
Note that handler accepts any HTTP method, so sending a `GET` request to `/api/v1/admin/tsdb/delete_series` will result in deletion of time series. Note that handler accepts any HTTP method, so sending a `GET` request to `/api/v1/admin/tsdb/delete_series` will result in deletion of time series.
@ -1721,6 +1724,13 @@ By default, VictoriaMetrics is tuned for an optimal resource usage under typical
of CPU time and memory when the database contains a big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/faq/#what-is-high-churn-rate). of CPU time and memory when the database contains a big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/faq/#what-is-high-churn-rate).
In this case it might be useful to set `-search.maxSeries` to a quite low value in order to limit CPU and memory usage. In this case it might be useful to set `-search.maxSeries` to a quite low value in order to limit CPU and memory usage.
See also `-search.maxLabelsAPIDuration` and `-search.maxLabelsAPISeries`. See also `-search.maxLabelsAPIDuration` and `-search.maxLabelsAPISeries`.
- `-search.maxDeleteSeries` limits the number of unique time series that can be
deleted by a single
[/api/v1/admin/tsdb/delete_series](https://docs.victoriametrics.com/url-examples/#apiv1admintsdbdelete_series)
call. Deleting too many time series may require a big amount of CPU and memory,
and this limit guards against unplanned resource usage spikes. Also see the
[How to delete time series](#how-to-delete-time-series) section to learn about
different ways of deleting series.
- `-search.maxTagKeys` limits the number of items, which may be returned from [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels). - `-search.maxTagKeys` limits the number of items, which may be returned from [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels).
This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory
when the database contains a big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/faq/#what-is-high-churn-rate). when the database contains a big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/faq/#what-is-high-churn-rate).

View file

@ -16,6 +16,23 @@ according to [these docs](https://docs.victoriametrics.com/victorialogs/quicksta
## tip ## tip
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add support for autocomplete in LogsQL queries. This feature provides suggestions for field names, field values, and pipe names. * FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add support for autocomplete in LogsQL queries. This feature provides suggestions for field names, field values, and pipe names.
* BUGFIX: do not return field values with zero matching logs from [`field_values`](https://docs.victoriametrics.com/victorialogs/logsql/#field_values-pipe), [`top`](https://docs.victoriametrics.com/victorialogs/logsql/#top-pipe) and [`uniq`](https://docs.victoriametrics.com/victorialogs/logsql/#uniq-pipe) pipes. See [this issue](https://github.com/VictoriaMetrics/victorialogs-datasource/issues/72#issuecomment-2352078483).
## [v0.32.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.32.0-victorialogs)
Released at 2024-09-29
* FEATURE: [data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/): accept Unix timestamps in seconds in the ingested logs. This simplifies integration with systems, which prefer Unix timestamps over text-based representation of time.
* FEATURE: [`sort` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#sort-pipe): allow using the `order` alias instead of `sort`. For example, the `_time:5s | order by (_time)` query works the same as `_time:5s | sort by (_time)`. This simplifies the transition to [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) from SQL-like query languages.
* FEATURE: [`stats` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe): allow using multiple identical [stats functions](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe-functions) with distinct [filters](https://docs.victoriametrics.com/victorialogs/logsql/#stats-with-additional-filters) and automatically generated result names. For example, the `_time:5m | count(), count() if (error)` query now works as expected, i.e. it returns two results over the last 5 minutes: the total number of logs and the number of logs with the `error` [word](https://docs.victoriametrics.com/victorialogs/logsql/#word). Previously this query couldn't be executed, because the `if (...)` condition wasn't included in the automatically generated result name, so both results had the same name - `count(*)`.
* BUGFIX: properly calculate [`uniq`](https://docs.victoriametrics.com/victorialogs/logsql/#uniq-pipe) and [`top`](https://docs.victoriametrics.com/victorialogs/logsql/#top-pipe) pipes. Previously they could return invalid results in some cases.
## [v0.31.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.31.0-victorialogs)
Released at 2024-09-27
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): improved readability of staircase graphs and tooltip usability. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6545#issuecomment-2336805237). * FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): improved readability of staircase graphs and tooltip usability. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6545#issuecomment-2336805237).
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): simplify query input by adding only the label name when `ctrl`+clicking the line legend. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6545#issuecomment-2336805237). * FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): simplify query input by adding only the label name when `ctrl`+clicking the line legend. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6545#issuecomment-2336805237).
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): keep selected columns in table view on page reloads. Before, selected columns were reset on each update. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7016). * FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): keep selected columns in table view on page reloads. Before, selected columns were reset on each update. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7016).

View file

@ -2110,6 +2110,12 @@ The `by` keyword can be skipped in `sort ...` pipe. For example, the following q
_time:5m | sort (foo, bar) desc _time:5m | sort (foo, bar) desc
``` ```
The `order` alias can be used instead of `sort`, so the following query is equivalent to the previous one:
```logsql
_time:5m | order by (foo, bar) desc
```
Sorting of a big number of logs can consume a lot of CPU time and memory. Sometimes it is enough to return the first `N` entries with the biggest Sorting of a big number of logs can consume a lot of CPU time and memory. Sometimes it is enough to return the first `N` entries with the biggest
or the smallest values. This can be done by adding `limit N` to the end of `sort ...` pipe. or the smallest values. This can be done by adding `limit N` to the end of `sort ...` pipe.
Such a query consumes lower amounts of memory when sorting a big number of logs, since it keeps in memory only `N` log entries. Such a query consumes lower amounts of memory when sorting a big number of logs, since it keeps in memory only `N` log entries.
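For example, the following query returns up to 10 logs with the biggest values of the `request_duration` field over the last 5 minutes (the field name is illustrative):
```logsql
_time:5m | sort by (request_duration) desc limit 10
```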

View file

@ -33,8 +33,8 @@ Just download archive for the needed Operating system and architecture, unpack i
For example, the following commands download VictoriaLogs archive for Linux/amd64, unpack and run it: For example, the following commands download VictoriaLogs archive for Linux/amd64, unpack and run it:
```sh ```sh
curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v0.30.1-victorialogs/victoria-logs-linux-amd64-v0.30.1-victorialogs.tar.gz curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v0.32.0-victorialogs/victoria-logs-linux-amd64-v0.32.0-victorialogs.tar.gz
tar xzf victoria-logs-linux-amd64-v0.30.1-victorialogs.tar.gz tar xzf victoria-logs-linux-amd64-v0.32.0-victorialogs.tar.gz
./victoria-logs-prod ./victoria-logs-prod
``` ```
@ -58,7 +58,7 @@ Here is the command to run VictoriaLogs in a Docker container:
```sh ```sh
docker run --rm -it -p 9428:9428 -v ./victoria-logs-data:/victoria-logs-data \ docker run --rm -it -p 9428:9428 -v ./victoria-logs-data:/victoria-logs-data \
docker.io/victoriametrics/victoria-logs:v0.30.1-victorialogs docker.io/victoriametrics/victoria-logs:v0.32.0-victorialogs
``` ```
See also: See also:

View file

@ -22,7 +22,7 @@ The following functionality is planned in the future versions of VictoriaLogs:
- Support for [data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/) from popular log collectors and formats: - Support for [data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/) from popular log collectors and formats:
- [x] [OpenTelemetry for logs](https://docs.victoriametrics.com/victorialogs/data-ingestion/opentelemetry/) - [x] [OpenTelemetry for logs](https://docs.victoriametrics.com/victorialogs/data-ingestion/opentelemetry/)
- [ ] Fluentd - [x] [Fluentd](https://docs.victoriametrics.com/victorialogs/data-ingestion/fluentd/)
- [ ] [Journald](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4618) (systemd) - [ ] [Journald](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4618) (systemd)
- [ ] [Datadog protocol for logs](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6632) - [ ] [Datadog protocol for logs](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6632)
- [x] [Telegraf](https://docs.victoriametrics.com/victorialogs/data-ingestion/telegraf/) - [x] [Telegraf](https://docs.victoriametrics.com/victorialogs/data-ingestion/telegraf/)

View file

@ -11,9 +11,6 @@ aliases:
- /victorialogs/data-ingestion/fluentbit.html - /victorialogs/data-ingestion/fluentbit.html
- /victorialogs/data-ingestion/Fluentbit.html - /victorialogs/data-ingestion/Fluentbit.html
--- ---
# Fluentbit setup
VictoriaLogs supports the following Fluentbit outputs: VictoriaLogs supports the following Fluentbit outputs:
- [Loki](#loki) - [Loki](#loki)
- [HTTP JSON](#http) - [HTTP JSON](#http)

View file

@ -0,0 +1,109 @@
---
weight: 2
title: Fluentd setup
disableToc: true
menu:
docs:
parent: "victorialogs-data-ingestion"
weight: 2
aliases:
- /VictoriaLogs/data-ingestion/Fluentd.html
- /victorialogs/data-ingestion/fluentd.html
- /victorialogs/data-ingestion/Fluentd.html
---
VictoriaLogs supports the following Fluentd outputs:
- [Loki](#loki)
- [HTTP JSON](#http)
## Loki
Specify the [loki output](https://docs.fluentd.io/manual/pipeline/outputs/loki) section in the `fluentd.conf`
for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):
```fluentd
<match **>
@type loki
url "http://localhost:9428/insert"
<buffer>
flush_interval 10s
flush_at_shutdown true
</buffer>
custom_headers {"VL-Msg-Field": "log", "VL-Time-Field": "time", "VL-Stream-Fields": "path"}
buffer_chunk_limit 1m
</match>
```
## HTTP
Specify the [http output](https://docs.fluentd.io/manual/pipeline/outputs/http) section in the `fluentd.conf`
for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):
```fluentd
<match **>
@type http
endpoint "http://localhost:9428/insert/jsonline"
headers {"VL-Msg-Field": "log", "VL-Time-Field": "time", "VL-Stream-Fields": "path"}
</match>
```
Substitute the host (`localhost`) and port (`9428`) with the real TCP address of VictoriaLogs.
See [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters) for details on the query args specified in the `endpoint`.
It is recommended verifying whether the initial setup generates the needed [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
and uses the correct [stream fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields).
This can be done by specifying the `debug` [parameter](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters) in the `endpoint`
and then inspecting VictoriaLogs logs:
```fluentd
<match **>
@type http
endpoint "http://localhost:9428/insert/jsonline&debug=1"
headers {"VL-Msg-Field": "log", "VL-Time-Field": "time", "VL-Stream-Fields": "path"}
</match>
```
If some [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) must be skipped
during data ingestion, then they can be put into the `ignore_fields` [parameter](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters).
For example, the following config instructs VictoriaLogs to ignore `log.offset` and `event.original` fields in the ingested logs:
```fluentd
<match **>
@type http
endpoint "http://localhost:9428/insert/jsonline&ignore_fields=log.offset,event.original"
headers {"VL-Msg-Field": "log", "VL-Time-Field": "time", "VL-Stream-Fields": "path"}
</match>
```
If Fluentd sends logs to a VictoriaLogs instance in another datacenter, then it may be useful to enable data compression via the `compress gzip` option.
This usually saves network bandwidth and costs by up to 5 times:
```fluentd
<match **>
@type http
endpoint "http://localhost:9428/insert/jsonline&ignore_fields=log.offset,event.original"
headers {"VL-Msg-Field": "log", "VL-Time-Field": "time", "VL-Stream-Fields": "path"}
compress gzip
</match>
```
By default, the ingested logs are stored in the `(AccountID=0, ProjectID=0)` [tenant](https://docs.victoriametrics.com/victorialogs/keyconcepts/#multitenancy).
If you need to store logs in another tenant, then specify the needed tenant via `header` options.
For example, the following `fluentd.conf` config instructs Fluentd to store the data in the `(AccountID=12, ProjectID=34)` tenant:
```fluentd
<match **>
@type http
endpoint "http://localhost:9428/insert/jsonline"
headers {"VL-Msg-Field": "log", "VL-Time-Field": "time", "VL-Stream-Fields": "path"}
header AccountID 12
header ProjectID 34
</match>
```
See also:
- [Data ingestion troubleshooting](https://docs.victoriametrics.com/victorialogs/data-ingestion/#troubleshooting).
- [How to query VictoriaLogs](https://docs.victoriametrics.com/victorialogs/querying/).
- [Fluentd HTTP output config docs](https://docs.fluentd.org/output/http).
- [Docker-compose demo for Fluentd integration with VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/fluentd).

View file

@ -3,6 +3,7 @@
- Syslog, Rsyslog and Syslog-ng - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/). - Syslog, Rsyslog and Syslog-ng - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/).
- Filebeat - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/filebeat/). - Filebeat - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/filebeat/).
- Fluentbit - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/fluentbit/). - Fluentbit - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/fluentbit/).
- Fluentd - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/fluentd/).
- Logstash - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/logstash/). - Logstash - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/logstash/).
- Vector - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/vector/). - Vector - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/vector/).
- Promtail (aka Grafana Loki) - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/promtail/). - Promtail (aka Grafana Loki) - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/promtail/).
@ -45,9 +46,14 @@ It is possible to push thousands of log lines in a single request to this API.
If the [timestamp field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field) is set to `"0"`, If the [timestamp field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field) is set to `"0"`,
then the current timestamp on the VictoriaLogs side is used for each ingested log line. then the current timestamp on the VictoriaLogs side is used for each ingested log line.
Otherwise the timestamp field must be in the [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) or [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) format. Otherwise the timestamp field must be in one of the following formats:
For example, `2023-06-20T15:32:10Z` or `2023-06-20 15:32:10.123456789+02:00`.
If timezone information is missing (for example, `2023-06-20 15:32:10`), then the time is parsed in the local timezone of the host where VictoriaLogs runs. - [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) or [RFC3339](https://www.rfc-editor.org/rfc/rfc3339).
For example, `2023-06-20T15:32:10Z` or `2023-06-20 15:32:10.123456789+02:00`.
If timezone information is missing (for example, `2023-06-20 15:32:10`),
then the time is parsed in the local timezone of the host where VictoriaLogs runs.
- Unix timestamp in seconds or in milliseconds. For example, `1686026893` (seconds) or `1686026893735` (milliseconds).
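For example, here is a minimal sketch of ingesting a log line with a Unix timestamp in milliseconds via this API; the field names and the `_time_field` / `_msg_field` [query args](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters) are illustrative:
```sh
echo '{"create":{}}
{"@timestamp":"1686026893735","message":"hello from the bulk example"}
' | curl -X POST -H 'Content-Type: application/json' --data-binary @- \
  'http://localhost:9428/insert/elasticsearch/_bulk?_time_field=@timestamp&_msg_field=message'
```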
See [these docs](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) for details on fields, See [these docs](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) for details on fields,
which must be present in the ingested log messages. which must be present in the ingested log messages.
@ -95,9 +101,14 @@ It is possible to push unlimited number of log lines in a single request to this
If the [timestamp field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field) is set to `"0"`, If the [timestamp field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field) is set to `"0"`,
then the current timestamp on the VictoriaLogs side is used for each ingested log line. then the current timestamp on the VictoriaLogs side is used for each ingested log line.
Otherwise the timestamp field must be in the [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) or [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) format. Otherwise the timestamp field must be in one of the following formats:
For example, `2023-06-20T15:32:10Z` or `2023-06-20 15:32:10.123456789+02:00`.
If timezone information is missing (for example, `2023-06-20 15:32:10`), then the time is parsed in the local timezone of the host where VictoriaLogs runs. - [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) or [RFC3339](https://www.rfc-editor.org/rfc/rfc3339).
For example, `2023-06-20T15:32:10Z` or `2023-06-20 15:32:10.123456789+02:00`.
If timezone information is missing (for example, `2023-06-20 15:32:10`),
then the time is parsed in the local timezone of the host where VictoriaLogs runs.
- Unix timestamp in seconds or in milliseconds. For example, `1686026893` (seconds) or `1686026893735` (milliseconds).
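For example, here is a minimal sketch of ingesting a log line with a Unix timestamp in seconds (the field names are illustrative):
```sh
echo '{"_time":"1686026893","_msg":"hello from the jsonline example"}' \
  | curl -X POST --data-binary @- \
  'http://localhost:9428/insert/jsonline?_time_field=_time&_msg_field=_msg'
```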
See [these docs](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) for details on fields, See [these docs](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) for details on fields,
which must be present in the ingested log messages. which must be present in the ingested log messages.
@ -286,3 +297,5 @@ Here is the list of log collectors and their ingestion formats supported by Vict
| [Promtail](https://docs.victoriametrics.com/victorialogs/data-ingestion/promtail/) | No | No | [Yes](https://grafana.com/docs/loki/latest/clients/promtail/configuration/#clients) | No | No | | [Promtail](https://docs.victoriametrics.com/victorialogs/data-ingestion/promtail/) | No | No | [Yes](https://grafana.com/docs/loki/latest/clients/promtail/configuration/#clients) | No | No |
| [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/) | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter) | No | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/lokiexporter) | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/syslogexporter) | [Yes](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter) | | [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/) | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter) | No | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/lokiexporter) | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/syslogexporter) | [Yes](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter) |
| [Telegraf](https://docs.victoriametrics.com/victorialogs/data-ingestion/telegraf/) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/elasticsearch) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/http) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) | [Yes](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/syslog) | Yes | | [Telegraf](https://docs.victoriametrics.com/victorialogs/data-ingestion/telegraf/) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/elasticsearch) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/http) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) | [Yes](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/syslog) | Yes |
| [Fluentd](https://docs.victoriametrics.com/victorialogs/data-ingestion/fluentd/) | [Yes](https://github.com/uken/fluent-plugin-elasticsearch) | [Yes](https://docs.fluentd.org/output/http) | [Yes](https://grafana.com/docs/loki/latest/send-data/fluentd/) | [Yes](https://github.com/fluent-plugins-nursery/fluent-plugin-remote_syslog) | No |

Some files were not shown because too many files have changed in this diff.