Merge branch 'refs/heads/master' into vmui/logs/add-autocomplete

Commit 13fbeea678. 108 changed files with 2863 additions and 671 deletions.
Makefile (2 changes):

```diff
@@ -495,7 +495,7 @@ golangci-lint: install-golangci-lint
 	golangci-lint run
 
 install-golangci-lint:
-	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.60.1
+	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.60.3
 
 remove-golangci-lint:
 	rm -rf `which golangci-lint`
```
```diff
@@ -1,8 +1,8 @@
-ARG base_image
+ARG base_image=non-existing
 FROM $base_image
 
 EXPOSE 9428
 
 ENTRYPOINT ["/victoria-logs-prod"]
-ARG src_binary
+ARG src_binary=non-existing
 COPY $src_binary ./victoria-logs-prod
```
```diff
@@ -1,6 +1,6 @@
 # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
-ARG certs_image
-ARG root_image
+ARG certs_image=non-existing
+ARG root_image=non-existing
 FROM $certs_image AS certs
 RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
 
@@ -8,5 +8,5 @@ FROM $root_image
 COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 EXPOSE 9428
 ENTRYPOINT ["/victoria-logs-prod"]
-ARG TARGETARCH
+ARG TARGETARCH=non-existing
 COPY victoria-logs-linux-${TARGETARCH}-prod ./victoria-logs-prod
```
```diff
@@ -1,8 +1,8 @@
-ARG base_image
+ARG base_image=non-existing
 FROM $base_image
 
 EXPOSE 8428
 
 ENTRYPOINT ["/victoria-metrics-prod"]
-ARG src_binary
+ARG src_binary=non-existing
 COPY $src_binary ./victoria-metrics-prod
```
```diff
@@ -1,6 +1,6 @@
 # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
-ARG certs_image
-ARG root_image
+ARG certs_image=non-existing
+ARG root_image=non-existing
 FROM $certs_image AS certs
 RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
 
@@ -8,5 +8,5 @@ FROM $root_image
 COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 EXPOSE 8428
 ENTRYPOINT ["/victoria-metrics-prod"]
-ARG TARGETARCH
+ARG TARGETARCH=non-existing
 COPY victoria-metrics-linux-${TARGETARCH}-prod ./victoria-metrics-prod
```
```diff
@@ -2,6 +2,7 @@ package insertutils
 
 import (
 	"net/http"
+	"strings"
 	"sync"
 	"time"
 
```
```diff
@@ -38,22 +39,46 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
 		return nil, err
 	}
 
-	// Extract time field name from _time_field query arg
-	var timeField = "_time"
+	// Extract time field name from _time_field query arg or header
+	timeField := "_time"
 	if tf := r.FormValue("_time_field"); tf != "" {
 		timeField = tf
+	} else if tf = r.Header.Get("VL-Time-Field"); tf != "" {
+		timeField = tf
 	}
 
-	// Extract message field name from _msg_field query arg
-	var msgField = ""
+	// Extract message field name from _msg_field query arg or header
+	msgField := ""
 	if msgf := r.FormValue("_msg_field"); msgf != "" {
 		msgField = msgf
+	} else if msgf = r.Header.Get("VL-Msg-Field"); msgf != "" {
+		msgField = msgf
 	}
 
 	streamFields := httputils.GetArray(r, "_stream_fields")
+	if len(streamFields) == 0 {
+		if sf := r.Header.Get("VL-Stream-Fields"); len(sf) > 0 {
+			streamFields = strings.Split(sf, ",")
+		}
+	}
 	ignoreFields := httputils.GetArray(r, "ignore_fields")
+	if len(ignoreFields) == 0 {
+		if f := r.Header.Get("VL-Ignore-Fields"); len(f) > 0 {
+			ignoreFields = strings.Split(f, ",")
+		}
+	}
 
 	debug := httputils.GetBool(r, "debug")
+	if !debug {
+		if dh := r.Header.Get("VL-Debug"); len(dh) > 0 {
+			hv := strings.ToLower(dh)
+			switch hv {
+			case "", "0", "f", "false", "no":
+			default:
+				debug = true
+			}
+		}
+	}
 	debugRequestURI := ""
 	debugRemoteAddr := ""
 	if debug {
@@ -71,6 +96,7 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
 		DebugRequestURI: debugRequestURI,
 		DebugRemoteAddr: debugRemoteAddr,
 	}
 
 	return cp, nil
 }
```
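The new `VL-*` headers let ingestion clients pass these parameters without rewriting query strings, and they apply only when the corresponding query arg is absent. A minimal client sketch, assuming a locally running VictoriaLogs and using the `/insert/jsonline` endpoint that appears elsewhere in this diff; the field names (`ts`, `message`, `host,app`) are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	line := []byte(`{"ts":"2024-08-20T10:00:00Z","message":"hello","host":"h1","app":"web"}` + "\n")
	req, err := http.NewRequest("POST", "http://localhost:9428/insert/jsonline", bytes.NewReader(line))
	if err != nil {
		panic(err)
	}
	// Headers are consulted only when the matching query args are missing.
	req.Header.Set("VL-Time-Field", "ts")
	req.Header.Set("VL-Msg-Field", "message")
	req.Header.Set("VL-Stream-Fields", "host,app") // comma-separated, split via strings.Split
	req.Header.Set("VL-Debug", "1")                // any value except "", "0", "f", "false", "no" enables debug
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```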
```diff
@@ -154,7 +180,7 @@ func (lmp *logMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field
 	if lmp.cp.Debug {
 		s := lmp.lr.GetRowString(0)
 		lmp.lr.ResetKeepSettings()
-		logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` query arg: %s", lmp.cp.DebugRemoteAddr, lmp.cp.DebugRequestURI, s)
+		logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` arg: %s", lmp.cp.DebugRemoteAddr, lmp.cp.DebugRequestURI, s)
 		rowsDroppedTotalDebug.Inc()
 		return
 	}
```
```diff
@@ -196,5 +222,7 @@ func (cp *CommonParams) NewLogMessageProcessor() LogMessageProcessor {
 	return lmp
 }
 
-var rowsDroppedTotalDebug = metrics.NewCounter(`vl_rows_dropped_total{reason="debug"}`)
-var rowsDroppedTotalTooManyFields = metrics.NewCounter(`vl_rows_dropped_total{reason="too_many_fields"}`)
+var (
+	rowsDroppedTotalDebug         = metrics.NewCounter(`vl_rows_dropped_total{reason="debug"}`)
+	rowsDroppedTotalTooManyFields = metrics.NewCounter(`vl_rows_dropped_total{reason="too_many_fields"}`)
+)
```
```diff
@@ -7,6 +7,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/jsonline"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/loki"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/opentelemetry"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/syslog"
 )
 
```
```diff
@@ -41,6 +42,9 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	case strings.HasPrefix(path, "/loki/"):
 		path = strings.TrimPrefix(path, "/loki")
 		return loki.RequestHandler(path, w, r)
+	case strings.HasPrefix(path, "/opentelemetry/"):
+		path = strings.TrimPrefix(path, "/opentelemetry")
+		return opentelemetry.RequestHandler(path, w, r)
 	default:
 		return false
 	}
```
app/vlinsert/opentelemetry/opentelemetry.go (new file, 143 lines):

```go
package opentelemetry

import (
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
	"github.com/VictoriaMetrics/metrics"
)

// RequestHandler processes Opentelemetry insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
	switch path {
	// use the same path as opentelemetry collector
	// https://opentelemetry.io/docs/specs/otlp/#otlphttp-request
	case "/v1/logs":
		if r.Header.Get("Content-Type") == "application/json" {
			httpserver.Errorf(w, r, "json encoding isn't supported for opentelemetry format. Use protobuf encoding")
			return true
		}
		handleProtobuf(r, w)
		return true
	default:
		return false
	}
}

func handleProtobuf(r *http.Request, w http.ResponseWriter) {
	startTime := time.Now()
	requestsProtobufTotal.Inc()
	reader := r.Body
	if r.Header.Get("Content-Encoding") == "gzip" {
		zr, err := common.GetGzipReader(reader)
		if err != nil {
			httpserver.Errorf(w, r, "cannot initialize gzip reader: %s", err)
			return
		}
		defer common.PutGzipReader(zr)
		reader = zr
	}

	wcr := writeconcurrencylimiter.GetReader(reader)
	data, err := io.ReadAll(wcr)
	writeconcurrencylimiter.PutReader(wcr)
	if err != nil {
		httpserver.Errorf(w, r, "cannot read request body: %s", err)
		return
	}

	cp, err := insertutils.GetCommonParams(r)
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
		return
	}
	if err := vlstorage.CanWriteData(); err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	lmp := cp.NewLogMessageProcessor()
	n, err := pushProtobufRequest(data, lmp)
	lmp.MustClose()
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse OpenTelemetry protobuf request: %s", err)
		return
	}

	rowsIngestedProtobufTotal.Add(n)

	// update requestProtobufDuration only for successfully parsed requests
	// There is no need in updating requestProtobufDuration for request errors,
	// since their timings are usually much smaller than the timing for successful request parsing.
	requestProtobufDuration.UpdateDuration(startTime)
}

var (
	rowsIngestedProtobufTotal = metrics.NewCounter(`vl_rows_ingested_total{type="opentelemetry",format="protobuf"}`)

	requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
	errorsTotal           = metrics.NewCounter(`vl_http_errors_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)

	requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
)

func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
	var req pb.ExportLogsServiceRequest
	if err := req.UnmarshalProtobuf(data); err != nil {
		errorsTotal.Inc()
		return 0, fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(data), err)
	}

	var rowsIngested int
	var commonFields []logstorage.Field
	for _, rl := range req.ResourceLogs {
		attributes := rl.Resource.Attributes
		commonFields = slicesutil.SetLength(commonFields, len(attributes))
		for i, attr := range attributes {
			commonFields[i].Name = attr.Key
			commonFields[i].Value = attr.Value.FormatString()
		}
		commonFieldsLen := len(commonFields)
		for _, sc := range rl.ScopeLogs {
			var scopeIngested int
			commonFields, scopeIngested = pushFieldsFromScopeLogs(&sc, commonFields[:commonFieldsLen], lmp)
			rowsIngested += scopeIngested
		}
	}

	return rowsIngested, nil
}

func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field, lmp insertutils.LogMessageProcessor) ([]logstorage.Field, int) {
	fields := commonFields
	for _, lr := range sc.LogRecords {
		fields = fields[:len(commonFields)]
		fields = append(fields, logstorage.Field{
			Name:  "_msg",
			Value: lr.Body.FormatString(),
		})
		for _, attr := range lr.Attributes {
			fields = append(fields, logstorage.Field{
				Name:  attr.Key,
				Value: attr.Value.FormatString(),
			})
		}
		fields = append(fields, logstorage.Field{
			Name:  "severity",
			Value: lr.FormatSeverity(),
		})

		lmp.AddRow(lr.ExtractTimestampNano(), fields)
	}
	return fields, len(sc.LogRecords)
}
```
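A minimal client sketch for the new endpoint, reusing the `pb` helpers from this diff (`ExportLogsServiceRequest`, `MarshalProtobuf`) and assuming a locally running VictoriaLogs; the timestamp, severity and message values are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)

func ptr[T any](v T) *T { return &v }

func main() {
	req := pb.ExportLogsServiceRequest{
		ResourceLogs: []pb.ResourceLogs{{
			ScopeLogs: []pb.ScopeLogs{{
				LogRecords: []pb.LogRecord{{
					TimeUnixNano:   1719000000000000000, // example timestamp in nanoseconds
					SeverityNumber: 9,                   // Info
					Body:           pb.AnyValue{StringValue: ptr("hello from otlp")},
				}},
			}},
		}},
	}
	data := req.MarshalProtobuf(nil)
	// JSON-encoded OTLP is rejected by the handler above; protobuf is required.
	resp, err := http.Post("http://localhost:9428/insert/opentelemetry/v1/logs",
		"application/x-protobuf", bytes.NewReader(data))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```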
app/vlinsert/opentelemetry/opentelemetry_test.go (new file, 128 lines):

```go
package opentelemetry

import (
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)

func TestPushProtoOk(t *testing.T) {
	f := func(src []pb.ResourceLogs, timestampsExpected []int64, resultExpected string) {
		t.Helper()
		lr := pb.ExportLogsServiceRequest{
			ResourceLogs: src,
		}

		pData := lr.MarshalProtobuf(nil)
		tlp := &insertutils.TestLogMessageProcessor{}
		n, err := pushProtobufRequest(pData, tlp)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		if err := tlp.Verify(n, timestampsExpected, resultExpected); err != nil {
			t.Fatal(err)
		}
	}
	// single line without resource attributes
	f([]pb.ResourceLogs{
		{
			ScopeLogs: []pb.ScopeLogs{
				{
					LogRecords: []pb.LogRecord{
						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
					},
				},
			},
		},
	},
		[]int64{1234},
		`{"_msg":"log-line-message","severity":"Trace"}`,
	)
	// multi-line with resource attributes
	f([]pb.ResourceLogs{
		{
			Resource: pb.Resource{
				Attributes: []*pb.KeyValue{
					{Key: "logger", Value: &pb.AnyValue{StringValue: ptrTo("context")}},
					{Key: "instance_id", Value: &pb.AnyValue{IntValue: ptrTo[int64](10)}},
					{Key: "node_taints", Value: &pb.AnyValue{KeyValueList: &pb.KeyValueList{
						Values: []*pb.KeyValue{
							{Key: "role", Value: &pb.AnyValue{StringValue: ptrTo("dev")}},
							{Key: "cluster_load_percent", Value: &pb.AnyValue{DoubleValue: ptrTo(0.55)}},
						},
					}}},
				},
			},
			ScopeLogs: []pb.ScopeLogs{
				{
					LogRecords: []pb.LogRecord{
						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1235, SeverityNumber: 21, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1236, SeverityNumber: -1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
					},
				},
			},
		},
	},
		[]int64{1234, 1235, 1236},
		`{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message","severity":"Trace"}
{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message-msg-2","severity":"Unspecified"}
{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message-msg-2","severity":"Unspecified"}`,
	)

	// multi-scope with resource attributes and multi-line
	f([]pb.ResourceLogs{
		{
			Resource: pb.Resource{
				Attributes: []*pb.KeyValue{
					{Key: "logger", Value: &pb.AnyValue{StringValue: ptrTo("context")}},
					{Key: "instance_id", Value: &pb.AnyValue{IntValue: ptrTo[int64](10)}},
					{Key: "node_taints", Value: &pb.AnyValue{KeyValueList: &pb.KeyValueList{
						Values: []*pb.KeyValue{
							{Key: "role", Value: &pb.AnyValue{StringValue: ptrTo("dev")}},
							{Key: "cluster_load_percent", Value: &pb.AnyValue{DoubleValue: ptrTo(0.55)}},
						},
					}}},
				},
			},
			ScopeLogs: []pb.ScopeLogs{
				{
					LogRecords: []pb.LogRecord{
						{TimeUnixNano: 1234, SeverityNumber: 1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
						{TimeUnixNano: 1235, SeverityNumber: 5, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
					},
				},
			},
		},
		{
			ScopeLogs: []pb.ScopeLogs{
				{
					LogRecords: []pb.LogRecord{
						{TimeUnixNano: 2345, SeverityNumber: 10, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-0-0")}},
						{TimeUnixNano: 2346, SeverityNumber: 10, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-0-1")}},
					},
				},
				{
					LogRecords: []pb.LogRecord{
						{TimeUnixNano: 2347, SeverityNumber: 12, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-1-0")}},
						{ObservedTimeUnixNano: 2348, SeverityNumber: 12, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-1-1")}},
					},
				},
			},
		},
	},
		[]int64{1234, 1235, 2345, 2346, 2347, 2348},
		`{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message","severity":"Trace"}
{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message-msg-2","severity":"Debug"}
{"_msg":"log-line-resource-scope-1-0-0","severity":"Info2"}
{"_msg":"log-line-resource-scope-1-0-1","severity":"Info2"}
{"_msg":"log-line-resource-scope-1-1-0","severity":"Info4"}
{"_msg":"log-line-resource-scope-1-1-1","severity":"Info4"}`,
	)
}

func ptrTo[T any](s T) *T {
	return &s
}
```
app/vlinsert/opentelemetry/opentemetry_timing_test.go (new file, 79 lines):

```go
package opentelemetry

import (
	"fmt"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)

func BenchmarkParseProtobufRequest(b *testing.B) {
	for _, scopes := range []int{1, 2} {
		for _, rows := range []int{100, 1000} {
			for _, attributes := range []int{5, 10} {
				b.Run(fmt.Sprintf("scopes_%d/rows_%d/attributes_%d", scopes, rows, attributes), func(b *testing.B) {
					benchmarkParseProtobufRequest(b, scopes, rows, attributes)
				})
			}
		}
	}
}

func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
	blp := &insertutils.BenchmarkLogMessageProcessor{}
	b.ReportAllocs()
	b.SetBytes(int64(streams * rows))
	b.RunParallel(func(pb *testing.PB) {
		body := getProtobufBody(streams, rows, labels)
		for pb.Next() {
			_, err := pushProtobufRequest(body, blp)
			if err != nil {
				panic(fmt.Errorf("unexpected error: %w", err))
			}
		}
	})
}

func getProtobufBody(scopesCount, rowsCount, attributesCount int) []byte {
	msg := "12345678910"

	attrValues := []*pb.AnyValue{
		{StringValue: ptrTo("string-attribute")},
		{IntValue: ptrTo[int64](12345)},
		{DoubleValue: ptrTo(3.14)},
	}
	attrs := make([]*pb.KeyValue, attributesCount)
	for j := 0; j < attributesCount; j++ {
		attrs[j] = &pb.KeyValue{
			Key:   fmt.Sprintf("key-%d", j),
			Value: attrValues[j%3],
		}
	}
	entries := make([]pb.LogRecord, rowsCount)
	for j := 0; j < rowsCount; j++ {
		entries[j] = pb.LogRecord{
			TimeUnixNano: 12345678910, ObservedTimeUnixNano: 12345678910, Body: pb.AnyValue{StringValue: &msg},
		}
	}
	scopes := make([]pb.ScopeLogs, scopesCount)

	for j := 0; j < scopesCount; j++ {
		scopes[j] = pb.ScopeLogs{
			LogRecords: entries,
		}
	}

	pr := pb.ExportLogsServiceRequest{
		ResourceLogs: []pb.ResourceLogs{
			{
				Resource: pb.Resource{
					Attributes: attrs,
				},
				ScopeLogs: scopes,
			},
		},
	}

	return pr.MarshalProtobuf(nil)
}
```
```diff
@@ -1,8 +1,8 @@
-ARG base_image
+ARG base_image=non-existing
 FROM $base_image
 
 EXPOSE 8429
 
 ENTRYPOINT ["/vmagent-prod"]
-ARG src_binary
+ARG src_binary=non-existing
 COPY $src_binary ./vmagent-prod
```
```diff
@@ -1,6 +1,6 @@
 # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
-ARG certs_image
-ARG root_image
+ARG certs_image=non-existing
+ARG root_image=non-existing
 FROM $certs_image AS certs
 RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
 
@@ -8,5 +8,5 @@ FROM $root_image
 COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 EXPOSE 8429
 ENTRYPOINT ["/vmagent-prod"]
-ARG TARGETARCH
+ARG TARGETARCH=non-existing
 COPY vmagent-linux-${TARGETARCH}-prod ./vmagent-prod
```
@ -494,7 +494,8 @@ func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, forceDropSamplesOnF
|
|||
tssBlock = dropAggregatedSeries(tssBlock, matchIdxs.B, *streamAggrGlobalDropInput)
|
||||
}
|
||||
matchIdxsPool.Put(matchIdxs)
|
||||
} else if deduplicatorGlobal != nil {
|
||||
}
|
||||
if deduplicatorGlobal != nil {
|
||||
deduplicatorGlobal.Push(tssBlock)
|
||||
tssBlock = tssBlock[:0]
|
||||
}
|
||||
|
```diff
@@ -922,7 +923,8 @@ func (rwctx *remoteWriteCtx) TryPush(tss []prompbmarshal.TimeSeries, forceDropSamplesOnFailure
 			tss = dropAggregatedSeries(tss, matchIdxs.B, rwctx.streamAggrDropInput)
 		}
 		matchIdxsPool.Put(matchIdxs)
-	} else if rwctx.deduplicator != nil {
+	}
+	if rwctx.deduplicator != nil {
 		rwctx.deduplicator.Push(tss)
 		return true
 	}
```
```diff
@@ -130,11 +130,10 @@ func initStreamAggrConfigGlobal() {
 		sasGlobal.Store(sas)
 		metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, filePath)).Set(1)
 		metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_success_timestamp_seconds{path=%q}`, filePath)).Set(fasttime.UnixTimestamp())
-	} else {
-		dedupInterval := streamAggrGlobalDedupInterval.Duration()
-		if dedupInterval > 0 {
-			deduplicatorGlobal = streamaggr.NewDeduplicator(pushToRemoteStoragesTrackDropped, dedupInterval, *streamAggrGlobalDropInputLabels, "dedup-global")
-		}
+	}
+	dedupInterval := streamAggrGlobalDedupInterval.Duration()
+	if dedupInterval > 0 {
+		deduplicatorGlobal = streamaggr.NewDeduplicator(pushToRemoteStoragesTrackDropped, dedupInterval, *streamAggrGlobalDropInputLabels, "dedup-global")
 	}
 }
 
```
```diff
@@ -152,12 +151,11 @@ func (rwctx *remoteWriteCtx) initStreamAggrConfig() {
 		rwctx.streamAggrDropInput = streamAggrDropInput.GetOptionalArg(idx)
 		metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, filePath)).Set(1)
 		metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_success_timestamp_seconds{path=%q}`, filePath)).Set(fasttime.UnixTimestamp())
-	} else {
-		dedupInterval := streamAggrDedupInterval.GetOptionalArg(idx)
-		if dedupInterval > 0 {
-			alias := fmt.Sprintf("dedup-%d", idx+1)
-			rwctx.deduplicator = streamaggr.NewDeduplicator(rwctx.pushInternalTrackDropped, dedupInterval, *streamAggrDropInputLabels, alias)
-		}
+	}
+	dedupInterval := streamAggrDedupInterval.GetOptionalArg(idx)
+	if dedupInterval > 0 {
+		alias := fmt.Sprintf("dedup-%d", idx+1)
+		rwctx.deduplicator = streamaggr.NewDeduplicator(rwctx.pushInternalTrackDropped, dedupInterval, *streamAggrDropInputLabels, alias)
 	}
 }
 
```
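Taken together, these four vmagent hunks change one behavior: deduplication used to live in the `else` branch, so it was silently skipped whenever a stream-aggregation config was set; now the deduplicator is initialized and pushed to independently of aggregation. A minimal sketch of the control-flow difference, with toy types rather than vmagent's real ones:

```go
package main

import "fmt"

// pushOld mirrors the removed logic: dedup ran only when aggregation was off.
func pushOld(aggregate, dedup bool, tss []string) {
	if aggregate {
		fmt.Println("aggregate:", tss)
	} else if dedup { // skipped whenever aggregation is configured
		fmt.Println("dedup:", tss)
	}
}

// pushNew mirrors the new logic: dedup runs regardless of aggregation.
func pushNew(aggregate, dedup bool, tss []string) {
	if aggregate {
		fmt.Println("aggregate:", tss)
	}
	if dedup {
		fmt.Println("dedup:", tss)
	}
}

func main() {
	tss := []string{`metric{a="1"}`}
	pushOld(true, true, tss) // old: dedup stage never reached
	pushNew(true, true, tss) // new: both stages run
}
```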
```diff
@@ -1,6 +1,6 @@
 # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
-ARG certs_image
-ARG root_image
+ARG certs_image=non-existing
+ARG root_image=non-existing
 FROM $certs_image AS certs
 RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
 
@@ -8,5 +8,5 @@ FROM $root_image
 COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 EXPOSE 8429
 ENTRYPOINT ["/vmalert-tool-prod"]
-ARG TARGETARCH
+ARG TARGETARCH=non-existing
 COPY vmalert-tool-linux-${TARGETARCH}-prod ./vmalert-tool-prod
```
```diff
@@ -1,8 +1,8 @@
-ARG base_image
+ARG base_image=non-existing
 FROM $base_image
 
 EXPOSE 8880
 
 ENTRYPOINT ["/vmalert-prod"]
-ARG src_binary
+ARG src_binary=non-existing
 COPY $src_binary ./vmalert-prod
```
```diff
@@ -1,6 +1,6 @@
 # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
-ARG certs_image
-ARG root_image
+ARG certs_image=non-existing
+ARG root_image=non-existing
 FROM $certs_image AS certs
 RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
 
@@ -8,5 +8,5 @@ FROM $root_image
 COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 EXPOSE 8880
 ENTRYPOINT ["/vmalert-prod"]
-ARG TARGETARCH
+ARG TARGETARCH=non-existing
 COPY vmalert-linux-${TARGETARCH}-prod ./vmalert-prod
```
```diff
@@ -1,8 +1,8 @@
-ARG base_image
+ARG base_image=non-existing
 FROM $base_image
 
 EXPOSE 8427
 
 ENTRYPOINT ["/vmauth-prod"]
-ARG src_binary
+ARG src_binary=non-existing
 COPY $src_binary ./vmauth-prod
```
```diff
@@ -1,6 +1,6 @@
 # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
-ARG certs_image
-ARG root_image
+ARG certs_image=non-existing
+ARG root_image=non-existing
 FROM $certs_image AS certs
 RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
 
@@ -8,5 +8,5 @@ FROM $root_image
 COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 EXPOSE 8427
 ENTRYPOINT ["/vmauth-prod"]
-ARG TARGETARCH
+ARG TARGETARCH=non-existing
 COPY vmauth-linux-${TARGETARCH}-prod ./vmauth-prod
```
```diff
@@ -1,6 +1,6 @@
-ARG base_image
+ARG base_image=non-existing
 FROM $base_image
 
 ENTRYPOINT ["/vmbackup-prod"]
-ARG src_binary
+ARG src_binary=non-existing
 COPY $src_binary ./vmbackup-prod
```
```diff
@@ -1,11 +1,11 @@
 # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
-ARG certs_image
-ARG root_image
+ARG certs_image=non-existing
+ARG root_image=non-existing
 FROM $certs_image AS certs
 RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
 
 FROM $root_image
 COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 ENTRYPOINT ["/vmbackup-prod"]
-ARG TARGETARCH
+ARG TARGETARCH=non-existing
 COPY vmbackup-linux-${TARGETARCH}-prod ./vmbackup-prod
```
```diff
@@ -1,6 +1,6 @@
-ARG base_image
+ARG base_image=non-existing
 FROM $base_image
 
 ENTRYPOINT ["/vmctl-prod"]
-ARG src_binary
+ARG src_binary=non-existing
 COPY $src_binary ./vmctl-prod
```
```diff
@@ -1,11 +1,11 @@
 # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
-ARG certs_image
-ARG root_image
+ARG certs_image=non-existing
+ARG root_image=non-existing
 FROM $certs_image AS certs
 RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
 
 FROM $root_image
 COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 ENTRYPOINT ["/vmctl-prod"]
-ARG TARGETARCH
+ARG TARGETARCH=non-existing
 COPY vmctl-linux-${TARGETARCH}-prod ./vmctl-prod
```
```diff
@@ -1,6 +1,6 @@
-ARG base_image
+ARG base_image=non-existing
 FROM $base_image
 
 ENTRYPOINT ["/vmrestore-prod"]
-ARG src_binary
+ARG src_binary=non-existing
 COPY $src_binary ./vmrestore-prod
```
```diff
@@ -1,11 +1,11 @@
 # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
-ARG certs_image
-ARG root_image
+ARG certs_image=non-existing
+ARG root_image=non-existing
 FROM $certs_image AS certs
 RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
 
 FROM $root_image
 COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 ENTRYPOINT ["/vmrestore-prod"]
-ARG TARGETARCH
+ARG TARGETARCH=non-existing
 COPY vmrestore-linux-${TARGETARCH}-prod ./vmrestore-prod
```
```diff
@@ -4496,7 +4496,7 @@
         "type": "prometheus",
         "uid": "$ds"
       },
-      "description": "The number of rows or bytes that vminesrt internal buffer contains at the moment.",
+      "description": "The number of rows or bytes that vminsert internal buffer contains at the moment.",
       "fieldConfig": {
         "defaults": {
           "color": {
```
```diff
@@ -4497,7 +4497,7 @@
         "type": "victoriametrics-datasource",
         "uid": "$ds"
      },
-      "description": "The number of rows or bytes that vminesrt internal buffer contains at the moment.",
+      "description": "The number of rows or bytes that vminsert internal buffer contains at the moment.",
       "fieldConfig": {
         "defaults": {
           "color": {
```
```diff
@@ -5088,7 +5088,7 @@
           "editorMode": "code",
           "expr": "sum(rate(vm_streamaggr_matched_samples_total{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) without (instance, pod) > 0",
           "instant": false,
-          "legendFormat": "{{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
@@ -5184,7 +5184,7 @@
           "editorMode": "code",
           "expr": "sum(rate(vm_streamaggr_ignored_samples_total{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval]) > 0) without (instance, pod)",
           "instant": false,
-          "legendFormat": "{{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
@@ -5286,7 +5286,7 @@
           "editorMode": "code",
           "expr": "sum(rate(vm_streamaggr_output_samples_total{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) without (instance, pod) > 0",
           "instant": false,
-          "legendFormat": "{{url}} ({{job}}): match={{match}}; output={{output}}",
+          "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
@@ -5383,7 +5383,7 @@
           "editorMode": "code",
           "expr": "increase(vm_streamaggr_flush_timeouts_total{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval]) > 0",
           "instant": false,
-          "legendFormat": "aggregation: {{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "aggregation: {{url}} ({{job}}): {{path}}:{{position}}",
           "range": true,
           "refId": "A"
         },
@@ -5396,7 +5396,7 @@
           "expr": "increase(vm_streamaggr_dedup_flush_timeouts_total{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval]) > 0",
           "hide": false,
           "instant": false,
-          "legendFormat": "deduplication: {{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "deduplication: {{url}} ({{job}}): {{path}}:{{position}}",
           "range": true,
           "refId": "B"
         }
@@ -5494,7 +5494,7 @@
           "editorMode": "code",
           "expr": "histogram_quantile(0.99, sum(rate(vm_streamaggr_samples_lag_seconds_bucket{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) without (instance, pod))",
           "instant": false,
-          "legendFormat": "{{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
@@ -5591,7 +5591,7 @@
           "editorMode": "code",
           "expr": "histogram_quantile(0.99, rate(vm_streamaggr_dedup_flush_duration_seconds_bucket{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval]))",
           "instant": false,
-          "legendFormat": "{{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
```
```diff
@@ -5087,7 +5087,7 @@
           "editorMode": "code",
           "expr": "sum(rate(vm_streamaggr_matched_samples_total{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) without (instance, pod) > 0",
           "instant": false,
-          "legendFormat": "{{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
@@ -5183,7 +5183,7 @@
           "editorMode": "code",
           "expr": "sum(rate(vm_streamaggr_ignored_samples_total{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval]) > 0) without (instance, pod)",
           "instant": false,
-          "legendFormat": "{{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
@@ -5285,7 +5285,7 @@
           "editorMode": "code",
           "expr": "sum(rate(vm_streamaggr_output_samples_total{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) without (instance, pod) > 0",
           "instant": false,
-          "legendFormat": "{{url}} ({{job}}): match={{match}}; output={{output}}",
+          "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
@@ -5382,7 +5382,7 @@
           "editorMode": "code",
           "expr": "increase(vm_streamaggr_flush_timeouts_total{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval]) > 0",
           "instant": false,
-          "legendFormat": "aggregation: {{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "aggregation: {{url}} ({{job}}): {{path}}:{{position}}",
           "range": true,
           "refId": "A"
         },
@@ -5395,7 +5395,7 @@
           "expr": "increase(vm_streamaggr_dedup_flush_timeouts_total{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval]) > 0",
           "hide": false,
           "instant": false,
-          "legendFormat": "deduplication: {{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "deduplication: {{url}} ({{job}}): {{path}}:{{position}}",
           "range": true,
           "refId": "B"
         }
@@ -5493,7 +5493,7 @@
           "editorMode": "code",
           "expr": "histogram_quantile(0.99, sum(rate(vm_streamaggr_samples_lag_seconds_bucket{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) without (instance, pod))",
           "instant": false,
-          "legendFormat": "{{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
@@ -5590,7 +5590,7 @@
           "editorMode": "code",
           "expr": "histogram_quantile(0.99, rate(vm_streamaggr_dedup_flush_duration_seconds_bucket{job=~\"$job\",instance=~\"$instance\", url=~\"$url\"}[$__rate_interval]))",
           "instant": false,
-          "legendFormat": "{{url}} ({{job}}): match={{match}}; outputs={{outputs}}",
+          "legendFormat": "__auto",
           "range": true,
           "refId": "A"
         }
```
```diff
@@ -27,8 +27,9 @@ groups:
         severity: warning
       annotations:
         dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=79&var-instance={{ $labels.instance }}"
-        summary: "Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} drops the rejected by
-          remote-write server data blocks. Check the logs to find the reason for rejects."
+        summary: "Vmagent is dropping data blocks that are rejected by remote storage"
+        description: "Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} drops the rejected by
+          remote-write server data blocks. Check the logs to find the reason for rejects."
 
   - alert: TooManyScrapeErrors
     expr: increase(vm_promscrape_scrapes_failed_total[5m]) > 0
@@ -37,7 +38,8 @@ groups:
         severity: warning
       annotations:
         dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=31&var-instance={{ $labels.instance }}"
-        summary: "Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} fails to scrape targets for last 15m"
+        summary: "Vmagent fails to scrape one or more targets"
+        description: "Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} fails to scrape targets for last 15m"
 
   - alert: TooManyWriteErrors
     expr: |
@@ -49,7 +51,8 @@ groups:
         severity: warning
       annotations:
         dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=77&var-instance={{ $labels.instance }}"
-        summary: "Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} responds with errors to write requests for last 15m."
+        summary: "Vmagent responds with too many errors on data ingestion protocols"
+        description: "Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} responds with errors to write requests for last 15m."
 
   - alert: TooManyRemoteWriteErrors
     expr: rate(vmagent_remotewrite_retries_count_total[5m]) > 0
```
```diff
@@ -1,6 +1,6 @@
 # See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
-ARG certs_image
-ARG root_image
+ARG certs_image=non-existing
+ARG root_image=non-existing
 FROM $certs_image AS certs
 
 RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
```
```diff
@@ -1,4 +1,4 @@
-ARG go_builder_image
+ARG go_builder_image=non-existing
 FROM $go_builder_image
 STOPSIGNAL SIGINT
 RUN apk add git gcc musl-dev make wget --no-cache && \
```
```diff
@@ -16,7 +16,7 @@ services:
       - ./../../dashboards/victoriametrics.json:/var/lib/grafana/dashboards/vm.json
       - ./../../dashboards/victorialogs.json:/var/lib/grafana/dashboards/vl.json
     environment:
-      - "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.2.2/victorialogs-datasource-v0.2.2.zip;victorialogs-datasource"
+      - "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.4.0/victorialogs-datasource-v0.4.0.zip;victorialogs-datasource"
       - "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victorialogs-datasource"
     networks:
       - vm_net
```
```diff
@@ -1,5 +1,3 @@
-version: "3"
-
 services:
   filebeat-vlogs:
     image: docker.elastic.co/beats/filebeat:8.8.1
```
```diff
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   filebeat-victorialogs:
     image: docker.elastic.co/beats/filebeat:8.8.1
```
Deleted file (30 lines):

```
[INPUT]
    name tail
    path /var/lib/docker/containers/**/*.log
    path_key path
    multiline.parser docker, cri
    Parser docker
    Docker_Mode On

[INPUT]
    Name syslog
    Listen 0.0.0.0
    Port 5140
    Parser syslog-rfc3164
    Mode tcp

[SERVICE]
    Flush 1
    Parsers_File parsers.conf

[Output]
    Name http
    Match *
    host victorialogs
    port 9428
    compress gzip
    uri /insert/jsonline?_stream_fields=stream,path&_msg_field=log&_time_field=date
    format json_lines
    json_date_format iso8601
    header AccountID 0
    header ProjectID 0
```
```diff
@@ -1,8 +1,6 @@
-version: "3"
-
 services:
   fluentbit:
-    image: cr.fluentbit.io/fluent/fluent-bit:3.0.2
+    image: cr.fluentbit.io/fluent/fluent-bit:3.0.7
     volumes:
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
       - ./fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf
```
deployment/docker/victorialogs/fluentbit/fluent-bit.conf (new file, 54 lines):

```
[INPUT]
    name tail
    path /var/lib/docker/containers/**/*.log
    path_key path
    multiline.parser docker, cri
    Parser docker
    Docker_Mode On

[INPUT]
    Name syslog
    Listen 0.0.0.0
    Port 5140
    Parser syslog-rfc3164
    Mode tcp

[SERVICE]
    Flush 1
    Parsers_File parsers.conf

[OUTPUT]
    Name http
    Match *
    host victorialogs
    port 9428
    compress gzip
    uri /insert/jsonline?_stream_fields=stream,path&_msg_field=log&_time_field=date
    format json_lines
    json_date_format iso8601
    header AccountID 0
    header ProjectID 0

[OUTPUT]
    Name es
    Match *
    host victorialogs
    port 9428
    compress gzip
    path /insert/elasticsearch
    header AccountID 0
    header ProjectID 0
    header VL-Stream-Fields path
    header VL-Msg-Field log
    header VL-Time-Field @timestamp

[OUTPUT]
    name loki
    match *
    host victorialogs
    uri /insert/loki/api/v1/push
    port 9428
    label_keys $path,$log,$time
    header VL-Msg-Field log
    header VL-Time-Field time
    header VL-Stream-Fields path
```
```diff
@@ -1,3 +1,5 @@
 FROM docker.elastic.co/logstash/logstash:8.8.1
 
-RUN bin/logstash-plugin install logstash-output-opensearch
+RUN bin/logstash-plugin install \
+  logstash-output-opensearch \
+  logstash-output-loki
```
```diff
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   logstash:
     build:
```
```diff
@@ -17,4 +17,12 @@ output {
       "_time_field" => "@timestamp"
     }
   }
+  http {
+    url => "http://victorialogs:9428/insert/jsonline?_stream_fields=host.ip,process.name&_msg_field=message&_time_field=@timestamp"
+    format => "json"
+    http_method => "post"
+  }
+  loki {
+    url => "http://victorialogs:9428/insert/loki/api/v1/push?_stream_fields=host.ip,process.name&_msg_field=message&_time_field=@timestamp"
+  }
 }
```
deployment/docker/victorialogs/opentelemetry-collector/.gitignore (new file, 1 line):

```
**/logs
```
New file (27 lines):

````markdown
# Docker compose OpenTelemetry Elasticsearch integration with VictoriaLogs for docker

This folder contains an example of integrating the [OpenTelemetry collector](https://opentelemetry.io/docs/collector/) with VictoriaLogs.

To spin up the environment, run the following command:
```
docker compose up -d
```

To shut down the docker-compose environment, run the following commands:
```
docker compose down
docker compose rm -f
```

The docker compose file contains the following components:

* collector - the OpenTelemetry collector is configured to collect logs from `docker`; its configuration is in `config.yaml`. It writes the logs to VictoriaLogs and pushes metrics to VictoriaMetrics.
* VictoriaLogs - the log database; it accepts the data from `collector` via the Elasticsearch protocol
* VictoriaMetrics - collects metrics from `VictoriaLogs` and `VictoriaMetrics`

Querying the data:

* [vmui](https://docs.victoriametrics.com/victorialogs/querying/#vmui) - a web UI available at `http://localhost:9428/select/vmui`
* for querying the data via the command line, see [these docs](https://docs.victoriametrics.com/victorialogs/querying/#command-line)

Note that the `_stream_fields` parameter must follow the recommended [best practices](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to achieve better performance.
````
New file (48 lines):

```yaml
services:
  collector:
    image: docker.io/otel/opentelemetry-collector-contrib:0.102.1
    restart: on-failure
    volumes:
      - $PWD/logs:/tmp/logs
      - $PWD/config.yaml:/etc/otelcol-contrib/config.yaml
    depends_on:
      victorialogs:
        condition: service_healthy
      victoriametrics:
        condition: service_healthy

  victorialogs:
    image: docker.io/victoriametrics/victoria-logs:v0.28.0-victorialogs
    volumes:
      - victorialogs-vector-docker-vl:/vlogs
    ports:
      - '9428:9428'
    command:
      - -storageDataPath=/vlogs
      - -loggerFormat=json
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:9428/health"]
      interval: 1s
      timeout: 1s
      retries: 10

  victoriametrics:
    image: victoriametrics/victoria-metrics:latest
    ports:
      - '8428:8428'
    command:
      - -storageDataPath=/vmsingle
      - -promscrape.config=/promscrape.yml
      - -loggerFormat=json
    volumes:
      - victorialogs-vector-docker-vm:/vmsingle
      - ./scrape.yml:/promscrape.yml
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:8428/health"]
      interval: 1s
      timeout: 1s
      retries: 10

volumes:
  victorialogs-vector-docker-vl:
  victorialogs-vector-docker-vm:
```
New file (14 lines):

```yaml
exporters:
  elasticsearch:
    endpoints:
      - http://victorialogs:9428/insert/elasticsearch
receivers:
  filelog:
    include: [/tmp/logs/*.log]
    resource:
      region: us-east-1
service:
  pipelines:
    logs:
      receivers: [filelog]
      exporters: [elasticsearch]
```
New file (27 lines):

````markdown
# Docker compose OpenTelemetry Loki integration with VictoriaLogs for docker

This folder contains an example of integrating the [OpenTelemetry collector](https://opentelemetry.io/docs/collector/) with VictoriaLogs.

To spin up the environment, run the following command:
```
docker compose up -d
```

To shut down the docker-compose environment, run the following commands:
```
docker compose down
docker compose rm -f
```

The docker compose file contains the following components:

* collector - the OpenTelemetry collector is configured to collect logs from `docker`; its configuration is in `config.yaml`. It writes the logs to VictoriaLogs and pushes metrics to VictoriaMetrics.
* VictoriaLogs - the log database; it accepts the data from `collector` via the Loki protocol
* VictoriaMetrics - collects metrics from `VictoriaLogs` and `VictoriaMetrics`

Querying the data:

* [vmui](https://docs.victoriametrics.com/victorialogs/querying/#vmui) - a web UI available at `http://localhost:9428/select/vmui`
* for querying the data via the command line, see [these docs](https://docs.victoriametrics.com/victorialogs/querying/#command-line)

Note that the `_stream_fields` parameter must follow the recommended [best practices](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to achieve better performance.
````
New file (48 lines):

```yaml
services:
  collector:
    image: docker.io/otel/opentelemetry-collector-contrib:0.102.1
    restart: on-failure
    volumes:
      - $PWD/logs:/tmp/logs
      - $PWD/config.yaml:/etc/otelcol-contrib/config.yaml
    depends_on:
      victorialogs:
        condition: service_healthy
      victoriametrics:
        condition: service_healthy

  victorialogs:
    image: docker.io/victoriametrics/victoria-logs:v0.28.0-victorialogs
    volumes:
      - victorialogs-vector-docker-vl:/loki
    ports:
      - '9428:9428'
    command:
      - -storageDataPath=/loki
      - -loggerFormat=json
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:9428/health"]
      interval: 1s
      timeout: 1s
      retries: 10

  victoriametrics:
    image: victoriametrics/victoria-metrics:latest
    ports:
      - '8428:8428'
    command:
      - -storageDataPath=/vmsingle
      - -promscrape.config=/promscrape.yml
      - -loggerFormat=json
    volumes:
      - victorialogs-vector-docker-vm:/vmsingle
      - ./scrape.yml:/promscrape.yml
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:8428/health"]
      interval: 1s
      timeout: 1s
      retries: 10

volumes:
  victorialogs-vector-docker-vl:
  victorialogs-vector-docker-vm:
```
New file (13 lines):

```yaml
exporters:
  loki:
    endpoint: http://victorialogs:9428/insert/loki/api/v1/push
receivers:
  filelog:
    include: [/tmp/logs/*.log]
    resource:
      region: us-east-1
service:
  pipelines:
    logs:
      receivers: [filelog]
      exporters: [loki]
```
New file (11 lines):

```yaml
scrape_configs:
  - job_name: "victoriametrics"
    scrape_interval: 30s
    static_configs:
      - targets:
          - victoriametrics:8428
  - job_name: "victorialogs"
    scrape_interval: 30s
    static_configs:
      - targets:
          - victorialogs:9428
```
New file (27 lines):

````markdown
# Docker compose OpenTelemetry OTLP integration with VictoriaLogs for docker

This folder contains an example of integrating the [OpenTelemetry collector](https://opentelemetry.io/docs/collector/) with VictoriaLogs.

To spin up the environment, run the following command:
```
docker compose up -d
```

To shut down the docker-compose environment, run the following commands:
```
docker compose down
docker compose rm -f
```

The docker compose file contains the following components:

* collector - the OpenTelemetry collector is configured to collect logs from `docker`; its configuration is in `config.yaml`. It writes the logs to VictoriaLogs and pushes metrics to VictoriaMetrics.
* VictoriaLogs - the log database; it accepts the data from `collector` via the OTLP protocol
* VictoriaMetrics - collects metrics from `VictoriaLogs` and `VictoriaMetrics`

Querying the data:

* [vmui](https://docs.victoriametrics.com/victorialogs/querying/#vmui) - a web UI available at `http://localhost:9428/select/vmui`
* for querying the data via the command line, see [these docs](https://docs.victoriametrics.com/victorialogs/querying/#command-line)

Note that the `_stream_fields` parameter must follow the recommended [best practices](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to achieve better performance.
````
New file (48 lines):

```yaml
services:
  collector:
    image: docker.io/otel/opentelemetry-collector-contrib:0.102.1
    restart: on-failure
    volumes:
      - $PWD/logs:/tmp/logs
      - $PWD/config.yaml:/etc/otelcol-contrib/config.yaml
    depends_on:
      victorialogs:
        condition: service_healthy
      victoriametrics:
        condition: service_healthy

  victorialogs:
    image: docker.io/victoriametrics/victoria-logs:v0.29.0-victorialogs
    volumes:
      - victorialogs-vector-docker-vl:/otlp
    ports:
      - '9428:9428'
    command:
      - -storageDataPath=/otlp
      - -loggerFormat=json
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:9428/health"]
      interval: 1s
      timeout: 1s
      retries: 10

  victoriametrics:
    image: victoriametrics/victoria-metrics:latest
    ports:
      - '8428:8428'
    command:
      - -storageDataPath=/vmsingle
      - -promscrape.config=/promscrape.yml
      - -loggerFormat=json
    volumes:
      - victorialogs-vector-docker-vm:/vmsingle
      - ./scrape.yml:/promscrape.yml
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:8428/health"]
      interval: 1s
      timeout: 1s
      retries: 10

volumes:
  victorialogs-vector-docker-vl:
  victorialogs-vector-docker-vm:
```
New file (15 lines):

```yaml
exporters:
  otlphttp:
    logs_endpoint: http://victorialogs:9428/insert/opentelemetry/v1/logs
  debug:
    verbosity: detailed
receivers:
  filelog:
    include: [/tmp/logs/*.log]
    resource:
      region: us-east-1
service:
  pipelines:
    logs:
      receivers: [filelog]
      exporters: [otlphttp, debug]
```
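To exercise this pipeline, something has to produce files for the `filelog` receiver. The compose file mounts `$PWD/logs` at `/tmp/logs` and the receiver tails `/tmp/logs/*.log`, so any line appended to `./logs/app.log` on the host ends up in VictoriaLogs. A minimal sketch; the file name `app.log` is arbitrary:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	// ./logs is the directory mounted into the collector container at /tmp/logs.
	if err := os.MkdirAll("logs", 0o755); err != nil {
		panic(err)
	}
	f, err := os.OpenFile("logs/app.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// Each appended line becomes one log record picked up by the filelog receiver.
	fmt.Fprintf(f, "%s test log line\n", time.Now().Format(time.RFC3339))
}
```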
New file (11 lines):

```yaml
scrape_configs:
  - job_name: "victoriametrics"
    scrape_interval: 30s
    static_configs:
      - targets:
          - victoriametrics:8428
  - job_name: "victorialogs"
    scrape_interval: 30s
    static_configs:
      - targets:
          - victorialogs:9428
```
New file (27 lines):

````markdown
# Docker compose OpenTelemetry Syslog integration with VictoriaLogs for docker

This folder contains an example of integrating the [OpenTelemetry collector](https://opentelemetry.io/docs/collector/) with VictoriaLogs.

To spin up the environment, run the following command:
```
docker compose up -d
```

To shut down the docker-compose environment, run the following commands:
```
docker compose down
docker compose rm -f
```

The docker compose file contains the following components:

* collector - the OpenTelemetry collector is configured to collect logs from `docker`; its configuration is in `config.yaml`. It writes the logs to VictoriaLogs and pushes metrics to VictoriaMetrics.
* VictoriaLogs - the log database; it accepts the data from `collector` via the syslog protocol
* VictoriaMetrics - collects metrics from `VictoriaLogs` and `VictoriaMetrics`

Querying the data:

* [vmui](https://docs.victoriametrics.com/victorialogs/querying/#vmui) - a web UI available at `http://localhost:9428/select/vmui`
* for querying the data via the command line, see [these docs](https://docs.victoriametrics.com/victorialogs/querying/#command-line)

Note that the `_stream_fields` parameter must follow the recommended [best practices](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to achieve better performance.
````
New file (49 lines):

```yaml
services:
  collector:
    image: docker.io/otel/opentelemetry-collector-contrib:0.107.0
    restart: on-failure
    volumes:
      - $PWD/logs:/tmp/logs
      - $PWD/config.yaml:/etc/otelcol-contrib/config.yaml
    depends_on:
      victorialogs:
        condition: service_healthy
      victoriametrics:
        condition: service_healthy

  victorialogs:
    image: docker.io/victoriametrics/victoria-logs:v0.28.0-victorialogs
    volumes:
      - victorialogs-vector-docker-vl:/syslog
    ports:
      - '9428:9428'
    command:
      - -storageDataPath=/syslog
      - -syslog.listenAddr.tcp=:5410
      - -syslog.useLocalTimestamp.tcp
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:9428/health"]
      interval: 1s
      timeout: 1s
      retries: 10

  victoriametrics:
    image: victoriametrics/victoria-metrics:latest
    ports:
      - '8428:8428'
    command:
      - -storageDataPath=/vmsingle
      - -promscrape.config=/promscrape.yml
      - -loggerFormat=json
    volumes:
      - victorialogs-vector-docker-vm:/vmsingle
      - ./scrape.yml:/promscrape.yml
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:8428/health"]
      interval: 1s
      timeout: 1s
      retries: 10

volumes:
  victorialogs-vector-docker-vl:
  victorialogs-vector-docker-vm:
```
@@ -0,0 +1,24 @@
exporters:
  syslog:
    network: tcp
    endpoint: victorialogs
    port: 5410
    tls:
      insecure: true
  debug:
    verbosity: detailed
processors:
  transform:
    log_statements:
      - context: log
        statements:
          - set(attributes["message"], body)
receivers:
  filelog:
    include: [/tmp/logs/*.log]
service:
  pipelines:
    logs:
      receivers: [filelog]
      exporters: [syslog, debug]
      processors: [transform]
@@ -0,0 +1,11 @@
scrape_configs:
  - job_name: "victoriametrics"
    scrape_interval: 30s
    static_configs:
      - targets:
          - victoriametrics:8428
  - job_name: "victorialogs"
    scrape_interval: 30s
    static_configs:
      - targets:
          - victorialogs:9428
@@ -1,5 +1,3 @@
-version: "3"
-
 services:
   promtail:
     image: grafana/promtail:2.8.2
25 deployment/docker/victorialogs/telegraf-docker/README.md Normal file
@@ -0,0 +1,25 @@
# Docker compose Telegraf integration with VictoriaLogs for docker

The folder contains the example of integration of [telegraf](https://www.influxdata.com/time-series-platform/telegraf/) with VictoriaLogs.

To spin up the environment, run the following command:
```
docker compose up -d
```

To shut down the docker-compose environment, run the following command:
```
docker compose down
docker compose rm -f
```

The docker compose file contains the following components:

* telegraf - Telegraf is configured to collect logs from `docker`; you can find the configuration in `telegraf.conf`. It writes data to VictoriaLogs and pushes metrics to VictoriaMetrics.
* VictoriaLogs - the log database; it accepts the data from `telegraf` via the Elasticsearch protocol
* VictoriaMetrics - collects metrics from `VictoriaLogs` and `VictoriaMetrics`

Querying the data

* [vmui](https://docs.victoriametrics.com/victorialogs/querying/#vmui) - a web UI accessible at `http://localhost:9428/select/vmui`
* for querying the data via the command line, please check [these docs](https://docs.victoriametrics.com/victorialogs/querying/#command-line)
55 deployment/docker/victorialogs/telegraf-docker/compose.yml Normal file
@@ -0,0 +1,55 @@
services:
  telegraf:
    image: bitnami/telegraf:1.31.0
    restart: on-failure
    volumes:
      - type: bind
        source: /var/run/docker.sock
        target: /var/run/docker.sock
      - type: bind
        source: /var/lib/docker
        target: /var/lib/docker
      - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro
    command: --config /etc/telegraf/telegraf.conf
    depends_on:
      victorialogs:
        condition: service_healthy
      victoriametrics:
        condition: service_healthy

  victorialogs:
    image: docker.io/victoriametrics/victoria-logs:v0.20.2-victorialogs
    volumes:
      - victorialogs-vector-docker-vl:/vlogs
    ports:
      - '9428:9428'
    command:
      - -storageDataPath=/vlogs
      - -loggerFormat=json
      - -syslog.listenAddr.tcp=0.0.0.0:8094
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:9428/health"]
      interval: 1s
      timeout: 1s
      retries: 10

  victoriametrics:
    image: victoriametrics/victoria-metrics:latest
    ports:
      - '8428:8428'
    command:
      - -storageDataPath=/vmsingle
      - -promscrape.config=/promscrape.yml
      - -loggerFormat=json
    volumes:
      - victorialogs-vector-docker-vm:/vmsingle
      - ./scrape.yml:/promscrape.yml
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:8428/health"]
      interval: 1s
      timeout: 1s
      retries: 10

volumes:
  victorialogs-vector-docker-vl:
  victorialogs-vector-docker-vm:
11 deployment/docker/victorialogs/telegraf-docker/scrape.yml Normal file
@@ -0,0 +1,11 @@
scrape_configs:
  - job_name: "victoriametrics"
    scrape_interval: 30s
    static_configs:
      - targets:
          - victoriametrics:8428
  - job_name: "victorialogs"
    scrape_interval: 30s
    static_configs:
      - targets:
          - victorialogs:9428
70 deployment/docker/victorialogs/telegraf-docker/telegraf.conf Normal file
@@ -0,0 +1,70 @@
[agent]
  interval = "10s"
  round_interval = true
  metric_batch_size = 1000
  metric_buffer_limit = 100000
  collection_jitter = "0s"
  flush_interval = "10s"
  flush_jitter = "0s"
  precision = ""
  debug = false
  quiet = false
  logtarget = "file"
  logfile = "/dev/null"
  hostname = "pop-os"
  omit_hostname = false

[[inputs.cpu]]

[[outputs.http]]
  url = "http://victorialogs:9428/insert/jsonline?_msg_field=fields.msg&_stream_fields=tags.log_source,tags.metric_type"
  data_format = "json"
  namepass = ["docker_log"]
  use_batch_format = false

[[outputs.loki]]
  domain = "http://victorialogs:9428"
  endpoint = "/insert/loki/api/v1/push?_msg_field=docker_log.msg&_time_field=@timestamp&_stream_fields=log_source,metric_type"
  namepass = ["docker_log"]
  gzip_request = true
  sanitize_label_names = true

[[outputs.syslog]]
  address = "tcp://victorialogs:8094"
  namepass = ["docker_log"]

[[outputs.elasticsearch]]
  urls = ["http://victorialogs:9428/insert/elasticsearch"]
  timeout = "1m"
  flush_interval = "30s"
  enable_sniffer = false
  health_check_interval = "0s"
  index_name = "device_log-%Y.%m.%d"
  manage_template = false
  template_name = "telegraf"
  overwrite_template = false
  namepass = ["docker_log"]
  [outputs.elasticsearch.headers]
    VL-Msg-Field = "docker_log.msg"
    VL-Time-Field = "@timestamp"
    VL-Stream-Fields = "tag.log_source,tag.metric_type"

[[outputs.http]]
  url = "http://victoriametrics:8428/api/v1/write"
  data_format = "prometheusremotewrite"
  namepass = ["cpu"]
  [outputs.http.headers]
    Content-Type = "application/x-protobuf"
    Content-Encoding = "snappy"
    X-Prometheus-Remote-Write-Version = "0.1.0"

[[inputs.docker_log]]
  [inputs.docker_log.tags]
    metric_type = "logs"
    log_source = "telegraf"

[[processors.rename]]
  namepass = ["docker_log"]
  [[processors.rename.replace]]
    field = "message"
    dest = "msg"
@@ -1,75 +0,0 @@
[api]
  enabled = true
  address = "0.0.0.0:8686"


# ---------------------------------------------
# Docker logs -> VictoriaLogs
# ---------------------------------------------

[sources.docker]
  type = "docker_logs"

[transforms.msg_parser]
  type = "remap"
  inputs = ["docker"]
  source = '''
  .log = parse_json!(.message)
  del(.message)
  '''

[sinks.vlogs]
  type = "http"
  inputs = [ "msg_parser" ]
  uri = "http://victorialogs:9428/insert/jsonline?_stream_fields=source_type,host,container_name&_msg_field=log.msg&_time_field=timestamp"
  encoding.codec = "json"
  framing.method = "newline_delimited"
  compression = "gzip"
  healthcheck.enabled = false

[sinks.vlogs.request.headers]
  AccountID = "0"
  ProjectID = "0"


# ---------------------------------------------
# Generted demo logs -> VictoriaLogs
# ---------------------------------------------

[sources.demo]
  type = "demo_logs"
  format = "apache_common"
  interval = 10

[sinks.vlogs_demo]
  type = "elasticsearch"
  inputs = [ "demo" ]
  endpoints = [ "http://victorialogs:9428/insert/elasticsearch/" ]
  mode = "bulk"
  api_version = "v8"
  compression = "gzip"
  healthcheck.enabled = false

[sinks.vlogs_demo.query]
  _msg_field = "message"
  _time_field = "timestamp"
  _stream_fields = "source_type"

[sinks.vlogs_demo.request.headers]
  AccountID = "0"
  ProjectID = "0"


# ---------------------------------------------
# Vector Metrics -> VictoriaMetrics
# ---------------------------------------------

[sources.vector_metrics]
  type = "internal_metrics"

[sinks.victoriametrics]
  type = "prometheus_remote_write"
  endpoint = "http://victoriametrics:8428/api/v1/write"
  inputs = ["vector_metrics"]
  healthcheck.enabled = false
@@ -15,7 +15,7 @@ docker compose rm -f

 The docker compose file contains the following components:

-* vector - vector is configured to collect logs from the `docker`, you can find configuration in the `vector.toml`. It writes data in VictoriaLogs. It pushes metrics to VictoriaMetrics.
+* vector - vector is configured to collect logs from the `docker`, you can find configuration in the `vector.yaml`. It writes data in VictoriaLogs. It pushes metrics to VictoriaMetrics.
 * VictoriaLogs - the log database, it accepts the data from `vector` by elastic protocol
 * VictoriaMetrics - collects metrics from `VictoriaLogs` and `VictoriaMetrics`
@@ -25,37 +25,39 @@ Querying the data
 * for querying the data via command-line please check [these docs](https://docs.victoriametrics.com/victorialogs/querying/#command-line)

-the example of vector configuration(`vector.toml`)
+the example of vector configuration (`vector.yaml`)

 ```
-[sources.docker]
-type = "docker_logs"
-
-[transforms.msg_parser]
-type = "remap"
-inputs = ["docker"]
-source = '''
-.log = parse_json!(.message)
-del(.message)
-'''
-
-[sinks.vlogs]
-type = "elasticsearch"
-inputs = [ "msg_parser" ]
-endpoints = [ "http://victorialogs:9428/insert/elasticsearch/" ]
-mode = "bulk"
-api_version = "v8"
-compression = "gzip"
-healthcheck.enabled = false
-
-[sinks.vlogs.query]
-_msg_field = "log.msg"
-_time_field = "timestamp"
-_stream_fields = "source_type,host,container_name"
-
-[sinks.vlogs.request.headers]
-AccountID = "0"
-ProjectID = "0"
+sources:
+  docker:
+    type: docker_logs
+transforms:
+  msg_parser:
+    type: remap
+    inputs:
+      - docker
+    source: |
+      .log = parse_json!(.message)
+      del(.message)
+sinks:
+  vlogs_es:
+    type: elasticsearch
+    inputs:
+      - msg_parser
+    endpoints:
+      - http://victorialogs:9428/insert/elasticsearch/
+    mode: bulk
+    api_version: v8
+    compression: gzip
+    healthcheck.enabled: false
+    query:
+      _msg_field: log.msg
+      _time_field: timestamp
+      _stream_fields: source_type,host,container_name
+    request:
+      headers:
+        AccountID: "0"
+        ProjectID: "0"
 ```

 Please, note that `_stream_fields` parameter must follow recommended [best practices](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to achieve better performance.
@@ -1,8 +1,6 @@
-version: '3'
-
 services:
   vector:
-    image: docker.io/timberio/vector:0.30.0-distroless-static
+    image: docker.io/timberio/vector:0.38.0-distroless-static
     restart: on-failure
     volumes:
       - type: bind
@@ -11,7 +9,7 @@ services:
       - type: bind
         source: /var/lib/docker
         target: /var/lib/docker
-      - ./vector.toml:/etc/vector/vector.toml:ro
+      - ./vector.yaml:/etc/vector/vector.yaml:ro
     user: root
     ports:
       - '8686:8686'
@@ -31,7 +29,7 @@ services:
       - -storageDataPath=/vlogs
       - -loggerFormat=json
     healthcheck:
-      test: ["CMD", "wget", "-qO-", "http://localhost:9428/health"]
+      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:9428/health"]
       interval: 1s
       timeout: 1s
       retries: 10
@@ -48,7 +46,7 @@ services:
       - victorialogs-vector-docker-vm:/vmsingle
      - ./scrape.yml:/promscrape.yml
     healthcheck:
-      test: ["CMD", "wget", "-qO-", "http://localhost:8428/health"]
+      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:8428/health"]
       interval: 1s
       timeout: 1s
       retries: 10
11 deployment/docker/victorialogs/vector/scrape.yml Normal file
@@ -0,0 +1,11 @@
scrape_configs:
  - job_name: "victoriametrics"
    scrape_interval: 30s
    static_configs:
      - targets:
          - victoriametrics:8428
  - job_name: "victorialogs"
    scrape_interval: 30s
    static_configs:
      - targets:
          - victorialogs:9428
73 deployment/docker/victorialogs/vector/vector.yaml Normal file
@@ -0,0 +1,73 @@
api:
  enabled: true
  address: 0.0.0.0:8686
sources:
  docker:
    type: docker_logs
  demo:
    type: demo_logs
    format: json
  metrics:
    type: internal_metrics
transforms:
  msg_parser:
    type: remap
    inputs:
      - docker
    source: |
      .log = parse_json!(.message)
      del(.message)
sinks:
  vlogs_http:
    type: http
    inputs:
      - msg_parser
    uri: http://victorialogs:9428/insert/jsonline?_stream_fields=source_type,host,container_name&_msg_field=log.msg&_time_field=timestamp
    encoding:
      codec: json
    framing:
      method: newline_delimited
    compression: gzip
    healthcheck:
      enabled: false
    request:
      headers:
        AccountID: '0'
        ProjectID: '0'
  vlogs_loki:
    type: loki
    inputs:
      - demo
    endpoint: http://victorialogs:9428/insert/loki/
    compression: gzip
    path: /api/v1/push?_msg_field=message.message&_time_field=timestamp&_stream_fields=source
    encoding:
      codec: json
    labels:
      source: vector
  victoriametrics:
    type: prometheus_remote_write
    endpoint: http://victoriametrics:8428/api/v1/write
    inputs:
      - metrics
    healthcheck:
      enabled: false
  vlogs_es:
    type: elasticsearch
    inputs:
      - demo
    endpoints:
      - http://victorialogs:9428/insert/elasticsearch/
    mode: bulk
    api_version: v8
    compression: gzip
    healthcheck:
      enabled: false
    query:
      _msg_field: message
      _time_field: timestamp
      _stream_fields: source_type
    request:
      headers:
        AccountID: '0'
        ProjectID: '0'
@@ -16,6 +16,6 @@ services:
       - ./../../dashboards/vm/vmalert.json:/var/lib/grafana/dashboards/vmalert.json
       - ./../../dashboards/vm/vmauth.json:/var/lib/grafana/dashboards/vmauth.json
     environment:
-      - "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.8.2/victoriametrics-datasource-v0.8.2.zip;victoriametrics-datasource"
+      - "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.9.0/victoriametrics-datasource-v0.9.0.zip;victoriametrics-datasource"
       - "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-datasource"
     restart: always
@@ -15,7 +15,7 @@ services:
       - ./../../dashboards/vm/vmagent.json:/var/lib/grafana/dashboards/vmagent.json
       - ./../../dashboards/vm/vmalert.json:/var/lib/grafana/dashboards/vmalert.json
     environment:
-      - "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.8.2/victoriametrics-datasource-v0.8.2.zip;victoriametrics-datasource"
+      - "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.9.0/victoriametrics-datasource-v0.9.0.zip;victoriametrics-datasource"
       - "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-datasource"
     networks:
       - vm_net
@@ -13,11 +13,14 @@ Every LTS line receives bugfixes and [security fixes](https://github.com/Victori
 the initial release. New LTS lines are published every 6 months, so the latest two LTS lines are supported at any given moment. This gives up to 6 months
 for the migration to new LTS lines for [VictoriaMetrics Enterprise](https://docs.victoriametrics.com/enterprise/) users.

+LTS releases are published for [Enterprise versions of VictoriaMetrics](https://docs.victoriametrics.com/enterprise/) only.
+When a new LTS line is created, the new LTS release might be publicly available for everyone until the next major OS release is published.
+
 All the bugfixes and security fixes, which are included in LTS releases, are also available in [the latest release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest),
 so non-enterprise users are advised to regularly [upgrade](https://docs.victoriametrics.com/#how-to-upgrade-victoriametrics) VictoriaMetrics products
 to [the latest available releases](https://docs.victoriametrics.com/changelog/).

 ## Currently supported LTS release lines

-- 1.102.x - the latest one is [v1.102.1 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.102.1)
-- 1.97.x - the latest one is [v1.97.6 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.97.6)
+- 1.102.x - the latest one is [v1.102.2 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.102.2)
+- 1.97.x - the latest one is [v1.97.7 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.97.7)
@@ -24,9 +24,12 @@ according to [these docs](https://docs.victoriametrics.com/victorialogs/quicksta
 * FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): move the Markdown toggle to the general settings panel in the upper left corner.
 * FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add search functionality to the column display settings in the table. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6668).
 * FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add the ability to select all columns in the column display settings of the table. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6668). Thanks to @yincongcyincong for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6680).
+* FEATURE: allow defining ingestion parameters via headers. Supported headers: `VL-Msg-Field`, `VL-Stream-Fields`, `VL-Ignore-Fields`, `VL-Time-Field`, `VL-Debug`. See this [PR](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6443) for details.
+* FEATURE: [vlinsert](https://docs.victoriametrics.com/victorialogs/): added OpenTelemetry logs ingestion support. See this [PR](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6218) for details.

 * BUGFIX: properly handle Logstash requests for Elasticsearch configuration when using `outputs.elasticsearch` in Logstash pipelines. Previously, the requests could be rejected with `400 Bad Request` response. Updates [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4750).
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix `not found index.js` error when loading vmui in VictoriaLogs. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6764). Thanks to @yincongcyincong for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6770).
 * BUGFIX: properly execute queries with `OR` [filters](https://docs.victoriametrics.com/victorialogs/logsql/#logical-filter) for distinct [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model). For example, `field1:foo OR field2:bar`. Previously logs matching these filters may be skipped during querying. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6554) for details. Thanks to @yincongcyincong for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6556).

 ## [v0.28.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.28.0-victorialogs)
@@ -241,10 +244,10 @@ Released at 2024-05-24

 * FEATURE: return the number of matching log entries per returned value in [HTTP API](https://docs.victoriametrics.com/victorialogs/querying/#http-api) results. This simplifies detecting [field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) / [stream](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) values with the biggest number of logs for the given [LogsQL query](https://docs.victoriametrics.com/victorialogs/logsql/).
 * FEATURE: improve performance for [regexp filter](https://docs.victoriametrics.com/victorialogs/logsql/#regexp-filter) in the following cases:
-  - If the regexp contains just a phrase without special regular expression chars. For example, `~"foo"`.
-  - If the regexp starts with `.*` or ends with `.*`. For example, `~".*foo.*"`.
-  - If the regexp contains multiple strings delimited by `|`. For example, `~"foo|bar|baz"`.
-  - If the regexp contains multiple [words](https://docs.victoriametrics.com/victorialogs/logsql/#word). For example, `~"foo bar baz"`.
+  * If the regexp contains just a phrase without special regular expression chars. For example, `~"foo"`.
+  * If the regexp starts with `.*` or ends with `.*`. For example, `~".*foo.*"`.
+  * If the regexp contains multiple strings delimited by `|`. For example, `~"foo|bar|baz"`.
+  * If the regexp contains multiple [words](https://docs.victoriametrics.com/victorialogs/logsql/#word). For example, `~"foo bar baz"`.
 * FEATURE: allow disabling automatic unquoting of the matched placeholders in [`extract` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#extract-pipe). See [these docs](https://docs.victoriametrics.com/victorialogs/logsql/#format-for-extract-pipe-pattern).

 * BUGFIX: properly parse `!` in front of [exact filter](https://docs.victoriametrics.com/victorialogs/logsql/#exact-filter), [exact-prefix filter](https://docs.victoriametrics.com/victorialogs/logsql/#exact-prefix-filter) and [regexp filter](https://docs.victoriametrics.com/victorialogs/logsql/#regexp-filter). For example, `!~"some regexp"` is properly parsed as `not ="some regexp"`. Previously it was incorrectly parsed as `'~="some regexp"'` [phrase filter](https://docs.victoriametrics.com/victorialogs/logsql/#phrase-filter).
@@ -304,7 +307,6 @@ Released at 2024-05-15
 * FEATURE: add ability to return the first `N` results from [`sort` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#sort-pipe). This is useful when `N` biggest or `N` smallest values must be returned from large amounts of logs.
 * FEATURE: add [`quantile`](https://docs.victoriametrics.com/victorialogs/logsql/#quantile-stats) and [`median`](https://docs.victoriametrics.com/victorialogs/logsql/#median-stats) [stats functions](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe).

 ## [v0.6.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.6.1-victorialogs)

 Released at 2024-05-14
@@ -314,7 +316,6 @@ Released at 2024-05-14
 * BUGFIX: properly return matching logs in [streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) with small number of entries. Previously they could be skipped. The issue has been introduced in [the release v0.6.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.6.0-victorialogs).
 * BUGFIX: fix `runtime error: index out of range` panic when using [`sort` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#sort-pipe) like `_time:1h | sort by (_time)`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6258).

 ## [v0.6.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.6.0-victorialogs)

 Released at 2024-05-12
@@ -342,7 +343,6 @@ Released at 2024-04-11

 * BUGFIX: properly register new [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) under high data ingestion rate. The issue has been introduced in [v0.5.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.5.0-victorialogs).

 ## [v0.5.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.5.1-victorialogs)

 Released at 2024-04-04
@@ -14,6 +14,51 @@ aliases:

 # Fluentbit setup

+VictoriaLogs supports the following Fluentbit outputs:
+- [Elasticsearch](#elasticsearch)
+- [Loki](#loki)
+- [HTTP JSON](#http)
+
+## Elasticsearch
+
+Specify the [elasticsearch output](https://docs.fluentbit.io/manual/pipeline/outputs/elasticsearch) section in the `fluentbit.conf`
+for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):
+
+```conf
+[Output]
+    Name es
+    Match *
+    host victorialogs
+    port 9428
+    compress gzip
+    path /insert/elasticsearch
+    header AccountID 0
+    header ProjectID 0
+    header VL-Stream-Fields path
+    header VL-Msg-Field log
+    header VL-Time-Field @timestamp
+```
+
+## Loki
+
+Specify the [loki output](https://docs.fluentbit.io/manual/pipeline/outputs/loki) section in the `fluentbit.conf`
+for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):
+
+```conf
+[OUTPUT]
+    name loki
+    match *
+    host victorialogs
+    uri /insert/loki/api/v1/push
+    port 9428
+    label_keys $path,$log,$time
+    header VL-Msg-Field log
+    header VL-Time-Field time
+    header VL-Stream-Fields path
+```
+
+## HTTP
+
+Specify the [http output](https://docs.fluentbit.io/manual/pipeline/outputs/http) section in the `fluentbit.conf`
+for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):
@@ -11,6 +11,13 @@ aliases:
 - /victorialogs/data-ingestion/logstash.html
 - /victorialogs/data-ingestion/Logstash.html
 ---
+VictoriaLogs supports the following Logstash outputs:
+- [Elasticsearch](#elasticsearch)
+- [Loki](#loki)
+- [HTTP JSON](#http)
+
 ## Elasticsearch

 Specify [`output.elasticsearch`](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) section in the `logstash.conf` file
 for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):
@@ -106,6 +113,32 @@ output {
 }
 ```

+## Loki
+
+Specify the [`output.loki`](https://grafana.com/docs/loki/latest/send-data/logstash/) section in the `logstash.conf` file
+for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):
+
+```conf
+output {
+  loki {
+    url => "http://victorialogs:9428/insert/loki/api/v1/push?_stream_fields=host.ip,process.name&_msg_field=message&_time_field=@timestamp"
+  }
+}
+```
+
+## HTTP
+
+Specify the [`output.http`](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-http.html) section in the `logstash.conf` file
+for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):
+
+```conf
+output {
+  http {
+    url => "http://victorialogs:9428/insert/jsonline?_stream_fields=host.ip,process.name&_msg_field=message&_time_field=@timestamp"
+    format => "json"
+    http_method => "post"
+  }
+}
+```
+
 See also:

 - [Data ingestion troubleshooting](https://docs.victoriametrics.com/victorialogs/data-ingestion/#troubleshooting).
@@ -6,6 +6,8 @@
 - Logstash - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/logstash/).
 - Vector - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/vector/).
 - Promtail (aka Grafana Loki) - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/promtail/).
+- Telegraf - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/telegraf/).
+- OpenTelemetry Collector - see [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/opentelemetry/).

 The ingested logs can be queried according to [these docs](https://docs.victoriametrics.com/victorialogs/querying/).
@@ -14,7 +16,6 @@ See also:
 - [Log collectors and data ingestion formats](#log-collectors-and-data-ingestion-formats).
 - [Data ingestion troubleshooting](#troubleshooting).

 ## HTTP APIs

 VictoriaLogs supports the following data ingestion HTTP APIs:
@@ -22,6 +23,7 @@ VictoriaLogs supports the following data ingestion HTTP APIs:
 - Elasticsearch bulk API. See [these docs](#elasticsearch-bulk-api).
 - JSON stream API aka [ndjson](https://jsonlines.org/). See [these docs](#json-stream-api).
 - Loki JSON API. See [these docs](#loki-json-api).
+- OpenTelemetry API. See [these docs](#opentelemetry-api).

 VictoriaLogs accepts optional [HTTP parameters](#http-parameters) at data ingestion HTTP APIs.
@@ -168,7 +170,12 @@ See also:

 ### HTTP parameters

-VictoriaLogs accepts the following parameters at [data ingestion HTTP APIs](#http-apis):
+VictoriaLogs accepts the following configuration parameters via [HTTP headers](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields) or URL [query string](https://en.wikipedia.org/wiki/Query_string) at [data ingestion HTTP APIs](#http-apis).
+The first defined parameter is used. [Query string](https://en.wikipedia.org/wiki/Query_string) parameters have priority over HTTP headers.
+
+#### HTTP Query string parameters
+
+List of supported [query string](https://en.wikipedia.org/wiki/Query_string) parameters:

 - `_msg_field` - it must contain the name of the [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
   with the [log message](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field) generated by the log shipper.
@@ -193,10 +200,37 @@ VictoriaLogs accepts the following parameters at [data ingestion HTTP APIs](#htt

 See also [HTTP headers](#http-headers).

-### HTTP headers
+#### HTTP headers

+List of supported [HTTP header](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields) parameters:

-- `AccountID` - may contain the needed accountID of the tenant to ingest data to. See [multitenancy docs](https://docs.victoriametrics.com/victorialogs/#multitenancy) for details.
-- `ProjectID` - may contain the needed projectID of the tenant to ingest data to. See [multitenancy docs](https://docs.victoriametrics.com/victorialogs/#multitenancy) for details.
+VictoriaLogs accepts optional `AccountID` and `ProjectID` headers at [data ingestion HTTP APIs](#http-apis).
+These headers may contain the needed tenant to ingest data to. See [multitenancy docs](https://docs.victoriametrics.com/victorialogs/#multitenancy) for details.
+
+- `VL-Msg-Field` - it must contain the name of the [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
+  with the [log message](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field) generated by the log shipper.
+  This is usually the `message` field for Filebeat and Logstash.
+  If the `VL-Msg-Field` header isn't set, then VictoriaLogs reads the log message from the `_msg` field.
+
+- `VL-Time-Field` - it must contain the name of the [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
+  with the [log timestamp](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field) generated by the log shipper.
+  This is usually the `@timestamp` field for Filebeat and Logstash.
+  If the `VL-Time-Field` header isn't set, then VictoriaLogs reads the timestamp from the `_time` field.
+  If this field doesn't exist, then the current timestamp is used.
+
+- `VL-Stream-Fields` - it should contain a comma-separated list of [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) names,
+  which uniquely identify every [log stream](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) collected by the log shipper.
+  If the `VL-Stream-Fields` header isn't set, then all the ingested logs are written to the default log stream - `{}`.
+
+- `VL-Ignore-Fields` - this parameter may contain the list of [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) names,
+  which must be ignored during data ingestion.
+
+- `VL-Debug` - if this parameter is set to `1`, then the ingested logs aren't stored in VictoriaLogs. Instead,
+  the ingested data is logged by VictoriaLogs, so it can be investigated later.
+
+See also [HTTP Query string parameters](#http-query-string-parameters).
|
||||
|
||||
|
@@ -241,12 +275,14 @@ VictoriaLogs exposes various [metrics](https://docs.victoriametrics.com/victoria

 Here is the list of log collectors and their ingestion formats supported by VictoriaLogs:

-| How to setup the collector | Format: Elasticsearch | Format: JSON Stream | Format: Loki | Format: syslog |
-|----------------------------|-----------------------|---------------------|--------------|----------------|
-| [Rsyslog](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/) | [Yes](https://www.rsyslog.com/doc/configuration/modules/omelasticsearch.html) | No | No | [Yes](https://www.rsyslog.com/doc/configuration/modules/omfwd.html) |
-| [Syslog-ng](https://docs.victoriametrics.com/victorialogs/data-ingestion/filebeat/) | Yes, [v1](https://support.oneidentity.com/technical-documents/syslog-ng-open-source-edition/3.16/administration-guide/28#TOPIC-956489), [v2](https://support.oneidentity.com/technical-documents/doc/syslog-ng-open-source-edition/3.16/administration-guide/29#TOPIC-956494) | No | No | [Yes](https://support.oneidentity.com/technical-documents/doc/syslog-ng-open-source-edition/3.16/administration-guide/44#TOPIC-956553) |
-| [Filebeat](https://docs.victoriametrics.com/victorialogs/data-ingestion/filebeat/) | [Yes](https://www.elastic.co/guide/en/beats/filebeat/current/elasticsearch-output.html) | No | No | No |
-| [Fluentbit](https://docs.victoriametrics.com/victorialogs/data-ingestion/fluentbit/) | No | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/http) | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/loki) | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/syslog) |
-| [Logstash](https://docs.victoriametrics.com/victorialogs/data-ingestion/logstash/) | [Yes](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) | No | No | [Yes](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-syslog.html) |
-| [Vector](https://docs.victoriametrics.com/victorialogs/data-ingestion/vector/) | [Yes](https://vector.dev/docs/reference/configuration/sinks/elasticsearch/) | [Yes](https://vector.dev/docs/reference/configuration/sinks/http/) | [Yes](https://vector.dev/docs/reference/configuration/sinks/loki/) | No |
-| [Promtail](https://docs.victoriametrics.com/victorialogs/data-ingestion/promtail/) | No | No | [Yes](https://grafana.com/docs/loki/latest/clients/promtail/configuration/#clients) | No |
+| How to setup the collector | Format: Elasticsearch | Format: JSON Stream | Format: Loki | Format: syslog | Format: OpenTelemetry |
+|----------------------------|-----------------------|---------------------|--------------|----------------|-----------------------|
+| [Rsyslog](https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/) | [Yes](https://www.rsyslog.com/doc/configuration/modules/omelasticsearch.html) | No | No | [Yes](https://www.rsyslog.com/doc/configuration/modules/omfwd.html) | No |
+| [Syslog-ng](https://docs.victoriametrics.com/victorialogs/data-ingestion/filebeat/) | Yes, [v1](https://support.oneidentity.com/technical-documents/syslog-ng-open-source-edition/3.16/administration-guide/28#TOPIC-956489), [v2](https://support.oneidentity.com/technical-documents/doc/syslog-ng-open-source-edition/3.16/administration-guide/29#TOPIC-956494) | No | No | [Yes](https://support.oneidentity.com/technical-documents/doc/syslog-ng-open-source-edition/3.16/administration-guide/44#TOPIC-956553) | No |
+| [Filebeat](https://docs.victoriametrics.com/victorialogs/data-ingestion/filebeat/) | [Yes](https://www.elastic.co/guide/en/beats/filebeat/current/elasticsearch-output.html) | No | No | No | No |
+| [Fluentbit](https://docs.victoriametrics.com/victorialogs/data-ingestion/fluentbit/) | No | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/http) | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/loki) | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/syslog) | [Yes](https://docs.fluentbit.io/manual/pipeline/outputs/opentelemetry) |
+| [Logstash](https://docs.victoriametrics.com/victorialogs/data-ingestion/logstash/) | [Yes](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) | No | No | [Yes](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-syslog.html) | [Yes](https://github.com/paulgrav/logstash-output-opentelemetry) |
+| [Vector](https://docs.victoriametrics.com/victorialogs/data-ingestion/vector/) | [Yes](https://vector.dev/docs/reference/configuration/sinks/elasticsearch/) | [Yes](https://vector.dev/docs/reference/configuration/sinks/http/) | [Yes](https://vector.dev/docs/reference/configuration/sinks/loki/) | No | [Yes](https://vector.dev/docs/reference/configuration/sources/opentelemetry/) |
+| [Promtail](https://docs.victoriametrics.com/victorialogs/data-ingestion/promtail/) | No | No | [Yes](https://grafana.com/docs/loki/latest/clients/promtail/configuration/#clients) | No | No |
+| [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/) | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter) | No | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/lokiexporter) | [Yes](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/syslogexporter) | [Yes](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter) |
+| [Telegraf](https://docs.victoriametrics.com/victorialogs/data-ingestion/telegraf/) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/elasticsearch) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/http) | [Yes](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) | [Yes](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/syslog) | Yes |
123 docs/VictoriaLogs/data-ingestion/Telegraf.md Normal file
@@ -0,0 +1,123 @@
---
weight: 5
title: Telegraf setup
disableToc: true
menu:
  docs:
    parent: "victorialogs-data-ingestion"
    weight: 5
aliases:
  - /VictoriaLogs/data-ingestion/Telegraf.html
---
# Telegraf setup

VictoriaLogs supports the following Telegraf outputs:
- [Elasticsearch](#elasticsearch)
- [Loki](#loki)
- [HTTP JSON](#http)

## Elasticsearch

Specify the [Elasticsearch output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/elasticsearch) in the `telegraf.toml`
for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):

```toml
[[outputs.elasticsearch]]
  urls = ["http://localhost:9428/insert/elasticsearch"]
  timeout = "1m"
  flush_interval = "30s"
  enable_sniffer = false
  health_check_interval = "0s"
  index_name = "device_log-%Y.%m.%d"
  manage_template = false
  template_name = "telegraf"
  overwrite_template = false
  namepass = ["tail"]
  [outputs.elasticsearch.headers]
    "VL-Msg-Field" = "tail.value"
    "VL-Time-Field" = "@timestamp"
    "VL-Stream-Fields" = "tag.log_source,tag.metric_type"

[[inputs.tail]]
  files = ["/tmp/telegraf.log"]
  from_beginning = false
  interval = "10s"
  pipe = false
  watch_method = "inotify"
  data_format = "value"
  data_type = "string"
  character_encoding = "utf-8"
  [inputs.tail.tags]
    metric_type = "logs"
    log_source = "telegraf"
```

## Loki

Specify the [Loki output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) in the `telegraf.toml`
for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):

```toml
[[outputs.loki]]
  domain = "http://localhost:9428"
  endpoint = "/insert/loki/api/v1/push?_msg_field=tail.value&_time_field=@timefield&_stream_fields=log_source,metric_type"
  namepass = ["tail"]
  gzip_request = true
  sanitize_label_names = true

[[inputs.tail]]
  files = ["/tmp/telegraf.log"]
  from_beginning = false
  interval = "10s"
  pipe = false
  watch_method = "inotify"
  data_format = "value"
  data_type = "string"
  character_encoding = "utf-8"
  [inputs.tail.tags]
    metric_type = "logs"
    log_source = "telegraf"
```

## HTTP

Specify the [HTTP output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/http) in the `telegraf.toml`, with batch mode disabled,
for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):

```toml
[[inputs.tail]]
  files = ["/tmp/telegraf.log"]
  from_beginning = false
  interval = "10s"
  pipe = false
  watch_method = "inotify"
  data_format = "value"
  data_type = "string"
  character_encoding = "utf-8"
  [inputs.tail.tags]
    metric_type = "logs"
    log_source = "telegraf"

[[outputs.http]]
  url = "http://localhost:9428/insert/jsonline?_msg_field=fields.message&_time_field=timestamp&_stream_fields=tags.log_source,tags.metric_type"
  data_format = "json"
  namepass = ["tail"]
  use_batch_format = false
```

Substitute the `localhost:9428` address with the real TCP address of VictoriaLogs.

See [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-headers) for details on the headers specified
in the `[[outputs.elasticsearch]]` section.

It is recommended to verify whether the initial setup generates the needed [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
and uses the correct [stream fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields).

See also:

- [Data ingestion troubleshooting](https://docs.victoriametrics.com/victorialogs/data-ingestion/#troubleshooting).
- [How to query VictoriaLogs](https://docs.victoriametrics.com/victorialogs/querying/).
- [Elasticsearch output docs for Telegraf](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/elasticsearch).
- [Docker-compose demo for Telegraf integration with VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/telegraf-docker).
@@ -11,24 +11,52 @@ aliases:
 - /victorialogs/data-ingestion/Vector.html
 - /victorialogs/data-ingestion/vector.html
 ---
-## Elasticsearch sink
+VictoriaLogs supports the following Vector sinks:
+- [Elasticsearch](#elasticsearch)
+- [Loki](#loki)
+- [HTTP JSON](#http)

-Specify [Elasticsearch sink type](https://vector.dev/docs/reference/configuration/sinks/elasticsearch/) in the `vector.toml`
+## Elasticsearch
+
+Specify [Elasticsearch sink type](https://vector.dev/docs/reference/configuration/sinks/elasticsearch/) in the `vector.yaml`
 for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):

-```toml
-[sinks.vlogs]
-inputs = [ "your_input" ]
-type = "elasticsearch"
-endpoints = [ "http://localhost:9428/insert/elasticsearch/" ]
-mode = "bulk"
-api_version = "v8"
-healthcheck.enabled = false
-
-[sinks.vlogs.query]
-_msg_field = "message"
-_time_field = "timestamp"
-_stream_fields = "host,container_name"
-```
+```yaml
+sinks:
+  vlogs:
+    inputs:
+      - your_input
+    type: elasticsearch
+    endpoints:
+      - http://localhost:9428/insert/elasticsearch/
+    mode: bulk
+    api_version: v8
+    healthcheck:
+      enabled: false
+    query:
+      _msg_field: message
+      _time_field: timestamp
+      _stream_fields: host,container_name
+```
+
+## Loki
+
+Specify [Loki sink type](https://vector.dev/docs/reference/configuration/sinks/loki/) in the `vector.yaml`
+for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/):
+
+```yaml
+sinks:
+  vlogs:
+    type: "loki"
+    endpoint: http://localhost:9428/insert/loki/
+    inputs:
+      - your_input
+    compression: gzip
+    path: /api/v1/push?_msg_field=message.message&_time_field=timestamp&_stream_fields=source
+    encoding:
+      codec: json
+    labels:
+      source: vector
+```

 Substitute the `localhost:9428` address inside `endpoints` section with the real TCP address of VictoriaLogs.
@ -36,129 +64,148 @@ Substitute the `localhost:9428` address inside `endpoints` section with the real
|
|||
Replace `your_input` with the name of the `inputs` section, which collects logs. See [these docs](https://vector.dev/docs/reference/configuration/sources/) for details.
|
||||
|
||||
See [these docs](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters) for details on parameters specified
|
||||
in the `[sinks.vlogs.query]` section.
|
||||
in the `sinks.vlogs.query` section.
|
||||
|
||||
It is recommended verifying whether the initial setup generates the needed [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
|
||||
and uses the correct [stream fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields).
|
||||
This can be done by specifying `debug` [parameter](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters)
|
||||
in the `[sinks.vlogs.query]` section and inspecting VictoriaLogs logs then:
|
||||
in the `sinks.vlogs.query` section and inspecting VictoriaLogs logs then:
|
||||
|
||||
```toml
|
||||
[sinks.vlogs]
|
||||
inputs = [ "your_input" ]
|
||||
type = "elasticsearch"
|
||||
endpoints = [ "http://localhost:9428/insert/elasticsearch/" ]
|
||||
mode = "bulk"
|
||||
api_version = "v8"
|
||||
healthcheck.enabled = false
|
||||
|
||||
[sinks.vlogs.query]
|
||||
_msg_field = "message"
|
||||
_time_field = "timestamp"
|
||||
_stream_fields = "host,container_name"
|
||||
debug = "1"
|
||||
```yaml
|
||||
sinks:
|
||||
vlogs:
|
||||
inputs:
|
||||
- your_input
|
||||
type: elasticsearch
|
||||
endpoints:
|
||||
- http://localhost:9428/insert/elasticsearch/
|
||||
mode: bulk
|
||||
api_version: v8
|
||||
healthcheck:
|
||||
enabled: false
|
||||
query:
|
||||
_msg_field: message
|
||||
_time_field: timestamp
|
||||
_stream_fields: host,container_name
|
||||
debug: "1"
|
||||
```
|
||||
|
||||
If some [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) must be skipped
|
||||
during data ingestion, then they can be put into `ignore_fields` [parameter](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters).
|
||||
For example, the following config instructs VictoriaLogs to ignore `log.offset` and `event.original` fields in the ingested logs:
|
||||
|
||||
```toml
|
||||
[sinks.vlogs]
|
||||
inputs = [ "your_input" ]
|
||||
type = "elasticsearch"
|
||||
endpoints = [ "http://localhost:9428/insert/elasticsearch/" ]
|
||||
mode = "bulk"
|
||||
api_version = "v8"
|
||||
healthcheck.enabled = false
|
||||
|
||||
[sinks.vlogs.query]
|
||||
_msg_field = "message"
|
||||
_time_field = "timestamp"
|
||||
_stream_fields = "host,container_name"
|
||||
ignore_fields = "log.offset,event.original"
|
||||
```yaml
|
||||
sinks:
|
||||
vlogs:
|
||||
inputs:
|
||||
- your_input
|
||||
type: elasticsearch
|
||||
endpoints:
|
||||
- http://localhost:9428/insert/elasticsearch/
|
||||
mode: bulk
|
||||
api_version: v8
|
||||
healthcheck:
|
||||
enabled: false
|
||||
query:
|
||||
_msg_field: message
|
||||
_time_field: timestamp
|
||||
_stream_fields: host,container_name
|
||||
_ignore_fields: log.offset,event.original
|
||||
```
|
||||
|
||||
When Vector ingests logs into VictoriaLogs at a high rate, then it may be needed to tune `batch.max_events` option.
|
||||
For example, the following config is optimized for higher than usual ingestion rate:
|
||||
|
||||
```toml
|
||||
[sinks.vlogs]
|
||||
inputs = [ "your_input" ]
|
||||
type = "elasticsearch"
|
||||
endpoints = [ "http://localhost:9428/insert/elasticsearch/" ]
|
||||
mode = "bulk"
|
||||
api_version = "v8"
|
||||
healthcheck.enabled = false
|
||||
|
||||
[sinks.vlogs.query]
|
||||
_msg_field = "message"
|
||||
_time_field = "timestamp"
|
||||
_stream_fields = "host,container_name"
|
||||
|
||||
[sinks.vlogs.batch]
|
||||
max_events = 1000
|
||||
```yaml
|
||||
sinks:
|
||||
vlogs:
|
||||
inputs:
|
||||
- your_input
|
||||
type: elasticsearch
|
||||
endpoints:
|
||||
- http://localhost:9428/insert/elasticsearch/
|
||||
mode: bulk
|
||||
api_version: v8
|
||||
healthcheck:
|
||||
enabled: false
|
||||
query:
|
||||
_msg_field: message
|
||||
_time_field: timestamp
|
||||
_stream_fields: host,container_name
|
||||
batch]
|
||||
max_events: 1000
|
||||
```
|
||||
|
||||
If the Vector sends logs to VictoriaLogs in another datacenter, then it may be useful enabling data compression via `compression = "gzip"` option.
|
||||
This usually allows saving network bandwidth and costs by up to 5 times:
|
||||
|
||||
```toml
|
||||
[sinks.vlogs]
|
||||
inputs = [ "your_input" ]
|
||||
type = "elasticsearch"
|
||||
endpoints = [ "http://localhost:9428/insert/elasticsearch/" ]
|
||||
mode = "bulk"
|
||||
api_version = "v8"
|
||||
healthcheck.enabled = false
|
||||
compression = "gzip"
|
||||
|
||||
[sinks.vlogs.query]
|
||||
_msg_field = "message"
|
||||
_time_field = "timestamp"
|
||||
_stream_fields = "host,container_name"
|
||||
```yaml
|
||||
sinks:
|
||||
vlogs:
|
||||
inputs:
|
||||
- your_input
|
||||
type: elasticsearch
|
||||
endpoints:
|
||||
- http://localhost:9428/insert/elasticsearch/
|
||||
mode: bulk
|
||||
api_version: v8
|
||||
healthcheck:
|
||||
enabled: false
|
||||
compression: gzip
|
||||
query:
|
||||
_msg_field: message
|
||||
_time_field: timestamp
|
||||
_stream_fields: host,container_name
|
||||
```
|
||||
|
||||
By default, the ingested logs are stored in the `(AccountID=0, ProjectID=0)` [tenant](https://docs.victoriametrics.com/victorialogs/keyconcepts/#multitenancy).
|
||||
If you need storing logs in other tenant, then specify the needed tenant via `[sinks.vlogs.request.headers]` section.
|
||||
For example, the following `vector.toml` config instructs Vector to store the data to `(AccountID=12, ProjectID=34)` tenant:
|
||||
If you need storing logs in other tenant, then specify the needed tenant via `sinks.vlogs.request.headers` section.
|
||||
For example, the following `vector.yaml` config instructs Vector to store the data to `(AccountID=12, ProjectID=34)` tenant:
|
||||
|
||||
```toml
|
||||
[sinks.vlogs]
|
||||
inputs = [ "your_input" ]
|
||||
type = "elasticsearch"
|
||||
endpoints = [ "http://localhost:9428/insert/elasticsearch/" ]
|
||||
mode = "bulk"
|
||||
api_version = "v8"
|
||||
healthcheck.enabled = false
|
||||
|
||||
[sinks.vlogs.query]
|
||||
_msg_field = "message"
|
||||
_time_field = "timestamp"
|
||||
_stream_fields = "host,container_name"
|
||||
|
||||
[sinks.vlogs.request.headers]
|
||||
AccountID = "12"
|
||||
ProjectID = "34"
|
||||
```yaml
|
||||
sinks:
|
||||
vlogs:
|
||||
inputs:
|
||||
- your_input
|
||||
type: elasticsearch
|
||||
endpoints:
|
||||
- http://localhost:9428/insert/elasticsearch/
|
||||
mode: bulk
|
||||
api_version: v8
|
||||
healthcheck:
|
||||
enabled: false
|
||||
query:
|
||||
_msg_field: message
|
||||
_time_field: timestamp
|
||||
_stream_fields: host,container_name
|
||||
request:
|
||||
headers:
|
||||
AccountID: "12"
|
||||
ProjectID: "34"
|
||||
```
|
||||
|
||||
## HTTP sink
|
||||
## HTTP
|
||||
|
||||
Vector can be configured with [HTTP](https://vector.dev/docs/reference/configuration/sinks/http/) sink type
|
||||
for sending data to [JSON stream API](https://docs.victoriametrics.com/victorialogs/data-ingestion/#json-stream-api):
|
||||
|
||||
```toml
[sinks.vlogs]
inputs = [ "your_input" ]
type = "http"
uri = "http://localhost:9428/insert/jsonline?_stream_fields=host,container_name&_msg_field=message&_time_field=timestamp"
encoding.codec = "json"
framing.method = "newline_delimited"
healthcheck.enabled = false

[sinks.vlogs.request.headers]
AccountID = "12"
ProjectID = "34"
```

```yaml
sinks:
  vlogs:
    inputs:
      - your_input
    type: http
    uri: http://localhost:9428/insert/jsonline?_stream_fields=host,container_name&_msg_field=message&_time_field=timestamp
    encoding:
      codec: json
    framing:
      method: newline_delimited
    healthcheck:
      enabled: false
    request:
      headers:
        AccountID: "12"
        ProjectID: "34"
```
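
To sanity-check the same endpoint outside of Vector, a single log line can be pushed with `curl`. This is a minimal sketch assuming VictoriaLogs listens on `localhost:9428`; the log field values are illustrative:

```sh
# push one JSON log line to the jsonline endpoint used by the sink above
echo '{"message":"test message","timestamp":"2024-09-01T10:00:00Z","host":"host123","container_name":"app42"}' | \
  curl -s -X POST -H 'Content-Type: application/stream+json' --data-binary @- \
  'http://localhost:9428/insert/jsonline?_stream_fields=host,container_name&_msg_field=message&_time_field=timestamp'
```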

See also:

docs/VictoriaLogs/data-ingestion/opentelemetry.md (new file, 120 lines)

@@ -0,0 +1,120 @@
---
weight: 4
title: OpenTelemetry setup
disableToc: true
menu:
  docs:
    parent: "victorialogs-data-ingestion"
    weight: 4
aliases:
- /VictoriaLogs/data-ingestion/OpenTelemetry.html
---

VictoriaLogs supports both the OpenTelemetry client [SDK](https://opentelemetry.io/docs/languages/) and the [collector](https://opentelemetry.io/docs/collector/).

## Client SDK

Specify `EndpointURL` for the HTTP exporter builder.

Consider the following example for the Go SDK:

```go
// Create the OTLP log exporter that sends logs to configured destination
logExporter, err := otlploghttp.New(ctx,
    otlploghttp.WithEndpointURL("http://victorialogs:9428/insert/opentelemetry/v1/logs"),
)
```

Optionally, [stream fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) can be defined via headers:

```go
// Create the OTLP log exporter that sends logs to configured destination
logExporter, err := otlploghttp.New(ctx,
    otlploghttp.WithEndpointURL("http://victorialogs:9428/insert/opentelemetry/v1/logs"),
    otlploghttp.WithHeaders(map[string]string{"VL-Stream-Fields": "telemetry.sdk.language,severity"}),
)
```

The given config defines two stream fields: `severity` and `telemetry.sdk.language`.
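
Once ingested, such logs can be narrowed down by these stream fields with a LogsQL stream filter. A minimal sketch (the `go` value is illustrative):

```logsql
{telemetry.sdk.language="go"} _time:5m
```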

See also [HTTP headers](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-headers).

## Collector configuration

VictoriaLogs supports the following OpenTelemetry collector exporters:

* [Elasticsearch](#elasticsearch)
* [Loki](#loki)
* [OpenTelemetry](#opentelemetry)

### Elasticsearch

```yaml
exporters:
  elasticsearch:
    endpoints:
      - http://victorialogs:9428/insert/elasticsearch
receivers:
  filelog:
    include: [/tmp/logs/*.log]
    resource:
      region: us-east-1
service:
  pipelines:
    logs:
      receivers: [filelog]
      exporters: [elasticsearch]
```

### Loki

```yaml
exporters:
  loki:
    endpoint: http://victorialogs:9428/insert/loki/api/v1/push
receivers:
  filelog:
    include: [/tmp/logs/*.log]
    resource:
      region: us-east-1
service:
  pipelines:
    logs:
      receivers: [filelog]
      exporters: [loki]
```

### OpenTelemetry

Specify the logs endpoint for the [OTLP/HTTP exporter](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/otlphttpexporter/README.md) in the configuration file
for sending the collected logs to [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/):

```yaml
exporters:
  otlphttp:
    logs_endpoint: http://localhost:9428/insert/opentelemetry/v1/logs
```

Optionally, [stream fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) can be defined via headers:

```yaml
exporters:
  otlphttp:
    logs_endpoint: http://localhost:9428/insert/opentelemetry/v1/logs
    headers:
      VL-Stream-Fields: telemetry.sdk.language,severity
```

See also [HTTP headers](https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-headers).

Substitute the `localhost:9428` address inside `exporters.otlphttp.logs_endpoint` with the real address of VictoriaLogs.

The ingested log entries can be queried according to [these docs](https://docs.victoriametrics.com/VictoriaLogs/querying/).
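
For a quick check that the collected logs actually arrived, a LogsQL query can be issued against the HTTP querying API. A minimal sketch assuming VictoriaLogs listens on `localhost:9428`; the query text is illustrative:

```sh
# return logs ingested during the last 5 minutes that contain the word "error"
curl http://localhost:9428/select/logsql/query -d 'query=_time:5m error'
```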

See also:

* [Data ingestion troubleshooting](https://docs.victoriametrics.com/victorialogs/data-ingestion/#troubleshooting).
* [How to query VictoriaLogs](https://docs.victoriametrics.com/victorialogs/querying/).
* [Docker-compose demo for OpenTelemetry collector integration with VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/opentelemetry-collector).

@@ -20,7 +20,6 @@ The VictoriaLogs datasource plugin allows you to query and visualize

* [Installation](#installation)
* [How to make new release](#how-to-make-new-release)
* [Notes](#notes)
* [Frequently Asked Questions](#faq)
* [License](#license)

## Installation

@@ -96,7 +95,7 @@ docker-compose -f docker-compose.yaml up

After Grafana starts successfully, the datasource should be available in the datasources tab

![Configuration](provision_datasources.webp)
<img src="provision_datasources.webp" width="800" alt="Configuration">

### Install in Kubernetes

@@ -136,7 +135,7 @@ extraInitContainers:
  - |
    set -ex
    mkdir -p /var/lib/grafana/plugins/
    ver=$(curl -s https://api.github.com/repos/VictoriaMetrics/victorialogs-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    ver=$(curl -s -L https://api.github.com/repos/VictoriaMetrics/victorialogs-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    curl -L https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/$ver/victorialogs-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/vl-plugin.tar.gz
    tar -xf /var/lib/grafana/plugins/vl-plugin.tar.gz -C /var/lib/grafana/plugins/
    rm /var/lib/grafana/plugins/vl-plugin.tar.gz

@@ -309,12 +308,6 @@ In the `plugin.json` file of our plugin, the `metrics` field is set to `true`. T

For more information on the fields in `plugin.json`, please refer to the [Grafana documentation](https://grafana.com/developers/plugin-tools/reference-plugin-json#properties).

## FAQ

### Which version of Grafana is required in order to use VictoriaLogs datasource?

[10.0.3](https://grafana.com/grafana/download/10.0.3) or newer.

## License

This project is licensed under

@@ -18,6 +18,14 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/).

## tip

**Update note 1: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation/): perform deduplication for all received data when the `-streamAggr.dedupInterval` or `-remoteWrite.streamAggr.dedupInterval` command-line flag is set. Previously, if `-remoteWrite.streamAggr.config` or `-streamAggr.config` was set, only series that matched the aggregation config were deduplicated. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6711#issuecomment-2288361213) for details.**

* FEATURE: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation/): perform deduplication for all received data when the `-streamAggr.dedupInterval` or `-remoteWrite.streamAggr.dedupInterval` command-line flags are set. Previously, if `-remoteWrite.streamAggr.config` or `-streamAggr.config` was set, only series that matched the aggregation config were deduplicated. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6711#issuecomment-2288361213) for details.
* FEATURE: all VictoriaMetrics [enterprise](https://docs.victoriametrics.com/enterprise/) components: add support of hot-reload for license key supplied by `-licenseFile` command-line flag.

* BUGFIX: [vmagent dashboard](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/dashboards/vmagent.json): fix legend captions for stream aggregation related panels. Previously they were displaying wrong label names.
* BUGFIX: [vmgateway](https://docs.victoriametrics.com/vmgateway/): add missing `datadog`, `newrelic`, `opentelemetry` and `pushgateway` routes to the `JWT` authorization routes. Allows prefixed (`prometheus/graphite`) routes for query requests.

## [v1.103.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.103.0)

Released at 2024-08-28

@@ -8,6 +8,7 @@ menu:
  weight: 6
aliases:
- /CHANGELOG_2020.html
- /changelog_2020
---

## [v1.51.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.51.0)

@@ -8,6 +8,7 @@ menu:
  weight: 5
aliases:
- /CHANGELOG_2021.html
- /changelog_2021
---

## [v1.71.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.71.0)

@@ -8,6 +8,7 @@ menu:
  weight: 4
aliases:
- /CHANGELOG_2022.html
- /changelog_2022
---

## [v1.85.3](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.85.3)

@@ -8,6 +8,7 @@ menu:
  weight: 3
aliases:
- /CHANGELOG_2023.html
- /changelog_2023
---

## [v1.96.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.96.0)

@@ -8,6 +8,7 @@ menu:
aliases:
- /operator/api/
- /operator/api/index.html
- /operator/api.html
---
<!-- this doc autogenerated - don't edit it manually -->

@@ -10,7 +10,7 @@ aliases:
- /operator/vars/index.html
---
<!-- this doc autogenerated - don't edit it manually -->
updated at Thu Aug 29 13:46:17 UTC 2024
updated at Mon Sep 2 21:11:44 UTC 2024

| variable name | variable default value | variable required | variable description |

@@ -86,8 +86,6 @@ before sending them to the configured `-remoteWrite.url`. The de-duplication can

  only the last sample per each seen [time series](https://docs.victoriametrics.com/keyconcepts/#time-series) per every 30 seconds.
  The de-duplication is performed after applying [relabeling](https://docs.victoriametrics.com/vmagent/#relabeling) and
  before performing the aggregation.
  If the `-remoteWrite.streamAggr.config` and / or `-streamAggr.config` is set, then the de-duplication is performed individually per each
  [stream aggregation config](#stream-aggregation-config) for the matching samples after applying [input_relabel_configs](#relabeling).

- By specifying `dedup_interval` option individually per each [stream aggregation config](#stream-aggregation-config)
  in `-remoteWrite.streamAggr.config` or `-streamAggr.config` configs.

@@ -100,9 +98,6 @@ before sending them to the configured `-remoteWrite.url`. The de-duplication can

  seen [time series](https://docs.victoriametrics.com/keyconcepts/#time-series) per every 30 seconds.
  The de-duplication is performed after applying `-relabelConfig` [relabeling](https://docs.victoriametrics.com/#relabeling).

  If the `-streamAggr.config` is set, then the de-duplication is performed individually per each [stream aggregation config](#stream-aggregation-config)
  for the matching samples after applying [input_relabel_configs](#relabeling).

- By specifying `dedup_interval` option individually per each [stream aggregation config](#stream-aggregation-config) at `-streamAggr.config`.

It is possible to drop the given labels before applying the de-duplication. See [these docs](#dropping-unneeded-labels).
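
For illustration, a minimal sketch of a stream aggregation config with per-config deduplication (the matched metric name, interval, and output are assumptions for the example, not taken from this diff):

```yaml
# a single aggregation rule passed via -streamAggr.config or -remoteWrite.streamAggr.config;
# samples matching `match` are deduplicated over dedup_interval before being aggregated
- match: 'http_requests_total'
  interval: 1m
  dedup_interval: 30s
  outputs: [total]
```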
@@ -178,7 +178,7 @@ extraInitContainers:
  - |
    set -ex
    mkdir -p /var/lib/grafana/plugins/
    ver=$(curl -s https://api.github.com/repos/VictoriaMetrics/victoriametrics-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    ver=$(curl -s -L https://api.github.com/repos/VictoriaMetrics/victoriametrics-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    curl -L https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/$ver/victoriametrics-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/vm-plugin.tar.gz
    tar -xf /var/lib/grafana/plugins/vm-plugin.tar.gz -C /var/lib/grafana/plugins/
    rm /var/lib/grafana/plugins/vm-plugin.tar.gz

@@ -199,7 +199,7 @@ to the same `-remoteWrite.url`. In this case you can specify comma-separated list

command-line flag. For example, `-remoteWrite.shardByURL.labels=instance,__name__` would shard metrics with the same name and `instance`
label to the same `-remoteWrite.url`.

Sometimes is may be needed ignoring some labels when sharding samples across multiple `-remoteWrite.url` backends.
Sometimes, it may be necessary to ignore some labels when sharding samples across multiple `-remoteWrite.url` backends.
For example, if all the [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) with the same set of labels
except of `instance` and `pod` labels must be routed to the same backend. In this case the list of ignored labels must be passed to
`-remoteWrite.shardByURL.ignoreLabels` command-line flag: `-remoteWrite.shardByURL.ignoreLabels=instance,pod`.
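
As an illustration, a hypothetical vmagent invocation combining sharding with ignored labels (the backend URLs are placeholders):

```sh
/path/to/vmagent \
  -remoteWrite.url=http://backend-1:8428/api/v1/write \
  -remoteWrite.url=http://backend-2:8428/api/v1/write \
  -remoteWrite.shardByURL \
  -remoteWrite.shardByURL.ignoreLabels=instance,pod
```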
@@ -1,6 +1,6 @@
package filestream

func (st *streamTracker) adviseDontNeed(n int, fdatasync bool) error {
func (st *streamTracker) adviseDontNeed(_ int, _ bool) error {
    return nil
}

@@ -4,7 +4,7 @@ import (
    "os"
)

func fadviseSequentialRead(f *os.File, prefetch bool) error {
func fadviseSequentialRead(_ *os.File, _ bool) error {
    // TODO: implement this properly
    return nil
}

@@ -26,6 +26,7 @@ func TestFilterAnd(t *testing.T) {
    }

    // non-empty intersection
    // foo:a AND foo:abc*
    fa := &filterAnd{
        filters: []filter{
            &filterPhrase{
@@ -41,6 +42,7 @@ func TestFilterAnd(t *testing.T) {
    testFilterMatchForColumns(t, columns, fa, "foo", []int{2, 6})

    // reverse non-empty intersection
    // foo:abc* AND foo:a
    fa = &filterAnd{
        filters: []filter{
            &filterPrefix{
@@ -56,6 +58,7 @@ func TestFilterAnd(t *testing.T) {
    testFilterMatchForColumns(t, columns, fa, "foo", []int{2, 6})

    // the first filter mismatch
    // foo:bc* AND foo:a
    fa = &filterAnd{
        filters: []filter{
            &filterPrefix{
@@ -71,6 +74,7 @@ func TestFilterAnd(t *testing.T) {
    testFilterMatchForColumns(t, columns, fa, "foo", nil)

    // the last filter mismatch
    // foo:abc AND foo:foo*
    fa = &filterAnd{
        filters: []filter{
            &filterPhrase{
@@ -86,6 +90,7 @@ func TestFilterAnd(t *testing.T) {
    testFilterMatchForColumns(t, columns, fa, "foo", nil)

    // empty intersection
    // foo:foo AND foo:abc*
    fa = &filterAnd{
        filters: []filter{
            &filterPhrase{
@@ -101,6 +106,7 @@ func TestFilterAnd(t *testing.T) {
    testFilterMatchForColumns(t, columns, fa, "foo", nil)

    // reverse empty intersection
    // foo:abc* AND foo:foo
    fa = &filterAnd{
        filters: []filter{
            &filterPrefix{
@@ -114,4 +120,255 @@ func TestFilterAnd(t *testing.T) {
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", nil)

    // empty value
    // foo:"" AND bar:""
    fa = &filterAnd{
        filters: []filter{
            &filterExact{
                fieldName: "foo",
                value:     "",
            },
            &filterExact{
                fieldName: "bar",
                value:     "",
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", []int{5})

    // non-existing field with empty value
    // foo:foo* AND bar:""
    fa = &filterAnd{
        filters: []filter{
            &filterPrefix{
                fieldName: "foo",
                prefix:    "foo",
            },
            &filterExact{
                fieldName: "bar",
                value:     "",
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", []int{0, 1, 3, 4, 6})

    // reverse non-existing field with empty value
    // bar:"" AND foo:foo*
    fa = &filterAnd{
        filters: []filter{
            &filterExact{
                fieldName: "bar",
                value:     "",
            },
            &filterPrefix{
                fieldName: "foo",
                prefix:    "foo",
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", []int{0, 1, 3, 4, 6})

    // non-existing field with non-empty value
    // foo:foo* AND bar:*
    fa = &filterAnd{
        filters: []filter{
            &filterPrefix{
                fieldName: "foo",
                prefix:    "foo",
            },
            &filterPrefix{
                fieldName: "bar",
                prefix:    "",
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", nil)

    // reverse non-existing field with non-empty value
    // bar:* AND foo:foo*
    fa = &filterAnd{
        filters: []filter{
            &filterPrefix{
                fieldName: "bar",
                prefix:    "",
            },
            &filterPrefix{
                fieldName: "foo",
                prefix:    "foo",
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", nil)

    // https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6554
    // foo:"a foo"* AND (foo:="a foobar" OR boo:bbbbbbb)
    fa = &filterAnd{
        filters: []filter{
            &filterPrefix{
                fieldName: "foo",
                prefix:    "a foo",
            },
            &filterOr{
                filters: []filter{
                    &filterExact{
                        fieldName: "foo",
                        value:     "a foobar",
                    },
                    &filterExact{
                        fieldName: "boo",
                        value:     "bbbbbbb",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", []int{1})

    // foo:"a foo"* AND (foo:"abcd foobar" OR foo:foobar)
    fa = &filterAnd{
        filters: []filter{
            &filterPrefix{
                fieldName: "foo",
                prefix:    "a foo",
            },
            &filterOr{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "abcd foobar",
                    },
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "foobar",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", []int{1, 6})

    // (foo:foo* OR bar:baz) AND (bar:x OR foo:a)
    fa = &filterAnd{
        filters: []filter{
            &filterOr{
                filters: []filter{
                    &filterPrefix{
                        fieldName: "foo",
                        prefix:    "foo",
                    },
                    &filterPhrase{
                        fieldName: "bar",
                        phrase:    "baz",
                    },
                },
            },
            &filterOr{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "bar",
                        phrase:    "x",
                    },
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "a",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", []int{0, 1, 3, 4, 6})

    // (foo:foo* OR bar:baz) AND (bar:x OR foo:xyz)
    fa = &filterAnd{
        filters: []filter{
            &filterOr{
                filters: []filter{
                    &filterPrefix{
                        fieldName: "foo",
                        prefix:    "foo",
                    },
                    &filterPhrase{
                        fieldName: "bar",
                        phrase:    "baz",
                    },
                },
            },
            &filterOr{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "bar",
                        phrase:    "x",
                    },
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "xyz",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", nil)

    // (foo:foo* OR bar:baz) AND (bar:* OR foo:xyz)
    fa = &filterAnd{
        filters: []filter{
            &filterOr{
                filters: []filter{
                    &filterPrefix{
                        fieldName: "foo",
                        prefix:    "foo",
                    },
                    &filterPhrase{
                        fieldName: "bar",
                        phrase:    "baz",
                    },
                },
            },
            &filterOr{
                filters: []filter{
                    &filterPrefix{
                        fieldName: "bar",
                        prefix:    "",
                    },
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "xyz",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", nil)

    // (foo:foo* OR bar:baz) AND (bar:"" OR foo:xyz)
    fa = &filterAnd{
        filters: []filter{
            &filterOr{
                filters: []filter{
                    &filterPrefix{
                        fieldName: "foo",
                        prefix:    "foo",
                    },
                    &filterPhrase{
                        fieldName: "bar",
                        phrase:    "baz",
                    },
                },
            },
            &filterOr{
                filters: []filter{
                    &filterExact{
                        fieldName: "bar",
                        value:     "",
                    },
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "xyz",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fa, "foo", []int{0, 1, 3, 4, 6})
}

@@ -171,13 +171,28 @@ func (fo *filterOr) initByFieldTokens() {

    var byFieldTokens []fieldTokens
    for _, fieldName := range fieldNames {
        commonTokens := getCommonTokens(m[fieldName])
        if len(commonTokens) > 0 {
            byFieldTokens = append(byFieldTokens, fieldTokens{
                field:  fieldName,
                tokens: commonTokens,
            })
        tokenss := m[fieldName]
        if len(tokenss) != len(fo.filters) {
            // The filter for the given fieldName is missing in some OR filters,
            // so it is impossible to extract common tokens from these filters.
            // Give up extracting common tokens from the remaining filters,
            // since they may not cover log entries matching fieldName filters.
            // This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6554
            byFieldTokens = nil
            break
        }
        commonTokens := getCommonTokens(tokenss)
        if len(commonTokens) == 0 {
            // Give up extracting common tokens from the remaining filters,
            // since they may not cover log entries matching fieldName filters.
            // This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6554
            byFieldTokens = nil
            break
        }
        byFieldTokens = append(byFieldTokens, fieldTokens{
            field:  fieldName,
            tokens: commonTokens,
        })
    }

    fo.byFieldTokens = byFieldTokens

@@ -26,6 +26,7 @@ func TestFilterOr(t *testing.T) {
    }

    // non-empty union
    // foo:23 OR foo:abc
    fo := &filterOr{
        filters: []filter{
            &filterPhrase{
@@ -41,6 +42,7 @@ func TestFilterOr(t *testing.T) {
    testFilterMatchForColumns(t, columns, fo, "foo", []int{2, 6, 9})

    // reverse non-empty union
    // foo:abc OR foo:23
    fo = &filterOr{
        filters: []filter{
            &filterPrefix{
@@ -56,6 +58,7 @@ func TestFilterOr(t *testing.T) {
    testFilterMatchForColumns(t, columns, fo, "foo", []int{2, 6, 9})

    // first empty result, second non-empty result
    // foo:xabc* OR foo:23
    fo = &filterOr{
        filters: []filter{
            &filterPrefix{
@@ -71,6 +74,7 @@ func TestFilterOr(t *testing.T) {
    testFilterMatchForColumns(t, columns, fo, "foo", []int{9})

    // first non-empty result, second empty result
    // foo:23 OR foo:xabc*
    fo = &filterOr{
        filters: []filter{
            &filterPhrase{
@@ -86,6 +90,7 @@ func TestFilterOr(t *testing.T) {
    testFilterMatchForColumns(t, columns, fo, "foo", []int{9})

    // first match all
    // foo:a OR foo:23
    fo = &filterOr{
        filters: []filter{
            &filterPhrase{
@@ -101,6 +106,7 @@ func TestFilterOr(t *testing.T) {
    testFilterMatchForColumns(t, columns, fo, "foo", []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})

    // second match all
    // foo:23 OR foo:a
    fo = &filterOr{
        filters: []filter{
            &filterPrefix{
@@ -116,6 +122,7 @@ func TestFilterOr(t *testing.T) {
    testFilterMatchForColumns(t, columns, fo, "foo", []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})

    // both empty results
    // foo:x23 OR foo:xabc
    fo = &filterOr{
        filters: []filter{
            &filterPhrase{
@@ -129,4 +136,191 @@ func TestFilterOr(t *testing.T) {
        },
    }
    testFilterMatchForColumns(t, columns, fo, "foo", nil)

    // non-existing column (last)
    // foo:23 OR bar:xabc*
    fo = &filterOr{
        filters: []filter{
            &filterPhrase{
                fieldName: "foo",
                phrase:    "23",
            },
            &filterPrefix{
                fieldName: "bar",
                prefix:    "xabc",
            },
        },
    }
    testFilterMatchForColumns(t, columns, fo, "foo", []int{9})

    // non-existing column (first)
    // bar:xabc* OR foo:23
    fo = &filterOr{
        filters: []filter{
            &filterPhrase{
                fieldName: "foo",
                phrase:    "23",
            },
            &filterPrefix{
                fieldName: "bar",
                prefix:    "xabc",
            },
        },
    }
    testFilterMatchForColumns(t, columns, fo, "foo", []int{9})

    // (foo:23 AND bar:"") OR (foo:foo AND bar:*)
    fo = &filterOr{
        filters: []filter{
            &filterAnd{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "23",
                    },
                    &filterExact{
                        fieldName: "bar",
                        value:     "",
                    },
                },
            },
            &filterAnd{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "foo",
                    },
                    &filterPrefix{
                        fieldName: "bar",
                        prefix:    "",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fo, "foo", []int{9})

    // (foo:23 AND bar:"") OR (foo:foo AND bar:"")
    fo = &filterOr{
        filters: []filter{
            &filterAnd{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "23",
                    },
                    &filterExact{
                        fieldName: "bar",
                        value:     "",
                    },
                },
            },
            &filterAnd{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "foo",
                    },
                    &filterExact{
                        fieldName: "bar",
                        value:     "",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fo, "foo", []int{0, 9})

    // (foo:23 AND bar:"") OR (foo:foo AND baz:"")
    fo = &filterOr{
        filters: []filter{
            &filterAnd{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "23",
                    },
                    &filterExact{
                        fieldName: "bar",
                        value:     "",
                    },
                },
            },
            &filterAnd{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "foo",
                    },
                    &filterExact{
                        fieldName: "baz",
                        value:     "",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fo, "foo", []int{0, 9})

    // (foo:23 AND bar:abc) OR (foo:foo AND bar:"")
    fo = &filterOr{
        filters: []filter{
            &filterAnd{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "23",
                    },
                    &filterPhrase{
                        fieldName: "bar",
                        phrase:    "abc",
                    },
                },
            },
            &filterAnd{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "foo",
                    },
                    &filterExact{
                        fieldName: "bar",
                        value:     "",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fo, "foo", []int{0})

    // (foo:23 AND bar:abc) OR (foo:foo AND bar:*)
    fo = &filterOr{
        filters: []filter{
            &filterAnd{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "23",
                    },
                    &filterPhrase{
                        fieldName: "bar",
                        phrase:    "abc",
                    },
                },
            },
            &filterAnd{
                filters: []filter{
                    &filterPhrase{
                        fieldName: "foo",
                        phrase:    "foo",
                    },
                    &filterPrefix{
                        fieldName: "bar",
                        prefix:    "",
                    },
                },
            },
        },
    }
    testFilterMatchForColumns(t, columns, fo, "foo", nil)
}

@@ -3,6 +3,7 @@ package promscrape

import (
    "context"
    "encoding/base64"
    "errors"
    "fmt"
    "io"
    "net"

@@ -133,18 +134,29 @@ func TestClientProxyReadOk(t *testing.T) {
    defer ps.Close()

    c, err := newClient(ctx, &ScrapeWork{
        ScrapeURL:       backend.URL,
        ProxyURL:        proxy.MustNewURL(ps.URL),
        ScrapeTimeout:   2 * time.Second,
        AuthConfig:      newTestAuthConfig(t, isBackendTLS, backendAuth),
        ProxyAuthConfig: newTestAuthConfig(t, isProxyTLS, proxyAuth),
        MaxScrapeSize:   16000,
        ScrapeURL: backend.URL,
        ProxyURL:  proxy.MustNewURL(ps.URL),
        // bump timeout for slow CIs
        ScrapeTimeout: 5 * time.Second,
        // force connection re-creating to avoid broken conns in slow CIs
        DisableKeepAlive: true,
        AuthConfig:       newTestAuthConfig(t, isBackendTLS, backendAuth),
        ProxyAuthConfig:  newTestAuthConfig(t, isProxyTLS, proxyAuth),
        MaxScrapeSize:    16000,
    })
    if err != nil {
        t.Fatalf("failed to create client: %s", err)
    }

    var bb bytesutil.ByteBuffer
    if err := c.ReadData(&bb); err != nil {
    err = c.ReadData(&bb)
    if errors.Is(err, io.EOF) {
        bb.Reset()
        // EOF could occur in slow envs, like CI
        err = c.ReadData(&bb)
    }

    if err != nil {
        t.Fatalf("unexpected error at ReadData: %s", err)
    }
    got, err := io.ReadAll(bb.NewReader())

@@ -3,12 +3,13 @@ package firehose

import (
    "bytes"
    "fmt"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
    "strings"
    "sync/atomic"
    "testing"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
)

func TestProcessRequestBody(t *testing.T) {

Some files were not shown because too many files have changed in this diff.