Merge tag 'v1.110.0' into pmm-6401-read-prometheus-data-files-cpc

Author: f41gh7
Date: 2025-01-27 11:52:51 +01:00
Commit: 7492f18591 (GPG key ID: 4558311CF775EC72; no known key found for this signature in database)
243 changed files with 3905 additions and 1897 deletions

View file

@ -688,13 +688,13 @@ func ProcessStatsQueryRangeRequest(ctx context.Context, w http.ResponseWriter, r
m := make(map[string]*statsSeries)
var mLock sync.Mutex
timestamp := q.GetTimestamp()
writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
clonedColumnNames := make([]string, len(columns))
for i, c := range columns {
clonedColumnNames[i] = strings.Clone(c.Name)
}
for i := range timestamps {
timestamp := q.GetTimestamp()
labels := make([]logstorage.Field, 0, len(byFields))
for j, c := range columns {
if c.Name == "_time" {

View file

@ -28,7 +28,7 @@ func TestParseExtraFilters_Success(t *testing.T) {
// LogsQL filter
f(`foobar`, `foobar`)
f(`foo:bar`, `foo:bar`)
f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `(foo:bar or foo:baz) error _time:5m {foo="bar",baz="z"}`)
f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `{foo="bar",baz="z"} (foo:bar or foo:baz) error _time:5m`)
}
func TestParseExtraFilters_Failure(t *testing.T) {
@ -77,7 +77,7 @@ func TestParseExtraStreamFilters_Success(t *testing.T) {
// LogsQL filter
f(`foobar`, `foobar`)
f(`foo:bar`, `foo:bar`)
f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `(foo:bar or foo:baz) error _time:5m {foo="bar",baz="z"}`)
f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `{foo="bar",baz="z"} (foo:bar or foo:baz) error _time:5m`)
}
func TestParseExtraStreamFilters_Failure(t *testing.T) {

View file

@ -1,12 +1,12 @@
{
"files": {
"main.css": "./static/css/main.4aacd559.css",
"main.js": "./static/js/main.5ce54a05.js",
"main.css": "./static/css/main.3134e778.css",
"main.js": "./static/js/main.82cd6930.js",
"static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
"index.html": "./index.html"
},
"entrypoints": [
"static/css/main.4aacd559.css",
"static/js/main.5ce54a05.js"
"static/css/main.3134e778.css",
"static/js/main.82cd6930.js"
]
}

View file

@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.5ce54a05.js"></script><link href="./static/css/main.4aacd559.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.82cd6930.js"></script><link href="./static/css/main.3134e778.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -222,8 +222,7 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
isDefault = true
}
rtb := getReadTrackingBody(r.Body, maxRequestBodySizeToRetry.IntN())
defer putReadTrackingBody(rtb)
rtb := newReadTrackingBody(r.Body, maxRequestBodySizeToRetry.IntN())
r.Body = rtb
maxAttempts := up.getBackendsCount()
@ -559,22 +558,11 @@ type readTrackingBody struct {
bufComplete bool
}
func (rtb *readTrackingBody) reset() {
rtb.maxBodySize = 0
rtb.r = nil
rtb.buf = rtb.buf[:0]
rtb.readBuf = nil
rtb.cannotRetry = false
rtb.bufComplete = false
}
func getReadTrackingBody(r io.ReadCloser, maxBodySize int) *readTrackingBody {
v := readTrackingBodyPool.Get()
if v == nil {
v = &readTrackingBody{}
}
rtb := v.(*readTrackingBody)
func newReadTrackingBody(r io.ReadCloser, maxBodySize int) *readTrackingBody {
// do not use sync.Pool there
// since http.RoundTrip may still use request body after return
// See this issue for details https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051
rtb := &readTrackingBody{}
if maxBodySize < 0 {
maxBodySize = 0
}
@ -597,13 +585,6 @@ func (r *zeroReader) Close() error {
return nil
}
func putReadTrackingBody(rtb *readTrackingBody) {
rtb.reset()
readTrackingBodyPool.Put(rtb)
}
var readTrackingBodyPool sync.Pool
// Read implements io.Reader interface.
func (rtb *readTrackingBody) Read(p []byte) (int, error) {
if len(rtb.readBuf) > 0 {
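
The comment in the new constructor above is the crux of this change: the request body wrapper can no longer be pooled, because http.Transport may keep reading the body after RoundTrip returns. A minimal sketch of the new lifecycle (names mirror the functions in this diff; backend selection and error handling are elided):

// Sketch only: rtb is allocated per request and left to the garbage collector.
// Returning it to a sync.Pool would be unsafe, because the transport may still
// read from r.Body after RoundTrip has returned (see issue #8051).
rtb := newReadTrackingBody(r.Body, maxRequestBodySizeToRetry.IntN())
r.Body = rtb
for attempt := 0; attempt < maxAttempts; attempt++ {
    if attempt > 0 && !rtb.canRetry() {
        // the body was read past the buffered prefix and cannot be replayed
        break
    }
    // forward the request to the next backend; stop on success
}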

View file

@ -195,7 +195,7 @@ unauthorized_user:
}
responseExpected = `
statusCode=401
The provided authKey doesn't match -reloadAuthKey`
Expected to receive non-empty authKey when -reloadAuthKey is set`
f(cfgStr, requestURL, backendHandler, responseExpected)
if err := reloadAuthKey.Set(origAuthKey); err != nil {
t.Fatalf("unexpected error: %s", err)
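
The new 401 message above is returned when the reload endpoint is called without a key while -reloadAuthKey is set. An illustrative client-side sketch (the base URL is a placeholder; the key is passed via the authKey query arg):

package main

import (
    "fmt"
    "net/http"
    "net/url"
)

// reloadVMAuthConfig asks vmauth to re-read its -auth.config.
// Illustrative sketch: the key configured via -reloadAuthKey is supplied as
// the authKey query arg; omitting it yields the 401 shown above.
func reloadVMAuthConfig(baseURL, authKey string) error {
    u := fmt.Sprintf("%s/-/reload?authKey=%s", baseURL, url.QueryEscape(authKey))
    resp, err := http.Get(u)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("unexpected status: %s", resp.Status)
    }
    return nil
}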
@ -545,8 +545,7 @@ func TestReadTrackingBody_RetrySuccess(t *testing.T) {
f := func(s string, maxBodySize int) {
t.Helper()
rtb := getReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
defer putReadTrackingBody(rtb)
rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true before reading anything")
@ -581,8 +580,7 @@ func TestReadTrackingBody_RetrySuccessPartialRead(t *testing.T) {
t.Helper()
// Check the case with partial read
rtb := getReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
defer putReadTrackingBody(rtb)
rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
for i := 0; i < len(s); i++ {
buf := make([]byte, i)
@ -631,8 +629,7 @@ func TestReadTrackingBody_RetryFailureTooBigBody(t *testing.T) {
f := func(s string, maxBodySize int) {
t.Helper()
rtb := getReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
defer putReadTrackingBody(rtb)
rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true before reading anything")
@ -681,8 +678,7 @@ func TestReadTrackingBody_RetryFailureZeroOrNegativeMaxBodySize(t *testing.T) {
f := func(s string, maxBodySize int) {
t.Helper()
rtb := getReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
defer putReadTrackingBody(rtb)
rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true before reading anything")

View file

@ -29,13 +29,13 @@ import (
)
var (
deleteAuthKey = flagutil.NewPassword("deleteAuthKey", "authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries. It overrides -httpAuth.*")
deleteAuthKey = flagutil.NewPassword("deleteAuthKey", "authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries. It could be passed via authKey query arg. It overrides -httpAuth.*")
maxConcurrentRequests = flag.Int("search.maxConcurrentRequests", getDefaultMaxConcurrentRequests(), "The maximum number of concurrent search requests. "+
"It shouldn't be high, since a single request can saturate all the CPU cores, while many concurrently executed requests may require high amounts of memory. "+
"See also -search.maxQueueDuration and -search.maxMemoryPerQuery")
maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the request waits for execution when -search.maxConcurrentRequests "+
"limit is reached; see also -search.maxQueryDuration")
resetCacheAuthKey = flagutil.NewPassword("search.resetCacheAuthKey", "Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call. It overrides -httpAuth.*")
resetCacheAuthKey = flagutil.NewPassword("search.resetCacheAuthKey", "Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call. It could be passed via authKey query arg. It overrides -httpAuth.*")
logSlowQueryDuration = flag.Duration("search.logSlowQueryDuration", 5*time.Second, "Log queries with execution time exceeding this value. Zero disables slow query logging. "+
"See also -search.logQueryMemoryUsage")
vmalertProxyURL = flag.String("vmalert.proxyURL", "", "Optional URL for proxying requests to vmalert. For example, if -vmalert.proxyURL=http://vmalert:8880 , then alerting API requests such as /api/v1/rules from Grafana will be proxied to http://vmalert:8880/api/v1/rules")

View file

@ -481,6 +481,8 @@ func DeleteHandler(startTime time.Time, r *http.Request) error {
if err != nil {
return err
}
cp.deadline = searchutils.GetDeadlineForDelete(r, startTime)
if !cp.IsDefaultTimeRange() {
return fmt.Errorf("start=%d and end=%d args aren't supported. Remove these args from the query in order to delete all the matching metrics", cp.start, cp.end)
}

View file

@ -374,8 +374,8 @@ func getRollupConfigs(funcName string, rf rollupFunc, expr metricsql.Expr, start
preFunc := func(_ []float64, _ []int64) {}
funcName = strings.ToLower(funcName)
if rollupFuncsRemoveCounterResets[funcName] {
preFunc = func(values []float64, _ []int64) {
removeCounterResets(values)
preFunc = func(values []float64, timestamps []int64) {
removeCounterResets(values, timestamps, lookbackDelta)
}
}
samplesScannedPerCall := rollupFuncsSamplesScannedPerCall[funcName]
@ -486,8 +486,8 @@ func getRollupConfigs(funcName string, rf rollupFunc, expr metricsql.Expr, start
for _, aggrFuncName := range aggrFuncNames {
if rollupFuncsRemoveCounterResets[aggrFuncName] {
// There is no need to save the previous preFunc, since it is either empty or the same.
preFunc = func(values []float64, _ []int64) {
removeCounterResets(values)
preFunc = func(values []float64, timestamps []int64) {
removeCounterResets(values, timestamps, lookbackDelta)
}
}
rf := rollupAggrFuncs[aggrFuncName]
@ -900,7 +900,7 @@ func getMaxPrevInterval(scrapeInterval int64) int64 {
return scrapeInterval + scrapeInterval/8
}
func removeCounterResets(values []float64) {
func removeCounterResets(values []float64, timestamps []int64, maxStalenessInterval int64) {
// There is no need in handling NaNs here, since they are impossible
// on values from vmstorage.
if len(values) == 0 {
@ -919,6 +919,16 @@ func removeCounterResets(values []float64) {
correction += prevValue
}
}
if i > 0 && maxStalenessInterval > 0 {
gap := timestamps[i] - timestamps[i-1]
if gap > maxStalenessInterval {
// reset correction if gap between samples exceeds staleness interval
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8072
correction = 0
prevValue = v
continue
}
}
prevValue = v
values[i] = v + correction
// Check again, there could be precision error in float operations,
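
The hunk above shows only part of the function. The following self-contained sketch captures the same idea: counter resets are compensated by an accumulated correction, and the new branch drops that correction whenever the gap between adjacent samples exceeds the staleness interval. The partial-reset heuristic of the real implementation is omitted for brevity.

package main

// removeCounterResetsSketch compensates counter resets in place, but treats a
// gap larger than maxStalenessInterval as a series restart and discards the
// accumulated correction (simplified; the real code also detects partial resets).
func removeCounterResetsSketch(values []float64, timestamps []int64, maxStalenessInterval int64) {
    if len(values) == 0 {
        return
    }
    var correction float64
    prevValue := values[0]
    for i, v := range values {
        if v < prevValue {
            // counter reset: carry the previous raw value forward
            correction += prevValue
        }
        if i > 0 && maxStalenessInterval > 0 && timestamps[i]-timestamps[i-1] > maxStalenessInterval {
            // stale gap: start over from the raw value
            correction = 0
            prevValue = v
            continue
        }
        prevValue = v
        values[i] = v + correction
    }
}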

View file

@ -117,31 +117,49 @@ func TestRollupIderivDuplicateTimestamps(t *testing.T) {
}
func TestRemoveCounterResets(t *testing.T) {
removeCounterResets(nil)
removeCounterResets(nil, nil, 0)
values := append([]float64{}, testValues...)
removeCounterResets(values)
timestamps := append([]int64{}, testTimestamps...)
removeCounterResets(values, timestamps, 0)
valuesExpected := []float64{123, 157, 167, 188, 221, 255, 320, 332, 364, 396, 398, 398}
testRowsEqual(t, values, testTimestamps, valuesExpected, testTimestamps)
// removeCounterResets doesn't expect negative values, so it doesn't work properly with them.
values = []float64{-100, -200, -300, -400}
removeCounterResets(values)
valuesExpected = []float64{-100, -100, -100, -100}
timestampsExpected := []int64{0, 1, 2, 3}
removeCounterResets(values, timestampsExpected, 0)
valuesExpected = []float64{-100, -100, -100, -100}
testRowsEqual(t, values, timestampsExpected, valuesExpected, timestampsExpected)
// verify how partial counter reset is handled.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2787
values = []float64{100, 95, 120, 119, 139, 50}
removeCounterResets(values)
valuesExpected = []float64{100, 100, 125, 125, 145, 195}
timestampsExpected = []int64{0, 1, 2, 3, 4, 5}
removeCounterResets(values, timestampsExpected, 0)
valuesExpected = []float64{100, 100, 125, 125, 145, 195}
testRowsEqual(t, values, timestampsExpected, valuesExpected, timestampsExpected)
// verify that staleness interval is respected during resets
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8072
values = []float64{10, 12, 14, 4, 6, 8, 6, 8, 4, 6}
timestamps = []int64{10, 20, 30, 60, 70, 80, 90, 100, 120, 130}
valuesExpected = []float64{10, 12, 14, 4, 6, 8, 14, 16, 4, 6}
removeCounterResets(values, timestamps, 10)
testRowsEqual(t, values, timestamps, valuesExpected, timestamps)
// verify that staleness is respected if there was no counter reset
// but correction was made previously
values = []float64{10, 12, 2, 4}
timestamps = []int64{10, 20, 30, 60}
valuesExpected = []float64{10, 12, 14, 4}
removeCounterResets(values, timestamps, 10)
testRowsEqual(t, values, timestamps, valuesExpected, timestamps)
// verify results always increase monotonically with possible float operations precision error
values = []float64{34.094223, 2.7518, 2.140669, 0.044878, 1.887095, 2.546569, 2.490149, 0.045, 0.035684, 0.062454, 0.058296}
removeCounterResets(values)
timestampsExpected = []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
removeCounterResets(values, timestampsExpected, 0)
var prev float64
for i, v := range values {
if v < prev {
@ -166,7 +184,7 @@ func TestDeltaValues(t *testing.T) {
// remove counter resets
values = append([]float64{}, testValues...)
removeCounterResets(values)
removeCounterResets(values, testTimestamps, 0)
deltaValues(values)
valuesExpected = []float64{34, 10, 21, 33, 34, 65, 12, 32, 32, 2, 0, 0}
testRowsEqual(t, values, testTimestamps, valuesExpected, testTimestamps)
@ -188,7 +206,7 @@ func TestDerivValues(t *testing.T) {
// remove counter resets
values = append([]float64{}, testValues...)
removeCounterResets(values)
removeCounterResets(values, testTimestamps, 0)
derivValues(values, testTimestamps)
valuesExpected = []float64{3400, 1111.111111111111, 1750, 2538.4615384615386, 3090.909090909091, 3611.1111111111113,
6000, 1882.3529411764705, 1777.7777777777778, 400, 0, 0}
@ -219,7 +237,7 @@ func testRollupFunc(t *testing.T, funcName string, args []any, vExpected float64
rfa.timestamps = append(rfa.timestamps, testTimestamps...)
rfa.window = rfa.timestamps[len(rfa.timestamps)-1] - rfa.timestamps[0]
if rollupFuncsRemoveCounterResets[funcName] {
removeCounterResets(rfa.values)
removeCounterResets(rfa.values, rfa.timestamps, 0)
}
for i := 0; i < 5; i++ {
v := rf(&rfa)
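
Tracing the first staleness case in TestRemoveCounterResets above makes the new behavior concrete: with maxStalenessInterval=10, the 30-unit gap between timestamps 30 and 60 discards the correction accumulated for the drop 14→4 and keeps the raw 4; the later drop 8→6 between timestamps 80 and 90 lies within the interval, so the following samples are shifted up by 8 (6→14, 8→16); the 20-unit gap between 100 and 120 clears the correction again, leaving the trailing 4 and 6 untouched. That yields exactly the expected slice {10, 12, 14, 4, 6, 8, 14, 16, 4, 6}.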

View file

@ -15,6 +15,7 @@ import (
var (
maxExportDuration = flag.Duration("search.maxExportDuration", time.Hour*24*30, "The maximum duration for /api/v1/export call")
maxDeleteDuration = flag.Duration("search.maxDeleteDuration", time.Minute*5, "The maximum duration for /api/v1/admin/tsdb/delete_series call")
maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution. It can be overridden to a smaller value on a per-query basis via 'timeout' query arg")
maxStatusRequestDuration = flag.Duration("search.maxStatusRequestDuration", time.Minute*5, "The maximum duration for /api/v1/status/* requests")
maxLabelsAPIDuration = flag.Duration("search.maxLabelsAPIDuration", time.Second*5, "The maximum duration for /api/v1/labels, /api/v1/label/.../values and /api/v1/series requests. "+
@ -58,6 +59,12 @@ func GetDeadlineForLabelsAPI(r *http.Request, startTime time.Time) Deadline {
return getDeadlineWithMaxDuration(r, startTime, dMax, "-search.maxLabelsAPIDuration")
}
// GetDeadlineForDelete returns deadline for the given request to /api/v1/admin/tsdb/delete_series.
func GetDeadlineForDelete(r *http.Request, startTime time.Time) Deadline {
dMax := maxDeleteDuration.Milliseconds()
return getDeadlineWithMaxDuration(r, startTime, dMax, "-search.maxDeleteDuration")
}
func getDeadlineWithMaxDuration(r *http.Request, startTime time.Time, dMax int64, flagHint string) Deadline {
d, err := httputils.GetDuration(r, "timeout", 0)
if err != nil {
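
GetDeadlineForDelete follows the same pattern as the other GetDeadlineFor* helpers: the optional per-request timeout query arg is honored but capped by the corresponding flag. A simplified sketch of that resolution (assumed semantics of getDeadlineWithMaxDuration, with the flag-hint plumbing left out):

package main

import "time"

// resolveDeadline caps the per-request timeout by the flag-provided maximum
// (-search.maxDeleteDuration for deletes) and falls back to that maximum when
// no timeout was supplied. Simplified illustration only.
func resolveDeadline(startTime time.Time, requestTimeout, maxDuration time.Duration) time.Time {
    d := requestTimeout
    if d <= 0 || d > maxDuration {
        d = maxDuration
    }
    return startTime.Add(d)
}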

View file

@ -1,13 +1,13 @@
{
"files": {
"main.css": "./static/css/main.63479b72.css",
"main.js": "./static/js/main.256ee243.js",
"main.css": "./static/css/main.af583aad.css",
"main.js": "./static/js/main.1413b18d.js",
"static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
"static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
"index.html": "./index.html"
},
"entrypoints": [
"static/css/main.63479b72.css",
"static/js/main.256ee243.js"
"static/css/main.af583aad.css",
"static/js/main.1413b18d.js"
]
}

View file

@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore and troubleshoot your VictoriaMetrics data"/><link rel="manifest" href="./manifest.json"/><title>vmui</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:site" content="@https://victoriametrics.com/"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaMetrics"><meta property="og:url" content="https://victoriametrics.com/"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><script defer="defer" src="./static/js/main.256ee243.js"></script><link href="./static/css/main.63479b72.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore and troubleshoot your VictoriaMetrics data"/><link rel="manifest" href="./manifest.json"/><title>vmui</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:site" content="@https://victoriametrics.com/"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaMetrics"><meta property="og:url" content="https://victoriametrics.com/"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><script defer="defer" src="./static/js/main.1413b18d.js"></script><link href="./static/css/main.af583aad.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1,4 +1,4 @@
FROM golang:1.23.4 AS build-web-stage
FROM golang:1.23.5 AS build-web-stage
COPY build /build
WORKDIR /build

View file

@ -5,7 +5,6 @@ import "./style.scss";
import useStateSearchParams from "../../../../hooks/useStateSearchParams";
import { useSearchParams } from "react-router-dom";
import Button from "../../../Main/Button/Button";
import classNames from "classnames";
import { SettingsIcon, VisibilityIcon, VisibilityOffIcon } from "../../../Main/Icons";
import Tooltip from "../../../Main/Tooltip/Tooltip";
import Popper from "../../../Main/Popper/Popper";
@ -24,27 +23,20 @@ const BarHitsOptions: FC<Props> = ({ onChange }) => {
setFalse: handleCloseOptions,
} = useBoolean(false);
const [graphStyle, setGraphStyle] = useStateSearchParams(GRAPH_STYLES.LINE_STEPPED, "graph");
const [stacked, setStacked] = useStateSearchParams(false, "stacked");
const [fill, setFill] = useStateSearchParams(false, "fill");
const [fill, setFill] = useStateSearchParams("true", "fill");
const [hideChart, setHideChart] = useStateSearchParams(false, "hide_chart");
const options: GraphOptions = useMemo(() => ({
graphStyle,
graphStyle: GRAPH_STYLES.BAR,
stacked,
fill,
fill: fill === "true",
hideChart,
}), [graphStyle, stacked, fill, hideChart]);
const handleChangeGraphStyle = (val: string) => () => {
setGraphStyle(val as GRAPH_STYLES);
searchParams.set("graph", val);
setSearchParams(searchParams);
};
}), [stacked, fill, hideChart]);
const handleChangeFill = (val: boolean) => {
setFill(val);
val ? searchParams.set("fill", "true") : searchParams.delete("fill");
setFill(`${val}`);
searchParams.set("fill", `${val}`);
setSearchParams(searchParams);
};
@ -97,21 +89,6 @@ const BarHitsOptions: FC<Props> = ({ onChange }) => {
title={"Graph settings"}
>
<div className="vm-bar-hits-options-settings">
<div className="vm-bar-hits-options-settings-item vm-bar-hits-options-settings-item_list">
<p className="vm-bar-hits-options-settings-item__title">Graph style:</p>
{Object.values(GRAPH_STYLES).map(style => (
<div
key={style}
className={classNames({
"vm-list-item": true,
"vm-list-item_active": graphStyle === style,
})}
onClick={handleChangeGraphStyle(style)}
>
{style}
</div>
))}
</div>
<div className="vm-bar-hits-options-settings-item">
<Switch
label={"Stacked"}
@ -122,7 +99,7 @@ const BarHitsOptions: FC<Props> = ({ onChange }) => {
<div className="vm-bar-hits-options-settings-item">
<Switch
label={"Fill"}
value={fill}
value={fill === "true"}
onChange={handleChangeFill}
/>
</div>

View file

@ -11,12 +11,12 @@
&-settings {
display: grid;
align-items: flex-start;
gap: $padding-global;
min-width: 200px;
gap: $padding-global;
padding-bottom: $padding-global;
&-item {
border-bottom: $border-divider;
padding: 0 $padding-global $padding-global;
padding: 0 $padding-global;
&_list {
padding: 0;

View file

@ -124,7 +124,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
};
useEffect(() => {
setOpenAutocomplete(!!AutocompleteEl);
setOpenAutocomplete(!!AutocompleteEl && autocompleteQuick);
}, [autocompleteQuick]);
useEffect(() => {

View file

@ -0,0 +1,246 @@
import React, { FC, useMemo, useState } from "preact/compat";
import useBoolean from "../../../hooks/useBoolean";
import { RestartIcon, SettingsIcon } from "../../Main/Icons";
import Button from "../../Main/Button/Button";
import Modal from "../../Main/Modal/Modal";
import Tooltip from "../../Main/Tooltip/Tooltip";
import { Logs } from "../../../api/types";
import Select from "../../Main/Select/Select";
import { useSearchParams } from "react-router-dom";
import "./style.scss";
import Switch from "../../Main/Switch/Switch";
import TextField from "../../Main/TextField/TextField";
import dayjs from "dayjs";
import Hyperlink from "../../Main/Hyperlink/Hyperlink";
import {
LOGS_DISPLAY_FIELDS,
LOGS_GROUP_BY,
LOGS_DATE_FORMAT,
LOGS_URL_PARAMS,
WITHOUT_GROUPING
} from "../../../constants/logs";
const {
GROUP_BY,
NO_WRAP_LINES,
COMPACT_GROUP_HEADER,
DISPLAY_FIELDS,
DATE_FORMAT
} = LOGS_URL_PARAMS;
const title = "Group view settings";
interface Props {
logs: Logs[];
}
const GroupLogsConfigurators: FC<Props> = ({ logs }) => {
const [searchParams, setSearchParams] = useSearchParams();
const groupBy = searchParams.get(GROUP_BY) || LOGS_GROUP_BY;
const noWrapLines = searchParams.get(NO_WRAP_LINES) === "true";
const compactGroupHeader = searchParams.get(COMPACT_GROUP_HEADER) === "true";
const displayFieldsString = searchParams.get(DISPLAY_FIELDS) || "";
const displayFields = displayFieldsString ? displayFieldsString.split(",") : [];
const [dateFormat, setDateFormat] = useState(searchParams.get(DATE_FORMAT) || LOGS_DATE_FORMAT);
const [errorFormat, setErrorFormat] = useState("");
const isGroupChanged = groupBy !== LOGS_GROUP_BY;
const isDisplayFieldsChanged = displayFields.length > 0;
const isTimeChanged = searchParams.get(DATE_FORMAT) !== LOGS_DATE_FORMAT;
const hasChanges = [
isGroupChanged,
isDisplayFieldsChanged,
noWrapLines,
compactGroupHeader,
isTimeChanged
].some(Boolean);
const logsKeys = useMemo(() => {
const excludeKeys = ["_msg", "_time"];
const uniqKeys = Array.from(new Set(logs.map(l => Object.keys(l)).flat()));
return uniqKeys.filter(k => !excludeKeys.includes(k));
}, [logs]);
const {
value: openModal,
toggle: toggleOpen,
setFalse: handleClose,
} = useBoolean(false);
const handleSelectGroupBy = (key: string) => {
searchParams.set(GROUP_BY, key);
setSearchParams(searchParams);
};
const handleSelectDisplayField = (value: string) => {
const prev = displayFields;
const newDisplayFields = prev.includes(value) ? prev.filter(v => v !== value) : [...prev, value];
searchParams.set(DISPLAY_FIELDS, newDisplayFields.join(","));
setSearchParams(searchParams);
};
const handleResetDisplayFields = () => {
searchParams.delete(DISPLAY_FIELDS);
setSearchParams(searchParams);
};
const toggleWrapLines = () => {
searchParams.set(NO_WRAP_LINES, String(!noWrapLines));
setSearchParams(searchParams);
};
const toggleCompactGroupHeader = () => {
searchParams.set(COMPACT_GROUP_HEADER, String(!compactGroupHeader));
setSearchParams(searchParams);
};
const handleChangeDateFormat = (format: string) => {
const date = new Date();
if (!dayjs(date, format, true).isValid()) {
setErrorFormat("Invalid date format");
}
setDateFormat(format);
};
const handleSaveAndClose = () => {
searchParams.set(DATE_FORMAT, dateFormat);
setSearchParams(searchParams);
handleClose();
};
const tooltipContent = () => {
if (!hasChanges) return title;
return (
<div className="vm-group-logs-configurator__tooltip">
<p>{title}</p>
<hr/>
<ul>
{isGroupChanged && <li>Group by <code>{`"${groupBy}"`}</code></li>}
{isDisplayFieldsChanged && <li>Display fields: {displayFields.length || 1}</li>}
{noWrapLines && <li>Single-line text is enabled</li>}
{compactGroupHeader && <li>Compact group header is enabled</li>}
{isTimeChanged && <li>Date format: <code>{dateFormat}</code></li>}
</ul>
</div>
);
};
return (
<>
<div className="vm-group-logs-configurator-button">
<Tooltip title={tooltipContent()}>
<Button
variant="text"
startIcon={<SettingsIcon/>}
onClick={toggleOpen}
ariaLabel={title}
/>
</Tooltip>
{hasChanges && <span className="vm-group-logs-configurator-button__marker"/>}
</div>
{openModal && (
<Modal
title={title}
onClose={handleSaveAndClose}
>
<div className="vm-group-logs-configurator">
<div className="vm-group-logs-configurator-item">
<Select
value={groupBy}
list={[WITHOUT_GROUPING, ...logsKeys]}
label="Group by field"
placeholder="Group by field"
onChange={handleSelectGroupBy}
searchable
/>
<Tooltip title={"Reset grouping"}>
<Button
variant="text"
color="primary"
startIcon={<RestartIcon/>}
onClick={() => handleSelectGroupBy(LOGS_GROUP_BY)}
/>
</Tooltip>
<span className="vm-group-logs-configurator-item__info">
Select a field to group logs by (default: <code>{LOGS_GROUP_BY}</code>).
</span>
</div>
<div className="vm-group-logs-configurator-item">
<Select
value={displayFields}
list={logsKeys}
label="Display fields"
placeholder="Display fields"
onChange={handleSelectDisplayField}
searchable
/>
<Tooltip title={"Clear fields"}>
<Button
variant="text"
color="primary"
startIcon={<RestartIcon/>}
onClick={handleResetDisplayFields}
/>
</Tooltip>
<span className="vm-group-logs-configurator-item__info">
Select fields to display instead of the message (default: <code>{LOGS_DISPLAY_FIELDS}</code>).
</span>
</div>
<div className="vm-group-logs-configurator-item">
<TextField
autofocus
label="Date format"
value={dateFormat}
onChange={handleChangeDateFormat}
error={errorFormat}
/>
<Tooltip title={"Reset format"}>
<Button
variant="text"
color="primary"
startIcon={<RestartIcon/>}
onClick={() => setDateFormat(LOGS_DATE_FORMAT)}
/>
</Tooltip>
<span className="vm-group-logs-configurator-item__info vm-group-logs-configurator-item__info_input">
Set the date format (e.g., <code>YYYY-MM-DD HH:mm:ss</code>).
Learn more in <Hyperlink
href="https://day.js.org/docs/en/display/format"
>this documentation</Hyperlink>. <br/>
Your current date format: <code>{dayjs().format(dateFormat || LOGS_DATE_FORMAT)}</code>
</span>
</div>
<div className="vm-group-logs-configurator-item">
<Switch
value={noWrapLines}
onChange={toggleWrapLines}
label="Single-line message"
/>
<span className="vm-group-logs-configurator-item__info">
Displays message in a single line and truncates it with an ellipsis if it exceeds the available space
</span>
</div>
<div className="vm-group-logs-configurator-item">
<Switch
value={compactGroupHeader}
onChange={toggleCompactGroupHeader}
label="Compact group header"
/>
<span className="vm-group-logs-configurator-item__info">
Shows group headers in one line with a &quot;+N more&quot; badge for extra fields.
</span>
</div>
</div>
</Modal>
)}
</>
);
};
export default GroupLogsConfigurators;

View file

@ -0,0 +1,48 @@
@use "src/styles/variables" as *;
.vm-group-logs-configurator {
display: grid;
gap: calc($padding-large * 2);
padding: $padding-global 0;
width: 600px;
&-item {
display: grid;
grid-template-columns: 1fr 31px;
align-items: center;
justify-content: stretch;
gap: 0 $padding-small;
&__info {
margin-top: $padding-small;
grid-column: 1/span 2;
font-size: $font-size-small;
color: $color-text-secondary;
line-height: 130%;
&_input {
margin-top: 0;
}
}
}
&-button {
position: relative;
&__marker {
position: absolute;
top: 6px;
left: 6px;
width: 5px;
height: 5px;
border-radius: 50%;
background-color: $color-secondary;
}
}
&__tooltip {
ul {
list-style-position: inside;
}
}
}

View file

@ -30,6 +30,10 @@ const Accordion: FC<AccordionProps> = ({
onChange && onChange(isOpen);
}, [isOpen]);
useEffect(() => {
setIsOpen(defaultExpanded);
}, [defaultExpanded]);
return (
<>
<header

View file

@ -67,11 +67,11 @@ const Modal: FC<ModalProps> = ({
})}
onMouseDown={onClose}
>
<div className="vm-modal-content">
<div
className="vm-modal-content-header"
onMouseDown={handleMouseDown}
>
<div
className="vm-modal-content"
onMouseDown={handleMouseDown}
>
<div className="vm-modal-content-header">
{title && (
<div className="vm-modal-content-header__title">
{title}
@ -91,7 +91,6 @@ const Modal: FC<ModalProps> = ({
{/* tabIndex to fix Ctrl-A */}
<div
className="vm-modal-content-body"
onMouseDown={handleMouseDown}
tabIndex={0}
>
{children}

View file

@ -33,9 +33,9 @@
align-items: center;
justify-content: center;
background-color: $color-hover-black;
padding: 2px 2px 2px 6px;
padding: 2px 2px 2px $padding-small;
border-radius: $border-radius-small;
font-size: $font-size;
font-size: $font-size-small;
line-height: $font-size;
max-width: 100%;

View file

@ -11,7 +11,7 @@ import useBoolean from "../../../hooks/useBoolean";
import TextField from "../../Main/TextField/TextField";
import { KeyboardEvent, useState } from "react";
import Modal from "../../Main/Modal/Modal";
import { getFromStorage, removeFromStorage, saveToStorage } from "../../../utils/storage";
import { useSearchParams } from "react-router-dom";
const title = "Table settings";
@ -30,6 +30,8 @@ const TableSettings: FC<TableSettingsProps> = ({
onChangeColumns,
toggleTableCompact
}) => {
const [searchParams, setSearchParams] = useSearchParams();
const buttonRef = useRef<HTMLDivElement>(null);
const {
@ -38,11 +40,6 @@ const TableSettings: FC<TableSettingsProps> = ({
setFalse: handleClose,
} = useBoolean(false);
const {
value: saveColumns,
toggle: toggleSaveColumns,
} = useBoolean(Boolean(getFromStorage("TABLE_COLUMNS")));
const [searchColumn, setSearchColumn] = useState("");
const [indexFocusItem, setIndexFocusItem] = useState(-1);
@ -60,15 +57,34 @@ const TableSettings: FC<TableSettingsProps> = ({
return filteredColumns.every(col => selectedColumns.includes(col));
}, [selectedColumns, filteredColumns]);
const handleChangeDisplayColumns = (displayColumns: string[]) => {
onChangeColumns(displayColumns);
const updatedParams = new URLSearchParams(searchParams.toString());
const isAllCheck = displayColumns.length === columns.length;
if (isAllCheck) {
updatedParams.delete("columns");
} else {
updatedParams.set("columns", displayColumns.map(encodeURIComponent).join(","));
}
setSearchParams(updatedParams);
};
const handleChange = (key: string) => {
onChangeColumns(selectedColumns.includes(key) ? selectedColumns.filter(col => col !== key) : [...selectedColumns, key]);
const displayColumns = selectedColumns.includes(key)
? selectedColumns.filter(col => col !== key)
: [...selectedColumns, key];
handleChangeDisplayColumns(displayColumns);
};
const toggleAllColumns = () => {
if (isAllChecked) {
onChangeColumns(selectedColumns.filter(col => !filteredColumns.includes(col)));
handleChangeDisplayColumns(selectedColumns.filter(col => !filteredColumns.includes(col)));
} else {
onChangeColumns(filteredColumns);
handleChangeDisplayColumns(filteredColumns);
}
};
@ -95,22 +111,16 @@ const TableSettings: FC<TableSettingsProps> = ({
};
useEffect(() => {
if (arrayEquals(columns, selectedColumns) || saveColumns) return;
if (arrayEquals(columns, selectedColumns) || searchParams.has("columns")) return;
onChangeColumns(columns);
}, [columns]);
useEffect(() => {
if (!saveColumns) {
removeFromStorage(["TABLE_COLUMNS"]);
} else if (selectedColumns.length) {
saveToStorage("TABLE_COLUMNS", selectedColumns.join(","));
}
}, [saveColumns, selectedColumns]);
useEffect(() => {
const saveColumns = getFromStorage("TABLE_COLUMNS") as string;
if (!saveColumns) return;
onChangeColumns(saveColumns.split(","));
const hasColumns = searchParams.has("columns");
if (!hasColumns) return;
const columnsParam = searchParams.get("columns") || "";
const columnsArray = columnsParam.split(",").map(decodeURIComponent).filter(Boolean);
onChangeColumns(columnsArray);
}, []);
return (
@ -183,19 +193,6 @@ const TableSettings: FC<TableSettingsProps> = ({
</div>
))}
</div>
<div className="vm-table-settings-modal-preserve">
<Checkbox
checked={saveColumns}
onChange={toggleSaveColumns}
label={"Preserve column settings"}
disabled={tableCompact}
color={"primary"}
/>
<p className="vm-table-settings-modal-preserve__info">
This label indicates that when the checkbox is activated,
the current column configurations will not be reset.
</p>
</div>
</div>
</div>
<div className="vm-table-settings-modal-section">

View file

@ -3,6 +3,7 @@
.vm-table-settings {
&-modal {
.vm-modal-content-body {
min-width: clamp(300px, 600px, 90vw);
padding: 0;
}
@ -83,16 +84,5 @@
}
}
}
&-preserve {
padding: $padding-global;
&__info {
padding-top: $padding-small;
font-size: $font-size-small;
color: $color-text-secondary;
line-height: 130%;
}
}
}
}

View file

@ -1,2 +1,21 @@
import { DATE_TIME_FORMAT } from "./date";
export const LOGS_ENTRIES_LIMIT = 50;
export const LOGS_BARS_VIEW = 100;
// "Ungrouped" is a string that is used as a value for the "groupBy" parameter.
export const WITHOUT_GROUPING = "Ungrouped";
// Default values for the logs configurators.
export const LOGS_GROUP_BY = "_stream";
export const LOGS_DISPLAY_FIELDS = "_msg";
export const LOGS_DATE_FORMAT = `${DATE_TIME_FORMAT}.SSS`;
// URL parameters for the logs page.
export const LOGS_URL_PARAMS = {
GROUP_BY: "groupBy",
DISPLAY_FIELDS: "displayFields",
NO_WRAP_LINES: "noWrapLines",
COMPACT_GROUP_HEADER: "compactGroupHeader",
DATE_FORMAT: "dateFormat",
};
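
These parameters travel in the Logs page URL, so a grouped view can be shared as a link. An illustrative query string using the keys defined above (values are made up):

groupBy=host&displayFields=_msg,level&noWrapLines=true&compactGroupHeader=true&dateFormat=YYYY-MM-DD%20HH:mm:ss

Omitting groupBy falls back to _stream, and omitting dateFormat falls back to the default format with milliseconds, per the constants above.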

View file

@ -20,7 +20,7 @@ const useClickOutside = <T extends HTMLElement = HTMLElement>(
handler(event); // Call the handler only if the click is outside of the element passed.
}, [ref, handler]);
useEventListener("mousedown", listener);
useEventListener("mouseup", listener);
useEventListener("touchstart", listener);
};

View file

@ -1,24 +1,19 @@
import React, { FC, useCallback, useEffect, useMemo, useRef } from "preact/compat";
import { MouseEvent, useState } from "react";
import React, { FC, useCallback, useEffect, useMemo } from "preact/compat";
import { useState } from "react";
import "./style.scss";
import { Logs } from "../../../api/types";
import Accordion from "../../../components/Main/Accordion/Accordion";
import { groupByMultipleKeys } from "../../../utils/array";
import Tooltip from "../../../components/Main/Tooltip/Tooltip";
import useCopyToClipboard from "../../../hooks/useCopyToClipboard";
import GroupLogsItem from "./GroupLogsItem";
import { useAppState } from "../../../state/common/StateContext";
import classNames from "classnames";
import Button from "../../../components/Main/Button/Button";
import { CollapseIcon, ExpandIcon, StorageIcon } from "../../../components/Main/Icons";
import Popper from "../../../components/Main/Popper/Popper";
import TextField from "../../../components/Main/TextField/TextField";
import useBoolean from "../../../hooks/useBoolean";
import useStateSearchParams from "../../../hooks/useStateSearchParams";
import { CollapseIcon, ExpandIcon } from "../../../components/Main/Icons";
import { useSearchParams } from "react-router-dom";
import { getStreamPairs } from "../../../utils/logs";
const WITHOUT_GROUPING = "No Grouping";
import GroupLogsConfigurators
from "../../../components/LogsConfigurators/GroupLogsConfigurators/GroupLogsConfigurators";
import GroupLogsHeader from "./GroupLogsHeader";
import { LOGS_DISPLAY_FIELDS, LOGS_GROUP_BY, LOGS_URL_PARAMS, WITHOUT_GROUPING } from "../../../constants/logs";
interface Props {
logs: Logs[];
@ -26,47 +21,22 @@ interface Props {
}
const GroupLogs: FC<Props> = ({ logs, settingsRef }) => {
const { isDarkTheme } = useAppState();
const copyToClipboard = useCopyToClipboard();
const [searchParams, setSearchParams] = useSearchParams();
const [searchParams] = useSearchParams();
const [expandGroups, setExpandGroups] = useState<boolean[]>([]);
const [groupBy, setGroupBy] = useStateSearchParams("_stream", "groupBy");
const [copied, setCopied] = useState<string | null>(null);
const [searchKey, setSearchKey] = useState("");
const optionsButtonRef = useRef<HTMLDivElement>(null);
const {
value: openOptions,
toggle: toggleOpenOptions,
setFalse: handleCloseOptions,
} = useBoolean(false);
const groupBy = searchParams.get(LOGS_URL_PARAMS.GROUP_BY) || LOGS_GROUP_BY;
const displayFieldsString = searchParams.get(LOGS_URL_PARAMS.DISPLAY_FIELDS) || LOGS_DISPLAY_FIELDS;
const displayFields = displayFieldsString.split(",");
const expandAll = useMemo(() => expandGroups.every(Boolean), [expandGroups]);
const logsKeys = useMemo(() => {
const excludeKeys = ["_msg", "_time"];
const uniqKeys = Array.from(new Set(logs.map(l => Object.keys(l)).flat()));
return [WITHOUT_GROUPING, ...uniqKeys.filter(k => !excludeKeys.includes(k))];
}, [logs]);
const filteredLogsKeys = useMemo(() => {
if (!searchKey) return logsKeys;
try {
const regexp = new RegExp(searchKey, "i");
return logsKeys.filter(item => regexp.test(item))
.sort((a, b) => (a.match(regexp)?.index || 0) - (b.match(regexp)?.index || 0));
} catch (e) {
return [];
}
}, [logsKeys, searchKey]);
const groupData = useMemo(() => {
return groupByMultipleKeys(logs, [groupBy]).map((item) => {
const streamValue = item.values[0]?.[groupBy] || "";
const pairs = getStreamPairs(streamValue);
// values sorting by time
const values = item.values.sort((a,b) => new Date(b._time).getTime() - new Date(a._time).getTime());
const values = item.values.sort((a, b) => new Date(b._time).getTime() - new Date(a._time).getTime());
return {
keys: item.keys,
keysString: item.keys.join(""),
@ -76,23 +46,6 @@ const GroupLogs: FC<Props> = ({ logs, settingsRef }) => {
}).sort((a, b) => a.keysString.localeCompare(b.keysString)); // groups sorting
}, [logs, groupBy]);
const handleClickByPair = (value: string) => async (e: MouseEvent<HTMLDivElement>) => {
e.stopPropagation();
const isKeyValue = /(.+)?=(".+")/.test(value);
const copyValue = isKeyValue ? `${value.replace(/=/, ": ")}` : `${groupBy}: "${value}"`;
const isCopied = await copyToClipboard(copyValue);
if (isCopied) {
setCopied(value);
}
};
const handleSelectGroupBy = (key: string) => () => {
setGroupBy(key);
searchParams.set("groupBy", key);
setSearchParams(searchParams);
handleCloseOptions();
};
const handleToggleExpandAll = useCallback(() => {
setExpandGroups(new Array(groupData.length).fill(!expandAll));
}, [expandAll, groupData.length]);
@ -105,11 +58,6 @@ const GroupLogs: FC<Props> = ({ logs, settingsRef }) => {
});
}, []);
useEffect(() => {
if (copied === null) return;
const timeout = setTimeout(() => setCopied(null), 2000);
return () => clearTimeout(timeout);
}, [copied]);
useEffect(() => {
setExpandGroups(new Array(groupData.length).fill(true));
@ -124,38 +72,16 @@ const GroupLogs: FC<Props> = ({ logs, settingsRef }) => {
key={item.keysString}
>
<Accordion
key={String(expandGroups[i])}
defaultExpanded={expandGroups[i]}
onChange={handleChangeExpand(i)}
title={groupBy !== WITHOUT_GROUPING && (
<div className="vm-group-logs-section-keys">
<span className="vm-group-logs-section-keys__title">Group by <code>{groupBy}</code>:</span>
{item.pairs.map((pair) => (
<Tooltip
title={copied === pair ? "Copied" : "Copy to clipboard"}
key={`${item.keysString}_${pair}`}
placement={"top-center"}
>
<div
className={classNames({
"vm-group-logs-section-keys__pair": true,
"vm-group-logs-section-keys__pair_dark": isDarkTheme
})}
onClick={handleClickByPair(pair)}
>
{pair}
</div>
</Tooltip>
))}
<span className="vm-group-logs-section-keys__count">{item.values.length} entries</span>
</div>
)}
title={groupBy !== WITHOUT_GROUPING && <GroupLogsHeader group={item}/>}
>
<div className="vm-group-logs-section-rows">
{item.values.map((value) => (
<GroupLogsItem
key={`${value._msg}${value._time}`}
log={value}
displayFields={displayFields}
/>
))}
</div>
@ -175,47 +101,7 @@ const GroupLogs: FC<Props> = ({ logs, settingsRef }) => {
ariaLabel={expandAll ? "Collapse All" : "Expand All"}
/>
</Tooltip>
<Tooltip title={"Group by"}>
<div ref={optionsButtonRef}>
<Button
variant="text"
startIcon={<StorageIcon/>}
onClick={toggleOpenOptions}
ariaLabel={"Group by"}
/>
</div>
</Tooltip>
{
<Popper
open={openOptions}
placement="bottom-right"
onClose={handleCloseOptions}
buttonRef={optionsButtonRef}
>
<div className="vm-list vm-group-logs-header-keys">
<div className="vm-group-logs-header-keys__search">
<TextField
label="Search key"
value={searchKey}
onChange={setSearchKey}
type="search"
/>
</div>
{filteredLogsKeys.map(id => (
<div
className={classNames({
"vm-list-item": true,
"vm-list-item_active": id === groupBy
})}
key={id}
onClick={handleSelectGroupBy(id)}
>
{id}
</div>
))}
</div>
</Popper>
}
<GroupLogsConfigurators logs={logs}/>
</div>
), settingsRef.current)}
</>

View file

@ -1,8 +1,10 @@
import React, { FC, memo, useCallback, useEffect, useState } from "preact/compat";
import Tooltip from "../../../components/Main/Tooltip/Tooltip";
import Button from "../../../components/Main/Button/Button";
import { CopyIcon } from "../../../components/Main/Icons";
import { CopyIcon, StorageIcon, VisibilityIcon } from "../../../components/Main/Icons";
import useCopyToClipboard from "../../../hooks/useCopyToClipboard";
import { useSearchParams } from "react-router-dom";
import { LOGS_GROUP_BY, LOGS_URL_PARAMS } from "../../../constants/logs";
interface Props {
field: string;
@ -11,8 +13,17 @@ interface Props {
const GroupLogsFieldRow: FC<Props> = ({ field, value }) => {
const copyToClipboard = useCopyToClipboard();
const [searchParams, setSearchParams] = useSearchParams();
const [copied, setCopied] = useState<boolean>(false);
const groupBy = searchParams.get(LOGS_URL_PARAMS.GROUP_BY) || LOGS_GROUP_BY;
const displayFieldsString = searchParams.get(LOGS_URL_PARAMS.DISPLAY_FIELDS) || "";
const displayFields = displayFieldsString ? displayFieldsString.split(",") : [];
const isSelectedField = displayFields.includes(field);
const isGroupByField = groupBy === field;
const handleCopy = useCallback(async () => {
if (copied) return;
try {
@ -23,6 +34,18 @@ const GroupLogsFieldRow: FC<Props> = ({ field, value }) => {
}
}, [copied, copyToClipboard]);
const handleSelectDisplayField = () => {
const prev = displayFields;
const newDisplayFields = prev.includes(field) ? prev.filter(v => v !== field) : [...prev, field];
searchParams.set(LOGS_URL_PARAMS.DISPLAY_FIELDS, newDisplayFields.join(","));
setSearchParams(searchParams);
};
const handleSelectGroupBy = () => {
isGroupByField ? searchParams.delete(LOGS_URL_PARAMS.GROUP_BY) : searchParams.set(LOGS_URL_PARAMS.GROUP_BY, field);
setSearchParams(searchParams);
};
useEffect(() => {
if (copied === null) return;
const timeout = setTimeout(() => setCopied(false), 2000);
@ -35,6 +58,7 @@ const GroupLogsFieldRow: FC<Props> = ({ field, value }) => {
<div className="vm-group-logs-row-fields-item-controls__wrapper">
<Tooltip title={copied ? "Copied" : "Copy to clipboard"}>
<Button
className="vm-group-logs-row-fields-item-controls__button"
variant="text"
color="gray"
size="small"
@ -43,6 +67,34 @@ const GroupLogsFieldRow: FC<Props> = ({ field, value }) => {
ariaLabel="copy to clipboard"
/>
</Tooltip>
<Tooltip
key={`${field}_${isSelectedField}_${isGroupByField}`}
title={isSelectedField ? "Hide this field" : "Show this field instead of the message"}
>
<Button
className="vm-group-logs-row-fields-item-controls__button"
variant="text"
color={isSelectedField ? "secondary" : "gray"}
size="small"
startIcon={isSelectedField ? <VisibilityIcon/> : <VisibilityIcon/>}
onClick={handleSelectDisplayField}
ariaLabel="copy to clipboard"
/>
</Tooltip>
<Tooltip
key={`${field}_${isSelectedField}_${isGroupByField}`}
title={isGroupByField ? "Ungroup this field" : "Group by this field"}
>
<Button
className="vm-group-logs-row-fields-item-controls__button"
variant="text"
color={isGroupByField ? "secondary" : "gray"}
size="small"
startIcon={<StorageIcon/>}
onClick={handleSelectGroupBy}
ariaLabel="copy to clipboard"
/>
</Tooltip>
</div>
</td>
<td className="vm-group-logs-row-fields-item__key">{field}</td>

View file

@ -0,0 +1,127 @@
import React, { FC, useCallback, useEffect, useRef } from "preact/compat";
import classNames from "classnames";
import { useSearchParams } from "react-router-dom";
import { MouseEvent, useState } from "react";
import { useAppState } from "../../../state/common/StateContext";
import { Logs } from "../../../api/types";
import useEventListener from "../../../hooks/useEventListener";
import Popper from "../../../components/Main/Popper/Popper";
import useBoolean from "../../../hooks/useBoolean";
import GroupLogsHeaderItem from "./GroupLogsHeaderItem";
import { LOGS_GROUP_BY, LOGS_URL_PARAMS } from "../../../constants/logs";
interface Props {
group: {
keys: string[]
keysString: string
values: Logs[]
pairs: string[]
};
}
const GroupLogsHeader: FC<Props> = ({ group }) => {
const { isDarkTheme } = useAppState();
const [searchParams] = useSearchParams();
const containerRef = useRef<HTMLDivElement>(null);
const moreRef = useRef<HTMLDivElement>(null);
const {
value: openMore,
toggle: handleToggleMore,
setFalse: handleCloseMore,
} = useBoolean(false);
const [hideParisCount, setHideParisCount] = useState<number>(0);
const groupBy = searchParams.get(LOGS_URL_PARAMS.GROUP_BY) || LOGS_GROUP_BY;
const compactGroupHeader = searchParams.get(LOGS_URL_PARAMS.COMPACT_GROUP_HEADER) === "true";
const pairs = group.pairs;
const hideAboveIndex = pairs.length - hideParisCount - 1;
const handleClickMore = (e: MouseEvent<HTMLDivElement>) => {
e.stopPropagation();
handleToggleMore();
};
const calcVisiblePairsCount = useCallback(() => {
if (!compactGroupHeader || !containerRef.current) {
setHideParisCount(0);
return;
}
const container = containerRef.current;
const containerSize = container.getBoundingClientRect();
const selector = ".vm-group-logs-section-keys__pair:not(.vm-group-logs-section-keys__pair_more)";
const children = Array.from(container.querySelectorAll(selector));
let count = 0;
for (const child of children) {
const { right } = (child as HTMLElement).getBoundingClientRect();
if ((right + 220) > containerSize.width) {
count++;
}
}
setHideParisCount(count);
}, [compactGroupHeader, containerRef]);
useEffect(calcVisiblePairsCount, [group.pairs, compactGroupHeader, containerRef]);
useEventListener("resize", calcVisiblePairsCount);
return (
<div
className={classNames({
"vm-group-logs-section-keys": true,
"vm-group-logs-section-keys_compact": compactGroupHeader,
})}
ref={containerRef}
>
<span className="vm-group-logs-section-keys__title">Group by <code>{groupBy}</code>:</span>
{pairs.map((pair, i) => (
<GroupLogsHeaderItem
key={`${group.keysString}_${pair}`}
pair={pair}
isHide={hideParisCount ? i > hideAboveIndex : false}
/>
))}
{hideParisCount > 0 && (
<>
<div
className={classNames({
"vm-group-logs-section-keys__pair": true,
"vm-group-logs-section-keys__pair_more": true,
"vm-group-logs-section-keys__pair_dark": isDarkTheme
})}
ref={moreRef}
onClick={handleClickMore}
>
+{hideParisCount} more
</div>
<Popper
open={openMore}
buttonRef={moreRef}
placement="bottom-left"
onClose={handleCloseMore}
>
<div className="vm-group-logs-section-keys vm-group-logs-section-keys_popper">
{pairs.slice(hideAboveIndex + 1).map((pair) => (
<GroupLogsHeaderItem
key={`${group.keysString}_${pair}`}
pair={pair}
/>
))}
</div>
</Popper>
</>
)}
<span className="vm-group-logs-section-keys__count">{group.values.length} entries</span>
</div>
)
;
};
export default GroupLogsHeader;

View file

@ -0,0 +1,59 @@
import React, { FC, useEffect } from "preact/compat";
import { useAppState } from "../../../state/common/StateContext";
import Tooltip from "../../../components/Main/Tooltip/Tooltip";
import classNames from "classnames";
import { MouseEvent, useState } from "react";
import useCopyToClipboard from "../../../hooks/useCopyToClipboard";
import { useSearchParams } from "react-router-dom";
import { LOGS_GROUP_BY, LOGS_URL_PARAMS } from "../../../constants/logs";
interface Props {
pair: string;
isHide?: boolean;
}
const GroupLogsHeaderItem: FC<Props> = ({ pair, isHide }) => {
const { isDarkTheme } = useAppState();
const copyToClipboard = useCopyToClipboard();
const [searchParams] = useSearchParams();
const [copied, setCopied] = useState<string | null>(null);
const groupBy = searchParams.get(LOGS_URL_PARAMS.GROUP_BY) || LOGS_GROUP_BY;
const handleClickByPair = (value: string) => async (e: MouseEvent<HTMLDivElement>) => {
e.stopPropagation();
const isKeyValue = /(.+)?=(".+")/.test(value);
const copyValue = isKeyValue ? `${value.replace(/=/, ": ")}` : `${groupBy}: "${value}"`;
const isCopied = await copyToClipboard(copyValue);
if (isCopied) {
setCopied(value);
}
};
useEffect(() => {
if (copied === null) return;
const timeout = setTimeout(() => setCopied(null), 2000);
return () => clearTimeout(timeout);
}, [copied]);
return (
<Tooltip
title={copied === pair ? "Copied" : "Copy to clipboard"}
placement={"top-center"}
>
<div
className={classNames({
"vm-group-logs-section-keys__pair": true,
"vm-group-logs-section-keys__pair_hide": isHide,
"vm-group-logs-section-keys__pair_dark": isDarkTheme
})}
onClick={handleClickByPair(pair)}
>
{pair}
</div>
</Tooltip>
);
};
export default GroupLogsHeaderItem;

View file

@ -6,28 +6,34 @@ import { ArrowDownIcon } from "../../../components/Main/Icons";
import classNames from "classnames";
import { useLogsState } from "../../../state/logsPanel/LogsStateContext";
import dayjs from "dayjs";
import { DATE_TIME_FORMAT } from "../../../constants/date";
import { useTimeState } from "../../../state/time/TimeStateContext";
import GroupLogsFieldRow from "./GroupLogsFieldRow";
import { marked } from "marked";
import { useSearchParams } from "react-router-dom";
import { LOGS_DATE_FORMAT, LOGS_URL_PARAMS } from "../../../constants/logs";
interface Props {
log: Logs;
displayFields?: string[];
}
const GroupLogsItem: FC<Props> = ({ log }) => {
const GroupLogsItem: FC<Props> = ({ log, displayFields = ["_msg"] }) => {
const {
value: isOpenFields,
toggle: toggleOpenFields,
} = useBoolean(false);
const [searchParams] = useSearchParams();
const { markdownParsing } = useLogsState();
const { timezone } = useTimeState();
const noWrapLines = searchParams.get(LOGS_URL_PARAMS.NO_WRAP_LINES) === "true";
const dateFormat = searchParams.get(LOGS_URL_PARAMS.DATE_FORMAT) || LOGS_DATE_FORMAT;
const formattedTime = useMemo(() => {
if (!log._time) return "";
return dayjs(log._time).tz().format(`${DATE_TIME_FORMAT}.SSS`);
}, [log._time, timezone]);
return dayjs(log._time).tz().format(dateFormat);
}, [log._time, timezone, dateFormat]);
const formattedMarkdown = useMemo(() => {
if (!markdownParsing || !log._msg) return "";
@ -38,6 +44,14 @@ const GroupLogsItem: FC<Props> = ({ log }) => {
const hasFields = fields.length > 0;
const displayMessage = useMemo(() => {
if (displayFields.length) {
return displayFields.filter(field => log[field]).map((field, i) => (
<span
className="vm-group-logs-row-content__sub-msg"
key={field + i}
>{log[field]}</span>
));
}
if (log._msg) return log._msg;
if (!hasFields) return;
const dataObject = fields.reduce<{ [key: string]: string }>((obj, [key, value]) => {
@ -45,7 +59,7 @@ const GroupLogsItem: FC<Props> = ({ log }) => {
return obj;
}, {});
return JSON.stringify(dataObject);
}, [log, fields, hasFields]);
}, [log, fields, hasFields, displayFields]);
return (
<div className="vm-group-logs-row">
@ -76,7 +90,8 @@ const GroupLogsItem: FC<Props> = ({ log }) => {
className={classNames({
"vm-group-logs-row-content__msg": true,
"vm-group-logs-row-content__msg_empty-msg": !log._msg,
"vm-group-logs-row-content__msg_missing": !displayMessage
"vm-group-logs-row-content__msg_missing": !displayMessage,
"vm-group-logs-row-content__msg_single-line": noWrapLines,
})}
dangerouslySetInnerHTML={(markdownParsing && formattedMarkdown) ? { __html: formattedMarkdown } : undefined}
>

View file

@ -1,5 +1,7 @@
@use "src/styles/variables" as *;
$font-size-logs: var(--font-size-logs, $font-size-small);
.vm-group-logs {
margin-top: calc(-1 * $padding-medium);
@ -19,22 +21,44 @@
}
&-section {
border-bottom: $border-divider;
&-keys {
position: relative;
display: flex;
align-items: center;
flex-wrap: wrap;
gap: $padding-small;
border-bottom: $border-divider;
padding: $padding-small 0;
padding: $padding-small 120px $padding-small 0;
font-size: $font-size-logs;
&_compact {
flex-wrap: nowrap;
overflow: hidden;
}
&_popper {
display: flex;
flex-wrap: nowrap;
flex-direction: column;
align-items: flex-start;
justify-content: flex-start;
padding: $padding-global;
max-height: 400px;
overflow: auto;
}
&__title {
font-weight: bold;
white-space: nowrap;
code {
font-family: monospace;
&:before {
content: "\"";
}
&:after {
content: "\"";
}
@ -42,19 +66,35 @@
}
&__count {
position: absolute;
top: auto;
right: 0;
flex-grow: 1;
text-align: right;
font-size: $font-size-small;
font-size: $font-size-logs;
color: $color-text-secondary;
padding-right: calc($padding-large * 3);
}
&__pair {
order: 0;
padding: calc($padding-global / 2) $padding-global;
background-color: lighten($color-tropical-blue, 6%);
color: darken($color-dodger-blue, 20%);
border-radius: $border-radius-medium;
transition: background-color 0.3s ease-in, transform 0.1s ease-in, opacity 0.3s ease-in;
white-space: nowrap;
&_hide {
order: 2;
visibility: hidden;
opacity: 0;
pointer-events: none;
}
&_more {
order: 1;
}
&:hover {
background-color: $color-tropical-blue;
@ -84,13 +124,19 @@
&-row {
position: relative;
border-bottom: $border-divider;
&:last-child {
margin-bottom: $padding-small;
}
&-content {
position: relative;
display: grid;
grid-template-columns: auto minmax(180px, max-content) 1fr;
padding: $padding-global 0;
grid-template-columns: auto max-content 1fr;
padding: calc($padding-small / 4) 0;
font-size: $font-size-logs;
font-variant-numeric: tabular-nums;
line-height: 1.3;
cursor: pointer;
transition: background-color 0.2s ease-in;
@ -116,8 +162,7 @@
display: flex;
align-items: flex-start;
justify-content: flex-end;
margin-right: $padding-small;
line-height: 1;
padding: 0 $padding-global 0 $padding-small;
white-space: nowrap;
&_missing {
@ -130,7 +175,12 @@
&__msg {
font-family: $font-family-monospace;
overflow-wrap: anywhere;
line-height: 1.1;
&_single-line {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
&_empty-msg {
overflow: hidden;
@ -158,7 +208,7 @@
border-radius: $border-radius-small;
tab-size: 4;
font-variant-ligatures: none;
margin: calc($padding-small/4) 0;
margin: calc($padding-small / 4) 0;
}
p {
@ -171,7 +221,7 @@
}
code {
font-size: $font-size-small;
font-size: $font-size-logs;
padding: calc($padding-small / 4) calc($padding-small / 2);
}
@ -194,25 +244,35 @@
blockquote {
border-left: 4px solid $color-hover-black;
margin: calc($padding-small/2) $padding-small;
padding: calc($padding-small/2) $padding-small;
margin: calc($padding-small / 2) $padding-small;
padding: calc($padding-small / 2) $padding-small;
}
ul, ol {
list-style-position: inside;
}
/* end styles for markdown */
}
&__sub-msg {
padding-right: $padding-global;
}
}
&-fields {
position: relative;
grid-row: 2;
padding: $padding-small 0;
margin-bottom: $padding-small;
margin: $padding-small 0 $padding-small calc($padding-global * 2);
border: $border-divider;
border-radius: $border-radius-small;
overflow: auto;
max-height: 300px;
height: 300px;
resize: vertical;
font-family: $font-family-monospace;
font-size: $font-size-logs;
font-variant-numeric: tabular-nums;
&-item {
border-radius: $border-radius-small;
@ -223,19 +283,26 @@
}
&-controls {
padding: 0;
padding: 0 calc($padding-small / 2);
&__wrapper {
display: flex;
align-items: center;
justify-content: center;
}
&__button.vm-button_small {
width: 22px;
height: 22px;
min-height: 22px;
}
}
&__key,
&__value {
vertical-align: top;
padding: calc($padding-small / 2) $padding-global;
line-height: $font-size;
padding: calc($padding-small / 2);
}
&__key {

View file

@ -3,7 +3,6 @@ export type StorageKeys = "AUTOCOMPLETE"
| "QUERY_TRACING"
| "SERIES_LIMITS"
| "TABLE_COMPACT"
| "TABLE_COLUMNS"
| "TIMEZONE"
| "DISABLED_DEFAULT_TIMEZONE"
| "THEME"

View file

@ -10,7 +10,7 @@ import (
// specific files
// static content
//
//go:embed favicon-32x32.png robots.txt index.html manifest.json asset-manifest.json
//go:embed favicon.svg robots.txt index.html manifest.json asset-manifest.json
//go:embed static
var files embed.FS

View file

@ -171,4 +171,49 @@ func TestClusterMultiTenantSelect(t *testing.T) {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// Delete series from a specific tenant
vmselect.DeleteSeries(t, "foo_bar", apptest.QueryOpts{
Tenant: "5:15",
})
wantSR = apptest.NewPrometheusAPIV1SeriesResponse(t,
`{"data": [
{"__name__":"foo_bar", "vm_account_id":"0", "vm_project_id":"10"},
{"__name__":"foo_bar", "vm_account_id":"1", "vm_project_id":"1"},
{"__name__":"foo_bar", "vm_account_id":"1", "vm_project_id":"15"},
{"__name__":"foo_bar", "vm_account_id":"5", "vm_project_id":"0"}
]
}`)
wantSR.Sort()
gotSR = vmselect.PrometheusAPIV1Series(t, "foo_bar", apptest.QueryOpts{
Tenant: "multitenant",
Start: "2022-05-10T08:03:00.000Z",
})
gotSR.Sort()
if diff := cmp.Diff(wantSR, gotSR, cmpSROpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// Delete series for multitenant with tenant filter
vmselect.DeleteSeries(t, `foo_bar{vm_account_id="1"}`, apptest.QueryOpts{
Tenant: "multitenant",
})
wantSR = apptest.NewPrometheusAPIV1SeriesResponse(t,
`{"data": [
{"__name__":"foo_bar", "vm_account_id":"0", "vm_project_id":"10"},
{"__name__":"foo_bar", "vm_account_id":"5", "vm_project_id":"0"}
]
}`)
wantSR.Sort()
gotSR = vmselect.PrometheusAPIV1Series(t, `foo_bar`, apptest.QueryOpts{
Tenant: "multitenant",
Start: "2022-05-10T08:03:00.000Z",
})
gotSR.Sort()
if diff := cmp.Diff(wantSR, gotSR, cmpSROpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
}

View file

@ -117,6 +117,22 @@ func (app *Vmselect) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts
return NewPrometheusAPIV1SeriesResponse(t, res)
}
// DeleteSeries sends a query to the /prometheus/api/v1/admin/tsdb/delete_series endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1admintsdbdelete_series
func (app *Vmselect) DeleteSeries(t *testing.T, matchQuery string, opts QueryOpts) {
t.Helper()
seriesURL := fmt.Sprintf("http://%s/delete/%s/prometheus/api/v1/admin/tsdb/delete_series", app.httpListenAddr, opts.getTenant())
values := opts.asURLValues()
values.Add("match[]", matchQuery)
res := app.cli.PostForm(t, seriesURL, values, http.StatusNoContent)
if res != "" {
t.Fatalf("unexpected non-empty DeleteSeries response=%q", res)
}
}
// String returns the string representation of the vmselect app state.
func (app *Vmselect) String() string {
return fmt.Sprintf("{app: %s httpListenAddr: %q}", app.app, app.httpListenAddr)

View file

@ -1,7 +1,7 @@
dashboard-copy:
echo "" > dashboards/vm/${SRC}
cat dashboards/${SRC} >> dashboards/vm/${SRC}
sed -i='.tmp' 's/prometheus/victoriametrics-datasource/g' dashboards/vm/${SRC}
sed -i='.tmp' 's/prometheus/victoriametrics-metrics-datasource/g' dashboards/vm/${SRC}
sed -i='.tmp' 's/Prometheus/VictoriaMetrics/g' dashboards/vm/${SRC}
sed -i='.tmp' 's/${D_UID}/${D_UID}_vm/g' dashboards/vm/${SRC}
sed -i='.tmp' 's/"title": "${TITLE}"/"title": "${TITLE} (VM)"/g' dashboards/vm/${SRC}

View file

@ -17,7 +17,7 @@
},
{
"type": "datasource",
"id": "victoriametrics-datasource",
"id": "victoriametrics-metrics-datasource",
"name": "VictoriaMetrics",
"version": "1.0.0"
},
@ -85,7 +85,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -133,7 +133,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -150,7 +150,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Status of last backup operation.",
@ -210,7 +210,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -225,7 +225,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Number of backups stored in remote storage.",
@ -275,7 +275,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -290,7 +290,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Space used in remote storage.",
@ -341,7 +341,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -356,7 +356,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "",
@ -420,7 +420,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -435,7 +435,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Status of last retention run.\n\nRetention is a process of removing old backups from remote storage.",
@ -495,7 +495,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -510,7 +510,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -573,7 +573,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -588,7 +588,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -674,7 +674,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -715,7 +715,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -801,7 +801,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -852,7 +852,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Max duration of backup run. Lower better.\n\nEach backup starts with data upload during `latest` backup. Subsequent backups (`hourly`, `daily`, `weekly`, `monthly`) are copying date by using server-side copy. ",
@ -931,7 +931,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -946,7 +946,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1020,7 +1020,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1035,7 +1035,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1110,7 +1110,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1125,7 +1125,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1184,7 +1184,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1227,7 +1227,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1298,7 +1298,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1313,7 +1313,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1387,7 +1387,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1416,7 +1416,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Max duration of retention run. Lower better.\n\nRetention is a process of removing old backups from remote storage.",
@ -1485,7 +1485,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1500,7 +1500,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1574,7 +1574,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1603,7 +1603,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Duration of backup run. Lower better.\n\nEach backup starts with data upload during `latest` backup. Subsequent backups (`hourly`, `daily`, `weekly`, `monthly`) are copying date by using server-side copy.\n",
@ -1671,7 +1671,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1705,7 +1705,7 @@
"multi": false,
"name": "ds",
"options": [],
"query": "victoriametrics-datasource",
"query": "victoriametrics-metrics-datasource",
"queryValue": "",
"refresh": 1,
"regex": "",
@ -1764,7 +1764,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"filters": [],

View file

@ -17,7 +17,7 @@
},
{
"type": "datasource",
"id": "victoriametrics-datasource",
"id": "victoriametrics-metrics-datasource",
"name": "VictoriaMetrics",
"version": "1.0.0"
},
@ -80,7 +80,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "How many datapoints are inserted into storage per second by accountID and projectID",
@ -168,7 +168,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -184,7 +184,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Request rate accepted by vmselect nodes per tenant",
@ -272,7 +272,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -290,7 +290,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the number of active time series with new data points inserted during the last hour. High value may result in ingestion slowdown. \n\nSee following link for details:",
@ -385,7 +385,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -403,7 +403,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Time spent on query execution per tenant per second",
@ -491,7 +491,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -509,7 +509,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the amount of on-disk space occupied by data points only. The disk space is used for storing by datapoint and indexdb. There is no option to expose per tenant statistic for indexdb. Usually, indexed takes much less space compared to datapoints. But with a high churn rate, the size of the indexdb could grow significantly.",
@ -596,7 +596,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -614,7 +614,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Number of new series created over last 24h.",
@ -702,7 +702,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -731,7 +731,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -785,7 +785,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -800,7 +800,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -853,7 +853,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -868,7 +868,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -921,7 +921,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -936,7 +936,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -991,7 +991,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1006,7 +1006,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1059,7 +1059,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1074,7 +1074,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1127,7 +1127,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1142,7 +1142,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1242,7 +1242,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1260,7 +1260,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1360,7 +1360,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1378,7 +1378,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1478,7 +1478,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1496,7 +1496,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1596,7 +1596,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1614,7 +1614,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1714,7 +1714,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1732,7 +1732,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1832,7 +1832,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1868,7 +1868,7 @@
"multi": false,
"name": "ds",
"options": [],
"query": "victoriametrics-datasource",
"query": "victoriametrics-metrics-datasource",
"queryValue": "",
"refresh": 1,
"regex": "",
@ -1879,7 +1879,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vm_tenant_active_timeseries, accountID)",
@ -1905,7 +1905,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vm_tenant_active_timeseries{accountID=~\"$accountID\"},projectID)",
@ -1929,7 +1929,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "PE8D8DB4BEE4E4B22"
},
"filters": [],

View file

@ -33,7 +33,7 @@
{
"collapsed": false,
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"gridPos": {
@ -47,7 +47,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"refId": "A"
@ -58,7 +58,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"gridPos": {
@ -81,7 +81,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"refId": "A"
@ -92,7 +92,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Number of objects at kubernetes cluster per each controller",
@ -147,7 +147,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -162,7 +162,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -215,7 +215,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -234,7 +234,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": " Shows per namespace watchers for VictoriaMetrics Operator objects (ServiceMonitors, PodMonitors, etc) ",
@ -288,11 +288,11 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(operator_victoriametrics-datasource_converter_active_watchers)",
"expr": "sum(operator_victoriametrics-metrics-datasource_converter_active_watchers)",
"instant": false,
"legendFormat": "__auto",
"range": true,
@ -304,7 +304,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": " Number of operator instances with obtained leader status. \n Value above 1 indicates that instances with the same job may behave incorrectly.\n It's recommend to check Operator logs. ",
@ -358,7 +358,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -374,7 +374,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": " Shows number of active reconcile workers",
@ -428,7 +428,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -444,7 +444,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": " Shows amount of VictoriaMetrics Operator objects processed by Operator.",
@ -527,11 +527,11 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(operator_victoriametrics-datasource_converter_watch_events_total{job=~\"$job\"}[$__interval])) by (event_type,object_type_name)",
"expr": "sum(rate(operator_victoriametrics-metrics-datasource_converter_watch_events_total{job=~\"$job\"}[$__interval])) by (event_type,object_type_name)",
"instant": false,
"legendFormat": "{{object_type_name}} {{event_type}}",
"range": true,
@ -543,7 +543,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -627,7 +627,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -642,7 +642,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the rate of logging the messages by their level. Unexpected spike in rate is a good reason to check logs.",
@ -727,7 +727,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -743,7 +743,7 @@
{
"collapsed": false,
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"gridPos": {
@ -757,7 +757,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"refId": "A"
@ -768,7 +768,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Non zero metrics indicates about error with CR object definition (typos or incorrect values) or errors with kubernetes API connection.",
@ -852,7 +852,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -865,7 +865,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -881,7 +881,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Operator limits number of reconcile configuration events to 5 events per 2 seconds by default.",
@ -965,7 +965,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -980,7 +980,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Number of objects waiting in the queue for reconciliation. Non-zero values indicate that operator cannot process CR objects changes with the given resources.",
@ -1064,7 +1064,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1079,7 +1079,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": " For controllers with StatefulSet it's ok to see latency greater then 3 seconds. It could be vmalertmanager,vmcluster or vmagent in statefulMode.\n\n For other controllers, latency greater then 2 second may indicate issues with kubernetes cluster or operator's performance.\n ",
@ -1165,7 +1165,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1180,7 +1180,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Number of HTTP requests to the Kubernetes API server break down by code and method",
@ -1262,7 +1262,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1278,7 +1278,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows how many ongoing reconcile events are taking place, where:\n* `max` - equal to the value of flag`-controller.maxConcurrentReconciles`;\n* `current` - current number of reconcile workers processing CRD objects.\n\nWhen `current` hits `max` constantly, it means operator cannot process events in time. It should be either increased value for flag `-controller.maxConcurrentReconciles` or allocated additional CPU resources to the operator.",
@ -1377,7 +1377,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1389,7 +1389,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1406,7 +1406,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the time goroutines have spent in runnable state before actually running. The lower is better.\n\nHigh values or values exceeding the threshold is usually a sign of insufficient CPU resources or CPU throttling. \n\nVerify that service has enough CPU resources. Otherwise, the service could work unreliably with delays in processing.",
@ -1486,7 +1486,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1502,7 +1502,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": " Requests latency to the Kubernetes API server.",
@ -1585,7 +1585,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1602,7 +1602,7 @@
{
"collapsed": true,
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"gridPos": {
@ -1615,7 +1615,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1699,7 +1699,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1710,7 +1710,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1722,7 +1722,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1734,7 +1734,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1750,7 +1750,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1830,7 +1830,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1845,7 +1845,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1925,7 +1925,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1940,7 +1940,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -2023,7 +2023,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2040,7 +2040,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"refId": "A"
@ -2065,7 +2065,7 @@
"multi": false,
"name": "ds",
"options": [],
"query": "victoriametrics-datasource",
"query": "victoriametrics-metrics-datasource",
"queryValue": "te",
"refresh": 1,
"regex": "",
@ -2075,7 +2075,7 @@
{
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(operator_log_messages_total,job)",
@ -2097,7 +2097,7 @@
{
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(operator_log_messages_total{job=~\"$job\"},instance)",
@ -2119,7 +2119,7 @@
{
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{job=\"$job\", instance=\"$instance\"}, version)",

View file

@ -11,7 +11,7 @@
},
{
"type": "datasource",
"id": "victoriametrics-datasource",
"id": "victoriametrics-metrics-datasource",
"name": "VictoriaMetrics",
"version": "1.0.0"
},
@ -50,7 +50,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"enable": true,
@ -63,7 +63,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"enable": true,
@ -97,7 +97,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "How many log entries are in storage",
@ -149,7 +149,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -168,7 +168,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the logs ingestion rate.",
@ -220,7 +220,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -239,7 +239,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "The ratio of original data size and compressed data stored on disk",
@ -291,7 +291,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -310,7 +310,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Total number of available CPUs for VM process",
@ -366,7 +366,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -385,7 +385,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -439,7 +439,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -456,7 +456,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Total amount of used disk space",
@ -508,7 +508,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -527,7 +527,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the rate of HTTP read requests.",
@ -579,7 +579,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -598,7 +598,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Total size of available memory for VM process",
@ -680,7 +680,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "How many logs are inserted into storage per second",
@ -769,7 +769,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -787,7 +787,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "* `*` - unsupported query path\n* `/insert` - insert into VM\n* `/metrics` - query VL system metrics\n* `/query` - read the data",
@ -877,7 +877,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -895,7 +895,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the amount of on-disk space occupied by data before and after compressiom",
@ -985,7 +985,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1003,7 +1003,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "The number of the new log streams created over the last 24h",
@ -1093,7 +1093,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1111,7 +1111,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the number of restarts per job. The chart can be useful to identify periodic process restarts and correlate them with potential issues or anomalies. Normally, processes shouldn't restart unless restart was inited by user. The reason of restarts should be figured out by checking the logs of each specific service. ",
@ -1200,7 +1200,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1226,7 +1226,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Percentage of used memory (resident).\nThe application's performance will significantly degrade when memory usage is close to 100%.",
@ -1315,7 +1315,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1332,7 +1332,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "",
@ -1421,7 +1421,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(go_memstats_sys_bytes{job=~\"$job\", instance=~\"$instance\"}) + sum(vm_cache_size_bytes{job=~\"$job\", instance=~\"$instance\"})",
@ -1433,7 +1433,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(go_memstats_heap_inuse_bytes{job=~\"$job\", instance=~\"$instance\"}) + sum(vm_cache_size_bytes{job=~\"$job\", instance=~\"$instance\"})",
@ -1445,7 +1445,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(go_memstats_stack_inuse_bytes{job=~\"$job\", instance=~\"$instance\"})",
@ -1457,7 +1457,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(process_resident_memory_bytes{job=~\"$job\", instance=~\"$instance\"})",
@ -1470,7 +1470,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"exemplar": false,
@ -1488,7 +1488,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Share for memory allocated by the process itself. When memory usage reaches 100% it will be likely OOM-killed.\nSafe memory usage % considered to be below 80%",
@ -1577,7 +1577,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1594,7 +1594,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1682,7 +1682,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1701,7 +1701,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the percentage of open file descriptors compared to the limit set in the OS.\nReaching the limit of open files can cause various issues and must be prevented.\n\nSee how to change limits here https://medium.com/@muhammadtriwibowo/set-permanently-ulimit-n-open-files-in-ubuntu-4d61064429a",
@ -1807,7 +1807,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1826,7 +1826,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "",
@ -1931,7 +1931,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1945,7 +1945,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"exemplar": false,
@ -1963,7 +1963,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -2052,7 +2052,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2069,7 +2069,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the number of bytes read/write from the storage layer.",
@ -2170,7 +2170,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(rate(process_io_storage_read_bytes_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
@ -2183,7 +2183,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(rate(process_io_storage_written_bytes_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
@ -2200,7 +2200,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -2289,7 +2289,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2306,7 +2306,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the number of read/write syscalls such as read, pread, write, pwrite.",
@ -2407,7 +2407,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2422,7 +2422,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2441,7 +2441,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "",
@ -2530,7 +2530,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2548,7 +2548,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "",
@ -2637,7 +2637,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2655,7 +2655,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the time goroutines have spent in runnable state before actually running. The lower is better.\n\nHigh values or values exceeding the threshold is usually a sign of insufficient CPU resources or CPU throttling. \n\nVerify that service has enough CPU resources. Otherwise, the service could work unreliably with delays in processing.",
@ -2743,7 +2743,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2780,7 +2780,7 @@
"multi": false,
"name": "ds",
"options": [],
"query": "victoriametrics-datasource",
"query": "victoriametrics-metrics-datasource",
"queryValue": "",
"refresh": 1,
"regex": "",
@ -2790,7 +2790,7 @@
{
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{version=~\"victoria-logs-.*\"}, job)",
@ -2812,7 +2812,7 @@
{
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{job=~\"$job\"}, instance)",
@ -2833,7 +2833,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"filters": [],

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -11,7 +11,7 @@
},
{
"type": "datasource",
"id": "victoriametrics-datasource",
"id": "victoriametrics-metrics-datasource",
"name": "VictoriaMetrics",
"version": "1.0.0"
},
@ -56,7 +56,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"enable": true,
@ -69,7 +69,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"enable": true,
@ -140,7 +140,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows if the last configuration update was successful. \"Not Ok\" means there was an unsuccessful attempt to update the configuration due to some error. Check the log for details.",
@ -210,7 +210,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"exemplar": false,
@ -225,7 +225,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the total number of loaded alerting rules across selected instances and groups.",
@ -273,7 +273,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"exemplar": false,
@ -288,7 +288,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the total number of loaded recording rules across selected instances and groups.",
@ -336,7 +336,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"exemplar": false,
@ -351,7 +351,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the total number of errors generated by recording/alerting rules for selected instances and groups.",
@ -403,7 +403,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"exemplar": false,
@ -418,7 +418,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows number of Recording Rules which produce no data.\n\n Usually it means that such rules are misconfigured, since they give no output during the evaluation.\nPlease check if rule's expression is correct and it is working as expected.",
@ -470,7 +470,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -487,7 +487,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -568,7 +568,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -584,7 +584,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -672,7 +672,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -716,7 +716,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the number of fired alerts by job.",
@ -804,7 +804,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -821,7 +821,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Top $topk groups by evaluation duration. Shows groups that take the most of time during the evaluation across all instances.\n\nThe panel uses MetricsQL functions and may not work with VictoriaMetrics.",
@ -909,7 +909,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -926,7 +926,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows how many requests (executions) per second vmalert sends to the configured datasource.",
@ -1011,7 +1011,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1028,7 +1028,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the error rate while executing configured rules. Non-zero value means there are some issues with existing rules. Check the logs to get more details.",
@ -1113,7 +1113,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1144,7 +1144,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "The precentage of used RSS memory\n\nIf you think that usage is abnormal or unexpected, please file an issue and attach memory profile if possible.",
@ -1241,7 +1241,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1258,7 +1258,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Amount of used RSS memory\n\nIf you think that usage is abnormal or unexpected, please file an issue and attach memory profile if possible.",
@ -1354,7 +1354,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1371,7 +1371,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the CPU usage percentage per vmalert instance. \nIf you think that usage is abnormal or unexpected pls file an issue and attach CPU profile if possible.",
@ -1468,7 +1468,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1487,7 +1487,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the max number of CPU cores used by a `job` and the corresponding limit.",
@ -1584,7 +1584,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1599,7 +1599,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1619,7 +1619,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Panel shows the percentage of open file descriptors in the OS.\nReaching the limit of open files can cause various issues and must be prevented.\n\nSee how to change limits here https://medium.com/@muhammadtriwibowo/set-permanently-ulimit-n-open-files-in-ubuntu-4d61064429a",
@ -1709,7 +1709,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1728,7 +1728,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1817,7 +1817,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1835,7 +1835,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the percent of CPU spent on garbage collection.\n\nIf % is high, then CPU usage can be decreased by changing GOGC to higher values. Increasing GOGC value will increase memory usage, and decrease CPU usage.\n\nTry searching for keyword `GOGC` at https://docs.victoriametrics.com/troubleshooting/ ",
@ -1925,7 +1925,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1943,7 +1943,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the time goroutines have spent in runnable state before actually running. The lower is better.\n\nHigh values or values exceeding the threshold is usually a sign of insufficient CPU resources or CPU throttling. \n\nVerify that service has enough CPU resources. Otherwise, the service could work unreliably with delays in processing.",
@ -2031,7 +2031,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2072,7 +2072,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -2157,7 +2157,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2202,7 +2202,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Missed evaluation means that group evaluation time takes longer than the configured evaluation interval. \nThis may result in missed alerting notifications or recording rules samples. Try increasing evaluation interval or concurrency for such groups. See https://docs.victoriametrics.com/vmalert/#groups\n\nIf rule expressions are taking longer than expected, please see https://docs.victoriametrics.com/troubleshooting/#slow-queries.\"",
@ -2287,7 +2287,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2304,7 +2304,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the number of restarts per job. The chart can be useful to identify periodic process restarts and correlate them with potential issues or anomalies. Normally, processes shouldn't restart unless restart was inited by user. The reason of restarts should be figured out by checking the logs of each specific service. ",
@ -2392,7 +2392,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2426,7 +2426,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows top $topk current active (firing) alerting rules.\n\nThe panel uses MetricsQL functions and may not work with VictoriaMetrics.",
@ -2511,7 +2511,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2528,7 +2528,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the events when rule execution resulted into an error. Check the logs for more details.",
@ -2613,7 +2613,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2630,7 +2630,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the current pending alerting rules per group.\nBy pending means the rule which remains active less than configured `for` parameter.",
@ -2715,7 +2715,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2732,7 +2732,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the error rate for the attempts to send alerts to Alertmanager. If not zero it means there issues on attempt to send notification to Alertmanager and some alerts may be not delivered properly. Check the logs for more details.",
@ -2816,7 +2816,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"exemplar": false,
@ -2831,7 +2831,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows how many alerts are sent to Alertmanager per second. Only active alerts are sent.",
@ -2915,7 +2915,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2959,7 +2959,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the top $topk recording rules which generate the most of [samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples). Each generated sample is basically a time series which then ingested into configured remote storage. Rules with high numbers may cause the most pressure on the remote database and become a source of too high cardinality.\n\nThe panel uses MetricsQL functions and may not work with VictoriaMetrics.",
@ -3044,7 +3044,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -3061,7 +3061,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the rules which do not produce any [samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) during the evaluation. Usually it means that such rules are misconfigured, since they give no output during the evaluation.\nPlease check if rule's expression is correct and it is working as expected.",
@ -3146,7 +3146,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -3163,7 +3163,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -3245,7 +3245,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -3285,7 +3285,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -3361,7 +3361,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -3376,7 +3376,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the number of datapoints dropped by vmalert while sending to the configured remote write URL. vmalert performs up to 5 retries before dropping the data. Check vmalert's error logs for the specific error message.",
@ -3453,7 +3453,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -3468,7 +3468,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows current number of established connections to remote write endpoints.\n\n",
@ -3554,7 +3554,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -3571,7 +3571,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the global rate for number of written bytes via remote write connections.",
@ -3657,7 +3657,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -3696,7 +3696,7 @@
"multi": false,
"name": "ds",
"options": [],
"query": "victoriametrics-datasource",
"query": "victoriametrics-metrics-datasource",
"queryValue": "",
"refresh": 1,
"regex": "",
@ -3706,7 +3706,7 @@
{
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{version=~\"^vmalert.*\"}, job)",
@ -3729,7 +3729,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{job=~\"$job\"}, instance)",
@ -3752,7 +3752,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vmalert_iteration_total{job=~\"$job\", instance=~\"$instance\"},file)",
@ -3775,7 +3775,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vmalert_iteration_total{job=~\"$job\", instance=~\"$instance\"}, group)",
@ -3843,7 +3843,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"filters": [],

View file

@ -11,7 +11,7 @@
},
{
"type": "datasource",
"id": "victoriametrics-datasource",
"id": "victoriametrics-metrics-datasource",
"name": "VictoriaMetrics",
"version": "1.0.0"
},
@ -116,7 +116,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -204,7 +204,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -222,7 +222,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows if the last configuration update was successful. \"Not Ok\" means there was an unsuccessful attempt to update the configuration due to some error. Check the log for details.",
@ -292,7 +292,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"exemplar": false,
@ -307,7 +307,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the rate of requests.",
@ -356,7 +356,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -372,7 +372,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the total number of users defined at configuration file.",
@ -420,7 +420,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -437,7 +437,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the rate of request errors.",
@ -486,7 +486,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -502,7 +502,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -581,7 +581,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -611,7 +611,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -690,7 +690,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -702,7 +702,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -718,7 +718,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows percent utilization of per concurrent requests capacity.",
@ -805,7 +805,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -822,7 +822,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the rate of rejected requests by a reason.",
@ -902,7 +902,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -918,7 +918,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": " The number of concurrent connections processed by vmauth reached one of limits. Possible solutions:\n- increase global limit with flag -maxConcurrentRequests\n- increase limit with flag: -maxConcurrentPerUserRequests for all users or with config option `max_concurrent_requests` per user.\n- deploy additional vmauth replicas\n- check requests latency at backend service and allocate resources to it if needed",
@ -998,7 +998,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1010,7 +1010,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1022,7 +1022,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1038,7 +1038,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows duration in seconds of user requests by quantile.",
@ -1124,7 +1124,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1136,7 +1136,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1162,7 +1162,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Percentage of used memory (resident).\nThe application's performance will significantly degrade when memory usage is close to 100%.",
@ -1251,7 +1251,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1268,7 +1268,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -1356,7 +1356,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1375,7 +1375,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "",
@ -1464,7 +1464,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(go_memstats_sys_bytes{job=~\"$job\", instance=~\"$instance\"}) + sum(vm_cache_size_bytes{job=~\"$job\", instance=~\"$instance\"})",
@ -1476,7 +1476,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(go_memstats_heap_inuse_bytes{job=~\"$job\", instance=~\"$instance\"}) + sum(vm_cache_size_bytes{job=~\"$job\", instance=~\"$instance\"})",
@ -1488,7 +1488,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(go_memstats_stack_inuse_bytes{job=~\"$job\", instance=~\"$instance\"})",
@ -1500,7 +1500,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(process_resident_memory_bytes{job=~\"$job\", instance=~\"$instance\"})",
@ -1513,7 +1513,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"exemplar": false,
@ -1531,7 +1531,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "",
@ -1636,7 +1636,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])",
@ -1648,7 +1648,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"exemplar": false,
@ -1666,7 +1666,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "",
@ -1755,7 +1755,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(vm_tcplistener_conns{job=~\"$job\", instance=~\"$instance\"})",
@ -1771,7 +1771,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "",
@ -1860,7 +1860,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -1878,7 +1878,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the percentage of open file descriptors compared to the limit set in the OS.\nReaching the limit of open files can cause various issues and must be prevented.\n\nSee how to change limits here https://medium.com/@muhammadtriwibowo/set-permanently-ulimit-n-open-files-in-ubuntu-4d61064429a",
@ -1984,7 +1984,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2003,7 +2003,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -2092,7 +2092,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(go_goroutines{job=~\"$job\", instance=~\"$instance\"})",
@ -2107,7 +2107,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -2196,7 +2196,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"expr": "sum(process_num_threads{job=~\"$job\", instance=~\"$instance\"})",
@ -2225,7 +2225,7 @@
"panels": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
@ -2311,7 +2311,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2352,7 +2352,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows number of generated error and warning messages in logs. Non-zero value may be a sign of connectivity or missconfiguration errors.",
@ -2433,7 +2433,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2448,7 +2448,7 @@
},
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the number of restarts per job. The chart can be useful to identify periodic process restarts and correlate them with potential issues or anomalies. Normally, processes shouldn't restart unless restart was inited by user. The reason of restarts should be figured out by checking the logs of each specific service. ",
@ -2537,7 +2537,7 @@
"targets": [
{
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
@ -2573,7 +2573,7 @@
"multi": false,
"name": "ds",
"options": [],
"query": "victoriametrics-datasource",
"query": "victoriametrics-metrics-datasource",
"queryValue": "",
"refresh": 1,
"regex": "",
@ -2583,7 +2583,7 @@
{
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{version=~\"^vmauth.*\"}, job)",
@ -2606,7 +2606,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{job=~\"$job\"}, instance)",
@ -2628,7 +2628,7 @@
{
"current": {},
"datasource": {
"type": "victoriametrics-datasource",
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"definition": "label_values(vmauth_user_requests_total{job=~\"$job\", instance=~\"$instance\"}, username)",

View file

@ -6,7 +6,7 @@ ROOT_IMAGE ?= alpine:3.21.2
ROOT_IMAGE_SCRATCH ?= scratch
CERTS_IMAGE := alpine:3.21.2
GO_BUILDER_IMAGE := golang:1.23.4-alpine
GO_BUILDER_IMAGE := golang:1.23.5-alpine
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
BASE_IMAGE := local/base:1.1.4-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
DOCKER ?= docker

View file

@ -4,7 +4,7 @@ services:
# And forward them to --remoteWrite.url
vmagent:
container_name: vmagent
image: victoriametrics/vmagent:v1.109.0
image: victoriametrics/vmagent:v1.109.1
depends_on:
- "vminsert"
ports:
@ -39,7 +39,7 @@ services:
# where N is number of vmstorages (2 in this case).
vmstorage-1:
container_name: vmstorage-1
image: victoriametrics/vmstorage:v1.109.0-cluster
image: victoriametrics/vmstorage:v1.109.1-cluster
ports:
- 8482
- 8400
@ -51,7 +51,7 @@ services:
restart: always
vmstorage-2:
container_name: vmstorage-2
image: victoriametrics/vmstorage:v1.109.0-cluster
image: victoriametrics/vmstorage:v1.109.1-cluster
ports:
- 8482
- 8400
@ -66,7 +66,7 @@ services:
# pre-process them and distributes across configured vmstorage shards.
vminsert:
container_name: vminsert
image: victoriametrics/vminsert:v1.109.0-cluster
image: victoriametrics/vminsert:v1.109.1-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@ -81,7 +81,7 @@ services:
# vmselect collects results from configured `--storageNode` shards.
vmselect-1:
container_name: vmselect-1
image: victoriametrics/vmselect:v1.109.0-cluster
image: victoriametrics/vmselect:v1.109.1-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@ -94,7 +94,7 @@ services:
restart: always
vmselect-2:
container_name: vmselect-2
image: victoriametrics/vmselect:v1.109.0-cluster
image: victoriametrics/vmselect:v1.109.1-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@ -112,7 +112,7 @@ services:
# It can be used as an authentication proxy.
vmauth:
container_name: vmauth
image: victoriametrics/vmauth:v1.109.0
image: victoriametrics/vmauth:v1.109.1
depends_on:
- "vmselect-1"
- "vmselect-2"
@ -127,7 +127,7 @@ services:
# vmalert executes alerting and recording rules
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.109.0
image: victoriametrics/vmalert:v1.109.1
depends_on:
- "vmauth"
ports:

View file

@ -16,7 +16,7 @@ services:
- ./../../dashboards/victoriametrics.json:/var/lib/grafana/dashboards/vm.json
- ./../../dashboards/victorialogs.json:/var/lib/grafana/dashboards/vl.json
environment:
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.13.0/victoriametrics-logs-datasource-v0.13.0.zip;victoriametrics-logs-datasource"
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.13.4/victoriametrics-logs-datasource-v0.13.4.zip;victoriametrics-logs-datasource"
- "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-logs-datasource"
networks:
- vm_net
@ -45,7 +45,7 @@ services:
# storing logs and serving read queries.
victorialogs:
container_name: victorialogs
image: victoriametrics/victoria-logs:v1.6.1-victorialogs
image: victoriametrics/victoria-logs:v1.7.0-victorialogs
command:
- "--storageDataPath=/vlogs"
- "--httpListenAddr=:9428"
@ -60,7 +60,7 @@ services:
# scraping, storing metrics and serve read requests.
victoriametrics:
container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.109.0
image: victoriametrics/victoria-metrics:v1.109.1
ports:
- 8428:8428
volumes:
@ -79,7 +79,7 @@ services:
# depending on the requested path.
vmauth:
container_name: vmauth
image: victoriametrics/vmauth:v1.109.0
image: victoriametrics/vmauth:v1.109.1
depends_on:
- "victoriametrics"
- "victorialogs"
@ -96,7 +96,7 @@ services:
# vmalert executes alerting and recording rules according to given rule type.
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.109.0
image: victoriametrics/vmalert:v1.109.1
depends_on:
- "vmauth"
- "alertmanager"

View file

@ -4,7 +4,7 @@ services:
# And forward them to --remoteWrite.url
vmagent:
container_name: vmagent
image: victoriametrics/vmagent:v1.109.0
image: victoriametrics/vmagent:v1.109.1
depends_on:
- "victoriametrics"
ports:
@ -22,7 +22,7 @@ services:
# storing metrics and serve read requests.
victoriametrics:
container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.109.0
image: victoriametrics/victoria-metrics:v1.109.1
ports:
- 8428:8428
- 8089:8089
@ -65,7 +65,7 @@ services:
# vmalert executes alerting and recording rules
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.109.0
image: victoriametrics/vmalert:v1.109.1
depends_on:
- "victoriametrics"
- "alertmanager"

View file

@ -2,7 +2,7 @@ apiVersion: 1
datasources:
- name: VictoriaMetrics - cluster
type: victoriametrics-datasource
type: victoriametrics-metrics-datasource
access: proxy
url: http://vmauth:8427/select/0/prometheus
isDefault: true

View file

@ -2,7 +2,7 @@ apiVersion: 1
datasources:
- name: VictoriaMetrics
type: victoriametrics-datasource
type: victoriametrics-metrics-datasource
access: proxy
url: http://victoriametrics:8428
isDefault: true

View file

@ -1,7 +1,7 @@
services:
# meta service will be ignored by compose
.victorialogs:
image: docker.io/victoriametrics/victoria-logs:v1.6.1-victorialogs
image: docker.io/victoriametrics/victoria-logs:v1.7.0-victorialogs
command:
- -storageDataPath=/vlogs
- -loggerFormat=json
@ -19,7 +19,7 @@ services:
retries: 10
dd-proxy:
image: docker.io/victoriametrics/vmauth:v1.109.0
image: docker.io/victoriametrics/vmauth:v1.109.1
restart: on-failure
volumes:
- ./:/etc/vmauth
@ -45,7 +45,7 @@ services:
replicas: 0
victoriametrics:
image: victoriametrics/victoria-metrics:latest
image: victoriametrics/victoria-metrics:v1.109.1
ports:
- '8428:8428'
command:

View file

@ -16,6 +16,6 @@ services:
- ./../../dashboards/vm/vmalert.json:/var/lib/grafana/dashboards/vmalert.json
- ./../../dashboards/vm/vmauth.json:/var/lib/grafana/dashboards/vmauth.json
environment:
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.10.3/victoriametrics-datasource-v0.10.3.zip;victoriametrics-datasource"
- "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-datasource"
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.12.1/victoriametrics-metrics-datasource-v0.12.1.zip;victoriametrics-metrics-datasource"
- "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-metrics-datasource"
restart: always

View file

@ -15,8 +15,8 @@ services:
- ./../../dashboards/vm/vmagent.json:/var/lib/grafana/dashboards/vmagent.json
- ./../../dashboards/vm/vmalert.json:/var/lib/grafana/dashboards/vmalert.json
environment:
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.10.3/victoriametrics-datasource-v0.10.3.zip;victoriametrics-datasource"
- "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-datasource"
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.12.1/victoriametrics-metrics-datasource-v0.12.1.zip;victoriametrics-metrics-datasource"
- "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-metrics-datasource"
networks:
- vm_net
restart: always

View file

@ -1,7 +1,7 @@
services:
vmagent:
container_name: vmagent
image: victoriametrics/vmagent:v1.109.0
image: victoriametrics/vmagent:v1.109.1
depends_on:
- "victoriametrics"
ports:
@ -18,7 +18,7 @@ services:
victoriametrics:
container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.109.0
image: victoriametrics/victoria-metrics:v1.109.1
ports:
- 8428:8428
volumes:
@ -50,7 +50,7 @@ services:
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.109.0
image: victoriametrics/vmalert:v1.109.1
depends_on:
- "victoriametrics"
ports:
@ -72,7 +72,7 @@ services:
restart: always
vmanomaly:
container_name: vmanomaly
image: victoriametrics/vmanomaly:v1.18.8
image: victoriametrics/vmanomaly:v1.19.1
depends_on:
- "victoriametrics"
ports:

View file

@ -1,21 +1,23 @@
schedulers:
periodic:
# class: "scheduler.periodic.PeriodicScheduler"
infer_every: "1m"
fit_every: "2m"
fit_window: "3h"
fit_every: "1h"
fit_window: "2d" # 2d-14d based on the presense of weekly seasonality in your data
models:
prophet:
class: "model.prophet.ProphetModel"
class: "prophet"
args:
interval_width: 0.98
weekly_seasonality: False # comment it if your data has weekly seasonality
yearly_seasonality: False
reader:
datasource_url: "http://victoriametrics:8428/"
sampling_period: "60s"
sampling_period: "60s"
queries:
node_cpu_rate: "sum(rate(node_cpu_seconds_total[5m])) by (mode, instance, job)"
node_cpu_rate:
expr: "sum(rate(node_cpu_seconds_total[5m])) by (mode, instance, job)"
writer:
datasource_url: "http://victoriametrics:8428/"
@ -24,4 +26,4 @@ writer:
monitoring:
pull: # Enable /metrics endpoint.
addr: "0.0.0.0"
port: 8490
port: 8490

View file

@ -18,7 +18,7 @@ services:
- vlogs
generator:
image: golang:1.23.4-alpine
image: golang:1.23.5-alpine
restart: always
working_dir: /go/src/app
volumes:

View file

@ -2,7 +2,7 @@ version: '3'
services:
generator:
image: golang:1.23.4-alpine
image: golang:1.23.5-alpine
restart: always
working_dir: /go/src/app
volumes:

View file

@ -3,7 +3,7 @@ version: "3"
services:
# Run `make package-victoria-logs` to build victoria-logs image
vlogs:
image: docker.io/victoriametrics/victoria-logs:v1.6.1-victorialogs
image: docker.io/victoriametrics/victoria-logs:v1.7.0-victorialogs
volumes:
- vlogs:/vlogs
ports:

View file

@ -761,8 +761,8 @@ Some workloads may need fine-grained resource usage limits. In these cases the f
- `-search.maxDeleteSeries` at `vmselect` limits the number of unique time
series that can be deleted by a single
[/api/v1/admin/tsdb/delete_series](https://docs.victoriametrics.com/url-examples/#apiv1admintsdbdelete_series)
call. Deleting too many time series may require big amount of CPU and memory
at `vmstorage` and this limit guards against unplanned resource usage spikes.
call. The duration is limited via the `-search.maxDeleteDuration` flag{{% available_from "#tip" %}}. Deleting too many time series may require a big
amount of CPU and memory at `vmstorage`, and this limit guards against unplanned resource usage spikes.
Also see [How to delete time series](#how-to-delete-time-series) section to
learn about different ways of deleting series.
- `-search.maxTagKeys` at `vmstorage` limits the number of items, which may be returned from
@ -1250,7 +1250,7 @@ Below is the output for `/path/to/vminsert -help`:
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
The maximum number of concurrent insert requests. Set higher value when clients send data over slow networks. Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage. See also -insert.maxQueueDuration (default 32)
The maximum number of concurrent insert requests. Set higher value when clients send data over slow networks. Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage. See also -insert.maxQueueDuration
-maxInsertRequestSize size
The maximum size in bytes of a single Prometheus remote_write API request
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
@ -1397,7 +1397,7 @@ Below is the output for `/path/to/vmselect -help`:
-clusternative.disableCompression
Whether to disable compression of the data sent to vmselect via -clusternativeListenAddr. This reduces CPU usage at the cost of higher network bandwidth usage
-clusternative.maxConcurrentRequests int
The maximum number of concurrent vmselect requests the server can process at -clusternativeListenAddr. It shouldn't be high, since a single request usually saturates a CPU core at the underlying vmstorage nodes, and many concurrently executed requests may require high amounts of memory. See also -clusternative.maxQueueDuration (default 32)
The maximum number of concurrent vmselect requests the server can process at -clusternativeListenAddr. It shouldn't be high, since a single request usually saturates a CPU core at the underlying vmstorage nodes, and many concurrently executed requests may require high amounts of memory. See also -clusternative.maxQueueDuration
-clusternative.maxQueueDuration duration
The maximum time the incoming query to -clusternativeListenAddr waits for execution when -clusternative.maxConcurrentRequests limit is reached (default 10s)
-clusternative.maxTagKeys int
@ -1425,7 +1425,7 @@ Below is the output for `/path/to/vmselect -help`:
-dedup.minScrapeInterval duration
Leave only the last sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication for details
-deleteAuthKey value
authKey for metrics' deletion via /prometheus/api/v1/admin/tsdb/delete_series and /graphite/tags/delSeries
authKey for metrics' deletion via /prometheus/api/v1/admin/tsdb/delete_series and /graphite/tags/delSeries. It could be passed via authKey query arg.
Flag value can be read from the given file when using -deleteAuthKey=file:///abs/path/to/file or -deleteAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -deleteAuthKey=http://host/path or -deleteAuthKey=https://host/path
-denyQueryTracing
Whether to disable the ability to trace queries. See https://docs.victoriametrics.com/#query-tracing
@ -1581,6 +1581,8 @@ Below is the output for `/path/to/vmselect -help`:
A larger value makes the pushed-down filter more complex but fewer time series will be returned. This flag is useful when selective label contains numerous values, for example `instance`, and storage resources are abundant. (default 100)
-search.maxConcurrentRequests int
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores, while many concurrently executed requests may require high amounts of memory. See also -search.maxQueueDuration and -search.maxMemoryPerQuery (default 16)
-search.maxDeleteDuration duration
The maximum duration for /api/v1/admin/tsdb/delete_series call (default 5m)
-search.maxDeleteSeries int
The maximum number of time series, which can be deleted using /api/v1/admin/tsdb/delete_series. This option allows limiting memory usage (default 1000000)
-search.maxExportDuration duration
@ -1651,7 +1653,7 @@ Below is the output for `/path/to/vmselect -help`:
-search.queryStats.minQueryDuration duration
The minimum duration for queries to track in query stats at /api/v1/status/top_queries. Queries with lower duration are ignored in query stats (default 1ms)
-search.resetCacheAuthKey value
Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call. It could be passed via authKey query arg.
Flag value can be read from the given file when using -search.resetCacheAuthKey=file:///abs/path/to/file or -search.resetCacheAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -search.resetCacheAuthKey=http://host/path or -search.resetCacheAuthKey=https://host/path
-search.resetRollupResultCacheOnStartup
Whether to reset rollup result cache on startup. See https://docs.victoriametrics.com/#rollup-result-cache . See also -search.disableCache
@ -1846,7 +1848,7 @@ Below is the output for `/path/to/vmstorage -help`:
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
The maximum number of concurrent insert requests. Set higher value when clients send data over slow networks. Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage. See also -insert.maxQueueDuration (default 32)
The maximum number of concurrent insert requests. Set higher value when clients send data over slow networks. Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage. See also -insert.maxQueueDuration
-memory.allowedBytes size
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from the OS page cache resulting in higher disk IO usage
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)
@ -1900,7 +1902,7 @@ Below is the output for `/path/to/vmstorage -help`:
-rpc.disableCompression
Whether to disable compression of the data sent from vmstorage to vmselect. This reduces CPU usage at the cost of higher network bandwidth usage
-search.maxConcurrentRequests int
The maximum number of concurrent vmselect requests the vmstorage can process at -vmselectAddr. It shouldn't be high, since a single request usually saturates a CPU core, and many concurrently executed requests may require high amounts of memory. See also -search.maxQueueDuration (default 32)
The maximum number of concurrent vmselect requests the vmstorage can process at -vmselectAddr. It shouldn't be high, since a single request usually saturates a CPU core, and many concurrently executed requests may require high amounts of memory. See also -search.maxQueueDuration
-search.maxQueueDuration duration
The maximum time the incoming vmselect request waits for execution when -search.maxConcurrentRequests limit is reached (default 10s)
-search.maxTagKeys int

View file

@ -55,8 +55,8 @@ under the current directory:
```sh
docker pull victoriametrics/victoria-metrics:latest
docker run -it --rm -v `pwd`/victoria-metrics-data:/victoria-metrics-data -p 8428:8428 victoriametrics/victoria-metrics:latest
docker pull victoriametrics/victoria-metrics:v1.109.1
docker run -it --rm -v `pwd`/victoria-metrics-data:/victoria-metrics-data -p 8428:8428 victoriametrics/victoria-metrics:v1.109.1
```

View file

@ -1478,8 +1478,12 @@ Each request to `/api/v1/import/csv` may contain arbitrary number of CSV lines.
Example for importing CSV data via `/api/v1/import/csv`:
```sh
# Import via POST data:
curl -d "GOOG,1.23,4.56,NYSE" 'http://localhost:8428/api/v1/import/csv?format=2:metric:ask,3:metric:bid,1:label:ticker,4:label:market'
curl -d "MSFT,3.21,1.67,NASDAQ" 'http://localhost:8428/api/v1/import/csv?format=2:metric:ask,3:metric:bid,1:label:ticker,4:label:market'
# Import via file upload:
curl -X POST 'http://localhost:8428/api/v1/import/csv?format=2:metric:ask,3:metric:bid,1:label:ticker,4:label:market' -T exported_data.csv
```
After that the data may be read via [/api/v1/export](#how-to-export-data-in-json-line-format) endpoint:
@ -1765,8 +1769,8 @@ By default, VictoriaMetrics is tuned for an optimal resource usage under typical
- `-search.maxDeleteSeries` limits the number of unique time series that can be
deleted by a single
[/api/v1/admin/tsdb/delete_series](https://docs.victoriametrics.com/url-examples/#apiv1admintsdbdelete_series)
call. Deleting too many time series may require big amount of CPU and memory
and this limit guards against unplanned resource usage spikes. Also see
call. The duration is limited via the `-search.maxDeleteDuration` flag{{% available_from "#tip" %}}. Deleting too many time series may require a big
amount of CPU and memory, and this limit guards against unplanned resource usage spikes. Also see
[How to delete time series](#how-to-delete-time-series) section to learn about
different ways of deleting series.
- `-search.maxTagKeys` limits the number of items, which may be returned from [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels).
@ -2845,7 +2849,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-dedup.minScrapeInterval duration
Leave only the last sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/#deduplication
-deleteAuthKey value
authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries
authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries. It could be passed via authKey query arg.
Flag value can be read from the given file when using -deleteAuthKey=file:///abs/path/to/file or -deleteAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -deleteAuthKey=http://host/path or -deleteAuthKey=https://host/path
-denyQueriesOutsideRetention
Whether to deny queries outside the configured -retentionPeriod. When set, then /api/v1/query_range would return '503 Service Unavailable' error for queries with 'from' value outside -retentionPeriod. This may be useful when multiple data sources with distinct retentions are hidden behind query-tee
@ -2983,7 +2987,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
The maximum number of concurrent insert requests. Set higher value when clients send data over slow networks. Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage. See also -insert.maxQueueDuration (default 32)
The maximum number of concurrent insert requests. Set higher value when clients send data over slow networks. Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage. See also -insert.maxQueueDuration
-maxIngestionRate int
The maximum number of samples vmsingle can receive per second. Data ingestion is paused when the limit is exceeded
By default there are no limits on samples ingestion rate.
@ -3199,6 +3203,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
Log queries with execution time exceeding this value. Zero disables slow query logging. See also -search.logQueryMemoryUsage (default 5s)
-search.maxConcurrentRequests int
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores, while many concurrently executed requests may require high amounts of memory. See also -search.maxQueueDuration and -search.maxMemoryPerQuery (default 16)
-search.maxDeleteDuration duration
The maximum duration for /api/v1/admin/tsdb/delete_series call (default 5m)
-search.maxDeleteSeries int
The maximum number of time series, which can be deleted using /api/v1/admin/tsdb/delete_series. This option allows limiting memory usage (default 1000000)
-search.maxExportDuration duration
The maximum duration for /api/v1/export call (default 720h0m0s)
-search.maxExportSeries int
@ -3271,7 +3279,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-search.queryStats.minQueryDuration duration
The minimum duration for queries to track in query stats at /api/v1/status/top_queries. Queries with lower duration are ignored in query stats (default 1ms)
-search.resetCacheAuthKey value
Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call. It could be passed via authKey query arg.
Flag value can be read from the given file when using -search.resetCacheAuthKey=file:///abs/path/to/file or -search.resetCacheAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -search.resetCacheAuthKey=http://host/path or -search.resetCacheAuthKey=https://host/path
-search.resetRollupResultCacheOnStartup
Whether to reset rollup result cache on startup. See https://docs.victoriametrics.com/#rollup-result-cache . See also -search.disableCache

View file

@ -135,20 +135,21 @@ The helm chart repository [https://github.com/VictoriaMetrics/helm-charts/](http
> Note that helm charts use their own versioning scheme. The version of the charts is not tied to the version of the VictoriaMetrics components.
Bump `tag` field in `values.yaml` with new release version.
Bump `appVersion` field in `Chart.yaml` with new release version.
Add new line to "Next release" section in `CHANGELOG.md` about version update (the line must always start with "`-`"). Do **NOT** change headers in `CHANGELOG.md`.
Bump `version` field in `Chart.yaml` with incremental semver version (based on the `CHANGELOG.md` analysis).
Do these updates to the following charts:
1. Update `vmagent` chart version in [`values.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-agent/values.yaml) and [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-agent/Chart.yaml)
1. Update `vmalert` chart version in [`values.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-alert/values.yaml) and [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-alert/Chart.yaml)
1. Update `vmauth` chart version in [`values.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-auth/values.yaml) and [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-auth/Chart.yaml)
1. Update `cluster` chart versions in [`values.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-cluster/values.yaml), bump version for `vmselect`, `vminsert` and `vmstorage` and [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-cluster/Chart.yaml)
1. Update `k8s-stack` chart versions in [`values.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-k8s-stack/values.yaml), bump version for `vmselect`, `vminsert`, `vmstorage`, `vmsingle`, `vmalert`, `vmagent` and [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-k8s-stack/Chart.yaml)
1. Update `single-node` chart version in [`values.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-single/values.yaml) and [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-single/Chart.yaml)
1. Update `vmgateway` chart version in [`values.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-gateway/values.yaml) and [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-gateway/Chart.yaml)
1. Update `vmagent` chart `version` and `appVersion` in [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-agent/Chart.yaml)
1. Update `vmalert` chart `version` and `appVersion` in [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-alert/Chart.yaml)
1. Update `vmauth` chart `version` and `appVersion` in [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-auth/Chart.yaml)
1. Update `cluster` chart `version` and `appVersion` in [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-cluster/Chart.yaml)
1. Update `k8s-stack` chart `version` and `appVersion` in [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-k8s-stack/Chart.yaml)
1. Update `single-node` chart `version` and `appVersion` in [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-single/Chart.yaml)
1. Update `vmgateway` chart `version` and `appVersion` in [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-gateway/Chart.yaml)
See commit example [here](https://github.com/VictoriaMetrics/helm-charts/commit/0ec3ab81795cb098d4741451b66886cc6d9be36c).
Once updated, run the following commands:

View file

@ -16,6 +16,22 @@ according to [these docs](https://docs.victoriametrics.com/victorialogs/quicksta
## tip
* FEATURE: [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/): add an ability to limit query concurrency for the `<q>` [query](https://docs.victoriametrics.com/victorialogs/logsql/#query-syntax) via `options(concurrency=N) <q>` syntax. This may be needed for reducing RAM and CPU usage at the cost of longer query execution times. See [these docs](https://docs.victoriametrics.com/victorialogs/logsql/#query-options) for details.
* FEATURE: add [`hash` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#hash-pipe) for calculating hashes over the selected log fields. This may be useful for splitting the selected logs into distinct buckets. For example, the following query splits `user_id` fields into 4 buckets with the help of `hash` pipe: `_time:5m | hash(user_id) as h | math h%4 as bucket | stats by (bucket) count()`.
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): reflect column settings for the table view in URL, so the table view can be shared via link. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7662).
## [v1.7.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.7.0-victorialogs)
Released at 2025-01-20
* FEATURE: [`join` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#join-pipe): add an ability to execute `INNER JOIN` by adding `inner` suffix to the `join` pipe.
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): updated the default graph type in the `hits panel` to bars with color fill. Removed options for `lines`, `stepped lines`, and `points`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7101).
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): reduce logs text size and improve styles in grouped view. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7479).
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add the ability to select fields for display instead of the `_msg` field. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7419).
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add various display configuration settings for the grouped view. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7815)
* BUGFIX: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): fix an issue where pressing the "Enter" key in the query editor did not execute the query. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8058).
## [v1.6.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.6.1-victorialogs)
Released at 2025-01-16

View file

@ -192,3 +192,93 @@ VictoriaLogs because per-block overhead translates to a single log record, and
this overhead is big.
The `2MB` limit is hardcoded and is unlikely to change.
## How to determine which log fields occupy the most of disk space?
[Run](https://docs.victoriametrics.com/victorialogs/querying/) the following [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) query
based on [`block_stats` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#block_stats-pipe):
```logsql
_time:1d
| block_stats
| stats by (field)
sum(values_bytes) as values_bytes,
sum(bloom_bytes) as bloom_bytes,
sum(rows) as rows
| math
(values_bytes+bloom_bytes) as total_bytes,
round(total_bytes / rows, 0.01) as bytes_per_row
| first 10 (total_bytes desc)
```
This query returns the top 10 [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
which occupy the most disk space across the logs ingested during the last day. The occupied disk space
is returned in the `total_bytes` field.
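Once the heaviest field is identified, its disk usage can be tracked on its own. Below is a minimal sketch of such a follow-up query; the `stacktrace` field name is only an assumption for illustration:

```logsql
_time:1d
| block_stats
| filter field:="stacktrace"
| stats
    sum(values_bytes) as values_bytes,
    sum(bloom_bytes) as bloom_bytes
| math (values_bytes + bloom_bytes) as total_bytes
```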
If you use [VictoriaLogs web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui)
or [Grafana plugin for VictoriaLogs](https://docs.victoriametrics.com/victorialogs/victorialogs-datasource/),
then make sure the selected time range covers the last day. Otherwise, the query above returns
results for the intersection of the last day and the selected time range.
See [why the log field occupies a lot of disk space](#why-the-log-field-occupies-a-lot-of-disk-space).
## Why the log field occupies a lot of disk space?
See [how to determine which log fields occupy the most of disk space](#how-to-determine-which-log-fields-occupy-the-most-of-disk-space).
A log field may occupy a lot of disk space if it contains values with many unique parts (aka "random" values).
Such values do not compress well, so they occupy a lot of disk space. If you want to reduce the amount of occupied disk space,
then either remove the given log field from the [ingested](https://docs.victoriametrics.com/victorialogs/data-ingestion/) logs
or remove the unique parts from the log field before ingesting it into VictoriaLogs.
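To verify whether a suspicious field really consists of mostly unique values, the number of distinct values can be compared with the number of logs containing the field. A sketch, assuming a hypothetical `trace_id` field:

```logsql
_time:1d trace_id:*
| stats count() as logs, count_uniq(trace_id) as unique_values
```

If `unique_values` is close to `logs`, the field is effectively random and is a good candidate for dropping or trimming before ingestion.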
## How to detect the most frequently seen logs?
Use [`collapse_nums` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#collapse_nums-pipe).
For example, the following [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) query
returns the top 10 most frequently seen [log messages](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field) over the last hour:
```logsql
_time:1h | collapse_nums prettify | top 10 (_msg)
```
Add the [`_stream` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to the `top (...)` list in order to get the top 10 most frequently seen logs together with the `_stream` field:
```logsql
_time:1h | collapse_nums prettify | top 10 (_stream, _msg)
```
## How to get field names seen in the selected logs?
Use [`field_names` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#field_names-pipe).
For example, the following [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) query
returns all the [field names](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) seen
across all the logs during the last hour:
```logsql
_time:1h | field_names | sort by (name)
```
The `hits` field in the returned results contains an estimated number of logs with the given log field.
## How to get unique field values seen in the selected logs?
Use [`field_values` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#field_values-pipe).
For example, the following [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) query
returns all the values for the `level` field across all the logs seen during the last hour:
```logsql
_time:1h | field_values level
```
The `hits` field in the returned results contains an estimated number of logs with the given value for the `level` field.
## How to get the number of unique log streams on the given time range?
Use [`count_uniq` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#count_uniq-pipe)
over [`_stream`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) field.
For example, the following [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) query
returns the number of unique [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields)
across all the logs over the last day:
```logsql
_time:1d | count_uniq(_stream)
```

View file

@ -9,7 +9,8 @@ aliases:
- /VictoriaLogs/LogsQL.html
---
LogsQL is a simple yet powerful query language for [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/).
See [examples](https://docs.victoriametrics.com/victorialogs/logsql-examples/) and [tutorial](#logsql-tutorial)
See [examples](https://docs.victoriametrics.com/victorialogs/logsql-examples/), [LogsQL tutorial](#logsql-tutorial)
and [SQL to LogsQL conversion guide](https://docs.victoriametrics.com/victorialogs/sql-to-logsql/)
in order to get a feel for the language.
LogsQL provides the following features:
@ -1329,8 +1330,9 @@ LogsQL supports the following pipes:
- [`first`](#first-pipe) returns the first N logs after sorting them by the given [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`format`](#format-pipe) formats output field from input [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`join`](#join-pipe) joins query results by the given [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`hash`](#hash-pipe) returns the hash over the given [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) value.
- [`last`](#last-pipe) returns the last N logs after sorting them by the given [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`len`](#len-pipe) calculates byte length of the given [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) value.
- [`len`](#len-pipe) returns byte length of the given [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) value.
- [`limit`](#limit-pipe) limits the number of selected logs.
- [`math`](#math-pipe) performs mathematical calculations over [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- [`offset`](#offset-pipe) skips the given number of selected logs.
@ -1382,7 +1384,8 @@ See also:
### collapse_nums pipe
`| collapse_nums at <field>` pipe replaces all the decimal and hexadecimal numbers at the given [`<field>`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) with `<N>` placeholder.
`<q> | collapse_nums at <field>` pipe replaces all the decimal and hexadecimal numbers at the given [`<field>`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
returned by the `<q>` [query](#query-syntax) with `<N>` placeholder.
For example, if the `_msg` field contains `2024-10-20T12:34:56Z request duration 1.34s`, then it is replaced with `<N>-<N>-<N>T<N>:<N>:<N>Z request duration <N>.<N>s` by the following query:
```logsql
@ -1432,7 +1435,7 @@ See also:
#### Conditional collapse_nums
If the [`collapse_nums` pipe](#collapse_nums-pipe) mustn't be applied to every [log entry](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
If the [`collapse_nums` pipe](#collapse_nums-pipe) must be applied only to some [log entries](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
then add `if (<filters>)` after `collapse_nums`.
The `<filters>` can contain arbitrary [filters](#filters). For example, the following query collapses nums in the `foo` field only if `user_type` field equals to `admin`:
@ -1485,7 +1488,8 @@ See also:
### drop_empty_fields pipe
`| drop_empty_fields` pipe drops [fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) with empty values. It also skips log entries with zero non-empty fields.
`<q> | drop_empty_fields` pipe drops [fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) with empty values from results returned by `<q>` [query](#query-syntax).
It also skips log entries with zero non-empty fields.
For example, the following query drops possible empty `email` field generated by [`extract` pipe](#extract-pipe) if the `foo` field doesn't contain email:
@ -1501,10 +1505,11 @@ See also:
### extract pipe
`| extract "pattern" from field_name` [pipe](#pipes) allows extracting arbitrary text into output fields according to the [`pattern`](#format-for-extract-pipe-pattern) from the given
[`field_name`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model). Existing log fields remain unchanged after the `| extract ...` pipe.
`<q> | extract "pattern" from field_name` [pipe](#pipes) extracts text into output fields according to the [`pattern`](#format-for-extract-pipe-pattern) from the given
[`field_name`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) returned by `<q>` [query](#query-syntax).
Existing log fields remain unchanged after the `| extract ...` pipe.
`| extract ...` can be useful for extracting additional fields needed for further data processing with other pipes such as [`stats` pipe](#stats-pipe) or [`sort` pipe](#sort-pipe).
`extract` pipe can be useful for extracting additional fields needed for further data processing with other pipes such as [`stats` pipe](#stats-pipe) or [`sort` pipe](#sort-pipe).
For example, the following query selects logs with the `error` [word](#word) for the last day,
extracts ip address from [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field) into `ip` field and then calculates top 10 ip addresses
@ -1517,7 +1522,7 @@ _time:1d error | extract "ip=<ip> " from _msg | top 10 (ip)
It is expected that `_msg` field contains `ip=...` substring ending with space. For example, `error ip=1.2.3.4 from user_id=42`.
If there is no such substring in the current `_msg` field, then the `ip` output field will be empty.
If the `| extract ...` pipe is applied to [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field), then the `from _msg` part can be omitted.
If the `extract` pipe is applied to [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field), then the `from _msg` part can be omitted.
For example, the following query is equivalent to the previous one:
```logsql
@ -1643,8 +1648,8 @@ _time:5m | extract "ip=<ip> " keep_original_fields
### extract_regexp pipe
`| extract_regexp "pattern" from field_name` [pipe](#pipes) extracts substrings from the [`field_name` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
according to the provided `pattern`, and stores them into field names according to the named fields inside the `pattern`.
`<q> | extract_regexp "pattern" from field_name` [pipe](#pipes) extracts substrings from the [`field_name` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
returned from `<q>` [query](#query-syntax) according to the provided `pattern`, and stores them into field names according to the named fields inside the `pattern`.
The `pattern` must contain [RE2 regular expression](https://github.com/google/re2/wiki/Syntax) with named fields (aka capturing groups) in the form `(?P<capture_field_name>...)`.
Matching substrings are stored to the given `capture_field_name` [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
For example, the following query extracts ipv4 addresses from [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field)
@ -1705,8 +1710,8 @@ _time:5m | extract_regexp "ip=(?P<ip>([0-9]+[.]){3}[0-9]+)" keep_original_fields
### facets pipe
`| facets` [pipe](#pipes) returns the most frequent values per every seen [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
with the number of hits across the selected logs.
`<q> | facets` [pipe](#pipes) returns the most frequent values per every seen [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
returned by `<q>` [query](#query-syntax). It also returns an estimated number of hits per every returned `field=value` pair.
For example, the following query returns the most frequent values per every seen log field across logs with the `error` [word](#word) over the last hour:
@ -1751,8 +1756,8 @@ See also:
### field_names pipe
`| field_names` [pipe](#pipes) returns all the names of [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
with an estimated number of logs per each field name.
`<q> | field_names` [pipe](#pipes) returns all the names of [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
with an estimated number of logs per each field name returned from `<q>` [query](#query-syntax).
For example, the following query returns all the field names with the number of matching logs over the last 5 minutes:
@ -1770,8 +1775,8 @@ See also:
### field_values pipe
`| field_values field_name` [pipe](#pipes) returns all the values for the given [`field_name` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
with the number of logs per each value.
`<q> | field_values field_name` [pipe](#pipes) returns all the values for the given [`field_name` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
with the number of logs per each value returned from `<q>` [query](#query-syntax).
For example, the following query returns all the values with the number of matching logs for the field `level` over logs for the last 5 minutes:
```logsql
@ -1818,7 +1823,7 @@ See also:
### filter pipe
The `| filter ...` [pipe](#pipes) allows filtering the selected logs entries with arbitrary [filters](#filters).
The `<q> | filter ...` [pipe](#pipes) filters logs returned by `<q>` [query](#query-syntax) with the given [filter](#filters).
For example, the following query returns `host` [field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) values
if the number of log messages with the `error` [word](#word) for them over the last hour exceeds `1_000`:
@ -1847,7 +1852,8 @@ See also:
### first pipe
`| first N by (fields)` [pipe](#pipes) returns the first `N` logs after sorting them by the given [`fields`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
`<q> | first N by (fields)` [pipe](#pipes) returns the first `N` logs from `<q>` [query](#query-syntax) after sorting them
by the given [`fields`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
For example, the following query returns the first 10 logs with the smallest value of `request_duration` [field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
over the last 5 minutes:
@ -1872,8 +1878,8 @@ See also:
### format pipe
`| format "pattern" as result_field` [pipe](#pipes) combines [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
according to the `pattern` and stores it to the `result_field`.
`<q> | format "pattern" as result_field` [pipe](#pipes) combines [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
from `<q>` [query](#query-syntax) results according to the `pattern` and stores it into `result_field`.
For example, the following query stores `request from <ip>:<port>` text into [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field),
by substituting `<ip>` and `<port>` with the corresponding [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) values:
@ -1970,7 +1976,7 @@ See also:
#### Conditional format
If the [`format` pipe](#format-pipe) mustn't be applied to every [log entry](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
If the [`format` pipe](#format-pipe) must be applied only to some [log entries](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
then add `if (<filters>)` just after the `format` word.
The `<filters>` can contain arbitrary [filters](#filters). For example, the following query stores the formatted result to `message` field
only if `ip` and `host` [fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) aren't empty:
@ -1981,13 +1987,13 @@ _time:5m | format if (ip:* and host:*) "request from <ip>:<host>" as message
### join pipe
The `| join by (<fields>) (<query>)` [pipe](#pipes) joins the current results with the `<query>` results by the given set of comma-separated `<fields>`.
The `<q1> | join by (<fields>) (<q2>)` [pipe](#pipes) joins `<q1>` [query](#query-syntax) results with the `<q2>` results by the given set of comma-separated `<fields>`.
This pipe works in the following way:
1. It executes the `<query>` and remembers its' results. It may contain arbitrary [LogsQL query](https://docs.victoriametrics.com/victorialogs/logsql/).
1. For each input row it searches for matching rows in the `<query>` results by the given `<fields>`.
1. If the `<query>` results have no matching rows, then the input row is sent to the output as is.
1. If the `<query>` results has matching rows, then for each matching row the input row is extended
1. It executes the `<q2>` [query](#query-syntax) and remembers its results.
1. For each input row from `<q1>` it searches for matching rows in the `<q2>` results by the given `<fields>`.
1. If the `<q2>` results have no matching rows, then the input row is sent to the output as is.
1. If the `<q2>` results have matching rows, then for each matching row the input row is extended
with new fields seen at the matching row, and the result is sent to the output.
This logic is similar to `LEFT JOIN` in SQL. For example, the following query returns the number of per-user logs across two applications - `app1` and `app2` (
@ -2000,15 +2006,14 @@ _time:1d {app="app1"} | stats by (user) count() app1_hits
)
```
If you need results similar to `JOIN` in SQL, then apply [`filter` pipe](#filter-pipe) with [`*` filter](https://docs.victoriametrics.com/victorialogs/logsql/#any-value-filter)
on fields, which must be non-empty after the join. For example, the following query returns stats only for users, which exist in both applications `app1` and `app2`:
If you need results similar to `INNER JOIN` in SQL, then add `inner` suffix after the `join` pipe.
For example, the following query returns stats only for users that exist in both applications `app1` and `app2`:
```logsql
_time:1d {app="app1"} | stats by (user) count() app1_hits
| join by (user) (
_time:1d {app="app2"} | stats by (user) count() app2_hits
)
| filter app2_hits:*
) inner
```
It is possible to add a prefix to all the field names returned by the `<query>` by specifying the needed prefix after the `<query>`.
@ -2034,9 +2039,26 @@ See also:
- [conditional `stats`](https://docs.victoriametrics.com/victorialogs/logsql/#stats-with-additional-filters)
- [`filter` pipe](#filter-pipe)
### hash pipe
`<q> | hash(field) as result_field` calculates the hash value for the given [`field`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
and stores it into the `result_field` for every log entry returned by `<q>` [query](#query-syntax).
For example, the following query calculates the hash value over `user_id` field and stores it into `user_id_hash` field, across logs for the last 5 minutes:
```logsql
_time:5m | hash(user_id) as user_id_hash
```
See also:
- [`math` pipe](#math-pipe)
- [`filter` pipe](#filter-pipe)
### last pipe
`| last N by (fields)` [pipe](#pipes) returns the last `N` logs after sorting them by the given [`fields`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
`<q> | last N by (fields)` [pipe](#pipes) returns the last `N` logs from `<q>` [query](#query-syntax) after sorting them
by the given [`fields`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
For example, the following query returns the last 10 logs with the biggest value of `request_duration` [field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
over the last 5 minutes:
@ -2060,7 +2082,9 @@ See also:
### len pipe
The `| len(field) as result` [pipe](#pipes) stores byte length of the given `field` value into the `result` field.
`<q> | len(field) as result` [pipe](#pipes) stores byte length of the given `field` value into the `result` field
across all the logs returned by `<q>` [query](#query-syntax).
For example, the following query shows top 5 log entries with the maximum byte length of `_msg` field across
logs for the last 5 minutes:
@ -2102,8 +2126,8 @@ See also:
### math pipe
`| math ...` [pipe](#pipes) performs mathematical calculations over [numeric values](#numeric-values) stored in [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
It has the following format:
`<q> | math ...` [pipe](#pipes) performs mathematical calculations over [numeric values](#numeric-values) of [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
returned by `<q>` [query](#query-syntax). It has the following format:
```
| math
@ -2198,8 +2222,8 @@ See also:
### pack_json pipe
`| pack_json as field_name` [pipe](#pipes) packs all [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) into JSON object
and stores it as a string in the given `field_name`.
`<q> | pack_json as field_name` [pipe](#pipes) packs all the [fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) of every log
entry returned by `<q>` [query](#query-syntax) into JSON object and stores it as a string in the given `field_name`.
For example, the following query packs all the fields into JSON object and stores it into [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field)
for logs over the last 5 minutes:
@ -2237,8 +2261,8 @@ See also:
### pack_logfmt pipe
`| pack_logfmt as field_name` [pipe](#pipes) packs all [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) into [logfmt](https://brandur.org/logfmt) message
and stores it as a string in the given `field_name`.
`<q> | pack_logfmt as field_name` [pipe](#pipes) packs all the [fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) per every log entry
returned by `<q>` [query](#query-syntax) into [logfmt](https://brandur.org/logfmt) message and stores it as a string in the given `field_name`.
For example, the following query packs all the fields into [logfmt](https://brandur.org/logfmt) message and stores it
into [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field) for logs over the last 5 minutes:
@ -2300,8 +2324,8 @@ See also:
### replace pipe
`| replace ("old", "new") at field` [pipe](#pipes) replaces all the occurrences of the `old` substring with the `new` substring
in the given [`field`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
`<q> | replace ("old", "new") at field` [pipe](#pipes) replaces all the occurrences of the `old` substring with the `new` substring
in the given [`field`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) over all the logs returned by `<q>` [query](#query-syntax).
For example, the following query replaces all the `secret-password` substrings with `***` in the [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field)
for logs over the last 5 minutes:
@ -2337,7 +2361,7 @@ See also:
#### Conditional replace
If the [`replace` pipe](#replace-pipe) mustn't be applied to every [log entry](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
If the [`replace` pipe](#replace-pipe) must be applied only to some [log entries](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
then add `if (<filters>)` after `replace`.
The `<filters>` can contain arbitrary [filters](#filters). For example, the following query replaces `secret` with `***` in the `password` field
only if `user_type` field equals to `admin`:
@ -2348,8 +2372,8 @@ _time:5m | replace if (user_type:=admin) ("secret", "***") at password
### replace_regexp pipe
`| replace_regexp ("regexp", "replacement") at field` [pipe](#pipes) replaces all the substrings matching the given `regexp` with the given `replacement`
in the given [`field`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
`<q> | replace_regexp ("regexp", "replacement") at field` [pipe](#pipes) replaces all the substrings matching the given `regexp` with the given `replacement`
in the given [`field`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) over all the logs returned by `<q>` [query](#query-syntax).
The `regexp` must contain regular expression with [RE2 syntax](https://github.com/google/re2/wiki/Syntax).
The `replacement` may contain `$N` or `${N}` placeholders, which are substituted with the `N-th` capturing group in the `regexp`.
@ -2390,7 +2414,7 @@ See also:
#### Conditional replace_regexp
If the [`replace_regexp` pipe](#replace_regexp-pipe) mustn't be applied to every [log entry](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
If the [`replace_regexp` pipe](#replace_regexp-pipe) must be applied only to some [log entries](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
then add `if (<filters>)` after `replace_regexp`.
The `<filters>` can contain arbitrary [filters](#filters). For example, the following query replaces `password: ...` substrings ending with whitespace
with `***` in the `foo` field only if `user_type` field equals to `admin`:
@ -2401,8 +2425,8 @@ _time:5m | replace_regexp if (user_type:=admin) ("password: [^ ]+", "") at foo
### sort pipe
By default logs are selected in arbitrary order because of performance reasons. If logs must be sorted, then `| sort by (field1, ..., fieldN)` [pipe](#pipes) can be used.
The returned logs are sorted by the given [fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
By default logs are selected in arbitrary order for performance reasons. If logs must be sorted, then the `<q> | sort by (field1, ..., fieldN)` [pipe](#pipes) can be used
for sorting logs returned by `<q>` [query](#query-syntax) by the given [fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
using [natural sorting](https://en.wikipedia.org/wiki/Natural_sort_order).
For example, the following query returns logs for the last 5 minutes sorted by [`_stream`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields)
@ -2495,8 +2519,8 @@ See also:
### stats pipe
`| stats ...` pipe allows calculating various stats over the selected logs. For example, the following LogsQL query
uses [`count` stats function](#count-stats) for calculating the number of logs for the last 5 minutes:
`<q> | stats ...` pipe calculates various stats over the logs returned by `<q>` [query](#query-syntax).
For example, the following LogsQL query uses [`count` stats function](#count-stats) for calculating the number of logs for the last 5 minutes:
```logsql
_time:5m | stats count() as logs_total
@ -2538,13 +2562,13 @@ _time:5m | count(), count_uniq(_stream)
See also:
- [stats pipe functions](#stats-pipe-functions)
- [stats by fields](#stats-by-fields)
- [stats by time buckets](#stats-by-time-buckets)
- [stats by time buckets with timezone offset](#stats-by-time-buckets-with-timezone-offset)
- [stats by field buckets](#stats-by-field-buckets)
- [stats by IPv4 buckets](#stats-by-ipv4-buckets)
- [stats with additional filters](#stats-with-additional-filters)
- [stats pipe functions](#stats-pipe-functions)
- [`math` pipe](#math-pipe)
- [`sort` pipe](#sort-pipe)
- [`uniq` pipe](#uniq-pipe)
@ -2557,13 +2581,14 @@ See also:
The following LogsQL syntax can be used for calculating independent stats per group of log fields:
```logsql
... | stats by (field1, ..., fieldM)
<q> | stats by (field1, ..., fieldM)
stats_func1(...) as result_name1,
...
stats_funcN(...) as result_nameN
```
This calculates `stats_func*` per each `(field1, ..., fieldM)` group of [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
This calculates `stats_func*` per each `(field1, ..., fieldM)` group of [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
seen in the logs returned by `<q>` [query](#query-syntax).
For example, the following query calculates the number of logs and unique ip addresses over the last 5 minutes,
grouped by `(host, path)` fields:
@ -2580,6 +2605,8 @@ _time:5m | stats (host, path) count() logs_total, count_uniq(ip) ips_total
See also:
- [`stats` pipe](#stats-pipe)
- [`stats` pipe functions](#stats-pipe-functions)
- [`row_min`](#row_min-stats)
- [`row_max`](#row_max-stats)
- [`row_any`](#row_any-stats)
@ -2589,15 +2616,15 @@ See also:
The following syntax can be used for calculating stats grouped by time buckets:
```logsql
... | stats by (_time:step)
<q> | stats by (_time:step)
stats_func1(...) as result_name1,
...
stats_funcN(...) as result_nameN
```
This calculates `stats_func*` per each `step` of [`_time`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field) field.
The `step` can have any [duration value](#duration-values). For example, the following LogsQL query returns per-minute number of logs and unique ip addresses
over the last 5 minutes:
This calculates `stats_func*` per each `step` of the [`_time`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field) field
across logs returned by `<q>` [query](#query-syntax). The `step` can have any [duration value](#duration-values).
For example, the following LogsQL query returns per-minute number of logs and unique ip addresses over the last 5 minutes:
```
_time:5m | stats by (_time:1m) count() logs_total, count_uniq(ip) ips_total
@ -2616,6 +2643,12 @@ Additionally, the following `step` values are supported:
- `month` - equals to one month. It properly takes into account the number of days per each month.
- `year` - equals to one year. It properly takes into account the number of days per each year.
See also:
- [`stats` pipe](#stats-pipe)
- [`stats` pipe functions](#stats-pipe-functions)
- [`math` pipe](#math-pipe)
#### Stats by time buckets with timezone offset
VictoriaLogs stores [`_time`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field) values as [Unix time](https://en.wikipedia.org/wiki/Unix_time)
@ -2623,7 +2656,7 @@ in nanoseconds. This time corresponds to [UTC](https://en.wikipedia.org/wiki/Coo
grouped by days or weeks at non-UTC timezone. This is possible with the following syntax:
```logsql
... | stats by (_time:step offset timezone_offset) ...
<q> | stats by (_time:step offset timezone_offset) ...
```
For example, the following query calculates per-day number of logs over the last week, in `UTC+02:00` [time zone](https://en.wikipedia.org/wiki/Time_zone):
@ -2632,9 +2665,16 @@ For example, the following query calculates per-day number of logs over the last
_time:1w | stats by (_time:1d offset 2h) count() logs_total
```
See also:
- [`stats` pipe](#stats-pipe)
- [`stats` pipe functions](#stats-pipe-functions)
- [`math` pipe](#math-pipe)
#### Stats by field buckets
Every log field inside `| stats by (...)` can be bucketed in the same way at `_time` field in [this example](#stats-by-time-buckets).
Every log field inside `<q> | stats by (...)` can be bucketed in the same way as `_time` field in [this example](#stats-by-time-buckets).
Any [numeric value](#numeric-values) can be used as `step` value for the bucket. For example, the following query calculates
the number of requests for the last hour, bucketed by 10KB of `request_size_bytes` [field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model):
@ -2642,6 +2682,10 @@ the number of requests for the last hour, bucketed by 10KB of `request_size_byte
_time:1h | stats by (request_size_bytes:10KB) count() requests
```
- [`stats` pipe](#stats-pipe)
- [`stats` pipe functions](#stats-pipe-functions)
- [`math` pipe](#math-pipe)
#### Stats by IPv4 buckets
Stats can be bucketed by [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) containing [IPv4 addresses](https://en.wikipedia.org/wiki/IP_address)
@ -2652,9 +2696,14 @@ extracted from the `ip` [log field](https://docs.victoriametrics.com/victorialog
_time:5m | stats by (ip:/24) count() requests_per_subnet
```
- [`stats` pipe](#stats-pipe)
- [`stats` pipe functions](#stats-pipe-functions)
- [`math` pipe](#math-pipe)
- [`ipv4_range` filter](#ipv4-range-filter)
#### Stats with additional filters
Sometimes it is needed to calculate stats on different subsets of matching logs. This can be done by inserting `if (<any_filters>)` condition
Sometimes it is needed to calculate [stats](#stats-pipe) on different subsets of matching logs. This can be done by inserting `if (<any_filters>)` condition
between [stats function](#stats-pipe-functions) and `result_name`, where `any_filter` can contain arbitrary [filters](#filters).
For example, the following query calculates individually the number of [log messages](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field)
with `GET`, `POST` and `PUT` [words](#word), in addition to the total number of logs over the last 5 minutes:
@ -2671,14 +2720,15 @@ If zero input rows match the given `if (...)` filter, then zero result is return
See also:
- [`join` pipe](#join-pipe)
- [`stats` pipe](#stats-pipe)
- [`stats` pipe functions](#stats-pipe-functions)
- [`join` pipe](#join-pipe)
### stream_context pipe
`| stream_context ...` [pipe](#pipes) allows selecting surrounding logs for the matching logs in [logs stream](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields)
in the way similar to `grep -A` / `grep -B`. The returned log chunks are delimited with `---` [log message](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field)
for easier investigation.
`<q> | stream_context ...` [pipe](#pipes) allows selecting surrounding logs in [logs stream](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields)
across the logs returned by `<q>` [query](#query-syntax) in the way similar to `grep -A` / `grep -B`.
The returned log chunks are delimited with `---` [log message](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field) for easier investigation.
For example, the following query returns up to 10 additional logs after every log message with the `panic` [word](#word) across all the logs for the last 5 minutes:
@ -2714,8 +2764,8 @@ See also:
### top pipe
`| top N by (field1, ..., fieldN)` [pipe](#pipes) returns top `N` sets for `(field1, ..., fieldN)` [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
with the maximum number of matching log entries.
`<q> | top N by (field1, ..., fieldN)` [pipe](#pipes) returns top `N` sets for `(field1, ..., fieldN)` [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
with the maximum number of matching log entries across logs returned by `<q>` [query](#query-syntax).
For example, the following query returns top 7 [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields)
with the maximum number of log entries over the last 5 minutes. The number of entries is returned in the `hits` field:
@ -2769,8 +2819,8 @@ See also:
### union pipe
`q1 | union (q2)` [pipe](#pipes) returns results of `q1` followed by results of `q2`. It works similar to `UNION ALL` in SQL.
`q1` and `q2` may contain arbitrary [LogsQL queries](#logsql-tutorial).
`<q1> | union (<q2>)` [pipe](#pipes) returns results of `<q1>` [query](#query-syntax) followed by results of `<q2>` [query](#query-syntax).
It works similarly to `UNION ALL` in SQL. `<q1>` and `<q2>` may contain arbitrary [LogsQL queries](#logsql-tutorial).
For example, the following query returns logs with `error` [word](#word) for the last 5 minutes, plus logs with `panic` word for the last hour:
```logsql
@ -2784,7 +2834,8 @@ See also:
### uniq pipe
`| uniq ...` [pipe](#pipes) returns unique results over the selected logs. For example, the following LogsQL query
`<q> | uniq by (field1, ..., fieldN)` [pipe](#pipes) returns unique values for the given [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
over the logs returned by `<q>` [query](#query-syntax). For example, the following LogsQL query
returns unique values for `ip` [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
over logs for the last 5 minutes:
@ -2831,8 +2882,9 @@ See also:
### unpack_json pipe
`| unpack_json from field_name` [pipe](#pipes) unpacks `{"k1":"v1", ..., "kN":"vN"}` JSON from the given input [`field_name`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
into `k1`, ... `kN` output field names with the corresponding `v1`, ..., `vN` values. It overrides existing fields with names from the `k1`, ..., `kN` list. Other fields remain untouched.
`<q> | unpack_json from field_name` [pipe](#pipes) unpacks `{"k1":"v1", ..., "kN":"vN"}` JSON from the given [`field_name`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
of `<q>` [query](#query-syntax) results into `k1`, ... `kN` output field names with the corresponding `v1`, ..., `vN` values.
It overrides existing fields with names from the `k1`, ..., `kN` list. Other fields remain untouched.
Nested JSON is unpacked according to the rules defined [here](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
@ -2906,7 +2958,7 @@ See also:
#### Conditional unpack_json
If the [`unpack_json` pipe](#unpack_json-pipe) mustn't be applied to every [log entry](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
If the [`unpack_json` pipe](#unpack_json-pipe) must be applied only to some [log entries](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
then add `if (<filters>)` after `unpack_json`.
The `<filters>` can contain arbitrary [filters](#filters). For example, the following query unpacks JSON fields from `foo` field only if `ip` field in the current log entry isn't set or empty:
@ -2916,8 +2968,8 @@ _time:5m | unpack_json if (ip:"") from foo
### unpack_logfmt pipe
`| unpack_logfmt from field_name` [pipe](#pipes) unpacks `k1=v1 ... kN=vN` [logfmt](https://brandur.org/logfmt) fields
from the given [`field_name`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) into `k1`, ... `kN` field names
`<q> | unpack_logfmt from field_name` [pipe](#pipes) unpacks `k1=v1 ... kN=vN` [logfmt](https://brandur.org/logfmt) fields
from the given [`field_name`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) of `<q>` [query](#query-syntax) results into `k1`, ... `kN` field names
with the corresponding `v1`, ..., `vN` values. It overrides existing fields with names from the `k1`, ..., `kN` list. Other fields remain untouched.
For example, the following query unpacks [logfmt](https://brandur.org/logfmt) fields from the [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field)
@ -2989,7 +3041,7 @@ See also:
#### Conditional unpack_logfmt
If the [`unpack_logfmt` pipe](#unpack_logfmt-pipe) mustn't be applied to every [log entry](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
If the [`unpack_logfmt` pipe](#unpack_logfmt-pipe) must be applied only to some [log entries](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
then add `if (<filters>)` after `unpack_logfmt`.
The `<filters>` can contain arbitrary [filters](#filters). For example, the following query unpacks logfmt fields from `foo` field
only if `ip` field in the current log entry isn't set or empty:
@ -3000,8 +3052,9 @@ _time:5m | unpack_logfmt if (ip:"") from foo
### unpack_syslog pipe
`| unpack_syslog from field_name` [pipe](#pipes) unpacks [syslog](https://en.wikipedia.org/wiki/Syslog) message
from the given [`field_name`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model). It understands the following Syslog formats:
`<q> | unpack_syslog from field_name` [pipe](#pipes) unpacks [syslog](https://en.wikipedia.org/wiki/Syslog) message
from the given [`field_name`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) of `<q>` [query](#query-syntax) results.
It understands the following Syslog formats:
- [RFC3164](https://datatracker.ietf.org/doc/html/rfc3164) aka `<PRI>MMM DD hh:mm:ss HOSTNAME APP-NAME[PROCID]: MESSAGE`
- [RFC5424](https://datatracker.ietf.org/doc/html/rfc5424) aka `<PRI>1 TIMESTAMP HOSTNAME APP-NAME PROCID MSGID [STRUCTURED-DATA] MESSAGE`
@ -3079,7 +3132,7 @@ See also:
#### Conditional unpack_syslog
If the [`unpack_syslog` pipe](#unpack_syslog-pipe) mustn't be applied to every [log entry](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
If the [`unpack_syslog` pipe](#unpack_syslog-pipe) must be applied only to some [log entries](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
then add `if (<filters>)` after `unpack_syslog`.
The `<filters>` can contain arbitrary [filters](#filters). For example, the following query unpacks syslog message fields from `foo` field
only if `hostname` field in the current log entry isn't set or empty:
@ -3090,8 +3143,8 @@ _time:5m | unpack_syslog if (hostname:"") from foo
### unroll pipe
`| unroll by (field1, ..., fieldN)` [pipe](#pipes) can be used for unrolling JSON arrays from `field1`, `fieldN`
[log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) into separate rows.
`<q> | unroll by (field1, ..., fieldN)` [pipe](#pipes) can be used for unrolling JSON arrays from `field1`, ..., `fieldN`
[log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) of `<q>` [query](#query-syntax) results into separate rows.
For example, the following query unrolls `timestamp` and `value` [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) from logs for the last 5 minutes:
@ -3108,7 +3161,7 @@ See also:
#### Conditional unroll
If the [`unroll` pipe](#unroll-pipe) mustn't be applied to every [log entry](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
If the [`unroll` pipe](#unroll-pipe) must be applied only to some [log entries](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model),
then add `if (<filters>)` after `unroll`.
The `<filters>` can contain arbitrary [filters](#filters). For example, the following query unrolls `value` field only if `value_type` field equals to `json_array`:
@ -3689,9 +3742,27 @@ Internally duration values are converted into nanoseconds.
- It is highly recommended to specify a [time filter](#time-filter) in order to narrow down the search to a specific time range.
- It is highly recommended to specify a [stream filter](#stream-filter) in order to narrow down the search
to specific [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields).
- It is recommended to specify the [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) you need in query results
with the [`fields` pipe](#fields-pipe) if the selected log entries contain a big number of fields which aren't interesting to you.
This saves disk read IO and CPU time needed for reading and unpacking all the log fields from disk.
- Move faster filters such as [word filter](#word-filter) and [phrase filter](#phrase-filter) to the beginning of the query.
This rule doesn't apply to [time filter](#time-filter) and [stream filter](#stream-filter), which can be placed anywhere in the query.
- Move more specific filters, which match a lower number of log entries, to the beginning of the query.
This rule doesn't apply to [time filter](#time-filter) and [stream filter](#stream-filter), which can be placed anywhere in the query.
- If the selected logs are passed to [pipes](#pipes) for further transformations and statistics calculations, then it is recommended
to reduce the number of selected logs by using more specific [filters](#filters), which return a lower number of logs to process by [pipes](#pipes). See the sketch after this list.
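The following sketch combines these recommendations; the `app="nginx"` stream filter and the word/phrase filters are hypothetical examples:
```logsql
_time:15m {app="nginx"} error "connection timeout"
  | fields _time, _stream, _msg
```
The time and stream filters narrow down the amount of scanned data, the word and phrase filters drop non-matching logs early, and the `fields` pipe avoids reading log fields that aren't needed in the result.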
## Query options
VictoriaLogs supports the following options, which can be passed at the beginning of a [LogsQL query](#query-syntax) `<q>` via the `options(opt1=v1, ..., optN=vN) <q>` syntax:
- `concurrency` - query concurrency. By default the query is executed in parallel on all the available CPU cores.
This usually provides the best query performance. Sometimes it is needed to reduce the number of used CPU cores,
in order to reduce RAM usage and/or CPU usage.
This can be done by setting the `concurrency` option to a value smaller than the number of available CPU cores.
For example, the following query executes on at most 2 CPU cores:
```logsql
options(concurrency=2) _time:1d | count_uniq(user_id)
```

View file

@ -33,8 +33,8 @@ Just download archive for the needed Operating system and architecture, unpack i
For example, the following commands download VictoriaLogs archive for Linux/amd64, unpack and run it:
```sh
curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.6.1-victorialogs/victoria-logs-linux-amd64-v1.6.1-victorialogs.tar.gz
tar xzf victoria-logs-linux-amd64-v1.6.1-victorialogs.tar.gz
curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.7.0-victorialogs/victoria-logs-linux-amd64-v1.7.0-victorialogs.tar.gz
tar xzf victoria-logs-linux-amd64-v1.7.0-victorialogs.tar.gz
./victoria-logs-prod
```
@ -58,7 +58,7 @@ Here is the command to run VictoriaLogs in a Docker container:
```sh
docker run --rm -it -p 9428:9428 -v ./victoria-logs-data:/victoria-logs-data \
docker.io/victoriametrics/victoria-logs:v1.6.1-victorialogs
docker.io/victoriametrics/victoria-logs:v1.7.0-victorialogs
```
See also:

View file

@ -30,7 +30,9 @@ If you have questions about VictoriaLogs, then read [this FAQ](https://docs.vict
Also feel free asking any questions at [VictoriaMetrics community Slack chat](https://victoriametrics.slack.com/),
you can join it via [Slack Inviter](https://slack.victoriametrics.com/).
See [Quick start docs](https://docs.victoriametrics.com/victorialogs/quickstart/) for start working with VictoriaLogs.
See [quick start docs](https://docs.victoriametrics.com/victorialogs/quickstart/) to start working with VictoriaLogs.
If you want to play with the [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) query language, then go to the [VictoriaLogs playground](https://play-vmlogs.victoriametrics.com/).
## Tuning
@ -401,7 +403,7 @@ Pass `-help` to VictoriaLogs in order to see the list of supported command-line
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
The maximum number of concurrent insert requests. Set higher value when clients send data over slow networks. Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage. See also -insert.maxQueueDuration (default 32)
The maximum number of concurrent insert requests. Set higher value when clients send data over slow networks. Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage. See also -insert.maxQueueDuration
-memory.allowedBytes size
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from the OS page cache resulting in higher disk IO usage
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)

View file

@ -23,8 +23,8 @@ or from [docker images](https://hub.docker.com/r/victoriametrics/vlogscli/tags).
### Running `vlogscli` from release binary
```sh
curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.6.1-victorialogs/vlogscli-linux-amd64-v1.6.1-victorialogs.tar.gz
tar xzf vlogscli-linux-amd64-v1.6.1-victorialogs.tar.gz
curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.7.0-victorialogs/vlogscli-linux-amd64-v1.7.0-victorialogs.tar.gz
tar xzf vlogscli-linux-amd64-v1.7.0-victorialogs.tar.gz
./vlogscli-prod
```

View file

@ -0,0 +1,95 @@
---
weight: 120
title: SQL to LogsQL tutorial
menu:
docs:
parent: "victorialogs"
weight: 120
---
This is a tutorial for migrating from SQL to [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/).
It is expected that you are familiar with SQL and know [how to execute queries at VictoriaLogs](https://docs.victoriametrics.com/victorialogs/querying/).
## data model
SQL is usually used for querying relational tables. Every such table contains a pre-defined set of columns with pre-defined types.
LogsQL is used for querying logs. Logs are stored in [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields).
So log streams are an analogue of tables in relational databases. Log streams and relational tables have the following major differences:
- Log streams are created automatically when the first log entry (row) is ingested into them.
- There is no pre-defined schema in log streams - logs with an arbitrary set of fields can be ingested into every log stream.
Both field names and values in every log entry have string type. They may contain arbitrary string data.
- Every log entry (row) can be represented as a flat JSON object: `{"f1":"v1",...,"fN":"vN"}`. See [these docs](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model).
- By default VictoriaLogs selects log entries across all the log streams. The needed set of log streams can be specified
via [stream filters](https://docs.victoriametrics.com/victorialogs/logsql/#stream-filter).
- By default VictoriaLogs returns all the fields across the selected logs. The set of returned fields
can be limited with the [`fields` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#fields-pipe), as shown in the sketch below.
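A minimal sketch illustrating the last two points; the `app` stream field and the `level` field are hypothetical examples:
```logsql
{app="nginx"} _time:1h | fields _time, level, _msg
```
The stream filter selects the needed log streams, the time filter narrows the search to the last hour, and the `fields` pipe limits the returned fields to `_time`, `level` and `_msg`.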
## query structure
SQL query structure is quite convoluted:
```sql
SELECT
<fields, aggregations, calculations, transformations>
FROM <table>
<optional JOINs>
<optional filters with optional subqueries>
<optional GROUP BY>
<optional HAVING>
<optional ORDER BY>
<optional LIMIT and OFFSET>
<optional UNION>
```
[LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) query structure is much simpler:
```logsql
<filters>
| <optional_pipe1>
| ...
| <optional_pipeN>
```
The `<filters>` part selects the needed logs (rows) according to the provided [filters](https://docs.victoriametrics.com/victorialogs/logsql/#filters).
Then the provided [pipes](https://docs.victoriametrics.com/victorialogs/logsql/#pipes) are executed sequentially.
Every such pipe receives all the rows from the previous stage, performs some calculations and/or transformations,
and then pushes the resulting rows to the next stage. This simplifies reading and understanding the query - just read it from the beginning
to the end in order to understand what it does at every stage.
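For example, the following sketch (the `host` field name is illustrative) selects logs with the `error` word for the last hour, groups them by `host` and returns the three hosts with the most matching logs:
```logsql
_time:1h error | stats by (host) count() as hits | sort by (hits desc) | limit 3
```
Each pipe consumes the rows produced by the previous stage: the `stats` pipe aggregates the filtered logs, the `sort` pipe orders the per-host counts, and the `limit` pipe keeps the first three rows.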
LogsQL pipes cover all the functionality from SQL: aggregations, calculations, transformations, subqueries, joins, post-filters, sorting, etc.
See the [conversion rules](#conversion-rules) on how to convert SQL to LogsQL.
## conversion rules
Use the following rules for converting a SQL query into a LogsQL query; a combined example is shown after the list:
* If the SQL query contains `WHERE`, then convert it into [LogsQL filters](https://docs.victoriametrics.com/victorialogs/logsql/#filters).
Otherwise just start LogsQL query with [`*`](https://docs.victoriametrics.com/victorialogs/logsql/#any-value-filter).
For example, `SELECT * FROM table WHERE field1=value1 AND field2<>value2` is converted into `field1:=value1 field2:!=value2`,
while `SELECT * FROM table` is converted into `*`.
* Subqueries inside `WHERE` must be converted into [`in` filters](https://docs.victoriametrics.com/victorialogs/logsql/#multi-exact-filter).
For example, `SELECT * FROM table WHERE id IN (SELECT id2 FROM table)` is converted into `id:in(* | fields id2)`.
* If the `SELECT` part isn't equal to `*` and there are no `GROUP BY` / aggregate functions in the SQL query, then enumerate
the selected columns at [`fields` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#fields-pipe).
For example, `SELECT field1, field2 FROM table` is converted into `* | fields field1, field2`.
* If the SQL query contains `JOIN`, then convert it into [`join` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#join-pipe).
* If the SQL query contains `GROUP BY` / aggregate functions, then convert them to [`stats` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe).
For example, `SELECT count(*) FROM table` is converted into `* | count()`, while `SELECT user_id, count(*) FROM table GROUP BY user_id`
is converted to `* | stats by (user_id) count()`.
* If the SQL query contains additional calculations and/or transformations at the `SELECT`, which aren't covered yet by `GROUP BY`,
then convert them into the corresponding [LogsQL pipes](https://docs.victoriametrics.com/victorialogs/logsql/#pipes).
The most frequently used pipes are [`math`](https://docs.victoriametrics.com/victorialogs/logsql/#math-pipe)
and [`format`](https://docs.victoriametrics.com/victorialogs/logsql/#format-pipe).
For example, `SELECT field1 + 10 AS x, CONCAT("foo", field2) AS y FROM table` is converted into `* | math field1 + 10 as x | format "foo<field2>" as y | fields x, y`.
* If the SQL query contains `HAVING`, then convert it into [`filter` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#filter-pipe).
For example, `SELECT user_id, count(*) AS c FROM table GROUP BY user_id HAVING c > 100` is converted into `* | stats by (user_id) count() c | filter c:>100`.
* If the SQL query contains `ORDER BY`, `LIMIT` and `OFFSET`, then convert them into [`sort` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#sort-pipe).
For example, `SELECT * FROM table ORDER BY field1, field2 LIMIT 10 OFFSET 20` is converted into `* | sort by (field1, field2) limit 10 offset 20`.
* If the SQL query contains `UNION`, then convert it into [`union` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#union-pipe).
For example, `SELECT * FROM table WHERE filters1 UNION ALL SELECT * FROM table WHERE filters2` is converted into `filters1 | union (filters2)`.
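As a combined sketch of the rules above, a hypothetical SQL query `SELECT host, count(*) AS errors FROM logs WHERE level = 'error' GROUP BY host HAVING errors > 100 ORDER BY errors DESC LIMIT 5` could be converted into the following LogsQL query (the `level` and `host` field names are illustrative):
```logsql
level:=error | stats by (host) count() as errors | filter errors:>100 | sort by (errors desc) limit 5
```
Here the `WHERE` clause becomes a filter, `GROUP BY` with `count(*)` becomes the `stats` pipe, `HAVING` becomes the `filter` pipe, and `ORDER BY ... LIMIT` becomes the `sort` pipe.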
[LogsQL pipes](https://docs.victoriametrics.com/victorialogs/logsql/#pipes) support much wider functionality compared to SQL,
so spend some time reading the [pipe docs](https://docs.victoriametrics.com/victorialogs/logsql/) and playing with them
at the [VictoriaLogs playground](https://play-vmlogs.victoriametrics.com/).

View file

@ -11,6 +11,21 @@ aliases:
---
Please find the changelog for VictoriaMetrics Anomaly Detection below.
## v1.19.1
Released: 2025-01-21
- FIX: Resolved writer warnings for configurations where `reader.tenant_id` equals `writer.tenant_id` and **is not** `multitenant`, as this is a valid setup. Enhanced tenant_id-related log messages across config validation, reader, and writer for improved clarity.
## v1.19.0
Released: 2025-01-20
- FEATURE: Added support for per-query `tenant_id` in the [`VmReader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader). This allows overriding the reader-level `tenant_id` within a single global `vmanomaly` configuration on a *per-query* basis, enabling isolation of data for different tenants in separate queries when querying the [VictoriaMetrics cluster version](https://docs.victoriametrics.com/cluster-victoriametrics/). For details, see the [documentation](https://docs.victoriametrics.com/anomaly-detection/components/reader/?highlight=tenant_id#per-query-parameters).
- IMPROVEMENT: Sped up the model infer stage on multicore systems.
- IMPROVEMENT: Sped up the model fitting stage by 1.253x, depending on configuration complexity.
- IMPROVEMENT: Reduced service RAM usage by 5-10%, depending on configuration complexity.
- FIX: Now [`VmReader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader) properly handles the cases where the number of queries processed in parallel (up to `reader.queries` cardinality) exceeds the default limit of 10 HTTP(S) connections, preventing potential data loss from discarded queries. The pool limit will automatically adjust to match `reader.queries` cardinality.
- FIX: Corrected the construction of write endpoints for cluster VictoriaMetrics `url`s (`tenant_id` arg is set) in `monitoring.push` [section configurations](https://docs.victoriametrics.com/anomaly-detection/components/monitoring/?highlight=tenant_id#push-config-parameters).
## v1.18.8
Released: 2024-12-03

View file

@ -56,13 +56,13 @@ Respective config is defined in a [`reader`](https://docs.victoriametrics.com/an
## Handling timezones
Starting from [v1.18.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1180), `vmanomaly` supports timezone-aware anomaly detection through a `tz` argument, available both globally (in the [`reader`](https://docs.victoriametrics.com/anomaly-detection/components/reader#vm-reader) section) and at the [query level](https://docs.victoriametrics.com/anomaly-detection/components/reader/#per-query-parameters).
`vmanomaly` supports timezone-aware anomaly detection {{% available_from "v1.18.0" anomaly %}} through a `tz` argument, available both at the [reader level](https://docs.victoriametrics.com/anomaly-detection/components/reader#vm-reader) and at the [query level](https://docs.victoriametrics.com/anomaly-detection/components/reader/#per-query-parameters).
For models that depend on seasonality, such as [`ProphetModel`](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet) and [`OnlineQuantileModel`](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-seasonal-quantile), handling timezone shifts is crucial. Changes like Daylight Saving Time (DST) can disrupt seasonality patterns learned by models, resulting in inaccurate anomaly predictions as the periodic patterns shift with time. Proper timezone configuration ensures that seasonal cycles align with expected intervals, even as DST changes occur.
To enable timezone handling:
1. **Globally**: Set `tz` in the [`reader`](https://docs.victoriametrics.com/anomaly-detection/components/reader#vm-reader) section to a specific timezone (e.g., `Europe/Berlin`) to apply this setting to all queries.
2. **Per query**: Override the global setting by specifying `tz` at the individual [query level](https://docs.victoriametrics.com/anomaly-detection/components/reader/#per-query-parameters) for targeted adjustments.
1. **Reader-level**: Set `tz` in the [`reader`](https://docs.victoriametrics.com/anomaly-detection/components/reader#vm-reader) section to a specific timezone (e.g., `Europe/Berlin`) to apply this setting to all queries.
2. **Query-level**: Override the reader-level setting by specifying `tz` at the individual [query level](https://docs.victoriametrics.com/anomaly-detection/components/reader/#per-query-parameters) for targeted adjustments.
**Example:**
@ -87,7 +87,7 @@ models:
## Choosing the right model for vmanomaly
Selecting the best model for `vmanomaly` depends on the data's nature and the [types of anomalies](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-2/#categories-of-anomalies) to detect. For instance, [Z-score](https://docs.victoriametrics.com/anomaly-detection/components/models#z-score) is suitable for data without trends or seasonality, while more complex patterns might require models like [Prophet](https://docs.victoriametrics.com/anomaly-detection/components/models#prophet).
Also, starting from [v1.12.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1120) it's possible to auto-tune the most important params of selected model class, find [the details here](https://docs.victoriametrics.com/anomaly-detection/components/models#autotuned).
Also, it's possible to auto-tune the most important params of selected model class {{% available_from "v1.12.0" anomaly %}}, find [the details here](https://docs.victoriametrics.com/anomaly-detection/components/models#autotuned).
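For illustration only, below is a minimal sketch of an auto-tuned model configuration. The `auto` class alias, `tuned_class_name` and `optimization_params`/`anomaly_percentage` argument names are assumptions based on the autotuned-model docs linked above; verify them against the models page for your `vmanomaly` version.
```yaml
# a minimal sketch of an auto-tuned model (argument names assumed; see the
# autotuned model docs linked above for the authoritative list)
models:
  zscore_tuned:
    class: 'auto'                 # auto-tuning wrapper around a concrete model class
    tuned_class_name: 'zscore'    # the model class whose hyperparameters get tuned
    optimization_params:
      anomaly_percentage: 0.004   # expected share of anomalies in the training data
    queries: ['node_cpu_rate']    # hypothetical query alias defined in the reader section
```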
Please refer to [respective blogpost on anomaly types and alerting heuristics](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-2/) for more details.
@ -100,7 +100,8 @@ While `vmanomaly` detects anomalies and produces scores, it *does not directly g
Produced anomaly scores are designed so that values from 0.0 to 1.0 indicate non-anomalous data, while a value greater than 1.0 is generally classified as an anomaly. However, there are no perfect models for anomaly detection, so reasonable default expressions like `anomaly_score > 1` may not work 100% of the time. The anomaly scores produced by `vmanomaly` are written back as metrics to VictoriaMetrics, where tools like [`vmalert`](https://docs.victoriametrics.com/vmalert) can use [MetricsQL](https://docs.victoriametrics.com/metricsql/) expressions to fine-tune alerting thresholds and conditions, balancing between avoiding [false negatives](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#false-negative) and reducing [false positives](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#false-positive).
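As an illustration, a minimal `vmalert` rules-file sketch could look like the one below. The threshold, `for` duration and severity are illustrative assumptions; the `anomaly_score` metric and the `model_alias`/`scheduler_alias` labels follow the output format described in these docs.
```yaml
# a minimal vmalert rules file (illustrative thresholds; tune them per series)
groups:
  - name: vmanomaly-alerts
    rules:
      - alert: HighAnomalyScore
        # `anomaly_score` is written back to VictoriaMetrics by vmanomaly;
        # requiring it to stay above 1.0 for 5 minutes filters out short spikes
        expr: 'anomaly_score > 1.0'
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: 'Anomaly detected by model {{ $labels.model_alias }} (scheduler {{ $labels.scheduler_alias }})'
```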
## How to backtest particular configuration on historical data?
Starting from [v1.7.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v172) you can produce (and write back to VictoriaMetrics TSDB) anomaly scores for historical (backtesting) period, using `BacktestingScheduler` [component](https://docs.victoriametrics.com/anomaly-detection/components/scheduler#backtesting-scheduler) to imitate consecutive "production runs" of `PeriodicScheduler` [component](https://docs.victoriametrics.com/anomaly-detection/components/scheduler#periodic-scheduler). Please find an example config below:
Anomaly scores for a historical (backtesting) period can be produced and written back {{% available_from "v1.7.2" anomaly %}} to VictoriaMetrics TSDB using the `BacktestingScheduler` [component](https://docs.victoriametrics.com/anomaly-detection/components/scheduler#backtesting-scheduler) to imitate consecutive "production runs" of the `PeriodicScheduler` [component](https://docs.victoriametrics.com/anomaly-detection/components/scheduler#periodic-scheduler). Please find an example config below:
```yaml
schedulers:
@ -149,9 +150,7 @@ Configuration above will produce N intervals of full length (`fit_window`=14d +
### On-disk mode
> **Note**: Starting from [v1.13.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1130), there is an option to save anomaly detection models to the host filesystem after the `fit` stage (instead of keeping them in memory by default). This is particularly useful for **resource-intensive setups** (e.g., many models, many metrics, or larger [`fit_window` argument](https://docs.victoriametrics.com/anomaly-detection/components/scheduler#periodic-scheduler-config-example)) and for 3rd-party models that store fit data (such as [ProphetModel](https://docs.victoriametrics.com/anomaly-detection/components/models#prophet) or [HoltWinters](https://docs.victoriametrics.com/anomaly-detection/components/models#holt-winters)). This reduces RAM consumption significantly, though at the cost of slightly slower `infer` stages. To enable this, set the environment variable `VMANOMALY_MODEL_DUMPS_DIR` to the desired location. If using [Helm charts](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-anomaly/README.md), starting from chart version `1.3.0` `.persistentVolume.enabled` should be set to `true` in [values.yaml](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-anomaly/values.yaml).
> **Note**: Starting from [v1.16.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1160), a similar optimization is available for data read from VictoriaMetrics TSDB. To use this, set the environment variable `VMANOMALY_DATA_DUMPS_DIR` to the desired location.
> **Note**: {{% available_from "v1.13.0" anomaly %}} there is an option to save anomaly detection models to the host filesystem after the `fit` stage (instead of keeping them in memory by default). This is particularly useful for **resource-intensive setups** (e.g., many models, many metrics, or larger [`fit_window` argument](https://docs.victoriametrics.com/anomaly-detection/components/scheduler#periodic-scheduler-config-example)) and for 3rd-party models that store fit data (such as [ProphetModel](https://docs.victoriametrics.com/anomaly-detection/components/models#prophet) or [HoltWinters](https://docs.victoriametrics.com/anomaly-detection/components/models#holt-winters)). This reduces RAM consumption significantly, though at the cost of slightly slower `infer` stages. To enable this, set the environment variable `VMANOMALY_MODEL_DUMPS_DIR` to the desired location. If using [Helm charts](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-anomaly/README.md), starting from chart version `1.3.0` `.persistentVolume.enabled` should be set to `true` in [values.yaml](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-anomaly/values.yaml). Similar optimization is available for data read from VictoriaMetrics TSDB {{% available_from "v1.16.0" anomaly %}}. To use this, set the environment variable `VMANOMALY_DATA_DUMPS_DIR` to the desired location.
Here's an example of how to set it up in docker-compose using volumes:
```yaml
@ -159,7 +158,7 @@ services:
# ...
vmanomaly:
container_name: vmanomaly
image: victoriametrics/vmanomaly:v1.18.8
image: victoriametrics/vmanomaly:v1.19.1
# ...
ports:
- "8490:8490"
@ -189,7 +188,7 @@ For Helm chart users, refer to the `persistentVolume` [section](https://github.c
### Online models
> **Note**: Starting from [v1.15.0](https://docs.victoriametrics.com/anomaly-detection/changelog#v1150) with the introduction of [online models](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-models), you can additionally reduce resource consumption (e.g., flatten `fit` stage peaks by querying less data from VictoriaMetrics at once).
With the introduction of [online models](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-models) {{% available_from "v1.15.0" anomaly %}}, you can additionally reduce resource consumption (e.g., flatten `fit` stage peaks by querying less data from VictoriaMetrics at once).
- **Reduced Latency**: Online models update incrementally, which can lead to faster response times for anomaly detection since the model continuously adapts to new data without waiting for a batch `fit`.
- **Scalability**: Handling smaller data chunks at a time reduces memory and computational overhead, making it easier to scale the anomaly detection system.
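As an illustration, a sketch of such a setup is shown below: the scheduler re-fits rarely on a small window while inferring frequently, and the model is an online one. The `zscore_online` class alias and the query alias are hypothetical and used for illustration only; take the exact alias and arguments from the online models docs linked above.
```yaml
# a minimal sketch (class alias is hypothetical -- check the online models docs
# linked above for the exact one supported by your version)
schedulers:
  online_scheduler:
    class: 'periodic'
    fit_window: '1d'     # a small window is enough: the model keeps learning online
    fit_every: '7d'      # full re-fits can be infrequent, flattening `fit` peaks
    infer_every: '1m'    # the model updates incrementally on every infer call
models:
  cpu_online:
    class: 'zscore_online'        # hypothetical online model class alias
    queries: ['node_cpu_rate']    # hypothetical query alias from the reader section
```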
@ -338,7 +337,7 @@ For **horizontal** scalability, `vmanomaly` can be deployed as multiple independ
### Splitting the config
Starting from [v1.18.5](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1185), a CLI utility named `config_splitter.py` is available in vmanoamly. The config splitter tool enables splitting a parent vmanomaly YAML configuration file into multiple sub-configurations based on logical entities such as `schedulers`, `queries`, `models`, `extra_filters`. The resulting sub-configurations are fully validated, functional, account for many-to-many relationships between models and their associated queries, and the schedulers they are linked to. These sub-configurations can then be saved to a specified directory for further use:
A CLI utility named `config_splitter` is available in `vmanomaly` {{% available_from "v1.18.5" anomaly %}}. It splits a parent vmanomaly YAML configuration file into multiple sub-configurations based on logical entities such as `schedulers`, `queries`, `models` or `extra_filters`. The resulting sub-configurations are fully validated and functional, and account for the many-to-many relationships between models, their associated queries, and the schedulers they are linked to. These sub-configurations can then be saved to a specified directory for further use:
```shellhelp
usage: config_splitter.py [-h] --splitBy {schedulers,models,queries,extra_filters} --outputDir OUTPUT_DIR [--fileNameFormat {raw,hash,int}] [--loggerLevel {WARNING,INFO,ERROR,FATAL,DEBUG}]
@ -365,25 +364,31 @@ options:
Here's an example of using the config splitter to divide configurations based on the `extra_filters` argument from the reader section:
```sh
docker pull victoriametrics/vmanomaly:v1.18.8 && docker image tag victoriametrics/vmanomaly:v1.18.8 vmanomaly
docker pull victoriametrics/vmanomaly:v1.19.1 && docker image tag victoriametrics/vmanomaly:v1.19.1 vmanomaly
```
```sh
export YOUR_INPUT_CONFIG_PATH=path/to/input/config.yml
export YOUR_OUTPUT_DIR_PATH=path/to/output/directory
export INPUT_CONFIG_PATH=/absolute/path/to/input/config.yml
export OUTPUT_DIR_PATH=/absolute/path/to/output/directory
docker run -it --rm \
-v $YOUR_INPUT_CONFIG_PATH:/input_config.yml \
-v $YOUR_OUTPUT_DIR_PATH:/output_dir \
vmanomaly python3 /vmanomaly/config_splitter.py \
/input_config.yml \
--user="$(id -u):$(id -g)" \
--cap-drop=ALL \
-e "VM_LICENSE_FILE=/.secret/license" \
-v "$PWD/.secret/license":/.secret/license \
-v "$INPUT_CONFIG_PATH":/vmanomaly/input_config.yml \
-v "$OUTPUT_DIR_PATH":/vmanomaly/output \
--entrypoint python3 \
vmanomaly:latest \
-m config_splitter \
/vmanomaly/input_config.yml \
--splitBy=extra_filters \
--outputDir=/output_dir \
--outputDir=/vmanomaly/output \
--fileNameFormat=raw \
--loggerLevel=INFO
```
After running the command, the output directory (specified by `YOUR_OUTPUT_DIR_PATH`) will contain 1+ split configuration files like the examples below. Each file can be used to launch a separate vmanomaly instance. Use similar approach to split on other entities, like `models` or `schedulers`.
After running the command, the output directory (specified by `OUTPUT_DIR_PATH`) will contain one or more split configuration files like the examples below. Each file can be used to launch a separate vmanomaly instance. Use a similar approach to split on other entities, such as `models`, `queries` or `schedulers`.
```yaml
# config file #1, for 1st vmanomaly instance

View file

@ -159,14 +159,15 @@ Total amount of CPU time consumed by the system in seconds by CPU processing mod
<tr>
<td>
`host_network_receive_errors` & `host_network_transmit_errors`
<span style="white-space: nowrap;">`host_network_transmit_errors`</span>
& `host_network_receive_errors`
</td>
<td>
`node_network_receive_errs_total`,
`node_network_receive_packets_total`,
`node_network_transmit_errs_total`,
`node_network_transmit_packets_total`
`node_network_transmit_errs_total`,
<span style="white-space: nowrap;">`node_network_transmit_packets_total`</span>
<td>
Total number of errors encountered while receiving/transmitting packets on the network interfaces of a node.

View file

@ -101,13 +101,13 @@ Below are the steps to get `vmanomaly` up and running inside a Docker container:
1. Pull Docker image:
```sh
docker pull victoriametrics/vmanomaly:v1.18.8
docker pull victoriametrics/vmanomaly:v1.19.1
```
2. (Optional step) tag the `vmanomaly` Docker image:
```sh
docker image tag victoriametrics/vmanomaly:v1.18.8 vmanomaly
docker image tag victoriametrics/vmanomaly:v1.19.1 vmanomaly
```
3. Start the `vmanomaly` Docker container with a *license file*, use the command below.
@ -141,7 +141,7 @@ docker run -it --user 1000:1000 \
services:
# ...
vmanomaly:
image: victoriametrics/vmanomaly:v1.18.8
image: victoriametrics/vmanomaly:v1.19.1
volumes:
$YOUR_LICENSE_FILE_PATH:/license
$YOUR_CONFIG_FILE_PATH:/config.yml
@ -177,19 +177,30 @@ Here is an example of config file that will run [Facebook Prophet](https://faceb
```yaml
schedulers:
2h_1m:
1d_1m:
# https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler
class: 'periodic'
infer_every: '1m'
fit_every: '2h'
fit_every: '1d'
fit_window: '2w'
models:
# https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet
prophet_model:
class: "prophet" # or "model.prophet.ProphetModel" until v1.13.0
class: 'prophet'
provide_series: ['anomaly_score', 'yhat', 'yhat_lower', 'yhat_upper'] # for debugging
tz_aware: True
tz_use_cyclical_encoding: True
tz_seasonalities: # intra-day + intra-week seasonality
- name: 'hod' # intra-day seasonality, hour of the day
fourier_order: 4 # keep it 3-8 based on intraday pattern complexity
prior_scale: 10
- name: 'dow' # intra-week seasonality, day of the week
fourier_order: 2 # keep it 2-4, as dependencies are learned separately for each weekday
# inner model args (key-value pairs) accepted by
# https://facebook.github.io/prophet/docs/quick_start.html#python-api
args:
interval_width: 0.98
interval_width: 0.98 # see https://facebook.github.io/prophet/docs/uncertainty_intervals.html
reader:
# https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader

View file

@ -994,7 +994,7 @@ monitoring:
Let's pull the docker image for `vmanomaly`:
```sh
docker pull victoriametrics/vmanomaly:v1.18.8
docker pull victoriametrics/vmanomaly:v1.19.1
```
Now we can run the docker container putting as volumes both config and model file:
@ -1008,7 +1008,7 @@ docker run -it \
-v $(PWD)/license:/license \
-v $(PWD)/custom_model.py:/vmanomaly/model/custom.py \
-v $(PWD)/custom.yaml:/config.yaml \
victoriametrics/vmanomaly:v1.18.8 /config.yaml \
victoriametrics/vmanomaly:v1.19.1 /config.yaml \
--licenseFile=/license
```
@ -1017,7 +1017,7 @@ Please find more detailed instructions (license, etc.) [here](https://docs.victo
### Output
As the result, this model will return metric with labels, configured previously in `config.yaml`.
In this particular example, 2 metrics will be produced. Also, there will be added other metrics from input query result.
In this particular example, 2 metrics will be produced. Other labels from the input query result will also be added.
```text
{__name__="custom_anomaly_score", for="ingestion_rate", model_alias="custom_model", scheduler_alias="s1", run="test-format"},

View file

@ -28,7 +28,7 @@ There are 2 models to monitor VictoriaMetrics Anomaly Detection behavior - [push
<tr>
<th>Parameter</th>
<th>Default</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Description</span></th>
</tr>
</thead>
<tbody>
@ -263,8 +263,8 @@ For detailed guidance on configuring mTLS parameters such as `verify_tls`, `tls_
<thead>
<tr>
<th>Metric</th>
<th>Type</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Type</span></th>
<th><span style="white-space: nowrap;">Description</span></th>
</tr>
</thead>
<tbody>
@ -273,7 +273,10 @@ For detailed guidance on configuring mTLS parameters such as `verify_tls`, `tls_
<span style="white-space: nowrap;">`vmanomaly_start_time_seconds`</span>
</td>
<td>Gauge</td>
<td>
<span style="white-space: nowrap;">Gauge</span>
</td>
<td>vmanomaly start time in UNIX time</td>
</tr>
<tr>
@ -323,7 +326,7 @@ Label names [description](#labelnames)
<tr>
<th>Metric</th>
<th>Type</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Description</span></th>
<th>Labelnames</th>
</tr>
</thead>
@ -434,7 +437,7 @@ Label names [description](#labelnames)
<tr>
<th>Metric</th>
<th>Type</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Description</span></th>
<th>Labelnames</th>
</tr>
</thead>
@ -539,6 +542,7 @@ Label names [description](#labelnames)
</td>
<td>The number of times model runs (of class `model_alias`) failed due to internal service errors during the `stage` (`fit`, `infer`, `fit_infer`), based on results from the `query_key` query, within the specified scheduler `scheduler_alias`, in the `vmanomaly` service running in `preset` mode.</td>
<td>
`stage`, `query_key`, `model_alias`, <span style="white-space: nowrap;">`scheduler_alias`</span>, `preset`
</td>
</tr>
@ -557,7 +561,7 @@ Label names [description](#labelnames)
<tr>
<th>Metric</th>
<th>Type</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Description</span></th>
<th>Labelnames</th>
</tr>
</thead>

View file

@ -8,9 +8,6 @@ menu:
aliases:
- /anomaly-detection/components/reader.html
---
<!--
There are 4 sources available to read data into VM Anomaly Detection from: VictoriaMetrics, (ND)JSON file, QueryRange, or CSV file. Depending on the data source, different parameters should be specified in the config file in the `reader` section.
-->
VictoriaMetrics Anomaly Detection (`vmanomaly`) primarily uses [VmReader](#vm-reader) to ingest data. This reader focuses on fetching time-series data directly from VictoriaMetrics with the help of powerful [MetricsQL](https://docs.victoriametrics.com/metricsql/) expressions for aggregating, filtering and grouping your data, ensuring seamless integration and efficient data handling.
@ -54,7 +51,7 @@ reader:
### Per-query parameters
There is change{{% available_from "v1.13.0" anomaly %}} of [`queries`](https://docs.victoriametrics.com/anomaly-detection/components/reader?highlight=queries#vm-reader) arg format. Now each query alias supports the next (sub)fields:
There is a change{{% available_from "v1.13.0" anomaly %}} of the [`queries`](https://docs.victoriametrics.com/anomaly-detection/components/reader?highlight=queries#vm-reader) arg format. Now each query alias supports the following (sub)fields, which *override reader-level parameters*, if set:
- `expr` (string): MetricsQL/PromQL expression that defines an input for VmReader. As accepted by `/query_range?query=%s`. i.e. `avg(vm_blocks)`
@ -74,6 +71,14 @@ There is change{{% available_from "v1.13.0" anomaly %}} of [`queries`](https://d
- `tz`{{% available_from "v1.18.0" anomaly %}} (string): this optional argument enables timezone specification per query, overriding the reader's default `tz`. This setting helps to account for local timezone shifts, such as [DST](https://en.wikipedia.org/wiki/Daylight_saving_time), in models that are sensitive to seasonal variations (e.g., [`ProphetModel`](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet) or [`OnlineQuantileModel`](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-seasonal-quantile)).
- `tenant_id` {{% available_from "v1.19.0" anomaly %}} (string): this optional argument enables tenant-level separation for queries (e.g. `query1` to get the data from tenant "0:0", `query2` - from tenant "1:0"). It works as follows:
- if *not set, inherits* reader-level `tenant_id`
- if *set, overrides* reader-level `tenant_id`
- *raises config validation error*, if *reader-level is not set* and *query-level is found* (mixing of VictoriaMetrics [single-node](https://docs.victoriametrics.com/single-server-victoriametrics/) and [cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) is prohibited in a single config)
- *raises config validation warning*, if `writer.tenant_id` is not explicitly set to `multitenant` when reader uses tenants, meaning [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) will be used for data querying.
- also *raises config validation error* if a set of `reader.queries` for [multivariate models](https://docs.victoriametrics.com/anomaly-detection/components/models/index.html#multivariate-models) has *different* tenant_ids (meaning tenant data is mixed, and special labels like `vm_project_id`, `vm_account_id` will have [ambiguous values](https://docs.victoriametrics.com/cluster-victoriametrics/#multitenancy-via-labels))
> **Note:** The recommended approach for using per-query `tenant_id`s is to set both `reader.tenant_id` and `writer.tenant_id` to `multitenant`. See [this section](https://docs.victoriametrics.com/anomaly-detection/components/writer/index.html#multitenancy-support) for more details. Configurations where `reader.tenant_id` equals `writer.tenant_id` and is not `multitenant` are also considered safe, provided there is a single, DISTINCT `tenant_id` defined in the reader (either at the reader level or the query level, if set).
### Per-query config example
```yaml
@ -81,14 +86,24 @@ reader:
class: 'vm'
sampling_period: '1m'
max_points_per_query: 10000
data_range: [0, 'inf']
tenant_id: 'multitenant'
# other reader params ...
queries:
ingestion_rate:
ingestion_rate_t1:
expr: 'sum(rate(vm_rows_inserted_total[5m])) by (type) > 0'
step: '2m' # overrides global `sampling_period` of 1m
data_range: [10, 'inf'] # meaning only positive values > 10 are expected, i.e. a value `y` < 10 will trigger anomaly score > 1
max_points_per_query: 5000 # overrides reader-level value of 10000 for the `ingestion_rate_t1` query
tz: 'America/New_York' # to override reader-wise `tz`
tenant_id: '1:0' # overriding tenant_id to isolate data
ingestion_rate_t2:
expr: 'sum(rate(vm_rows_inserted_total[5m])) by (type) > 0'
step: '2m' # overrides global `sampling_period` of 1m
data_range: [10, 'inf'] # meaning only positive values > 10 are expected, i.e. a value `y` < 10 will trigger anomaly score > 1
max_points_per_query: 5000 # overrides reader-level value of 10000 for the `ingestion_rate_t2` query
tz: 'America/New_York' # to override reader-wise `tz`
tenant_id: '2:0' # overriding tenant_id to isolate data
```
### Config parameters
@ -98,7 +113,7 @@ reader:
<tr>
<th>Parameter</th>
<th>Example</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Description</span></th>
</tr>
</thead>
<tbody>
@ -384,7 +399,7 @@ Config file example:
reader:
class: "vm" # or "reader.vm.VmReader" until v1.13.0
datasource_url: "https://play.victoriametrics.com/"
tenant_id: "0:0"
tenant_id: '0:0'
tz: 'America/New_York'
data_range: [1, 'inf'] # reader-level
queries:
@ -393,6 +408,7 @@ reader:
step: '1m' # can override reader-level `sampling_period` on per-query level
data_range: [0, 'inf'] # if set, overrides reader-level data_range
tz: 'Australia/Sydney' # if set, overrides reader-level tz
# tenant_id: '1:0' # if set, overrides reader-level tenant_id
sampling_period: '1m'
query_from_last_seen_timestamp: True # false by default
latency_offset: '1ms'

View file

@ -109,7 +109,7 @@ Examples: `"50s"`, `"4m"`, `"3h"`, `"2d"`, `"1w"`.
<th>Parameter</th>
<th>Type</th>
<th>Example</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Description</span></th>
</tr>
</thead>
<tbody>
@ -217,11 +217,11 @@ If a time zone is omitted, a timezone-naive datetime is used.
<table class="params">
<thead>
<tr>
<th>Format</th>
<th><span style="white-space: nowrap;">Format</span></th>
<th>Parameter</th>
<th>Type</th>
<th><span style="white-space: nowrap;">Type</span></th>
<th>Example</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Description</span></th>
</tr>
</thead>
<tbody>
@ -282,11 +282,11 @@ If a time zone is omitted, a timezone-naive datetime is used.
<table class="params">
<thead>
<tr>
<th>Format</th>
<th><span style="white-space: nowrap;">Format</span></th>
<th>Parameter</th>
<th>Type</th>
<th><span style="white-space: nowrap;">Type</span></th>
<th>Example</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Description</span></th>
</tr>
</thead>
<tbody>
@ -405,7 +405,7 @@ If a time zone is omitted, a timezone-naive datetime is used.
<span style="white-space: nowrap;">`n_jobs`</span>
</td>
<td>int</td>
<td><span style="white-space: nowrap;">int</span></td>
<td>
`1`
@ -440,17 +440,17 @@ This timeframe will be used for slicing on intervals `(fit_window, infer_window
<span style="white-space: nowrap;">Example</span>
</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Description</span></th>
</tr>
</thead>
<tbody>
<tr>
<td>ISO 8601</td>
<td><span style="white-space: nowrap;">ISO 8601</span></td>
<td>
<span style="white-space: nowrap;">`from_iso`</span>
</td>
<td>str</td>
<td><span style="white-space: nowrap;">str</span></td>
<td>
`"2022-04-01T00:00:00Z", "2022-04-01T00:00:00+01:00", "2022-04-01T00:00:00+0100", "2022-04-01T00:00:00+01"`
@ -499,11 +499,11 @@ The same *explicit* logic as in [Periodic scheduler](#periodic-scheduler)
<table class="params">
<thead>
<tr>
<th>Format</th>
<th><span style="white-space: nowrap;">Format</span></th>
<th>Parameter</th>
<th>Type</th>
<th>Example</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Type</span></th>
<th><span style="white-space: nowrap;">Example</span></th>
<th><span style="white-space: nowrap;">Description</span></th>
</tr>
</thead>
<tbody>
@ -516,7 +516,7 @@ The same *explicit* logic as in [Periodic scheduler](#periodic-scheduler)
<td rowspan=2>str</td>
<td>
`"PT1M", "P1H"`
<span style="white-space: nowrap;">`"PT1M"`</span>, `"P1H"`
</td>
<td rowspan=2>What time range to use for training the models. Must be at least 1 second.</td>
</tr>
@ -524,7 +524,7 @@ The same *explicit* logic as in [Periodic scheduler](#periodic-scheduler)
<td>Prometheus-compatible</td>
<td>
`"1m", "1h"`
<span style="white-space: nowrap;">`"1m"`</span>, `"1h"`
</td>
</tr>
</tbody>
@ -537,9 +537,9 @@ In `BacktestingScheduler`, the inference window is *implicitly* defined as a per
<tr>
<th>Format</th>
<th>Parameter</th>
<th>Type</th>
<th>Example</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Type</span></th>
<th><span style="white-space: nowrap;">Example</span></th>
<th><span style="white-space: nowrap;">Description</span></th>
</tr>
</thead>
<tbody>
@ -552,7 +552,7 @@ In `BacktestingScheduler`, the inference window is *implicitly* defined as a per
<td rowspan=2>str</td>
<td>
`"PT1M", "P1H"`
<span style="white-space: nowrap;">`"PT1M"`</span>, `"P1H"`
</td>
<td rowspan=2>What time range to use previously trained model to infer on new data until next retrain happens.</td>
</tr>
@ -560,7 +560,7 @@ In `BacktestingScheduler`, the inference window is *implicitly* defined as a per
<td>Prometheus-compatible</td>
<td>
`"1m", "1h"`
<span style="white-space: nowrap;">`"1m"`</span>, `"1h"`
</td>
</tr>
</tbody>

View file

@ -21,7 +21,7 @@ Future updates will introduce additional export methods, offering users more fle
<tr>
<th>Parameter</th>
<th>Example</th>
<th>Description</th>
<th><span style="white-space: nowrap;">Description</span></th>
</tr>
</thead>
<tbody>

View file

@ -2,9 +2,9 @@
- To use *vmanomaly*, part of the enterprise package, a license key is required. Obtain your key [here](https://victoriametrics.com/products/enterprise/trial/) for this tutorial or for enterprise use.
- In the tutorial, we'll be using the following VictoriaMetrics components:
- [VictoriaMetrics Single-Node](https://docs.victoriametrics.com/single-server-victoriametrics) (v1.109.0)
- [vmalert](https://docs.victoriametrics.com/vmalert/) (v1.109.0)
- [vmagent](https://docs.victoriametrics.com/vmagent/) (v1.109.0)
- [VictoriaMetrics Single-Node](https://docs.victoriametrics.com/single-server-victoriametrics) (v1.109.1)
- [vmalert](https://docs.victoriametrics.com/vmalert/) (v1.109.1)
- [vmagent](https://docs.victoriametrics.com/vmagent/) (v1.109.1)
- [Grafana](https://grafana.com/) (v.10.2.1)
- [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/)
- [Node exporter](https://github.com/prometheus/node_exporter#node-exporter) (v1.7.0) and [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) (v0.27.0)
@ -136,22 +136,24 @@ Below is an illustrative example of a `vmanomaly_config.yml` configuration file.
``` yaml
schedulers:
periodic:
# class: 'periodic' # or "scheduler.periodic.PeriodicScheduler" until v1.13.0
infer_every: "1m"
fit_every: "2m"
fit_window: "3h"
fit_every: "1h"
fit_window: "2d" # 2d-14d based on the presense of weekly seasonality in your data
models:
prophet:
class: "prophet" # or "model.prophet.ProphetModel" until v1.13.0
class: "prophet"
args:
interval_width: 0.98
weekly_seasonality: False # comment it if your data has weekly seasonality
yearly_seasonality: False
reader:
datasource_url: "http://victoriametrics:8428/"
sampling_period: "60s"
sampling_period: "60s"
queries:
node_cpu_rate: "sum(rate(node_cpu_seconds_total[5m])) by (mode, instance, job)"
node_cpu_rate:
expr: "sum(rate(node_cpu_seconds_total[5m])) by (mode, instance, job)"
writer:
datasource_url: "http://victoriametrics:8428/"
@ -313,7 +315,7 @@ Let's wrap it all up together into the `docker-compose.yml` file.
services:
vmagent:
container_name: vmagent
image: victoriametrics/vmagent:v1.109.0
image: victoriametrics/vmagent:v1.109.1
depends_on:
- "victoriametrics"
ports:
@ -330,7 +332,7 @@ services:
victoriametrics:
container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.109.0
image: victoriametrics/victoria-metrics:v1.109.1
ports:
- 8428:8428
volumes:
@ -363,7 +365,7 @@ services:
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.109.0
image: victoriametrics/vmalert:v1.109.1
depends_on:
- "victoriametrics"
ports:
@ -385,7 +387,7 @@ services:
restart: always
vmanomaly:
container_name: vmanomaly
image: victoriametrics/vmanomaly:v1.18.8
image: victoriametrics/vmanomaly:v1.19.1
depends_on:
- "victoriametrics"
ports:

View file

@ -18,22 +18,41 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/).
## tip
## [v1.110.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.110.0)
Released at 2025-01-24
* SECURITY: upgrade Go builder from Go1.23.4 to Go1.23.5. See the list of issues addressed in [Go1.23.5](https://github.com/golang/go/issues?q=milestone%3AGo1.23.5+label%3ACherryPickApproved).
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/metricsql/): allow executing queries with `$__interval` and `$__rate_interval` - these placeholders are automatically replaced with `1i` (e.g. `step` arg value at [`/api/v1/query_range`](https://docs.victoriametrics.com/keyconcepts/#range-query)) during query execution. This simplifies copying queries from Grafana dashboards.
* FEATURE: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/) and `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): add command-line flag `-search.maxDeleteDuration(default 5m)` to limit the duration of the `/api/v1/admin/tsdb/delete_series` call. Previously, the call is limited by `-search.maxQueryDuration`.
* FEATURE: [dashboards](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/dashboards): all dashboards that use [VictoriaMetrics Grafana datasource](https://github.com/VictoriaMetrics/victoriametrics-datasource) were updated to use a [new datasource ID](https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/tag/v0.12.0).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): reflect column settings for the table view in URL, so the table view can be shared via link. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7662).
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) and [vmagent](https://docs.victoriametrics.com/vmagent/): allow ingesting histograms with missing `_sum` metric via [OpenTelemetry ingestion protocol](https://docs.victoriametrics.com/#sending-data-via-opentelemetry) in the same way as Prometheus does.
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): respect staleness detection in increase, increase_pure and delta functions when time series has gaps and `-search.maxStalenessInterval` is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8072) for details.
* BUGFIX: all VictoriaMetrics [enterprise](https://docs.victoriametrics.com/enterprise/) components: properly trim whitespaces at the end of license provided via `-license` and `-licenseFile` command-line flags. Previously, the trailing whitespaces could cause the license verification to fail.
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): fix possible runtime panic during requests processing under heavy load. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051) for details.
* BUGFIX: [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): fix panic when trying to delete series by using [multitenant read](https://docs.victoriametrics.com/cluster-victoriametrics/#multitenancy-via-labels) endpoint. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8126) for the details.
* BUGFIX: [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): prevent panic when `vmselect` receives an error response from `vmstorage` during the query execution and request processing for other `vmstorage` nodes is still in progress. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8114) for the details.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix an issue where pressing the "Enter" key in the query editor did not execute the query. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8058).
## [v1.109.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.109.1)
Released at 2025-01-17
* SECURITY: upgrade base docker image (Alpine) from 3.21.0 to 3.21.2. See [Alpine 3.21.1 release notes](https://alpinelinux.org/posts/Alpine-3.21.1-released.html) and [Alpine 3.21.2 release notes](https://alpinelinux.org/posts/Alpine-3.18.11-3.19.6-3.20.5-3.21.2-released.html).
* FEATURE: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) and [vmagent](https://docs.victoriametrics.com/vmagent/): log metric names for signals with unsupported delta temporality on ingestion via [OpenTelemetry protocol for metrics](https://docs.victoriametrics.com/#sending-data-via-opentelemetry). Thanks to @chenlujjj for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8018).
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) and [vmagent](https://docs.victoriametrics.com/vmagent/): log metric names for signals with unsupported delta temporality on ingestion via [OpenTelemetry protocol for metrics](https://docs.victoriametrics.com/#sending-data-via-opentelemetry). Thanks to @chenlujjj for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8018).
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): fix incorrect behavior of increase, increase_pure, delta caused by [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8002). This fix reverts to the previous behavior before [v1.109.0](https://docs.victoriametrics.com/changelog/#v11090). But allows controlling staleness detection for these functions explicitly via `-search.maxStalenessInterval`.
* BUGFIX: all VictoriaMetrics [enterprise](https://docs.victoriametrics.com/enterprise/) components: remove unnecessary delay before failing if all online verification attempts have failed. This should reduce the time required for the component to proceed if all online verification attempts have failed.
* BUGFIX: [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): prevent panic when sending `multitenant` [read request](https://docs.victoriametrics.com/cluster-victoriametrics/#multitenancy-via-labels) to `/api/v1/series/count`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8126) for the details.
## [v1.109.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.109.0)
Released at 2025-01-14
**Update note: This release contains [issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8045) that might affect correctness of increase, increase_pure and delta functions. It affects only vmselect or VictoriaMetrics single-node on query time. Please, rollback vmselects to the closest previous version as temporary workaround until it is fixed.**
**Update note: This release contains [issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8045) that might affect correctness of increase, increase_pure and delta functions. It affects only vmselect or VictoriaMetrics single-node on query time. Please, rollback vmselects to the closest previous version or update to [v1.109.1](https://docs.victoriametrics.com/changelog/#v11091).**
* FEATURE: all the VictoriaMetrics components: increase the default value for [`GOGC`](https://tip.golang.org/doc/gc-guide#GOGC) from `30` to `100`. This should reduce CPU usage at the cost of slightly higher memory usage. [Single-node VictoriaMetrics](https://docs.victoriametrics.com/), [vmagent](https://docs.victoriametrics.com/vmagent/) and [vmstorage](https://docs.victoriametrics.com/cluster-victoriametrics/#architecture-overview) components continue using `GOGC=30`, since they are optimized for low memory allocations and low memory usage, so they do not benefit from the increased GOGC value too much. It is possible to override the default `GOGC` value in any VictoriaMetrics component by setting `GOGC` environment variable to the desired value. For example, `GOGC=200 ./path/to/vmagent` starts `vmagent` with `GOGC=200`. See [these docs](https://tip.golang.org/doc/gc-guide#GOGC) about `GOGC` tuning. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7902).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add export data functionality for the `Raw Query` page and the ability to import exported data into the `Query Analyzer` page. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7628).

View file

@ -82,7 +82,7 @@ VictoriaMetrics Enterprise components are available in the following forms:
It is allowed to run VictoriaMetrics Enterprise components in [cases listed here](#valid-cases-for-victoriametrics-enterprise).
Binary releases of VictoriaMetrics Enterprise are available [at the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
Enterprise binaries and packages have `enterprise` suffix in their names. For example, `victoria-metrics-linux-amd64-v1.109.0-enterprise.tar.gz`.
Enterprise binaries and packages have `enterprise` suffix in their names. For example, `victoria-metrics-linux-amd64-v1.109.1-enterprise.tar.gz`.
In order to run binary release of VictoriaMetrics Enterprise component, please download the `*-enterprise.tar.gz` archive for your OS and architecture
from the [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) and unpack it. Then run the unpacked binary.
@ -100,8 +100,8 @@ For example, the following command runs VictoriaMetrics Enterprise binary with t
obtained at [this page](https://victoriametrics.com/products/enterprise/trial/):
```sh
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.109.0/victoria-metrics-linux-amd64-v1.109.0-enterprise.tar.gz
tar -xzf victoria-metrics-linux-amd64-v1.109.0-enterprise.tar.gz
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.109.1/victoria-metrics-linux-amd64-v1.109.1-enterprise.tar.gz
tar -xzf victoria-metrics-linux-amd64-v1.109.1-enterprise.tar.gz
./victoria-metrics-prod -license=BASE64_ENCODED_LICENSE_KEY
```
@ -116,7 +116,7 @@ Alternatively, VictoriaMetrics Enterprise license can be stored in the file and
It is allowed to run VictoriaMetrics Enterprise components in [cases listed here](#valid-cases-for-victoriametrics-enterprise).
Docker images for VictoriaMetrics Enterprise are available [at VictoriaMetrics DockerHub](https://hub.docker.com/u/victoriametrics).
Enterprise docker images have `enterprise` suffix in their names. For example, `victoriametrics/victoria-metrics:v1.109.0-enterprise`.
Enterprise docker images have `enterprise` suffix in their names. For example, `victoriametrics/victoria-metrics:v1.109.1-enterprise`.
In order to run Docker image of VictoriaMetrics Enterprise component, it is required to provide the license key via command-line
flag as described [here](#binary-releases).
@ -126,13 +126,13 @@ Enterprise license key can be obtained at [this page](https://victoriametrics.co
For example, the following command runs VictoriaMetrics Enterprise Docker image with the specified license key:
```sh
docker run --name=victoria-metrics victoriametrics/victoria-metrics:v1.109.0-enterprise -license=BASE64_ENCODED_LICENSE_KEY
docker run --name=victoria-metrics victoriametrics/victoria-metrics:v1.109.1-enterprise -license=BASE64_ENCODED_LICENSE_KEY
```
Alternatively, the license code can be stored in the file and then referred via `-licenseFile` command-line flag:
```sh
docker run --name=victoria-metrics -v /vm-license:/vm-license victoriametrics/victoria-metrics:v1.109.0-enterprise -licenseFile=/path/to/vm-license
docker run --name=victoria-metrics -v /vm-license:/vm-license victoriametrics/victoria-metrics:v1.109.1-enterprise -licenseFile=/path/to/vm-license
```
Example docker-compose configuration:
@ -141,7 +141,7 @@ version: "3.5"
services:
victoriametrics:
container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.109.0
image: victoriametrics/victoria-metrics:v1.109.1
ports:
- 8428:8428
volumes:
@ -173,7 +173,7 @@ is used to provide key in plain-text:
```yaml
server:
image:
tag: v1.109.0-enterprise
tag: v1.109.1-enterprise
license:
key: {BASE64_ENCODED_LICENSE_KEY}
@ -184,7 +184,7 @@ In order to provide key via existing secret, the following values file is used:
```yaml
server:
image:
tag: v1.109.0-enterprise
tag: v1.109.1-enterprise
license:
secret:
@ -233,7 +233,7 @@ spec:
license:
key: {BASE64_ENCODED_LICENSE_KEY}
image:
tag: v1.109.0-enterprise
tag: v1.109.1-enterprise
```
In order to provide key via existing secret, the following custom resource is used:
@ -250,7 +250,7 @@ spec:
name: vm-license
key: license
image:
tag: v1.109.0-enterprise
tag: v1.109.1-enterprise
```
Example secret with license key:

View file

@ -236,27 +236,27 @@ services:
- grafana_data:/var/lib/grafana/
vmsingle:
image: victoriametrics/victoria-metrics:v1.109.0
image: victoriametrics/victoria-metrics:v1.109.1
command:
- -httpListenAddr=0.0.0.0:8429
vmstorage:
image: victoriametrics/vmstorage:v1.109.0-cluster
image: victoriametrics/vmstorage:v1.109.1-cluster
vminsert:
image: victoriametrics/vminsert:v1.109.0-cluster
image: victoriametrics/vminsert:v1.109.1-cluster
command:
- -storageNode=vmstorage:8400
- -httpListenAddr=0.0.0.0:8480
vmselect:
image: victoriametrics/vmselect:v1.109.0-cluster
image: victoriametrics/vmselect:v1.109.1-cluster
command:
- -storageNode=vmstorage:8401
- -httpListenAddr=0.0.0.0:8481
vmagent:
image: victoriametrics/vmagent:v1.109.0
image: victoriametrics/vmagent:v1.109.1
volumes:
- ./scrape.yaml:/etc/vmagent/config.yaml
command:
@ -265,7 +265,7 @@ services:
- -remoteWrite.url=http://vmsingle:8429/api/v1/write
vmgateway-cluster:
image: victoriametrics/vmgateway:v1.109.0-enterprise
image: victoriametrics/vmgateway:v1.109.1-enterprise
ports:
- 8431:8431
volumes:
@ -281,7 +281,7 @@ services:
- -auth.oidcDiscoveryEndpoints=http://keycloak:8080/realms/master/.well-known/openid-configuration
vmgateway-single:
image: victoriametrics/vmgateway:v1.109.0-enterprise
image: victoriametrics/vmgateway:v1.109.1-enterprise
ports:
- 8432:8431
volumes:
@ -393,7 +393,7 @@ Once iDP configuration is done, vmagent configuration needs to be updated to use
```yaml
vmagent:
image: victoriametrics/vmagent:v1.109.0
image: victoriametrics/vmagent:v1.109.1
volumes:
- ./scrape.yaml:/etc/vmagent/config.yaml
- ./vmagent-client-secret:/etc/vmagent/oauth2-client-secret

View file

@ -28,7 +28,7 @@ The diagram below shows a proposed solution
1. Groups of vminserts A know about only vmstorages A and this is explicitly specified via `-storageNode` [configuration](https://docs.victoriametrics.com/cluster-victoriametrics/#cluster-setup).
1. Groups of vminserts B know about only vmstorages B and this is explicitly specified via `-storageNode` [configuration](https://docs.victoriametrics.com/cluster-victoriametrics/#cluster-setup).
1. Groups of vminserts C know about only vmstorages A and this is explicitly specified via `-storageNode` [configuration](https://docs.victoriametrics.com/cluster-victoriametrics/#cluster-setup).
1. Groups of vminserts C know about only vmstorages C and this is explicitly specified via `-storageNode` [configuration](https://docs.victoriametrics.com/cluster-victoriametrics/#cluster-setup).
1. vmselect reads data from all vmstorage nodes via `-storageNode` [configuration](https://docs.victoriametrics.com/cluster-victoriametrics/#cluster-setup)
with [deduplication](https://docs.victoriametrics.com/cluster-victoriametrics/#deduplication) setting equal to vmagent's scrape interval or minimum interval between collected samples.
1. vmagent routes incoming metrics to the given set of `vminsert` nodes using relabeling rules specified at `-remoteWrite.urlRelabelConfig` [configuration](https://docs.victoriametrics.com/vmagent/#relabeling).
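A docker-compose-style sketch of such routing follows. The service names, the `dc` label and its values are illustrative assumptions; the mechanism is the `-remoteWrite.url`/`-remoteWrite.urlRelabelConfig` pairing referenced above, where each relabel config applies to the `-remoteWrite.url` given at the same position.
```yaml
# relabel-a.yml -- keep only the series destined for vminsert group A
# (the `dc` label and its value are illustrative assumptions)
- action: keep
  source_labels: [dc]
  regex: 'dc-a'
```
```yaml
# docker-compose sketch: each -remoteWrite.urlRelabelConfig applies to the
# -remoteWrite.url at the same position, so groups A and B receive disjoint
# subsets of the scraped series
services:
  vmagent:
    image: victoriametrics/vmagent:v1.109.1
    volumes:
      - ./scrape.yaml:/etc/vmagent/config.yaml
      - ./relabel-a.yml:/etc/vmagent/relabel-a.yml
      - ./relabel-b.yml:/etc/vmagent/relabel-b.yml
    command:
      - -promscrape.config=/etc/vmagent/config.yaml
      - -remoteWrite.url=http://vminsert-a:8480/insert/0/prometheus/api/v1/write
      - -remoteWrite.urlRelabelConfig=/etc/vmagent/relabel-a.yml
      - -remoteWrite.url=http://vminsert-b:8480/insert/0/prometheus/api/v1/write
      - -remoteWrite.urlRelabelConfig=/etc/vmagent/relabel-b.yml
```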

File diff suppressed because one or more lines are too long

Binary image files changed (previews not shown).

View file

@ -148,7 +148,7 @@ server:
* By running `helm install vmsingle vm/victoria-metrics-single` we install [VictoriaMetrics Single](https://docs.victoriametrics.com/single-server-victoriametrics/) to default [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) inside your cluster
* By adding `scrape: enable: true` we add and enable autodiscovery scraping from kubernetes cluster to [VictoriaMetrics Single](https://docs.victoriametrics.com/single-server-victoriametrics/)
* By adding `scrape: enabled: true` we add and enable autodiscovery scraping from kubernetes cluster to [VictoriaMetrics Single](https://docs.victoriametrics.com/single-server-victoriametrics/)
* On line 166 from [https://docs.victoriametrics.com/guides/examples/guide-vmsingle-values.yaml](https://docs.victoriametrics.com/guides/examples/guide-vmsingle-values.yaml) we added `metric_relabel_configs` section that will help us to show Kubernetes metrics on Grafana dashboard.

View file

@ -29,17 +29,17 @@ The command must display existing helm chart e.g.
```shell
NAME CHART VERSION APP VERSION DESCRIPTION
vm/victoria-logs-single 0.5.2 v0.15.0-victorialogs Victoria Logs Single version - high-performance...
vm/victoria-metrics-agent 0.10.9 v1.101.0 Victoria Metrics Agent - collects metrics from ...
vm/victoria-metrics-alert 0.9.9 v1.101.0 Victoria Metrics Alert - executes a list of giv...
vm/victoria-metrics-anomaly 1.3.0 v1.13.0 Victoria Metrics Anomaly Detection - a service ...
vm/victoria-metrics-auth 0.4.13 v1.101.0 Victoria Metrics Auth - is a simple auth proxy ...
vm/victoria-metrics-cluster 0.11.19 v1.101.0 Victoria Metrics Cluster version - high-perform...
vm/victoria-metrics-distributed 0.1.0 v1.101.0 A Helm chart for Running VMCluster on Multiple ...
vm/victoria-metrics-gateway 0.1.62 v1.101.0 Victoria Metrics Gateway - Auth & Rate-Limittin...
vm/victoria-metrics-k8s-stack 0.23.2 v1.101.0 Kubernetes monitoring on VictoriaMetrics stack....
vm/victoria-metrics-operator 0.32.2 v0.45.0 Victoria Metrics Operator
vm/victoria-metrics-single 0.9.22 v1.101.0 Victoria Metrics Single version - high-performa...
vm/victoria-logs-single 0.8.13 v1.5.0 Victoria Logs Single version - high-performance...
vm/victoria-metrics-agent 0.15.5 v1.109.1 Victoria Metrics Agent - collects metrics from ...
vm/victoria-metrics-alert 0.13.7 v1.109.1 Victoria Metrics Alert - executes a list of giv...
vm/victoria-metrics-anomaly 1.6.11 v1.18.8 Victoria Metrics Anomaly Detection - a service ...
vm/victoria-metrics-auth 0.8.5 v1.109.1 Victoria Metrics Auth - is a simple auth proxy ...
vm/victoria-metrics-cluster 0.17.2 v1.109.1 Victoria Metrics Cluster version - high-perform...
vm/victoria-metrics-distributed 0.7.3 v1.109.1 A Helm chart for Running VMCluster on Multiple ...
vm/victoria-metrics-gateway 0.6.5 v1.109.1 Victoria Metrics Gateway - Auth & Rate-Limittin...
vm/victoria-metrics-k8s-stack 0.33.5 v1.109.1 Kubernetes monitoring on VictoriaMetrics stack....
vm/victoria-metrics-operator 0.40.4 v0.51.3 Victoria Metrics Operator
vm/victoria-metrics-single 0.13.6 v1.109.1 Victoria Metrics Single version - high-performa...
```
## Installing the chart

View file

@ -2,6 +2,15 @@
- TODO
## 0.8.14
**Release date:** 20 Jan 2025
![Helm: v3](https://img.shields.io/badge/Helm-v3.14%2B-informational?color=informational&logo=helm&link=https%3A%2F%2Fgithub.com%2Fhelm%2Fhelm%2Freleases%2Ftag%2Fv3.14.0) ![AppVersion: v1.6.1](https://img.shields.io/badge/v1.6.1-success?logo=VictoriaMetrics&labelColor=gray&link=https%3A%2F%2Fdocs.victoriametrics.com%2Fvictorialogs%2Fchangelog%23v161)
- update victorialogs version to v1.6.1
- add `.Values.statefulSet.updateStrategy`. See [this issue](https://github.com/VictoriaMetrics/helm-charts/issues/1928) for details.
## 0.8.13
**Release date:** 14 Jan 2025

View file

@ -1,6 +1,6 @@
![Version](https://img.shields.io/badge/0.8.13-gray?logo=Helm&labelColor=gray&link=https%3A%2F%2Fdocs.victoriametrics.com%2Fhelm%2Fvictoria-logs-single%2Fchangelog%2F%230813)
![Version](https://img.shields.io/badge/0.8.14-gray?logo=Helm&labelColor=gray&link=https%3A%2F%2Fdocs.victoriametrics.com%2Fhelm%2Fvictoria-logs-single%2Fchangelog%2F%230814)
![ArtifactHub](https://img.shields.io/badge/ArtifactHub-informational?logoColor=white&color=417598&logo=artifacthub&link=https%3A%2F%2Fartifacthub.io%2Fpackages%2Fhelm%2Fvictoriametrics%2Fvictoria-logs-single)
![License](https://img.shields.io/github/license/VictoriaMetrics/helm-charts?labelColor=green&label=&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2Fhelm-charts%2Fblob%2Fmaster%2FLICENSE)
![Slack](https://img.shields.io/badge/Join-4A154B?logo=slack&link=https%3A%2F%2Fslack.victoriametrics.com)
@ -1135,6 +1135,17 @@ readOnlyRootFilesystem: true
</pre>
</td>
<td><p>Deploy order policy for StatefulSet pods</p>
</td>
</tr>
<tr>
<td>server.statefulSet.updateStrategy</td>
<td>object</td>
<td><pre class="helm-vars-default-value language-yaml" lang="plaintext">
<code class="language-yaml">{}
</code>
</pre>
</td>
<td><p>StatefulSet update strategy. Check <a href="https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies" target="_blank">here</a> for details.</p>
</td>
</tr>
<tr>

Some files were not shown because too many files have changed in this diff.