From df012f155332d1c14d1e942aea8e4b7e2254b629 Mon Sep 17 00:00:00 2001
From: Dmytro Kozlov
Date: Tue, 19 Dec 2023 15:57:33 +0100
Subject: [PATCH 001/109] docs: remove default value from the
`maxConcurrentInserts` flag (#5494)
---
README.md | 2 +-
docs/Cluster-VictoriaMetrics.md | 4 ++--
docs/README.md | 2 +-
docs/Single-server-VictoriaMetrics.md | 2 +-
docs/VictoriaLogs/README.md | 2 +-
docs/vmagent.md | 2 +-
6 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/README.md b/README.md
index b387501ab..e81401aee 100644
--- a/README.md
+++ b/README.md
@@ -2709,7 +2709,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
- The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration (default 32)
+ The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration.
-maxInsertRequestSize size
The maximum size in bytes of a single Prometheus remote_write API request
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
diff --git a/docs/Cluster-VictoriaMetrics.md b/docs/Cluster-VictoriaMetrics.md
index a15552702..83fd78292 100644
--- a/docs/Cluster-VictoriaMetrics.md
+++ b/docs/Cluster-VictoriaMetrics.md
@@ -1072,7 +1072,7 @@ Below is the output for `/path/to/vminsert -help`:
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
- The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration (default 32)
+ The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration.
-maxInsertRequestSize size
The maximum size in bytes of a single Prometheus remote_write API request
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
@@ -1552,7 +1552,7 @@ Below is the output for `/path/to/vmstorage -help`:
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
- The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration (default 32)
+ The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration.
-memory.allowedBytes size
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from the OS page cache resulting in higher disk IO usage
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)
diff --git a/docs/README.md b/docs/README.md
index 9fe49efe8..0a48307a5 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -2712,7 +2712,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
- The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration (default 32)
+ The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration.
-maxInsertRequestSize size
The maximum size in bytes of a single Prometheus remote_write API request
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index c27999349..fed6433f4 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -2720,7 +2720,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
- The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration (default 32)
+ The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration.
-maxInsertRequestSize size
The maximum size in bytes of a single Prometheus remote_write API request
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
diff --git a/docs/VictoriaLogs/README.md b/docs/VictoriaLogs/README.md
index a9d674941..ef7dea0cf 100644
--- a/docs/VictoriaLogs/README.md
+++ b/docs/VictoriaLogs/README.md
@@ -218,7 +218,7 @@ Pass `-help` to VictoriaLogs in order to see the list of supported command-line
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
- The maximum number of concurrent insert requests. The default value should work for most cases, since it minimizes memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration (default 12)
+ The maximum number of concurrent insert requests. The default value should work for most cases, since it minimizes memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration.
-memory.allowedBytes size
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from the OS page cache resulting in higher disk IO usage
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)
diff --git a/docs/vmagent.md b/docs/vmagent.md
index 8cae9da03..c28ba2b0b 100644
--- a/docs/vmagent.md
+++ b/docs/vmagent.md
@@ -1696,7 +1696,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-loggerWarnsPerSecondLimit int
Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
- The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration (default 32)
+ The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration.
-maxInsertRequestSize size
The maximum size in bytes of a single Prometheus remote_write API request
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
From a35e52114ba6bd957ca23e8d6b9469fda9c44028 Mon Sep 17 00:00:00 2001
From: Yury Molodov
Date: Tue, 19 Dec 2023 17:20:54 +0100
Subject: [PATCH 002/109] vmui: add vmanomaly explorer (#5401)
---
app/vmui/Makefile | 8 +
app/vmui/packages/vmui/config-overrides.js | 6 +-
app/vmui/packages/vmui/package.json | 6 +-
app/vmui/packages/vmui/src/AppAnomaly.tsx | 41 ++++
.../Line/LegendAnomaly/LegendAnomaly.tsx | 86 +++++++++
.../Chart/Line/LegendAnomaly/style.scss | 23 +++
.../Chart/Line/LineChart/LineChart.tsx | 13 +-
.../GlobalSettings/GlobalSettings.tsx | 9 +-
.../LimitsConfigurator/LimitsConfigurator.tsx | 6 +-
.../ServerConfigurator/ServerConfigurator.tsx | 56 +++++-
.../Configurators/GlobalSettings/style.scss | 16 ++
.../ExploreMetricItemGraph.tsx | 27 +--
.../vmui/src/components/Main/Icons/index.tsx | 10 +
.../src/components/Main/Select/Select.tsx | 8 +-
.../src/components/Main/Select/style.scss | 14 ++
.../components/Views/GraphView/GraphView.tsx | 29 ++-
.../packages/vmui/src/constants/navigation.ts | 91 +++++----
.../vmui/src/hooks/uplot/useLineTooltip.ts | 7 +-
.../packages/vmui/src/hooks/useFetchQuery.ts | 7 +-
.../layouts/AnomalyLayout/AnomalyLayout.tsx | 59 ++++++
.../AnomalyLayout/ControlsAnomalyLayout.tsx | 38 ++++
.../vmui/src/layouts/Header/Header.tsx | 25 ++-
.../layouts/Header/HeaderNav/HeaderNav.tsx | 27 ++-
.../Header/SidebarNav/SidebarHeader.tsx | 7 +-
.../src/layouts/LogsLayout/LogsLayout.tsx | 2 +-
.../vmui/src/layouts/LogsLayout/style.scss | 27 ---
.../src/layouts/MainLayout/MainLayout.tsx | 5 +-
.../CustomPanel/CustomPanelTabs/GraphTab.tsx | 72 +++++++
.../CustomPanel/CustomPanelTabs/TableTab.tsx | 47 +++++
.../CustomPanel/CustomPanelTabs/index.tsx | 45 +++++
.../CustomPanelTraces/CustomPanelTraces.tsx | 43 +++++
.../pages/CustomPanel/DisplayTypeSwitch.tsx | 9 +-
.../WarningLimitSeries/WarningLimitSeries.tsx | 50 +++++
.../vmui/src/pages/CustomPanel/index.tsx | 178 +++++-------------
.../vmui/src/pages/CustomPanel/style.scss | 3 +-
.../pages/ExploreAnomaly/ExploreAnomaly.tsx | 118 ++++++++++++
.../ExploreAnomalyHeader.tsx | 112 +++++++++++
.../ExploreAnomalyHeader/style.scss | 37 ++++
.../hooks/useFetchAnomalySeries.ts | 66 +++++++
.../ExploreAnomaly/hooks/useSetQueryParams.ts | 31 +++
.../PredefinedPanel/PredefinedPanel.tsx | 4 +-
.../hooks/useFetchDashboards.ts | 3 +-
app/vmui/packages/vmui/src/router/index.ts | 23 ++-
.../vmui/src/state/customPanel/reducer.ts | 6 +-
app/vmui/packages/vmui/src/types/appType.ts | 4 +
app/vmui/packages/vmui/src/types/index.ts | 6 +-
app/vmui/packages/vmui/src/types/uplot.ts | 13 +-
app/vmui/packages/vmui/src/utils/color.ts | 31 +--
.../vmui/src/utils/default-server-url.ts | 10 +-
app/vmui/packages/vmui/src/utils/storage.ts | 1 +
.../packages/vmui/src/utils/uplot/bands.ts | 41 ++++
.../packages/vmui/src/utils/uplot/index.ts | 1 +
.../packages/vmui/src/utils/uplot/scales.ts | 80 +++++++-
.../packages/vmui/src/utils/uplot/series.ts | 108 ++++++++---
54 files changed, 1452 insertions(+), 343 deletions(-)
create mode 100644 app/vmui/packages/vmui/src/AppAnomaly.tsx
create mode 100644 app/vmui/packages/vmui/src/components/Chart/Line/LegendAnomaly/LegendAnomaly.tsx
create mode 100644 app/vmui/packages/vmui/src/components/Chart/Line/LegendAnomaly/style.scss
create mode 100644 app/vmui/packages/vmui/src/layouts/AnomalyLayout/AnomalyLayout.tsx
create mode 100644 app/vmui/packages/vmui/src/layouts/AnomalyLayout/ControlsAnomalyLayout.tsx
delete mode 100644 app/vmui/packages/vmui/src/layouts/LogsLayout/style.scss
create mode 100644 app/vmui/packages/vmui/src/pages/CustomPanel/CustomPanelTabs/GraphTab.tsx
create mode 100644 app/vmui/packages/vmui/src/pages/CustomPanel/CustomPanelTabs/TableTab.tsx
create mode 100644 app/vmui/packages/vmui/src/pages/CustomPanel/CustomPanelTabs/index.tsx
create mode 100644 app/vmui/packages/vmui/src/pages/CustomPanel/CustomPanelTraces/CustomPanelTraces.tsx
create mode 100644 app/vmui/packages/vmui/src/pages/CustomPanel/WarningLimitSeries/WarningLimitSeries.tsx
create mode 100644 app/vmui/packages/vmui/src/pages/ExploreAnomaly/ExploreAnomaly.tsx
create mode 100644 app/vmui/packages/vmui/src/pages/ExploreAnomaly/ExploreAnomalyHeader/ExploreAnomalyHeader.tsx
create mode 100644 app/vmui/packages/vmui/src/pages/ExploreAnomaly/ExploreAnomalyHeader/style.scss
create mode 100644 app/vmui/packages/vmui/src/pages/ExploreAnomaly/hooks/useFetchAnomalySeries.ts
create mode 100644 app/vmui/packages/vmui/src/pages/ExploreAnomaly/hooks/useSetQueryParams.ts
create mode 100644 app/vmui/packages/vmui/src/types/appType.ts
create mode 100644 app/vmui/packages/vmui/src/utils/uplot/bands.ts
diff --git a/app/vmui/Makefile b/app/vmui/Makefile
index eb3354044..7d4297a2b 100644
--- a/app/vmui/Makefile
+++ b/app/vmui/Makefile
@@ -22,6 +22,14 @@ vmui-logs-build: vmui-package-base-image
--entrypoint=/bin/bash \
vmui-builder-image -c "npm install && npm run build:logs"
+vmui-anomaly-build: vmui-package-base-image
+ docker run --rm \
+ --user $(shell id -u):$(shell id -g) \
+ --mount type=bind,src="$(shell pwd)/app/vmui",dst=/build \
+ -w /build/packages/vmui \
+ --entrypoint=/bin/bash \
+ vmui-builder-image -c "npm install && npm run build:anomaly"
+
vmui-release: vmui-build
docker build -t ${DOCKER_NAMESPACE}/vmui:latest -f app/vmui/Dockerfile-web ./app/vmui/packages/vmui
docker tag ${DOCKER_NAMESPACE}/vmui:latest ${DOCKER_NAMESPACE}/vmui:${PKG_TAG}
diff --git a/app/vmui/packages/vmui/config-overrides.js b/app/vmui/packages/vmui/config-overrides.js
index 4b7a1e1c3..663e569e3 100644
--- a/app/vmui/packages/vmui/config-overrides.js
+++ b/app/vmui/packages/vmui/config-overrides.js
@@ -14,10 +14,12 @@ module.exports = override(
new webpack.NormalModuleReplacementPlugin(
/\.\/App/,
function (resource) {
- // eslint-disable-next-line no-undef
- if (process.env.REACT_APP_LOGS === "true") {
+ if (process.env.REACT_APP_TYPE === "logs") {
resource.request = "./AppLogs";
}
+ if (process.env.REACT_APP_TYPE === "anomaly") {
+ resource.request = "./AppAnomaly";
+ }
}
)
)
diff --git a/app/vmui/packages/vmui/package.json b/app/vmui/packages/vmui/package.json
index 7e9e58c06..ecf5e53c8 100644
--- a/app/vmui/packages/vmui/package.json
+++ b/app/vmui/packages/vmui/package.json
@@ -32,9 +32,11 @@
"scripts": {
"prestart": "npm run copy-metricsql-docs",
"start": "react-app-rewired start",
- "start:logs": "cross-env REACT_APP_LOGS=true npm run start",
+ "start:logs": "cross-env REACT_APP_TYPE=logs npm run start",
+ "start:anomaly": "cross-env REACT_APP_TYPE=anomaly npm run start",
"build": "GENERATE_SOURCEMAP=false react-app-rewired build",
- "build:logs": "cross-env REACT_APP_LOGS=true npm run build",
+ "build:logs": "cross-env REACT_APP_TYPE=logs npm run build",
+ "build:anomaly": "cross-env REACT_APP_TYPE=anomaly npm run build",
"lint": "eslint src --ext tsx,ts",
"lint:fix": "eslint src --ext tsx,ts --fix",
"analyze": "source-map-explorer 'build/static/js/*.js'",
diff --git a/app/vmui/packages/vmui/src/AppAnomaly.tsx b/app/vmui/packages/vmui/src/AppAnomaly.tsx
new file mode 100644
index 000000000..de139cd98
--- /dev/null
+++ b/app/vmui/packages/vmui/src/AppAnomaly.tsx
@@ -0,0 +1,41 @@
+import React, { FC, useState } from "preact/compat";
+import { HashRouter, Route, Routes } from "react-router-dom";
+import AppContextProvider from "./contexts/AppContextProvider";
+import ThemeProvider from "./components/Main/ThemeProvider/ThemeProvider";
+import AnomalyLayout from "./layouts/AnomalyLayout/AnomalyLayout";
+import ExploreAnomaly from "./pages/ExploreAnomaly/ExploreAnomaly";
+import router from "./router";
+import CustomPanel from "./pages/CustomPanel";
+
+const AppLogs: FC = () => {
+ const [loadedTheme, setLoadedTheme] = useState(false);
+
+ return <>
+
+
+ <>
+
+ {loadedTheme && (
+
+ }
+ >
+ }
+ />
+ }
+ />
+
+
+ )}
+ >
+
+
+ >;
+};
+
+export default AppLogs;
diff --git a/app/vmui/packages/vmui/src/components/Chart/Line/LegendAnomaly/LegendAnomaly.tsx b/app/vmui/packages/vmui/src/components/Chart/Line/LegendAnomaly/LegendAnomaly.tsx
new file mode 100644
index 000000000..51d17fa73
--- /dev/null
+++ b/app/vmui/packages/vmui/src/components/Chart/Line/LegendAnomaly/LegendAnomaly.tsx
@@ -0,0 +1,86 @@
+import React, { FC, useMemo } from "preact/compat";
+import { ForecastType, SeriesItem } from "../../../../types";
+import { anomalyColors } from "../../../../utils/color";
+import "./style.scss";
+
+type Props = {
+ series: SeriesItem[];
+};
+
+const titles: Record = {
+ [ForecastType.yhat]: "yhat",
+ [ForecastType.yhatLower]: "yhat_lower/_upper",
+ [ForecastType.yhatUpper]: "yhat_lower/_upper",
+ [ForecastType.anomaly]: "anomalies",
+ [ForecastType.training]: "training data",
+ [ForecastType.actual]: "y"
+};
+
+const LegendAnomaly: FC = ({ series }) => {
+
+ const uniqSeriesStyles = useMemo(() => {
+ const uniqSeries = series.reduce((accumulator, currentSeries) => {
+ const hasForecast = Object.prototype.hasOwnProperty.call(currentSeries, "forecast");
+ const isNotUpper = currentSeries.forecast !== ForecastType.yhatUpper;
+ const isUniqForecast = !accumulator.find(s => s.forecast === currentSeries.forecast);
+ if (hasForecast && isUniqForecast && isNotUpper) {
+ accumulator.push(currentSeries);
+ }
+ return accumulator;
+ }, [] as SeriesItem[]);
+
+ const trainingSeries = {
+ ...uniqSeries[0],
+ forecast: ForecastType.training,
+ color: anomalyColors[ForecastType.training],
+ };
+ uniqSeries.splice(1, 0, trainingSeries);
+
+ return uniqSeries.map(s => ({
+ ...s,
+ color: typeof s.stroke === "string" ? s.stroke : anomalyColors[s.forecast || ForecastType.actual],
+ forecast: titles[s.forecast || ForecastType.actual],
+ }));
+ }, [series]);
+
+ const container = document.getElementById("legendAnomaly");
+ if (!container) return null;
+
+ return <>
+
+ {/* TODO: remove .filter() after the correct training data has been added */}
+ {uniqSeriesStyles.filter(f => f.forecast !== titles[ForecastType.training]).map((s, i) => (
+
_Choose correct URL for VictoriaMetrics [here](https://docs.victoriametrics.com/url-examples.html#datadog)._
@@ -545,14 +542,12 @@ To configure DataDog agent via [configuration file](https://github.com/DataDog/d
add the following line:
-vmagent also can accept Datadog metrics format. Depending on where vmagent will forward data,
+[vmagent](https://docs.victoriametrics.com/vmagent.html) also can accept Datadog metrics format. Depending on where vmagent will forward data,
pick [single-node or cluster URL](https://docs.victoriametrics.com/url-examples.html#datadog) formats.
### Sending metrics to Datadog and VictoriaMetrics
@@ -567,12 +562,10 @@ sending via ENV variable `DD_ADDITIONAL_ENDPOINTS` or via configuration file `ad
Run DataDog using the following ENV variable with VictoriaMetrics as additional metrics receiver:
_Choose correct URL for VictoriaMetrics [here](https://docs.victoriametrics.com/url-examples.html#datadog)._
@@ -582,19 +575,16 @@ To configure DataDog Dual Shipping via [configuration file](https://docs.datadog
add the following line:
_Choose correct URL for VictoriaMetrics [here](https://docs.victoriametrics.com/url-examples.html#datadog)._
@@ -548,14 +545,12 @@ To configure DataDog agent via [configuration file](https://github.com/DataDog/d
add the following line:
-vmagent also can accept Datadog metrics format. Depending on where vmagent will forward data,
+[vmagent](https://docs.victoriametrics.com/vmagent.html) also can accept Datadog metrics format. Depending on where vmagent will forward data,
pick [single-node or cluster URL](https://docs.victoriametrics.com/url-examples.html#datadog) formats.
### Sending metrics to Datadog and VictoriaMetrics
@@ -570,12 +565,10 @@ sending via ENV variable `DD_ADDITIONAL_ENDPOINTS` or via configuration file `ad
Run DataDog using the following ENV variable with VictoriaMetrics as additional metrics receiver:
_Choose correct URL for VictoriaMetrics [here](https://docs.victoriametrics.com/url-examples.html#datadog)._
@@ -585,19 +578,16 @@ To configure DataDog Dual Shipping via [configuration file](https://docs.datadog
add the following line:
### Send via cURL
-See how to send data to VictoriaMetrics via
-[DataDog "submit metrics"](https://docs.victoriametrics.com/url-examples.html#datadogapiv1series) from command line.
+See how to send data to VictoriaMetrics via DataDog "submit metrics" API [here](https://docs.victoriametrics.com/url-examples.html#datadogapiv2series).
The imported data can be read via [export API](https://docs.victoriametrics.com/url-examples.html#apiv1export).
@@ -608,7 +598,7 @@ according to [DataDog metric naming recommendations](https://docs.datadoghq.com/
If you need accepting metric names as is without sanitizing, then pass `-datadog.sanitizeMetricName=false` command-line flag to VictoriaMetrics.
Extra labels may be added to all the written time series by passing `extra_label=name=value` query args.
-For example, `/datadog/api/v1/series?extra_label=foo=bar` would add `{foo="bar"}` label to all the ingested metrics.
+For example, `/datadog/api/v2/series?extra_label=foo=bar` would add `{foo="bar"}` label to all the ingested metrics.
DataDog agent sends the [configured tags](https://docs.datadoghq.com/getting_started/tagging/) to
undocumented endpoint - `/datadog/intake`. This endpoint isn't supported by VictoriaMetrics yet.
@@ -2583,7 +2573,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-csvTrimTimestamp duration
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-datadog.maxInsertRequestSize size
- The maximum size in bytes of a single DataDog POST request to /api/v1/series
+ The maximum size in bytes of a single DataDog POST request to /datadog/api/v2/series
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 67108864)
-datadog.sanitizeMetricName
Sanitize metric names for the ingested DataDog data to comply with DataDog behaviour described at https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics (default true)
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index fed6433f4..5e7bbcd0d 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -527,10 +527,8 @@ See also [vmagent](https://docs.victoriametrics.com/vmagent.html), which can be
## How to send data from DataDog agent
-VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/)
-or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/)
-via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics)
-at `/datadog/api/v1/series` path.
+VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/)
+via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v2/series` path.
### Sending metrics to VictoriaMetrics
@@ -542,12 +540,11 @@ or via [configuration file](https://docs.datadoghq.com/agent/guide/agent-configu
To configure DataDog agent via ENV variable add the following prefix:
-
_Choose correct URL for VictoriaMetrics [here](https://docs.victoriametrics.com/url-examples.html#datadog)._
@@ -556,14 +553,12 @@ To configure DataDog agent via [configuration file](https://github.com/DataDog/d
add the following line:
-vmagent also can accept Datadog metrics format. Depending on where vmagent will forward data,
+[vmagent](https://docs.victoriametrics.com/vmagent.html) also can accept Datadog metrics format. Depending on where vmagent will forward data,
pick [single-node or cluster URL](https://docs.victoriametrics.com/url-examples.html#datadog) formats.
### Sending metrics to Datadog and VictoriaMetrics
@@ -578,12 +573,10 @@ sending via ENV variable `DD_ADDITIONAL_ENDPOINTS` or via configuration file `ad
Run DataDog using the following ENV variable with VictoriaMetrics as additional metrics receiver:
_Choose correct URL for VictoriaMetrics [here](https://docs.victoriametrics.com/url-examples.html#datadog)._
@@ -593,19 +586,16 @@ To configure DataDog Dual Shipping via [configuration file](https://docs.datadog
add the following line:
### Send via cURL
-See how to send data to VictoriaMetrics via
-[DataDog "submit metrics"](https://docs.victoriametrics.com/url-examples.html#datadogapiv1series) from command line.
+See how to send data to VictoriaMetrics via DataDog "submit metrics" API [here](https://docs.victoriametrics.com/url-examples.html#datadogapiv2series).
The imported data can be read via [export API](https://docs.victoriametrics.com/url-examples.html#apiv1export).
@@ -616,7 +606,7 @@ according to [DataDog metric naming recommendations](https://docs.datadoghq.com/
If you need accepting metric names as is without sanitizing, then pass `-datadog.sanitizeMetricName=false` command-line flag to VictoriaMetrics.
Extra labels may be added to all the written time series by passing `extra_label=name=value` query args.
-For example, `/datadog/api/v1/series?extra_label=foo=bar` would add `{foo="bar"}` label to all the ingested metrics.
+For example, `/datadog/api/v2/series?extra_label=foo=bar` would add `{foo="bar"}` label to all the ingested metrics.
DataDog agent sends the [configured tags](https://docs.datadoghq.com/getting_started/tagging/) to
undocumented endpoint - `/datadog/intake`. This endpoint isn't supported by VictoriaMetrics yet.
@@ -2591,7 +2581,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-csvTrimTimestamp duration
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-datadog.maxInsertRequestSize size
- The maximum size in bytes of a single DataDog POST request to /api/v1/series
+ The maximum size in bytes of a single DataDog POST request to /datadog/api/v2/series
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 67108864)
-datadog.sanitizeMetricName
Sanitize metric names for the ingested DataDog data to comply with DataDog behaviour described at https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics (default true)
diff --git a/docs/url-examples.md b/docs/url-examples.md
index bc1a9983e..35d3f55fe 100644
--- a/docs/url-examples.md
+++ b/docs/url-examples.md
@@ -473,7 +473,7 @@ http://vminsert:8480/insert/0/datadog
### /datadog/api/v1/series
-**Imports data in DataDog format into VictoriaMetrics**
+**Imports data in DataDog v1 format into VictoriaMetrics**
Single-node VictoriaMetrics:
@@ -531,7 +531,79 @@ echo '
Additional information:
-* [How to send data from datadog agent](https://docs.victoriametrics.com/#how-to-send-data-from-datadog-agent)
+* [How to send data from DataDog agent](https://docs.victoriametrics.com/#how-to-send-data-from-datadog-agent)
+* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
+
+
+### /datadog/api/v2/series
+
+**Imports data in [DataDog v2](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) format into VictoriaMetrics**
+
+Single-node VictoriaMetrics:
+
diff --git a/docs/vmanomaly.md b/docs/vmanomaly.md
index 152d2b3ea..17f10f355 100644
--- a/docs/vmanomaly.md
+++ b/docs/vmanomaly.md
@@ -126,43 +126,67 @@ optionally preserving labels).
## Usage
-The vmanomaly accepts only one parameter -- config file path:
+> Starting from v1.5.0, vmanomaly requires a license key to run. You can obtain a trial license key [here](https://victoriametrics.com/products/enterprise/trial/).
-```sh
-python3 vmanomaly.py config_zscore.yaml
-```
-or
-```sh
-python3 -m vmanomaly config_zscore.yaml
-```
+> See [Getting started guide](https://docs.victoriametrics.com/guides/guide-vmanomaly-vmalert.html).
-It is also possible to split up config into multiple files, just list them all in the command line:
+### Config file
+There are 4 required sections in the config file:
-```sh
-python3 -m vmanomaly model_prophet.yaml io_csv.yaml scheduler_oneoff.yaml
+* `scheduler` - defines how often to run and make inferences, as well as what timerange to use to train the model.
+* `model` - specific model parameters and configurations,
+* `reader` - how to read data and where it is located
+* `writer` - where and how to write the generated output.
+
+[`monitoring`](#monitoring) - defines how to monitor work of *vmanomaly* service. This config section is *optional*.
+
+#### Config example
+Here is an example of a config file that will run the FB Prophet model, which will be retrained every 2 hours on 14 days of previous data. It will generate inference (including the `anomaly_score` metric) every 1 minute.
+
+
+You need to put your datasource urls to use it:
+
+```yaml
+scheduler:
+ infer_every: "1m"
+ fit_every: "2h"
+ fit_window: "14d"
+
+model:
+ class: "model.prophet.ProphetModel"
+ args:
+ interval_width: 0.98
+
+reader:
+ datasource_url: [YOUR_DATASOURCE_URL] #Example: "http://victoriametrics:8428/"
+ queries:
+ cache: "sum(rate(vm_cache_entries))"
+
+writer:
+ datasource_url: [YOUR_DATASOURCE_URL] # Example: "http://victoriametrics:8428/"
```
### Monitoring
-vmanomaly can be monitored by using push or pull approach.
+*vmanomaly* can be monitored by using a push or a pull approach.
It can push metrics to VictoriaMetrics or expose metrics in Prometheus exposition format.
#### Push approach
-vmanomaly can push metrics to VictoriaMetrics single-node or cluster version.
+*vmanomaly* can push metrics to VictoriaMetrics single-node or cluster version.
In order to enable push approach, specify `push` section in config file:
```yaml
monitoring:
push:
- url: "http://victoriametrics:8428/"
+ url: [YOUR_DATASOURCE_URL] #Example: "http://victoriametrics:8428/"
extra_labels:
job: "vmanomaly-push"
```
#### Pull approach
-vmanomaly can export internal metrics in Prometheus exposition format at `/metrics` page.
+*vmanomaly* can export internal metrics in Prometheus exposition format at `/metrics` page.
These metrics can be scraped via [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus.
In order to enable pull approach, specify `pull` section in config file:
@@ -176,10 +200,30 @@ monitoring:
This will expose metrics at `http://0.0.0.0:8080/metrics` page.
-### Licensing
+### Run vmanomaly Docker Container
-Starting from v1.5.0 vmanomaly requires a license key to run. You can obtain a trial license
-key [here](https://victoriametrics.com/products/enterprise/trial/).
+To use *vmanomaly* you need to pull the docker image:
+
+```sh
+docker pull us-docker.pkg.dev/victoriametrics-test/public/vmanomaly-trial:latest
+```
+
+You can put a tag on it for your convenience:
+
+```sh
+docker image tag us-docker.pkg.dev/victoriametrics-test/public/vmanomaly-trial vmanomaly
+```
+Here is an example of how to run *vmanomaly* docker container with [license file](#licensing):
+
+```sh
+docker run -it --net [YOUR_NETWORK] \
+ -v [YOUR_LICENSE_FILE_PATH]:/license.txt \
+ -v [YOUR_CONFIG_FILE_PATH]:/config.yml \
+ vmanomaly /config.yml \
+ --license-file=/license.txt
+```
+
+### Licensing
The license key can be passed via the following command-line flags:
```
@@ -194,10 +238,7 @@ The license key can be passed via the following command-line flags:
verification offline.
```
-Usage example:
-```
-python3 -m vmanomaly --license-file /path/to/license_file.yaml config.yaml
-```
+
In order to make it easier to monitor the license expiration date, the following metrics are exposed(see
[Monitoring](#monitoring) section for details on how to scrape them):
@@ -212,7 +253,7 @@ vm_license_expires_in_seconds 4.886608e+06
```
Example alerts for [vmalert](https://docs.victoriametrics.com/vmalert.html):
-{% raw %}
+
```yaml
groups:
- name: vm-license
@@ -236,4 +277,4 @@ groups:
description: "{{ $labels.instance }} of job {{ $labels.job }} license expires in {{ $value | humanizeDuration }}.
Please make sure to update the license before it expires."
```
-{% endraw %}
+
From 1f477aba419e4287b829318f14c45063cd5d0c94 Mon Sep 17 00:00:00 2001
From: Hui Wang
Date: Fri, 22 Dec 2023 23:07:47 +0800
Subject: [PATCH 019/109] =?UTF-8?q?vmalert:=20automatically=20add=20`expor?=
=?UTF-8?q?ted=5F`=20prefix=20for=20original=20evaluation=E2=80=A6=20(#539?=
=?UTF-8?q?8)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
automatically add `exported_` prefix for original evaluation result label if it's conflicted with external or reserved one,
previously it was overridden.
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5161
Signed-off-by: hagen1778
Co-authored-by: hagen1778
---
app/vmalert/rule/alerting.go | 40 ++++++++------
app/vmalert/rule/alerting_test.go | 87 +++++++++++++++++++++++++-----
app/vmalert/rule/recording.go | 5 +-
app/vmalert/rule/recording_test.go | 16 +++---
docs/CHANGELOG.md | 1 +
docs/vmalert.md | 29 +---------
6 files changed, 111 insertions(+), 67 deletions(-)
diff --git a/app/vmalert/rule/alerting.go b/app/vmalert/rule/alerting.go
index e10405250..7ca4427eb 100644
--- a/app/vmalert/rule/alerting.go
+++ b/app/vmalert/rule/alerting.go
@@ -237,11 +237,30 @@ type labelSet struct {
origin map[string]string
// processed labels includes origin labels
// plus extra labels (group labels, service labels like alertNameLabel).
- // in case of conflicts, extra labels are preferred.
+ // in case of key conflicts, origin labels are renamed with prefix `exported_` and extra labels are preferred.
+ // see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5161
// used as labels attached to notifier.Alert and ALERTS series written to remote storage.
processed map[string]string
}
+// add adds a value v with key k to origin and processed label sets.
+// On k conflicts in processed set, the passed v is preferred.
+// On k conflicts in origin set, the original value is preferred and copied
+// to processed with `exported_%k` key. The copy happens only if passed v isn't equal to origin[k] value.
+func (ls *labelSet) add(k, v string) {
+ ls.processed[k] = v
+ ov, ok := ls.origin[k]
+ if !ok {
+ ls.origin[k] = v
+ return
+ }
+ if ov != v {
+ // copy value only if v and ov are different
+ key := fmt.Sprintf("exported_%s", k)
+ ls.processed[key] = ov
+ }
+}
+
// toLabels converts labels from given Metric
// to labelSet which contains original and processed labels.
func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*labelSet, error) {
@@ -267,24 +286,14 @@ func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*l
return nil, fmt.Errorf("failed to expand labels: %w", err)
}
for k, v := range extraLabels {
- ls.processed[k] = v
- if _, ok := ls.origin[k]; !ok {
- ls.origin[k] = v
- }
+ ls.add(k, v)
}
-
// set additional labels to identify group and rule name
if ar.Name != "" {
- ls.processed[alertNameLabel] = ar.Name
- if _, ok := ls.origin[alertNameLabel]; !ok {
- ls.origin[alertNameLabel] = ar.Name
- }
+ ls.add(alertNameLabel, ar.Name)
}
if !*disableAlertGroupLabel && ar.GroupName != "" {
- ls.processed[alertGroupNameLabel] = ar.GroupName
- if _, ok := ls.origin[alertGroupNameLabel]; !ok {
- ls.origin[alertGroupNameLabel] = ar.GroupName
- }
+ ls.add(alertGroupNameLabel, ar.GroupName)
}
return ls, nil
}
@@ -414,8 +423,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
}
h := hash(ls.processed)
if _, ok := updated[h]; ok {
- // duplicate may be caused by extra labels
- // conflicting with the metric labels
+			// duplicate may be caused by the removal of the `__name__` label
curState.Err = fmt.Errorf("labels %v: %w", ls.processed, errDuplicate)
return nil, curState.Err
}
diff --git a/app/vmalert/rule/alerting_test.go b/app/vmalert/rule/alerting_test.go
index 91d90e31e..3e5e3503f 100644
--- a/app/vmalert/rule/alerting_test.go
+++ b/app/vmalert/rule/alerting_test.go
@@ -768,14 +768,16 @@ func TestAlertingRule_Exec_Negative(t *testing.T) {
ar.q = fq
// successful attempt
+	// label `job` will be overridden by rule extra label, the original value will be preserved as "exported_job"
fq.Add(metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "bar"))
+ fq.Add(metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "baz"))
_, err := ar.exec(context.TODO(), time.Now(), 0)
if err != nil {
t.Fatal(err)
}
- // label `job` will collide with rule extra label and will make both time series equal
- fq.Add(metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "baz"))
+	// label `__name__` will be omitted, causing duplicate results here
+ fq.Add(metricWithValueAndLabels(t, 1, "__name__", "foo_1", "job", "bar"))
_, err = ar.exec(context.TODO(), time.Now(), 0)
if !errors.Is(err, errDuplicate) {
t.Fatalf("expected to have %s error; got %s", errDuplicate, err)
@@ -899,20 +901,22 @@ func TestAlertingRule_Template(t *testing.T) {
metricWithValueAndLabels(t, 10, "__name__", "second", "instance", "bar", alertNameLabel, "override"),
},
map[uint64]*notifier.Alert{
- hash(map[string]string{alertNameLabel: "override label", "instance": "foo"}): {
+ hash(map[string]string{alertNameLabel: "override label", "exported_alertname": "override", "instance": "foo"}): {
Labels: map[string]string{
- alertNameLabel: "override label",
- "instance": "foo",
+ alertNameLabel: "override label",
+ "exported_alertname": "override",
+ "instance": "foo",
},
Annotations: map[string]string{
"summary": `first: Too high connection number for "foo"`,
"description": `override: It is 2 connections for "foo"`,
},
},
- hash(map[string]string{alertNameLabel: "override label", "instance": "bar"}): {
+ hash(map[string]string{alertNameLabel: "override label", "exported_alertname": "override", "instance": "bar"}): {
Labels: map[string]string{
- alertNameLabel: "override label",
- "instance": "bar",
+ alertNameLabel: "override label",
+ "exported_alertname": "override",
+ "instance": "bar",
},
Annotations: map[string]string{
"summary": `second: Too high connection number for "bar"`,
@@ -941,14 +945,18 @@ func TestAlertingRule_Template(t *testing.T) {
},
map[uint64]*notifier.Alert{
hash(map[string]string{
- alertNameLabel: "OriginLabels",
- alertGroupNameLabel: "Testing",
- "instance": "foo",
+ alertNameLabel: "OriginLabels",
+ "exported_alertname": "originAlertname",
+ alertGroupNameLabel: "Testing",
+ "exported_alertgroup": "originGroupname",
+ "instance": "foo",
}): {
Labels: map[string]string{
- alertNameLabel: "OriginLabels",
- alertGroupNameLabel: "Testing",
- "instance": "foo",
+ alertNameLabel: "OriginLabels",
+ "exported_alertname": "originAlertname",
+ alertGroupNameLabel: "Testing",
+ "exported_alertgroup": "originGroupname",
+ "instance": "foo",
},
Annotations: map[string]string{
"summary": `Alert "originAlertname(originGroupname)" for instance foo`,
@@ -1092,3 +1100,54 @@ func newTestAlertingRuleWithKeepFiring(name string, waitFor, keepFiringFor time.
rule.KeepFiringFor = keepFiringFor
return rule
}
+
+func TestAlertingRule_ToLabels(t *testing.T) {
+ metric := datasource.Metric{
+ Labels: []datasource.Label{
+ {Name: "instance", Value: "0.0.0.0:8800"},
+ {Name: "group", Value: "vmalert"},
+ {Name: "alertname", Value: "ConfigurationReloadFailure"},
+ },
+ Values: []float64{1},
+ Timestamps: []int64{time.Now().UnixNano()},
+ }
+
+ ar := &AlertingRule{
+ Labels: map[string]string{
+ "instance": "override", // this should override instance with new value
+ "group": "vmalert", // this shouldn't have effect since value in metric is equal
+ },
+ Expr: "sum(vmalert_alerting_rules_error) by(instance, group, alertname) > 0",
+ Name: "AlertingRulesError",
+ GroupName: "vmalert",
+ }
+
+ expectedOriginLabels := map[string]string{
+ "instance": "0.0.0.0:8800",
+ "group": "vmalert",
+ "alertname": "ConfigurationReloadFailure",
+ "alertgroup": "vmalert",
+ }
+
+ expectedProcessedLabels := map[string]string{
+ "instance": "override",
+ "exported_instance": "0.0.0.0:8800",
+ "alertname": "AlertingRulesError",
+ "exported_alertname": "ConfigurationReloadFailure",
+ "group": "vmalert",
+ "alertgroup": "vmalert",
+ }
+
+ ls, err := ar.toLabels(metric, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ if !reflect.DeepEqual(ls.origin, expectedOriginLabels) {
+ t.Errorf("origin labels mismatch, got: %v, want: %v", ls.origin, expectedOriginLabels)
+ }
+
+ if !reflect.DeepEqual(ls.processed, expectedProcessedLabels) {
+ t.Errorf("processed labels mismatch, got: %v, want: %v", ls.processed, expectedProcessedLabels)
+ }
+}
diff --git a/app/vmalert/rule/recording.go b/app/vmalert/rule/recording.go
index 08a69a8fe..c015dfe06 100644
--- a/app/vmalert/rule/recording.go
+++ b/app/vmalert/rule/recording.go
@@ -194,6 +194,9 @@ func (rr *RecordingRule) toTimeSeries(m datasource.Metric) prompbmarshal.TimeSer
labels["__name__"] = rr.Name
// override existing labels with configured ones
for k, v := range rr.Labels {
+ if _, ok := labels[k]; ok && labels[k] != v {
+ labels[fmt.Sprintf("exported_%s", k)] = labels[k]
+ }
labels[k] = v
}
return newTimeSeries(m.Values, m.Timestamps, labels)
@@ -203,7 +206,7 @@ func (rr *RecordingRule) toTimeSeries(m datasource.Metric) prompbmarshal.TimeSer
func (rr *RecordingRule) updateWith(r Rule) error {
nr, ok := r.(*RecordingRule)
if !ok {
- return fmt.Errorf("BUG: attempt to update recroding rule with wrong type %#v", r)
+ return fmt.Errorf("BUG: attempt to update recording rule with wrong type %#v", r)
}
rr.Expr = nr.Expr
rr.Labels = nr.Labels
diff --git a/app/vmalert/rule/recording_test.go b/app/vmalert/rule/recording_test.go
index 65b391f19..019d50fc0 100644
--- a/app/vmalert/rule/recording_test.go
+++ b/app/vmalert/rule/recording_test.go
@@ -61,7 +61,7 @@ func TestRecordingRule_Exec(t *testing.T) {
},
[]datasource.Metric{
metricWithValueAndLabels(t, 2, "__name__", "foo", "job", "foo"),
- metricWithValueAndLabels(t, 1, "__name__", "bar", "job", "bar"),
+ metricWithValueAndLabels(t, 1, "__name__", "bar", "job", "bar", "source", "origin"),
},
[]prompbmarshal.TimeSeries{
newTimeSeries([]float64{2}, []int64{timestamp.UnixNano()}, map[string]string{
@@ -70,9 +70,10 @@ func TestRecordingRule_Exec(t *testing.T) {
"source": "test",
}),
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
- "__name__": "job:foo",
- "job": "bar",
- "source": "test",
+ "__name__": "job:foo",
+ "job": "bar",
+ "source": "test",
+ "exported_source": "origin",
}),
},
},
@@ -254,10 +255,7 @@ func TestRecordingRule_ExecNegative(t *testing.T) {
fq.Add(metricWithValueAndLabels(t, 2, "__name__", "foo", "job", "bar"))
_, err = rr.exec(context.TODO(), time.Now(), 0)
- if err == nil {
- t.Fatalf("expected to get err; got nil")
- }
- if !strings.Contains(err.Error(), errDuplicate.Error()) {
- t.Fatalf("expected to get err %q; got %q insterad", errDuplicate, err)
+ if err != nil {
+ t.Fatal(err)
}
}
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 1e21d45c6..611d8f6c6 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -44,6 +44,7 @@ The sandbox cluster installation is running under the constant load generated by
* BUGFIX: `vminsert`: properly accept samples via [OpenTelemetry data ingestion protocol](https://docs.victoriametrics.com/#sending-data-via-opentelemetry) when these samples have no [resource attributes](https://opentelemetry.io/docs/instrumentation/go/resources/). Previously such samples were silently skipped.
* BUGFIX: `vmstorage`: added missing `-inmemoryDataFlushInterval` command-line flag, which was missing in [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) after implementing [this feature](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3337) in [v1.85.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.85.0).
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): check `-external.url` schema when starting vmalert, must be `http` or `https`. Before, alertmanager could reject alert notifications if `-external.url` contained no or wrong schema.
+* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): automatically add `exported_` prefix for original evaluation result label if it's conflicted with external or reserved one, previously it was overridden. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5161).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly handle queries, which wrap [rollup functions](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions) with multiple arguments without explicitly specified lookbehind window in square brackets into [aggregate functions](https://docs.victoriametrics.com/MetricsQL.html#aggregate-functions). For example, `sum(quantile_over_time(0.5, process_resident_memory_bytes))` was resulting to `expecting at least 2 args to ...; got 1 args` error. Thanks to @atykhyy for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5414).
* BUGFIX: [vmctl](https://docs.victoriametrics.com/vmctl.html): retry on import errors in `vm-native` mode. Before, retries happened only on writes into a network connection between source and destination. But errors returned by server after all the data was transmitted were logged, but not retried.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly assume role with [AWS IRSA authorization](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). Previously role chaining was not supported. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3822) for details.
diff --git a/docs/vmalert.md b/docs/vmalert.md
index a427e6a0f..65128ae13 100644
--- a/docs/vmalert.md
+++ b/docs/vmalert.md
@@ -100,6 +100,8 @@ See the full list of configuration flags in [configuration](#configuration) sect
If you run multiple `vmalert` services for the same datastore or AlertManager - do not forget
to specify different `-external.label` command-line flags in order to define which `vmalert` generated rules or alerts.
+If rule result metrics have a label that conflicts with `-external.label`, `vmalert` will automatically rename
+it with the `exported_` prefix.
Configuration for [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/)
and [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) rules is very
@@ -896,33 +898,6 @@ max(vmalert_alerting_rules_last_evaluation_series_fetched) by(group, alertname)
See more details [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4039).
This feature is available only if vmalert is using VictoriaMetrics v1.90 or higher as a datasource.
-### Series with the same labelset
-
-vmalert can produce the following error message during rules evaluation:
-```
-result contains metrics with the same labelset after applying rule labels
-```
-
-The error means there is a collision between [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
-after applying extra labels to result.
-
-For example, a rule with `expr: foo > 0` returns two distinct time series in response:
-```
-foo{bar="baz"} 1
-foo{bar="qux"} 2
-```
-
-If user configures `-external.label=bar=baz` cmd-line flag to enforce
-adding `bar="baz"` label-value pair, then time series won't be distinct anymore:
-```
-foo{bar="baz"} 1
-foo{bar="baz"} 2 # 'bar' label was overriden by `-external.label=bar=baz
-```
-
-The same issue can be caused by collision of configured `labels` on [Group](#groups) or [Rule](#rules) levels.
-To fix it one should avoid collisions by carefully picking label overrides in configuration.
-
-
## Security
See general recommendations regarding security [here](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#security).
From 910a39ad7227c77fa1922e0d55222d10eef5a563 Mon Sep 17 00:00:00 2001
From: hagen1778
Date: Fri, 22 Dec 2023 16:10:01 +0100
Subject: [PATCH 020/109] vendor: go mod tidy & go mod vendor
Signed-off-by: hagen1778
---
go.sum | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/go.sum b/go.sum
index 03a063261..6fdf75f23 100644
--- a/go.sum
+++ b/go.sum
@@ -63,8 +63,8 @@ github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkT
github.com/VictoriaMetrics/fasthttp v1.2.0 h1:nd9Wng4DlNtaI27WlYh5mGXCJOmee/2c2blTJwfyU9I=
github.com/VictoriaMetrics/fasthttp v1.2.0/go.mod h1:zv5YSmasAoSyv8sBVexfArzFDIGGTN4TfCKAtAw7IfE=
github.com/VictoriaMetrics/metrics v1.24.0/go.mod h1:eFT25kvsTidQFHb6U0oa0rTrDRdz4xTYjpL8+UPohys=
-github.com/VictoriaMetrics/metrics v1.29.0 h1:3qC+jcvymGJaQKt6wsXIlJieVFQwD/par9J1Bxul+Mc=
-github.com/VictoriaMetrics/metrics v1.29.0/go.mod h1:r7hveu6xMdUACXvB8TYdAj8WEsKzWB0EkpJN+RDtOf8=
+github.com/VictoriaMetrics/metrics v1.29.1 h1:yTORfGeO1T0C6P/tEeT4Mf7rBU5TUu3kjmHvmlaoeO8=
+github.com/VictoriaMetrics/metrics v1.29.1/go.mod h1:r7hveu6xMdUACXvB8TYdAj8WEsKzWB0EkpJN+RDtOf8=
github.com/VictoriaMetrics/metricsql v0.70.0 h1:G0k/m1yAF6pmk0dM3VT9/XI5PZ8dL7EbcLhREf4bgeI=
github.com/VictoriaMetrics/metricsql v0.70.0/go.mod h1:k4UaP/+CjuZslIjd+kCigNG9TQmUqh5v0TP/nMEy90I=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
From 95edeffbc6465a6122833aa886164c38901c1591 Mon Sep 17 00:00:00 2001
From: hagen1778
Date: Fri, 22 Dec 2023 16:42:33 +0100
Subject: [PATCH 021/109] docs: add link to sandbox to the Grafana section
Signed-off-by: hagen1778
---
README.md | 2 ++
docs/README.md | 2 ++
docs/Single-server-VictoriaMetrics.md | 2 ++
3 files changed, 6 insertions(+)
diff --git a/README.md b/README.md
index ca62eff47..66cd74aa4 100644
--- a/README.md
+++ b/README.md
@@ -363,6 +363,8 @@ See more in [description](https://github.com/VictoriaMetrics/grafana-datasource#
Creating a datasource may require [specific permissions](https://grafana.com/docs/grafana/latest/administration/data-source-management/).
If you don't see an option to create a data source - try contacting system administrator.
+Grafana playground is available for viewing at our [sandbox](https://play-grafana.victoriametrics.com).
+
## How to upgrade VictoriaMetrics
VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking [the CHANGELOG page](https://docs.victoriametrics.com/CHANGELOG.html) and performing regular upgrades.
diff --git a/docs/README.md b/docs/README.md
index 758ae2274..98ba260cb 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -366,6 +366,8 @@ See more in [description](https://github.com/VictoriaMetrics/grafana-datasource#
Creating a datasource may require [specific permissions](https://grafana.com/docs/grafana/latest/administration/data-source-management/).
If you don't see an option to create a data source - try contacting system administrator.
+Grafana playground is available for viewing at our [sandbox](https://play-grafana.victoriametrics.com).
+
## How to upgrade VictoriaMetrics
VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking [the CHANGELOG page](https://docs.victoriametrics.com/CHANGELOG.html) and performing regular upgrades.
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index 5e7bbcd0d..040f6eea8 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -374,6 +374,8 @@ See more in [description](https://github.com/VictoriaMetrics/grafana-datasource#
Creating a datasource may require [specific permissions](https://grafana.com/docs/grafana/latest/administration/data-source-management/).
If you don't see an option to create a data source - try contacting system administrator.
+Grafana playground is available for viewing at our [sandbox](https://play-grafana.victoriametrics.com).
+
## How to upgrade VictoriaMetrics
VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking [the CHANGELOG page](https://docs.victoriametrics.com/CHANGELOG.html) and performing regular upgrades.
From 52692d001ac505a1d792f6a40aba6cc426dd5565 Mon Sep 17 00:00:00 2001
From: Dan Dascalescu
Date: Fri, 22 Dec 2023 14:02:14 -0500
Subject: [PATCH 022/109] docs: fix English and rm dupe sentence in README
(#5523)
---
README.md | 37 +++++++++++++++++--------------------
1 file changed, 17 insertions(+), 20 deletions(-)
diff --git a/README.md b/README.md
index 66cd74aa4..2d951cb53 100644
--- a/README.md
+++ b/README.md
@@ -22,17 +22,17 @@ The cluster version of VictoriaMetrics is available [here](https://docs.victoria
Learn more about [key concepts](https://docs.victoriametrics.com/keyConcepts.html) of VictoriaMetrics and follow the
[quick start guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.
-There is also user-friendly database for logs - [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/).
+There is also a user-friendly database for logs - [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/).
-If you have questions about VictoriaMetrics, then feel free asking them at [VictoriaMetrics community Slack chat](https://slack.victoriametrics.com/).
+If you have questions about VictoriaMetrics, then feel free asking them in the [VictoriaMetrics community Slack chat](https://slack.victoriametrics.com/).
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
See [features available in enterprise package](https://docs.victoriametrics.com/enterprise.html).
Enterprise binaries can be downloaded and evaluated for free
from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
-See how to request a free trial license [here](https://victoriametrics.com/products/enterprise/trial/).
+You can also [request a free trial license](https://victoriametrics.com/products/enterprise/trial/).
-VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).
+VictoriaMetrics is developed at a fast pace, so it is recommended to check the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) periodically, and to perform [regular upgrades](#how-to-upgrade-victoriametrics).
VictoriaMetrics has achieved security certifications for Database Software Development and Software-Based Monitoring Services. We apply strict security measures in everything we do. See our [Security page](https://victoriametrics.com/security/) for more details.
@@ -41,19 +41,19 @@ VictoriaMetrics has achieved security certifications for Database Software Devel
VictoriaMetrics has the following prominent features:
* It can be used as long-term storage for Prometheus. See [these docs](#prometheus-setup) for details.
-* It can be used as a drop-in replacement for Prometheus in Grafana, because it supports [Prometheus querying API](#prometheus-querying-api-usage).
-* It can be used as a drop-in replacement for Graphite in Grafana, because it supports [Graphite API](#graphite-api-usage).
+* It can be used as a drop-in replacement for Prometheus in Grafana, because it supports the [Prometheus querying API](#prometheus-querying-api-usage).
+* It can be used as a drop-in replacement for Graphite in Grafana, because it supports the [Graphite API](#graphite-api-usage).
VictoriaMetrics allows reducing infrastructure costs by more than 10x comparing to Graphite - see [this case study](https://docs.victoriametrics.com/CaseStudies.html#grammarly).
* It is easy to setup and operate:
* VictoriaMetrics consists of a single [small executable](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d)
without external dependencies.
* All the configuration is done via explicit command-line flags with reasonable defaults.
- * All the data is stored in a single directory pointed by `-storageDataPath` command-line flag.
+ * All the data is stored in a single directory specified by the `-storageDataPath` command-line flag.
* Easy and fast backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282)
can be done with [vmbackup](https://docs.victoriametrics.com/vmbackup.html) / [vmrestore](https://docs.victoriametrics.com/vmrestore.html) tools.
See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
-* It implements PromQL-like query language - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which provides improved functionality on top of PromQL.
-* It provides global query view. Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics. Later this data may be queried via a single query.
+* It implements a PromQL-like query language - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which provides improved functionality on top of PromQL.
+* It provides a global query view. Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics. Later this data may be queried via a single query.
* It provides high performance and good vertical and horizontal scalability for both
[data ingestion](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b)
and [data querying](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4).
@@ -62,9 +62,9 @@ VictoriaMetrics has the following prominent features:
and [up to 7x less RAM than Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f)
when dealing with millions of unique time series (aka [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality)).
* It is optimized for time series with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
-* It provides high data compression, so up to 70x more data points may be stored into limited storage comparing to TimescaleDB
- according to [these benchmarks](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4)
- and up to 7x less storage space is required compared to Prometheus, Thanos or Cortex
+* It provides high data compression: up to 70x more data points may be stored into limited storage compared with TimescaleDB
+ according to [these benchmarks](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4),
+ and up to 7x less storage space is required compared to Prometheus, Thanos or Cortex.
according to [this benchmark](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f).
* It is optimized for storage with high-latency IO and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc).
See [disk IO graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
@@ -75,7 +75,7 @@ VictoriaMetrics has the following prominent features:
from [PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
* It protects the storage from data corruption on unclean shutdown (i.e. OOM, hardware reset or `kill -9`) thanks to
[the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
-* It supports metrics' scraping, ingestion and [backfilling](#backfilling) via the following protocols:
+* It supports metrics scraping, ingestion and [backfilling](#backfilling) via the following protocols:
* [Metrics scraping from Prometheus exporters](#how-to-scrape-prometheus-exporters-such-as-node-exporter).
* [Prometheus remote write API](#prometheus-setup).
* [Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format).
@@ -95,7 +95,7 @@ VictoriaMetrics has the following prominent features:
[high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data
and various [Enterprise workloads](https://docs.victoriametrics.com/enterprise.html).
-* It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
+* It has an open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
* It can store data on [NFS-based storages](https://en.wikipedia.org/wiki/Network_File_System) such as [Amazon EFS](https://aws.amazon.com/efs/)
and [Google Filestore](https://cloud.google.com/filestore).
@@ -138,7 +138,7 @@ See also [articles and slides about VictoriaMetrics from our users](https://docs
### Install
-To quickly try VictoriaMetrics, just download [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
+To quickly try VictoriaMetrics, just download the [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
or [Docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and start it with the desired command-line flags.
See also [QuickStart guide](https://docs.victoriametrics.com/Quick-Start.html) for additional information.
@@ -155,10 +155,10 @@ VictoriaMetrics can also be installed via these installation methods:
The following command-line flags are used the most:
-* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory.
+* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. The default path is `victoria-metrics-data` in the current working directory.
* `-retentionPeriod` - retention for stored data. Older data is automatically deleted. Default retention is 1 month (31 days). The minimum retention period is 24h or 1d. See [these docs](#retention) for more details.
-Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).
+Other flags have good enough default values, so set them only if you really need to. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).
The following docs may be useful during initial VictoriaMetrics setup:
* [How to set up scraping of Prometheus-compatible targets](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
@@ -172,9 +172,6 @@ VictoriaMetrics accepts [Prometheus querying API requests](#prometheus-querying-
It is recommended setting up [monitoring](#monitoring) for VictoriaMetrics.
-VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).
-
-
### Environment variables
All the VictoriaMetrics components allow referring environment variables in `yaml` configuration files (such as `-promscrape.config`)
From 35dd6e5e8e180e8153c2d1b7c5e3a3918b77bda9 Mon Sep 17 00:00:00 2001
From: hagen1778
Date: Fri, 22 Dec 2023 21:34:26 +0100
Subject: [PATCH 023/109] docs: docs-sync after
52692d001ac505a1d792f6a40aba6cc426dd5565
Signed-off-by: hagen1778
---
docs/README.md | 37 ++++++++++++---------------
docs/Single-server-VictoriaMetrics.md | 37 ++++++++++++---------------
2 files changed, 34 insertions(+), 40 deletions(-)
diff --git a/docs/README.md b/docs/README.md
index 98ba260cb..6907c9089 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -25,17 +25,17 @@ The cluster version of VictoriaMetrics is available [here](https://docs.victoria
Learn more about [key concepts](https://docs.victoriametrics.com/keyConcepts.html) of VictoriaMetrics and follow the
[quick start guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.
-There is also user-friendly database for logs - [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/).
+There is also a user-friendly database for logs - [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/).
-If you have questions about VictoriaMetrics, then feel free asking them at [VictoriaMetrics community Slack chat](https://slack.victoriametrics.com/).
+If you have questions about VictoriaMetrics, then feel free to ask them in the [VictoriaMetrics community Slack chat](https://slack.victoriametrics.com/).
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
See [features available in enterprise package](https://docs.victoriametrics.com/enterprise.html).
Enterprise binaries can be downloaded and evaluated for free
from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
-See how to request a free trial license [here](https://victoriametrics.com/products/enterprise/trial/).
+You can also [request a free trial license](https://victoriametrics.com/products/enterprise/trial/).
-VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).
+VictoriaMetrics is developed at a fast pace, so it is recommended to check the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) periodically and to perform [regular upgrades](#how-to-upgrade-victoriametrics).
VictoriaMetrics has achieved security certifications for Database Software Development and Software-Based Monitoring Services. We apply strict security measures in everything we do. See our [Security page](https://victoriametrics.com/security/) for more details.
@@ -44,19 +44,19 @@ VictoriaMetrics has achieved security certifications for Database Software Devel
VictoriaMetrics has the following prominent features:
* It can be used as long-term storage for Prometheus. See [these docs](#prometheus-setup) for details.
-* It can be used as a drop-in replacement for Prometheus in Grafana, because it supports [Prometheus querying API](#prometheus-querying-api-usage).
-* It can be used as a drop-in replacement for Graphite in Grafana, because it supports [Graphite API](#graphite-api-usage).
+* It can be used as a drop-in replacement for Prometheus in Grafana, because it supports the [Prometheus querying API](#prometheus-querying-api-usage).
+* It can be used as a drop-in replacement for Graphite in Grafana, because it supports the [Graphite API](#graphite-api-usage).
VictoriaMetrics allows reducing infrastructure costs by more than 10x comparing to Graphite - see [this case study](https://docs.victoriametrics.com/CaseStudies.html#grammarly).
* It is easy to setup and operate:
* VictoriaMetrics consists of a single [small executable](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d)
without external dependencies.
* All the configuration is done via explicit command-line flags with reasonable defaults.
- * All the data is stored in a single directory pointed by `-storageDataPath` command-line flag.
+ * All the data is stored in a single directory specified by the `-storageDataPath` command-line flag.
* Easy and fast backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282)
can be done with [vmbackup](https://docs.victoriametrics.com/vmbackup.html) / [vmrestore](https://docs.victoriametrics.com/vmrestore.html) tools.
See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
-* It implements PromQL-like query language - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which provides improved functionality on top of PromQL.
-* It provides global query view. Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics. Later this data may be queried via a single query.
+* It implements a PromQL-like query language - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which provides improved functionality on top of PromQL.
+* It provides a global query view. Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics. Later this data may be queried via a single query.
* It provides high performance and good vertical and horizontal scalability for both
[data ingestion](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b)
and [data querying](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4).
@@ -65,9 +65,9 @@ VictoriaMetrics has the following prominent features:
and [up to 7x less RAM than Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f)
when dealing with millions of unique time series (aka [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality)).
* It is optimized for time series with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
-* It provides high data compression, so up to 70x more data points may be stored into limited storage comparing to TimescaleDB
- according to [these benchmarks](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4)
- and up to 7x less storage space is required compared to Prometheus, Thanos or Cortex
+* It provides high data compression: up to 70x more data points may be stored into limited storage compared with TimescaleDB
+ according to [these benchmarks](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4),
+ and up to 7x less storage space is required compared to Prometheus, Thanos or Cortex
according to [this benchmark](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f).
* It is optimized for storage with high-latency IO and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc).
See [disk IO graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
@@ -78,7 +78,7 @@ VictoriaMetrics has the following prominent features:
from [PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
* It protects the storage from data corruption on unclean shutdown (i.e. OOM, hardware reset or `kill -9`) thanks to
[the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
-* It supports metrics' scraping, ingestion and [backfilling](#backfilling) via the following protocols:
+* It supports metrics scraping, ingestion and [backfilling](#backfilling) via the following protocols:
* [Metrics scraping from Prometheus exporters](#how-to-scrape-prometheus-exporters-such-as-node-exporter).
* [Prometheus remote write API](#prometheus-setup).
* [Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format).
@@ -98,7 +98,7 @@ VictoriaMetrics has the following prominent features:
[high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data
and various [Enterprise workloads](https://docs.victoriametrics.com/enterprise.html).
-* It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
+* It has an open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
* It can store data on [NFS-based storages](https://en.wikipedia.org/wiki/Network_File_System) such as [Amazon EFS](https://aws.amazon.com/efs/)
and [Google Filestore](https://cloud.google.com/filestore).
@@ -141,7 +141,7 @@ See also [articles and slides about VictoriaMetrics from our users](https://docs
### Install
-To quickly try VictoriaMetrics, just download [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
+To quickly try VictoriaMetrics, just download the [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
or [Docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and start it with the desired command-line flags.
See also [QuickStart guide](https://docs.victoriametrics.com/Quick-Start.html) for additional information.
@@ -158,10 +158,10 @@ VictoriaMetrics can also be installed via these installation methods:
The following command-line flags are used the most:
-* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory.
+* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. The default path is `victoria-metrics-data` in the current working directory.
* `-retentionPeriod` - retention for stored data. Older data is automatically deleted. Default retention is 1 month (31 days). The minimum retention period is 24h or 1d. See [these docs](#retention) for more details.
-Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).
+Other flags have good enough default values, so set them only if you really need to. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).
The following docs may be useful during initial VictoriaMetrics setup:
* [How to set up scraping of Prometheus-compatible targets](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
@@ -175,9 +175,6 @@ VictoriaMetrics accepts [Prometheus querying API requests](#prometheus-querying-
It is recommended setting up [monitoring](#monitoring) for VictoriaMetrics.
-VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).
-
-
### Environment variables
All the VictoriaMetrics components allow referring environment variables in `yaml` configuration files (such as `-promscrape.config`)
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index 040f6eea8..129388b20 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -33,17 +33,17 @@ The cluster version of VictoriaMetrics is available [here](https://docs.victoria
Learn more about [key concepts](https://docs.victoriametrics.com/keyConcepts.html) of VictoriaMetrics and follow the
[quick start guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.
-There is also user-friendly database for logs - [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/).
+There is also a user-friendly database for logs - [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/).
-If you have questions about VictoriaMetrics, then feel free asking them at [VictoriaMetrics community Slack chat](https://slack.victoriametrics.com/).
+If you have questions about VictoriaMetrics, then feel free to ask them in the [VictoriaMetrics community Slack chat](https://slack.victoriametrics.com/).
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
See [features available in enterprise package](https://docs.victoriametrics.com/enterprise.html).
Enterprise binaries can be downloaded and evaluated for free
from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
-See how to request a free trial license [here](https://victoriametrics.com/products/enterprise/trial/).
+You can also [request a free trial license](https://victoriametrics.com/products/enterprise/trial/).
-VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).
+VictoriaMetrics is developed at a fast pace, so it is recommended to check the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) periodically and to perform [regular upgrades](#how-to-upgrade-victoriametrics).
VictoriaMetrics has achieved security certifications for Database Software Development and Software-Based Monitoring Services. We apply strict security measures in everything we do. See our [Security page](https://victoriametrics.com/security/) for more details.
@@ -52,19 +52,19 @@ VictoriaMetrics has achieved security certifications for Database Software Devel
VictoriaMetrics has the following prominent features:
* It can be used as long-term storage for Prometheus. See [these docs](#prometheus-setup) for details.
-* It can be used as a drop-in replacement for Prometheus in Grafana, because it supports [Prometheus querying API](#prometheus-querying-api-usage).
-* It can be used as a drop-in replacement for Graphite in Grafana, because it supports [Graphite API](#graphite-api-usage).
+* It can be used as a drop-in replacement for Prometheus in Grafana, because it supports the [Prometheus querying API](#prometheus-querying-api-usage).
+* It can be used as a drop-in replacement for Graphite in Grafana, because it supports the [Graphite API](#graphite-api-usage).
VictoriaMetrics allows reducing infrastructure costs by more than 10x comparing to Graphite - see [this case study](https://docs.victoriametrics.com/CaseStudies.html#grammarly).
* It is easy to setup and operate:
* VictoriaMetrics consists of a single [small executable](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d)
without external dependencies.
* All the configuration is done via explicit command-line flags with reasonable defaults.
- * All the data is stored in a single directory pointed by `-storageDataPath` command-line flag.
+ * All the data is stored in a single directory specified by the `-storageDataPath` command-line flag.
* Easy and fast backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282)
can be done with [vmbackup](https://docs.victoriametrics.com/vmbackup.html) / [vmrestore](https://docs.victoriametrics.com/vmrestore.html) tools.
See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
-* It implements PromQL-like query language - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which provides improved functionality on top of PromQL.
-* It provides global query view. Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics. Later this data may be queried via a single query.
+* It implements a PromQL-like query language - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which provides improved functionality on top of PromQL.
+* It provides a global query view. Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics. Later this data may be queried via a single query.
* It provides high performance and good vertical and horizontal scalability for both
[data ingestion](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b)
and [data querying](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4).
@@ -73,9 +73,9 @@ VictoriaMetrics has the following prominent features:
and [up to 7x less RAM than Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f)
when dealing with millions of unique time series (aka [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality)).
* It is optimized for time series with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
-* It provides high data compression, so up to 70x more data points may be stored into limited storage comparing to TimescaleDB
- according to [these benchmarks](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4)
- and up to 7x less storage space is required compared to Prometheus, Thanos or Cortex
+* It provides high data compression: up to 70x more data points may be stored into limited storage compared with TimescaleDB
+ according to [these benchmarks](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4),
+ and up to 7x less storage space is required compared to Prometheus, Thanos or Cortex
according to [this benchmark](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f).
* It is optimized for storage with high-latency IO and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc).
See [disk IO graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
@@ -86,7 +86,7 @@ VictoriaMetrics has the following prominent features:
from [PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
* It protects the storage from data corruption on unclean shutdown (i.e. OOM, hardware reset or `kill -9`) thanks to
[the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
-* It supports metrics' scraping, ingestion and [backfilling](#backfilling) via the following protocols:
+* It supports metrics scraping, ingestion and [backfilling](#backfilling) via the following protocols:
* [Metrics scraping from Prometheus exporters](#how-to-scrape-prometheus-exporters-such-as-node-exporter).
* [Prometheus remote write API](#prometheus-setup).
* [Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format).
@@ -106,7 +106,7 @@ VictoriaMetrics has the following prominent features:
[high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data
and various [Enterprise workloads](https://docs.victoriametrics.com/enterprise.html).
-* It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
+* It has an open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
* It can store data on [NFS-based storages](https://en.wikipedia.org/wiki/Network_File_System) such as [Amazon EFS](https://aws.amazon.com/efs/)
and [Google Filestore](https://cloud.google.com/filestore).
@@ -149,7 +149,7 @@ See also [articles and slides about VictoriaMetrics from our users](https://docs
### Install
-To quickly try VictoriaMetrics, just download [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
+To quickly try VictoriaMetrics, just download the [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
or [Docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and start it with the desired command-line flags.
See also [QuickStart guide](https://docs.victoriametrics.com/Quick-Start.html) for additional information.
@@ -166,10 +166,10 @@ VictoriaMetrics can also be installed via these installation methods:
The following command-line flags are used the most:
-* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory.
+* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. The default path is `victoria-metrics-data` in the current working directory.
* `-retentionPeriod` - retention for stored data. Older data is automatically deleted. Default retention is 1 month (31 days). The minimum retention period is 24h or 1d. See [these docs](#retention) for more details.
-Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).
+Other flags have good enough default values, so set them only if you really need to. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).
The following docs may be useful during initial VictoriaMetrics setup:
* [How to set up scraping of Prometheus-compatible targets](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
@@ -183,9 +183,6 @@ VictoriaMetrics accepts [Prometheus querying API requests](#prometheus-querying-
It is recommended setting up [monitoring](#monitoring) for VictoriaMetrics.
-VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).
-
-
### Environment variables
All the VictoriaMetrics components allow referring environment variables in `yaml` configuration files (such as `-promscrape.config`)
From d0ca448093ded22b14e1f5e44a8f82276a8a5353 Mon Sep 17 00:00:00 2001
From: hagen1778
Date: Fri, 22 Dec 2023 21:41:27 +0100
Subject: [PATCH 024/109] docs: fix typo in VictoriaLogs upgrading procedure
Signed-off-by: hagen1778
---
docs/VictoriaLogs/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/VictoriaLogs/README.md b/docs/VictoriaLogs/README.md
index ef7dea0cf..b4b0c97fd 100644
--- a/docs/VictoriaLogs/README.md
+++ b/docs/VictoriaLogs/README.md
@@ -61,7 +61,7 @@ The following steps must be performed during the upgrade / downgrade procedure:
* Send `SIGINT` signal to VictoriaLogs process in order to gracefully stop it.
See [how to send signals to processes](https://stackoverflow.com/questions/33239959/send-signal-to-process-from-command-line).
* Wait until the process stops. This can take a few seconds.
-* Start the upgraded VictoriaMetrics.
+* Start the upgraded VictoriaLogs.
## Retention
From 47307c7a37e204123422b6aacbafd22e86dd26a5 Mon Sep 17 00:00:00 2001
From: Artem Navoiev
Date: Sun, 24 Dec 2023 22:51:43 +0100
Subject: [PATCH 025/109] docs: specify right link to grafana operator docs
Signed-off-by: Artem Navoiev
---
docs/grafana-datasource.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/grafana-datasource.md b/docs/grafana-datasource.md
index 1ce920549..61c43b1a9 100644
--- a/docs/grafana-datasource.md
+++ b/docs/grafana-datasource.md
@@ -239,7 +239,7 @@ spec:
allow_loading_unsigned_plugins: victoriametrics-datasource
```
-See [Grafana operator reference](https://grafana-operator.github.io/grafana-operator/docs/grafana/) to find more about
+See [Grafana operator reference](https://grafana.github.io/grafana-operator/docs/grafana/) to find more about
Grafana operator.
This example uses init container to download and install plugin.
From aecfabe3180f024a0642dec488195685cfe80006 Mon Sep 17 00:00:00 2001
From: Denys Holius <5650611+denisgolius@users.noreply.github.com>
Date: Fri, 5 Jan 2024 17:16:46 +0200
Subject: [PATCH 026/109] CHANGELOG.md: fixed wrong links to vmalert-tool
documentation page (#5570)
---
docs/CHANGELOG.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 611d8f6c6..9a71be3d7 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -89,7 +89,7 @@ Released at 2023-12-13
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): prevent from `FATAL: cannot flush metainfo` panic when [`-remoteWrite.multitenantURL`](https://docs.victoriametrics.com/vmagent.html#multitenancy) command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5357).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly decode zstd-encoded data blocks received via [VictoriaMetrics remote_write protocol](https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol). See [this issue comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5301#issuecomment-1815871992).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly add new labels at `output_relabel_configs` during [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html). Previously this could lead to corrupted labels in output samples. Thanks to @ChengChung for providing [detailed report for this bug](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5402).
-* BUGFIX: [vmalert-tool](https://docs.victoriametrics.com/#vmalert-tool): allow using arbitrary `eval_time` in [alert_rule_test](https://docs.victoriametrics.com/vmalert-tool.html#alert_test_case) case. Previously, test cases with `eval_time` not being a multiple of `evaluation_interval` would fail.
+* BUGFIX: [vmalert-tool](https://docs.victoriametrics.com/vmalert-tool.html): allow using arbitrary `eval_time` in [alert_rule_test](https://docs.victoriametrics.com/vmalert-tool.html#alert_test_case) case. Previously, test cases with `eval_time` not being a multiple of `evaluation_interval` would fail.
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): sanitize label names before sending the alert notification to Alertmanager. Before, vmalert would send notifications with labels containing characters not supported by Alertmanager validator, resulting into validation errors like `msg="Failed to validate alerts" err="invalid label set: invalid name "foo.bar"`.
* BUGFIX: [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html): fix `vmbackupmanager` not deleting previous object versions from S3 when applying retention policy with `-deleteAllObjectVersions` command-line flag.
* BUGFIX: [vminsert](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): fix panic when ingesting data via [NewRelic protocol](https://docs.victoriametrics.com/#how-to-send-data-from-newrelic-agent) into VictoriaMetrics cluster. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5416).
@@ -161,7 +161,7 @@ Released at 2023-11-15
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): reduce vertical space usage, so more information is visible on the screen without scrolling.
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): show query execution duration in the header of query input field. This should help optimizing query performance.
* FEATURE: support `Strict-Transport-Security`, `Content-Security-Policy` and `X-Frame-Options` HTTP response headers in the all VictoriaMetrics components. The values for headers can be specified via the following command-line flags: `-http.header.hsts`, `-http.header.csp` and `-http.header.frameOptions`.
-* FEATURE: [vmalert-tool](https://docs.victoriametrics.com/#vmalert-tool): add `unittest` command to run unittest for alerting and recording rules. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4789) for details.
+* FEATURE: [vmalert-tool](https://docs.victoriametrics.com/vmalert-tool.html): add `unittest` command to run unittest for alerting and recording rules. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4789) for details.
* FEATURE: dashboards/vmalert: add new panel `Missed evaluations` for indicating alerting groups that miss their evaluations.
* FEATURE: all: track requests with wrong auth key and wrong basic auth at `vm_http_request_errors_total` [metric](https://docs.victoriametrics.com/#monitoring) with `reason="wrong_auth_key"` and `reason="wrong_basic_auth"`. See [this issue](https://github.com/victoriaMetrics/victoriaMetrics/issues/4590). Thanks to @venkatbvc for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5166).
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): add ability to drop the specified number of `/`-delimited prefix parts from the request path before proxying the request to the matching backend. See [these docs](https://docs.victoriametrics.com/vmauth.html#dropping-request-path-prefix).
From f75874f5df19f7dd83077bd784737cd6316dbf9e Mon Sep 17 00:00:00 2001
From: Fred Navruzov
Date: Mon, 8 Jan 2024 10:31:36 +0100
Subject: [PATCH 027/109] docs: vmanomaly part 1 (#5558)
* add `AD` section, fix links, release docs and changelog
* - connect sections, refactor structure
* - resolve suggestions
- add FAQ section
- fix dead links
* - fix incorrect render of tables for Writer
- comment out internal readers/writers
- fix page ordering to some extent
* - link licensing requirements from v1.5.0 to main page
---------
Co-authored-by: Artem Navoiev
---
docs/.jekyll-metadata | Bin 0 -> 2277159 bytes
docs/anomaly-detection/CHANGELOG.md | 124 ++++++
docs/anomaly-detection/FAQ.md | 55 +++
docs/anomaly-detection/README.md | 60 +++
docs/anomaly-detection/components/README.md | 27 ++
.../components/models/README.md | 20 +
.../components/models/custom_model.md | 174 +++++++++
.../components/models/models.md | 323 ++++++++++++++++
.../components/monitoring.md | 297 +++++++++++++++
docs/anomaly-detection/components/reader.md | 262 +++++++++++++
.../anomaly-detection/components/scheduler.md | 354 ++++++++++++++++++
docs/anomaly-detection/components/writer.md | 270 +++++++++++++
docs/anomaly-detection/guides/README.md | 17 +
.../guides/guide-vmanomaly-vmalert.md | 9 +-
.../guide-vmanomaly-vmalert_alert-rule.webp | Bin
...guide-vmanomaly-vmalert_alerts-firing.webp | Bin
...guide-vmanomaly-vmalert_anomaly-score.webp | Bin
...uide-vmanomaly-vmalert_docker-compose.webp | Bin
.../guides/guide-vmanomaly-vmalert_files.webp | Bin
...vmanomaly-vmalert_node-cpu-rate-graph.webp | Bin
...de-vmanomaly-vmalert_yhat-lower-upper.webp | Bin
.../guides/guide-vmanomaly-vmalert_yhat.webp | Bin
docs/vmanomaly.md | 45 ++-
23 files changed, 2016 insertions(+), 21 deletions(-)
create mode 100644 docs/.jekyll-metadata
create mode 100644 docs/anomaly-detection/CHANGELOG.md
create mode 100644 docs/anomaly-detection/FAQ.md
create mode 100644 docs/anomaly-detection/README.md
create mode 100644 docs/anomaly-detection/components/README.md
create mode 100644 docs/anomaly-detection/components/models/README.md
create mode 100644 docs/anomaly-detection/components/models/custom_model.md
create mode 100644 docs/anomaly-detection/components/models/models.md
create mode 100644 docs/anomaly-detection/components/monitoring.md
create mode 100644 docs/anomaly-detection/components/reader.md
create mode 100644 docs/anomaly-detection/components/scheduler.md
create mode 100644 docs/anomaly-detection/components/writer.md
create mode 100644 docs/anomaly-detection/guides/README.md
rename docs/{ => anomaly-detection}/guides/guide-vmanomaly-vmalert.md (99%)
rename docs/{ => anomaly-detection}/guides/guide-vmanomaly-vmalert_alert-rule.webp (100%)
rename docs/{ => anomaly-detection}/guides/guide-vmanomaly-vmalert_alerts-firing.webp (100%)
rename docs/{ => anomaly-detection}/guides/guide-vmanomaly-vmalert_anomaly-score.webp (100%)
rename docs/{ => anomaly-detection}/guides/guide-vmanomaly-vmalert_docker-compose.webp (100%)
rename docs/{ => anomaly-detection}/guides/guide-vmanomaly-vmalert_files.webp (100%)
rename docs/{ => anomaly-detection}/guides/guide-vmanomaly-vmalert_node-cpu-rate-graph.webp (100%)
rename docs/{ => anomaly-detection}/guides/guide-vmanomaly-vmalert_yhat-lower-upper.webp (100%)
rename docs/{ => anomaly-detection}/guides/guide-vmanomaly-vmalert_yhat.webp (100%)
diff --git a/docs/.jekyll-metadata b/docs/.jekyll-metadata
new file mode 100644
index 0000000000000000000000000000000000000000..0a179684dab0a41b6ad1fc3ad18284df7bdcfa16
GIT binary patch
literal 2277159
zcmeFaPmE;Qb>_!x8CBgyG$b1k3}laOEWlt|%G?
z;j9-?`SR8I&iB3dos?%5cP9bOU!D8n-ur&{ocm7xORs+am7l+Ixcjf~ug<@-|Ka?j
zXD287pFdwOj!x#QyUXKSpS$(NcfNo7aQDt~y;#m~9em=^;l<}(dvy7Kzy6b-z47rs
z{nPh;dgpVmpUzIt-aoxqE_VKS`~MuzPZzg7_xkGMgXQAr{A~C3Z~xZkK6&=7Z>{F*
z#jk((qtCteM`x$=!`)ZE@ZO_K=sSnIuN}{yuAcM1x$}#|-9Ox4FQ0DX**l*v&(`z3
z^^^H>zW3nXgU{T%fA7KG{k{9&TYP)=_FMb!FHVn6E{^A`{qy;1y?;Fa*6iYBy}Em{
z`1Zx(c!U3j2Ru4kF3(O0_usE`SBv%h{in0Xmk<1U#9ynE7xzDVI$-{Lb>??w=Zo0~
z&u7Q=U_W(z{`9}c{Cn&3+5T$%?D>DcI$AydfiHjSe~bRtN9T*D>$@Lr|J$$ELq0k?
zUC&R~gMhzU$DYo-GHn{iE6W@o*K?mh9Qn`RU{H+0!S3v70;LcH>$d
zU2d8;`7lowrymUm{bldR>9*?a&TxGvsgAeXJgx)jX7gk|JHGp5y*$}$Oy*i!&zDb6
zX6wrr&}wl!|6q0_3%G}_&yLpT^Wg%1NvHV~-
z^UUSegTHll=@r8ttIV7oAD^8L-EL)6)&{y?y{Vp
zFK3J6!G2XF&el&poPBRVG^hGD#>Z!8k5A_BKVHus52)s>98zb;tNH0_xH5l#(jz^Y
zJv+NtuU<&&AjD}~;S17ww)$wbe;Mb~%Pn>|5vRSHhrrd7v-9=Q+2vb*5TkrGfBI6?
zj~>7I;qh{mr%Q=~yzQ@&>%aW9B{s2i`A8)=NV>EWiNuaB*OBC+-CeGCNqyO)>v1BD
zmK(c_GkT;v{7Gge(WR#f9a_FPSgdjArFEJcbZI$Zf%bN&?T!Ncx%42TKEJD87w~t<
z?72ADAkW^a=?x3{aFuC^D}0K5oY01=fuilw+uZv=GZ*$2PR|7@TtxyCKC~t~6yeIc
zi5@If3(lYICV@1#1_Tx?k7g{wOoF}HxE=NuiX8Y$Q-0?cp1IB2`g-{SL=1S+dv(Z5
z1X%vipLw3C{_Y2FKYMaRnQ9D_3M^$sa}x)a`=llZ36{dHnb?A*xM(oi*X3rZu`he@
z4Md7WSZ*Xb(x`(npRA#o3fG=2bYW>pG}<_Nxs7t3pHHS-ti$GhmHTk*VMZaAevJp6
z8F6*IL5a=v)2kM8;xZITu{=oQC)q~|t+*U2+CI(gz7I9?V)Nb9gMk>AnZS%?ju`(|
zP>o9~D7vw9CL9`(<028*u{qU;CObVxOh-=qSmuZa;1?c>1bN!__JTwVdD`oC2rMb`
zXD%)-X3s_?iO0Qf1Mr3vnQ}{tOv$DqQ)WexDH!eX<7O7cdWuYuz9Lg>l*Dn0OrbSJ
z&d>!#&X95RwI!P;jK{r;vp6e?oH0Slc$^|GBnsKz&6f
z(1IcpD%w8BDKdf96q$f+MJ7yJkpr`!$bq$4L9QYPTv?G*qOHgoG0PyEvvMfjD1|&u
zkza)S!b5#UewkEOWDIl_`OW8958nGrXp)F+T*1n
zYwTo2)<{c{H8x73RAddc6`7(vMW)C&da1~q=Vy^A*HmQ6bQGBa4MoO0TahuYpvV|l
zRAdFU6j?z%MOIX_eM%WoP+O4|*imG~bQBpgJw?VWTahuYqR5=+C^9A5iadsDDe?eZ
zQe;S~C^7*iiY${TZ(S<#+49p*9vqD_WLaK^JSoU>50co#kEI(*aL{9E91@A`SXN^s
z7wzt{Y9y&IyKy~Eq{cEODdUW8C@=3wW+uj^rwYATmLFNHap;wBc%?XuI?__dd|fg}sH-
zbAbw1k-&uIO=I{oK@qO3o0uw=1$!76(cl^oSg<$4w-II%?9Ij@6gjZGX?*>0G6kNr
zpS=JP1D^C=9r6+ZmOu1opXZ1dCr`fnoi|4ru>4IhaQmZlF9z6aL&}QgCJrq3NlgwC
zEQMV&u?0(U(O|T%%gs__U-sY|h!lyi+(>k!Q3qv?SVJ=vu02`k!g5!k(Z-k`+h`sr26x<-Kq-9wQS%Y(FnLMtwhA8nuJ
zcHfUb^I~%e^=U;T4I%j?Jk)G}-Aljp@iM
zH;o727anR|qktr0$kSf8FGAY@T}7VOHtyxdcL%p#|HDuG&d2}ZfBtX&<(-4ui~s20
z)y1uY*LH8e`t;z=^R9LWpL}@ddMCS29PY{&HjYR*b`sdX_R%J=SH8CMBD7C`oQ5Vf
zU55rW{YHY5JD(2jCDXr=u;klAhlLIcNo@be@>oK^s|pb03v(7#J~5=?UVmsf8)iI
z|8E|qPhRe1IclKy!FoC-18bO
zcGG@GQ}_Glg_A|eBbyeR`7-Ha3;CvhF~SNdt0b&R6HF$(@Wcm6uOyl&8+YkN@^aSf
z2I7%PGW>ATv|AF=w6|?BsF1n4k)X`|lP3ReBrtQM8U_~Y>o*dc{E}-hyAG@Q*gHrshUhQz#JTqL!zMM@t-2EHNXdUiO+qJI#M9(%}
zkd8XstsVN~=>KLCL5I7wC5!Sn_Fp{OKF*MYsXok?Gbo3K2mUEzkyKWdE0*J^r7Ozvc24Ta5(AS05cp@
z4|i|wkXc-9FP{+)cZ;W~_IEyB{yQHJ?>)T#@WF^(7Pe*1@6O7i9pqh?0C*)=3ZcZp
zQvV1Gkl-M%@~thA*j?pTD!FLalPgP7ho#>UTfQVBy_C)(snM_G4HC)3<eOpa|!ie;EZJ^kVctS#J?4^#?lIk+9;DBI5gsoMIsPKbEuG<(AZul1+7_%!)cvFxvaZ2@b`2>PV5kI#O(u
zr*Y~?p*3~P&;@nOka6_m)G_7x{b0LIJL6W=F=N{5m;qgN47{2;2HbW#1=Z_u>J)&z
zjQHwEpuRd1Xh9tb6>Xp6UeW|wQ%3@})sZl5bqvgcItJEe1-a@NaN8ARed|y6wWY0&
zS&TJx6jY#&JOAawo&W0L&fgzlkAv>3H(`$$Shq*bb?s4;r`w}un)ax{X#11)sIimn
zQ6nvT)Y#}x(jGO`wnvKg?2#hl=%qb!o^OwoYuY1aI`&9`hCO1QZI2jNuty9m+M|M6
z_NbtqJt`{NKBYY>sBMo5?AW7XI`)W}o;_lgZI2jNu}4mH?2!^}dmKYW_V_S9J2cl@s*aFHS*(OS>5g)FN^GKzQkx_=n4^3dOC%OYncJ3JwDHLtyrjOYjrBN@
zHOgX(j5BJJyb2+inKPE2DpW>U%we&{p_kTaUa2lEC+v%|uFM7wbg}dxqbz3{mbPnj92S>a1pBN0hI14Mw}3+)6a|WlOw)NRcMWWu_yIvLus>8k&h>?a4w@
zlnz0ojiawE+1wRt3k%Dl+_vf1fi2b^X4FM#pE$tG7pvn9!f39aUPP?YG#+#hMan3T
znF