Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
Commit 0eb733a31e
668 changed files with 7208 additions and 17569 deletions
.github/workflows/check-licenses.yml (vendored): 2 changed lines

@@ -17,7 +17,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@main
         with:
-          go-version: 1.21.3
+          go-version: 1.21.4
         id: go
       - name: Code checkout
         uses: actions/checkout@master

.github/workflows/codeql-analysis.yml (vendored): 2 changed lines

@@ -57,7 +57,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.21.3
+          go-version: 1.21.4
           check-latest: true
           cache: true
         if: ${{ matrix.language == 'go' }}

.github/workflows/main.yml (vendored): 6 changed lines

@@ -32,7 +32,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.21.3
+          go-version: 1.21.4
           check-latest: true
           cache: true

@@ -56,7 +56,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.21.3
+          go-version: 1.21.4
           check-latest: true
           cache: true

@@ -81,7 +81,7 @@ jobs:
         id: go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.21.3
+          go-version: 1.21.4
           check-latest: true
           cache: true

.github/workflows/sync-docs.yml (vendored): 2 changed lines

@@ -8,7 +8,7 @@ on:
   workflow_dispatch: {}
 env:
   PAGEFIND_VERSION: "1.0.3"
-  HUGO_VERSION: "0.119.0"
+  HUGO_VERSION: "latest"
 permissions:
   contents: read # This is required for actions/checkout and to commit back image update
   deployments: write

Makefile: 2 changed lines

@@ -486,7 +486,7 @@ golangci-lint: install-golangci-lint
 	golangci-lint run

 install-golangci-lint:
-	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.54.2
+	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.55.1

 govulncheck: install-govulncheck
 	govulncheck ./...

README.md: 73 changed lines

@@ -442,7 +442,7 @@ This information is obtained from the `/api/v1/status/active_queries` HTTP endpo
 [VMUI](#vmui) provides an ability to explore metrics exported by a particular `job` / `instance` in the following way:

 1. Open the `vmui` at `http://victoriametrics:8428/vmui/`.
-1. Click the `Explore metrics` tab.
+1. Click the `Explore Prometheus metrics` tab.
 1. Select the `job` you want to explore.
 1. Optionally select the `instance` for the selected job to explore.
 1. Select metrics you want to explore and compare.

@@ -1126,6 +1126,18 @@ For example, the following command builds the image on top of [scratch](https://
 ROOT_IMAGE=scratch make package-victoria-metrics
 ```

+#### Building VictoriaMetrics with Podman
+
+VictoriaMetrics can be built with Podman in either rootful or rootless mode.
+
+When building via rootful Podman, simply add `DOCKER=podman` to the relevant `make` command line. To build
+via rootless Podman, add `DOCKER=podman DOCKER_RUN="podman run --userns=keep-id"` to the `make`
+command line.
+
+For example: `make victoria-metrics-pure DOCKER=podman DOCKER_RUN="podman run --userns=keep-id"`
+
+Note that `production` builds are not supported via Podman because Podman does not support `buildx`.
+
 ## Start with docker-compose

 [Docker-compose](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/docker-compose.yml)

@@ -1683,43 +1695,44 @@ See also [cardinality limiter](#cardinality-limiter) and [capacity planning docs

 ## High availability

-* Install multiple VictoriaMetrics instances in distinct datacenters (availability zones).
-* Pass addresses of these instances to [vmagent](https://docs.victoriametrics.com/vmagent.html) via `-remoteWrite.url` command-line flag:
+The general approach for achieving high availability is the following:
+
+- to run two identically configured VictoriaMetrics instances in distinct datacenters (availability zones)
+- to store the collected data simultaneously into these instances via [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus
+- to query the first VictoriaMetrics instance and to fail over to the second instance when the first instance becomes temporarily unavailable.
+
+Such a setup guarantees that the collected data isn't lost when one of the VictoriaMetrics instances becomes unavailable.
+The collected data continues to be written to the available VictoriaMetrics instance, so it should be available for querying.
+Both [vmagent](https://docs.victoriametrics.com/vmagent.html) and Prometheus buffer the collected data locally if they cannot send it
+to the configured remote storage. So the collected data will be written to the temporarily unavailable VictoriaMetrics instance
+after it becomes available.
+
+If you use [vmagent](https://docs.victoriametrics.com/vmagent.html) for storing the data into VictoriaMetrics,
+then it can be configured with multiple `-remoteWrite.url` command-line flags, where every flag points to the VictoriaMetrics
+instance in a particular availability zone, in order to replicate the collected data to all the VictoriaMetrics instances.
+For example, the following command instructs `vmagent` to replicate data to `vm-az1` and `vm-az2` instances of VictoriaMetrics:

 ```console
-/path/to/vmagent -remoteWrite.url=http://<victoriametrics-addr-1>:8428/api/v1/write -remoteWrite.url=http://<victoriametrics-addr-2>:8428/api/v1/write
+/path/to/vmagent \
+    -remoteWrite.url=http://<vm-az1>:8428/api/v1/write \
+    -remoteWrite.url=http://<vm-az2>:8428/api/v1/write
 ```

-Alternatively these addresses may be passed to `remote_write` section in Prometheus config:
+If you use Prometheus for collecting and writing the data to VictoriaMetrics,
+then the following [`remote_write`](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write) section
+in Prometheus config can be used for replicating the collected data to `vm-az1` and `vm-az2` VictoriaMetrics instances:

 ```yml
 remote_write:
-  - url: http://<victoriametrics-addr-1>:8428/api/v1/write
-    queue_config:
-      max_samples_per_send: 10000
-  # ...
-  - url: http://<victoriametrics-addr-N>:8428/api/v1/write
-    queue_config:
-      max_samples_per_send: 10000
+  - url: http://<vm-az1>:8428/api/v1/write
+  - url: http://<vm-az2>:8428/api/v1/write
 ```

-* Apply the updated config:
-
-```console
-kill -HUP `pidof prometheus`
-```
-
-It is recommended to use [vmagent](https://docs.victoriametrics.com/vmagent.html) instead of Prometheus for highly loaded setups.
-
-* Now Prometheus should write data into all the configured `remote_write` urls in parallel.
-* Set up [Promxy](https://github.com/jacksontj/promxy) in front of all the VictoriaMetrics replicas.
-* Set up Prometheus datasource in Grafana that points to Promxy.
-
-If you have Prometheus HA pairs with replicas `r1` and `r2` in each pair, then configure each `r1`
-to write data to `victoriametrics-addr-1`, while each `r2` should write data to `victoriametrics-addr-2`.
-
-Another option is to write data simultaneously from Prometheus HA pair to a pair of VictoriaMetrics instances
-with the enabled de-duplication. See [this section](#deduplication) for details.
+It is recommended to use [vmagent](https://docs.victoriametrics.com/vmagent.html) instead of Prometheus for highly loaded setups,
+since it uses lower amounts of RAM, CPU and network bandwidth than Prometheus.
+
+If you use identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) instances for collecting the same data
+and sending it to VictoriaMetrics, then do not forget to enable [deduplication](#deduplication) on the VictoriaMetrics side.

 ## Deduplication

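The read path of such a pair can be as simple as querying the first instance and falling back to the second one when the first request fails. Below is a minimal Go sketch of this failover logic. It is an illustration only, reusing the hypothetical `vm-az1`/`vm-az2` addresses from the example above; it is not part of VictoriaMetrics:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"
)

// queryWithFailover tries each VictoriaMetrics replica in turn and returns
// the body of the first successful /api/v1/query response.
func queryWithFailover(bases []string, promql string) ([]byte, error) {
	client := &http.Client{Timeout: 5 * time.Second}
	var lastErr error
	for _, base := range bases {
		resp, err := client.Get(base + "/api/v1/query?query=" + url.QueryEscape(promql))
		if err != nil {
			lastErr = err // instance is unavailable - try the next one
			continue
		}
		body, readErr := io.ReadAll(resp.Body)
		resp.Body.Close()
		if readErr == nil && resp.StatusCode == http.StatusOK {
			return body, nil
		}
		lastErr = fmt.Errorf("unexpected response from %s: status %d", base, resp.StatusCode)
	}
	return nil, lastErr
}

func main() {
	bases := []string{"http://vm-az1:8428", "http://vm-az2:8428"}
	body, err := queryWithFailover(bases, `up`)
	if err != nil {
		fmt.Println("all replicas failed:", err)
		return
	}
	fmt.Println(string(body))
}
```
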
@@ -2513,6 +2526,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
 ```
   -bigMergeConcurrency int
     	Deprecated: this flag does nothing. Please use -smallMergeConcurrency for controlling the concurrency of background merges. See https://docs.victoriametrics.com/#storage
+  -blockcache.missesBeforeCaching int
+    	The number of cache misses before putting the block into cache. Higher values may reduce indexdb/dataBlocks cache size at the cost of higher CPU and disk read usage (default 2)
   -cacheExpireDuration duration
     	Items are removed from in-memory caches after they aren't accessed for this duration. Lower values may reduce memory usage at the cost of higher CPU usage. See also -prevCacheRemovalPercent (default 30m0s)
   -configAuthKey string

@@ -2640,6 +2655,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     	Allows renaming fields in JSON formatted logs. Example: "ts:timestamp,msg:message" renames "ts" to "timestamp" and "msg" to "message". Supported fields: ts, level, caller, msg
   -loggerLevel string
     	Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "INFO")
+  -loggerMaxArgLen int
+    	The maximum length of a single logged argument. Longer arguments are replaced with 'arg_start..arg_end', where 'arg_start' and 'arg_end' is prefix and suffix of the arg with the length not exceeding -loggerMaxArgLen / 2 (default 500)
   -loggerOutput string
     	Output for the logs. Supported values: stderr, stdout (default "stderr")
   -loggerTimezone string

@@ -1,13 +1,13 @@
 {
   "files": {
-    "main.css": "./static/css/main.9a224445.css",
-    "main.js": "./static/js/main.02178f4b.js",
-    "static/js/522.b5ae4365.chunk.js": "./static/js/522.b5ae4365.chunk.js",
-    "static/media/MetricsQL.md": "./static/media/MetricsQL.957b90ab4cb4852eec26.md",
+    "main.css": "./static/css/main.d1313636.css",
+    "main.js": "./static/js/main.1919fefe.js",
+    "static/js/522.da77e7b3.chunk.js": "./static/js/522.da77e7b3.chunk.js",
+    "static/media/MetricsQL.md": "./static/media/MetricsQL.8644fd7c964802dd34a9.md",
     "index.html": "./index.html"
   },
   "entrypoints": [
-    "static/css/main.9a224445.css",
-    "static/js/main.02178f4b.js"
+    "static/css/main.d1313636.css",
+    "static/js/main.1919fefe.js"
   ]
 }

@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.02178f4b.js"></script><link href="./static/css/main.9a224445.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.1919fefe.js"></script><link href="./static/css/main.d1313636.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

Several file diffs are suppressed because one or more lines are too long, including these new files:

app/vlselect/vmui/static/css/main.d1313636.css (new file, 1 line)
app/vlselect/vmui/static/js/main.1919fefe.js (new file, 2 lines)

@@ -1,11 +1,11 @@
 ---
-sort: 14
-weight: 14
+sort: 23
+weight: 23
 title: MetricsQL
 menu:
   docs:
-    parent: "victoriametrics"
-    weight: 14
+    parent: 'victoriametrics'
+    weight: 23
 aliases:
 - /ExtendedPromQL.html
 - /MetricsQL.html

@@ -21,7 +21,8 @@ However, there are some [intentional differences](https://medium.com/@romanhavro

 [Standalone MetricsQL package](https://godoc.org/github.com/VictoriaMetrics/metricsql) can be used for parsing MetricsQL in external apps.

-If you are unfamiliar with PromQL, then it is suggested reading [this tutorial for beginners](https://medium.com/@valyala/promql-tutorial-for-beginners-9ab455142085).
+If you are unfamiliar with PromQL, then it is suggested reading [this tutorial for beginners](https://medium.com/@valyala/promql-tutorial-for-beginners-9ab455142085)
+and the introduction to [basic querying via MetricsQL](https://docs.victoriametrics.com/keyConcepts.html#metricsql).

 The following functionality is implemented differently in MetricsQL compared to PromQL. This improves user experience:

@@ -109,7 +110,7 @@ The list of MetricsQL features on top of PromQL:
 * [histogram_quantile](#histogram_quantile) accepts optional third arg - `boundsLabel`.
   In this case it returns `lower` and `upper` bounds for the estimated percentile.
   See [this issue for details](https://github.com/prometheus/prometheus/issues/5706).
-* `default` binary operator. `q1 default q2` fills gaps in `q1` with the corresponding values from `q2`.
+* `default` binary operator. `q1 default q2` fills gaps in `q1` with the corresponding values from `q2`. See also [drop_empty_series](#drop_empty_series).
 * `if` binary operator. `q1 if q2` removes values from `q1` for missing values from `q2`.
 * `ifnot` binary operator. `q1 ifnot q2` removes values from `q1` for existing values from `q2`.
 * `WITH` templates. This feature simplifies writing and managing complex queries.

@@ -531,7 +532,7 @@ See also [duration_over_time](#duration_over_time) and [lag](#lag).
 `mad_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation)
 over raw samples on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

-See also [mad](#mad) and [range_mad](#range_mad).
+See also [mad](#mad), [range_mad](#range_mad) and [outlier_iqr_over_time](#outlier_iqr_over_time).

 #### max_over_time

@@ -561,6 +562,18 @@ This function is supported by PromQL. See also [tmin_over_time](#tmin_over_time)
 for raw samples on the given lookbehind window `d`. It is calculated individually per each time series returned
 from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering). It is expected that raw sample values are discrete.

+#### outlier_iqr_over_time
+
+`outlier_iqr_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last sample on the given lookbehind window `d`
+if its value is either smaller than `q25-1.5*iqr` or bigger than `q75+1.5*iqr`, where:
+
+- `iqr` is an [Interquartile range](https://en.wikipedia.org/wiki/Interquartile_range) over raw samples on the lookbehind window `d`
+- `q25` and `q75` are 25th and 75th [percentiles](https://en.wikipedia.org/wiki/Percentile) over raw samples on the lookbehind window `d`.
+
+The `outlier_iqr_over_time()` is useful for detecting anomalies in gauge values based on the previous history of values.
+For example, `outlier_iqr_over_time(memory_usage_bytes[1h])` triggers when `memory_usage_bytes` suddenly goes outside the usual value range for the last hour.
+
+See also [outliers_iqr](#outliers_iqr).
+
 #### predict_linear

 `predict_linear(series_selector[d], t)` is a [rollup function](#rollup-functions), which calculates the value `t` seconds in the future using

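For intuition, here is a minimal Go sketch of the `q25`/`q75`/`iqr` math behind the bounds used by `outlier_iqr_over_time` above. It is an illustration using linear-interpolation percentiles, not the actual VictoriaMetrics implementation:

```go
package main

import (
	"fmt"
	"sort"
)

// percentile returns the p-th percentile (0..1) of sorted values
// using linear interpolation between the closest ranks.
func percentile(sorted []float64, p float64) float64 {
	if len(sorted) == 0 {
		return 0
	}
	pos := p * float64(len(sorted)-1)
	i := int(pos)
	if i >= len(sorted)-1 {
		return sorted[len(sorted)-1]
	}
	frac := pos - float64(i)
	return sorted[i]*(1-frac) + sorted[i+1]*frac
}

// iqrOutlierBounds returns the [q25-1.5*iqr, q75+1.5*iqr] range.
func iqrOutlierBounds(values []float64) (lower, upper float64) {
	sorted := append([]float64(nil), values...)
	sort.Float64s(sorted)
	q25 := percentile(sorted, 0.25)
	q75 := percentile(sorted, 0.75)
	iqr := q75 - q25
	return q25 - 1.5*iqr, q75 + 1.5*iqr
}

func main() {
	samples := []float64{10, 11, 10, 12, 11, 10, 42} // 42 is an outlier
	lower, upper := iqrOutlierBounds(samples)
	last := samples[len(samples)-1]
	if last < lower || last > upper {
		fmt.Printf("last sample %.1f is outside [%.2f, %.2f]\n", last, lower, upper)
	}
}
```
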
@@ -865,7 +878,7 @@ from the given [series_selector](https://docs.victoriametrics.ht
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

-See also [zscore](#zscore) and [range_trim_zscore](#range_trim_zscore).
+See also [zscore](#zscore), [range_trim_zscore](#range_trim_zscore) and [outlier_iqr_over_time](#outlier_iqr_over_time).

 ### Transform functions

@@ -1055,6 +1068,17 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke
 This function is supported by PromQL. See also [rad](#rad).

+#### drop_empty_series
+
+`drop_empty_series(q)` is a [transform function](#transform-functions), which drops empty series from `q`.
+
+This function can be used when the `default` operator should be applied only to non-empty series. For example,
+`drop_empty_series(temperature < 30) default 42` returns series, which have at least a single sample smaller than 30 on the selected time range,
+while filling gaps in the returned series with 42.
+
+On the other hand `(temperature < 30) default 40` returns all the `temperature` series, even if they have no samples smaller than 30,
+by replacing all the values bigger than or equal to 30 with 40.
+
 #### end

 `end()` is a [transform function](#transform-functions), which returns the unix timestamp in seconds for the last point.

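To make the semantics concrete, here is a small Go sketch of the "drop series with no samples" idea over a toy in-memory representation. The types are hypothetical and this is not the VictoriaMetrics query engine:

```go
package main

import (
	"fmt"
	"math"
)

// dropEmptySeries removes series whose points are all NaN (gaps),
// mirroring what drop_empty_series(q) does before `default` kicks in.
func dropEmptySeries(series map[string][]float64) map[string][]float64 {
	out := make(map[string][]float64)
	for name, points := range series {
		for _, v := range points {
			if !math.IsNaN(v) {
				out[name] = points
				break
			}
		}
	}
	return out
}

func main() {
	nan := math.NaN()
	series := map[string][]float64{
		`temperature{city="a"}`: {25, nan, 28},   // has samples -> kept
		`temperature{city="b"}`: {nan, nan, nan}, // fully empty -> dropped
	}
	fmt.Println(len(dropEmptySeries(series))) // 1
}
```
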
@@ -1591,7 +1615,7 @@ which maps `label` values from `src_*` to `dst*` for all the time series returne
 which drops time series from `q` with `label` not matching the given `regexp`.
 This function can be useful after [rollup](#rollup)-like functions, which may return multiple time series for every input series.

-See also [label_mismatch](#label_mismatch).
+See also [label_mismatch](#label_mismatch) and [labels_equal](#labels_equal).

 #### label_mismatch

@@ -1599,7 +1623,7 @@ See also [label_mismatch](#label_mismatch).
 which drops time series from `q` with `label` matching the given `regexp`.
 This function can be useful after [rollup](#rollup)-like functions, which may return multiple time series for every input series.

-See also [label_match](#label_match).
+See also [label_match](#label_match) and [labels_equal](#labels_equal).

 #### label_move

@@ -1642,23 +1666,30 @@ for the given `label` for every time series returned by `q`.
 For example, if `label_value(foo, "bar")` is applied to `foo{bar="1.234"}`, then it will return a time series
 `foo{bar="1.234"}` with `1.234` value. Function will return no data for non-numeric label values.

+#### labels_equal
+
+`labels_equal(q, "label1", "label2", ...)` is [label manipulation function](#label-manipulation-functions), which returns `q` series with identical values for the listed labels
+"label1", "label2", etc.
+
+See also [label_match](#label_match) and [label_mismatch](#label_mismatch).
+
 #### sort_by_label

-`sort_by_label(q, label1, ... labelN)` is [label manipulation function](#label-manipulation-functions), which sorts series in ascending order by the given set of labels.
+`sort_by_label(q, "label1", ... "labelN")` is [label manipulation function](#label-manipulation-functions), which sorts series in ascending order by the given set of labels.
 For example, `sort_by_label(foo, "bar")` would sort `foo` series by values of the label `bar` in these series.

 See also [sort_by_label_desc](#sort_by_label_desc) and [sort_by_label_numeric](#sort_by_label_numeric).

 #### sort_by_label_desc

-`sort_by_label_desc(q, label1, ... labelN)` is [label manipulation function](#label-manipulation-functions), which sorts series in descending order by the given set of labels.
+`sort_by_label_desc(q, "label1", ... "labelN")` is [label manipulation function](#label-manipulation-functions), which sorts series in descending order by the given set of labels.
 For example, `sort_by_label_desc(foo, "bar")` would sort `foo` series in descending order by values of the label `bar` in these series.

 See also [sort_by_label](#sort_by_label) and [sort_by_label_numeric_desc](#sort_by_label_numeric_desc).

 #### sort_by_label_numeric

-`sort_by_label_numeric(q, label1, ... labelN)` is [label manipulation function](#label-manipulation-functions), which sorts series in ascending order by the given set of labels
+`sort_by_label_numeric(q, "label1", ... "labelN")` is [label manipulation function](#label-manipulation-functions), which sorts series in ascending order by the given set of labels
 using [numeric sort](https://www.gnu.org/software/coreutils/manual/html_node/Version-sort-is-not-the-same-as-numeric-sort.html).
 For example, if `foo` series have `bar` label with values `1`, `101`, `15` and `2`, then `sort_by_label_numeric(foo, "bar")` would return series
 in the following order of `bar` label values: `1`, `2`, `15` and `101`.

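The difference between lexicographic and numeric ordering of label values can be reproduced in a few lines of Go. This is an illustration only; the real implementation also has to handle label values that are not plain integers:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
)

func main() {
	labels := []string{"1", "101", "15", "2"}

	lex := append([]string(nil), labels...)
	sort.Strings(lex) // lexicographic: [1 101 15 2]

	num := append([]string(nil), labels...)
	sort.Slice(num, func(i, j int) bool {
		a, _ := strconv.Atoi(num[i])
		b, _ := strconv.Atoi(num[j])
		return a < b
	}) // numeric: [1 2 15 101]

	fmt.Println(lex, num)
}
```
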
@@ -1667,7 +1698,7 @@ See also [sort_by_label_numeric_desc](#sort_by_label_numeric_desc) and [sort_by_
 #### sort_by_label_numeric_desc

-`sort_by_label_numeric_desc(q, label1, ... labelN)` is [label manipulation function](#label-manipulation-functions), which sorts series in descending order
+`sort_by_label_numeric_desc(q, "label1", ... "labelN")` is [label manipulation function](#label-manipulation-functions), which sorts series in descending order
 by the given set of labels using [numeric sort](https://www.gnu.org/software/coreutils/manual/html_node/Version-sort-is-not-the-same-as-numeric-sort.html).
 For example, if `foo` series have `bar` label with values `1`, `101`, `15` and `2`, then `sort_by_label_numeric_desc(foo, "bar")`
 would return series in the following order of `bar` label values: `101`, `15`, `2` and `1`.

@@ -1839,20 +1870,33 @@ This function is supported by PromQL.
 `mode(q) by (group_labels)` is [aggregate function](#aggregate-functions), which returns [mode](https://en.wikipedia.org/wiki/Mode_(statistics))
 per each `group_labels` for all the time series returned by `q`. The aggregate is calculated individually per each group of points with the same timestamp.

+#### outliers_iqr
+
+`outliers_iqr(q)` is [aggregate function](#aggregate-functions), which returns time series from `q` with at least a single point
+outside the so-called [Interquartile range outlier bounds](https://en.wikipedia.org/wiki/Interquartile_range) `[q25-1.5*iqr .. q75+1.5*iqr]`
+compared to other time series at the given point, where:
+
+- `iqr` is an [Interquartile range](https://en.wikipedia.org/wiki/Interquartile_range) calculated independently per each point on the graph across `q` series.
+- `q25` and `q75` are 25th and 75th [percentiles](https://en.wikipedia.org/wiki/Percentile) calculated independently per each point on the graph across `q` series.
+
+The `outliers_iqr()` is useful for detecting anomalous series in the group of series. For example, `outliers_iqr(temperature) by (country)` returns
+per-country series with anomalous outlier values compared to the rest of per-country series.
+
+See also [outliers_mad](#outliers_mad), [outliersk](#outliersk) and [outlier_iqr_over_time](#outlier_iqr_over_time).
+
 #### outliers_mad

 `outliers_mad(tolerance, q)` is [aggregate function](#aggregate-functions), which returns time series from `q` with at least
 a single point outside [Median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation) (aka MAD) multiplied by `tolerance`.
 E.g. it returns time series with at least a single point below `median(q) - mad(q)` or a single point above `median(q) + mad(q)`.

-See also [outliersk](#outliersk) and [mad](#mad).
+See also [outliers_iqr](#outliers_iqr), [outliersk](#outliersk) and [mad](#mad).

 #### outliersk

 `outliersk(k, q)` is [aggregate function](#aggregate-functions), which returns up to `k` time series with the biggest standard deviation (aka outliers)
 out of time series returned by `q`.

-See also [outliers_mad](#outliers_mad).
+See also [outliers_iqr](#outliers_iqr) and [outliers_mad](#outliers_mad).

 #### quantile

@@ -1972,7 +2016,7 @@ See also [bottomk_min](#bottomk_min).
 per each `group_labels` for all the time series returned by `q`. The aggregate is calculated individually per each group of points with the same timestamp.
 This function is useful for detecting anomalies in the group of related time series.

-See also [zscore_over_time](#zscore_over_time) and [range_trim_zscore](#range_trim_zscore).
+See also [zscore_over_time](#zscore_over_time), [range_trim_zscore](#range_trim_zscore) and [outliers_iqr](#outliers_iqr).

 ## Subqueries

@@ -60,7 +60,7 @@ and sending the data to the Prometheus-compatible remote storage:
 Example command for writing the data received via [supported push-based protocols](#how-to-push-data-to-vmagent)
 to [single-node VictoriaMetrics](https://docs.victoriametrics.com/) located at `victoria-metrics-host:8428`:

-```console
+```bash
 /path/to/vmagent -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
 ```

@@ -69,7 +69,7 @@ the data to [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-V

 Example command for scraping Prometheus targets and writing the data to single-node VictoriaMetrics:

-```console
+```bash
 /path/to/vmagent -promscrape.config=/path/to/prometheus.yml -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
 ```

@@ -110,7 +110,7 @@ additionally to pull-based Prometheus-compatible targets' scraping:

 * Sending `SIGHUP` signal to `vmagent` process:

-```console
+```bash
 kill -SIGHUP `pidof vmagent`
 ```

@@ -325,7 +325,7 @@ in the `scrape_config_files` section of `-promscrape.config` file. For example,
 loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file
 and from `https://config-server/scrape_config.yml` url:

-```yml
+```yaml
 scrape_config_files:
 - configs/*.yml
 - single_scrape_config.yml

@@ -335,7 +335,7 @@ scrape_config_files:
 Every referred file can contain arbitrary number of [supported scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
 There is no need in specifying top-level `scrape_configs` section in these files. For example:

-```yml
+```yaml
 - job_name: foo
   static_configs:
   - targets: ["vmagent:8429"]

@@ -375,7 +375,7 @@ Extra labels can be added to metrics collected by `vmagent` via the following me
 For example, the following command starts `vmagent`, which adds `{datacenter="foobar"}` label to all the metrics pushed
 to all the configured remote storage systems (all the `-remoteWrite.url` flag values):

-```
+```bash
 /path/to/vmagent -remoteWrite.label=datacenter=foobar ...
 ```

@@ -740,7 +740,7 @@ stream parsing mode can be explicitly enabled in the following places:

 Examples:

-```yml
+```yaml
 scrape_configs:
 - job_name: 'big-federate'
   stream_parse: true

@@ -767,7 +767,7 @@ Each `vmagent` instance in the cluster must use identical `-promscrape.config` f
 in the range `0 ... N-1`, where `N` is the number of `vmagent` instances in the cluster specified via `-promscrape.cluster.membersCount`.
 For example, the following commands spread scrape targets among a cluster of two `vmagent` instances:

-```
+```text
 /path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=0 -promscrape.config=/path/to/config.yml ...
 /path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=1 -promscrape.config=/path/to/config.yml ...
 ```

@@ -779,7 +779,7 @@ By default, each scrape target is scraped only by a single `vmagent` instance in
 then `-promscrape.cluster.replicationFactor` command-line flag must be set to the desired number of replicas. For example, the following commands
 start a cluster of three `vmagent` instances, where each target is scraped by two `vmagent` instances:

-```
+```text
 /path/to/vmagent -promscrape.cluster.membersCount=3 -promscrape.cluster.replicationFactor=2 -promscrape.cluster.memberNum=0 -promscrape.config=/path/to/config.yml ...
 /path/to/vmagent -promscrape.cluster.membersCount=3 -promscrape.cluster.replicationFactor=2 -promscrape.cluster.memberNum=1 -promscrape.config=/path/to/config.yml ...
 /path/to/vmagent -promscrape.cluster.membersCount=3 -promscrape.cluster.replicationFactor=2 -promscrape.cluster.memberNum=2 -promscrape.config=/path/to/config.yml ...

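Conceptually, every member applies the same hash-based assignment to each discovered target, so identically configured members agree on target ownership without any coordination. The following Go sketch is a simplified model of such sharding with replication; the actual vmagent hashing scheme may differ:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// shouldScrape reports whether the member with the given number must scrape
// the target under a simple hash-modulo sharding model with replication.
func shouldScrape(target string, memberNum, membersCount, replicationFactor int) bool {
	h := fnv.New64a()
	h.Write([]byte(target))
	owner := int(h.Sum64() % uint64(membersCount))
	for r := 0; r < replicationFactor; r++ {
		if (owner+r)%membersCount == memberNum {
			return true
		}
	}
	return false
}

func main() {
	targets := []string{"app1:8080/metrics", "app2:8080/metrics", "app3:8080/metrics"}
	for member := 0; member < 3; member++ {
		for _, t := range targets {
			if shouldScrape(t, member, 3, 2) {
				fmt.Printf("member %d scrapes %s\n", member, t)
			}
		}
	}
}
```
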
@@ -793,7 +793,7 @@ The `-promscrape.cluster.memberLabel` command-line flag allows specifying a name
 The value of the `member num` label is set to `-promscrape.cluster.memberNum`. For example, the following config instructs adding `vmagent_instance="0"` label
 to all the metrics scraped by the given `vmagent` instance:

-```
+```text
 /path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=0 -promscrape.cluster.memberLabel=vmagent_instance
 ```

@@ -820,7 +820,7 @@ See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679)
 `vmagent` supports scraping targets via http, https and socks5 proxies. Proxy address must be specified in `proxy_url` option. For example, the following scrape config instructs
 target scraping via https proxy at `https://proxy-addr:1234`:

-```yml
+```yaml
 scrape_configs:
 - job_name: foo
   proxy_url: https://proxy-addr:1234

@@ -837,7 +837,7 @@ Proxy can be configured with the following optional settings:

 For example:

-```yml
+```yaml
 scrape_configs:
 - job_name: foo
   proxy_url: https://proxy-addr:1234

@@ -987,7 +987,7 @@ If you have suggestions for improvements or have found a bug - please open an is
 * By default `vmagent` evenly spreads scrape load in time. If a particular scrape target must be scraped at the beginning of some interval,
   then `scrape_align_interval` option must be used. For example, the following config aligns hourly scrapes to the beginning of hour:

-```yml
+```yaml
 scrape_configs:
 - job_name: foo
   scrape_interval: 1h

@@ -997,7 +997,7 @@ If you have suggestions for improvements or have found a bug - please open an is
 * By default `vmagent` evenly spreads scrape load in time. If a particular scrape target must be scraped at specific offset, then `scrape_offset` option must be used.
   For example, the following config instructs `vmagent` to scrape the target at 10 seconds of every minute:

-```yml
+```yaml
 scrape_configs:
 - job_name: foo
   scrape_interval: 1m

@@ -1010,14 +1010,14 @@ If you have suggestions for improvements or have found a bug - please open an is

 The following relabeling rule may be added to `relabel_configs` section in order to filter out pods with unneeded ports:

-```yml
+```yaml
 - action: keep_if_equal
   source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_container_port_number]
 ```

 The following relabeling rule may be added to `relabel_configs` section in order to filter out init container pods:

-```yml
+```yaml
 - action: drop
   source_labels: [__meta_kubernetes_pod_container_init]
   regex: true

@@ -1061,7 +1061,7 @@ For example, `-kafka.consumer.topic.brokers=host1:9092;host2:9092`.
 The following command starts `vmagent`, which reads metrics in InfluxDB line protocol format from Kafka broker at `localhost:9092`
 from the topic `metrics-by-telegraf` and sends them to remote storage at `http://localhost:8428/api/v1/write`:

-```console
+```bash
 ./bin/vmagent -remoteWrite.url=http://localhost:8428/api/v1/write \
        -kafka.consumer.topic.brokers=localhost:9092 \
        -kafka.consumer.topic.format=influx \

@@ -1084,7 +1084,7 @@ These command-line flags are available only in [enterprise](https://docs.victori
 which can be downloaded for evaluation from [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) page
 (see `vmutils-...-enterprise.tar.gz` archives) and from [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.

-```
+```text
   -kafka.consumer.topic array
     	Kafka topic names for data consumption.
     	Supports an array of values separated by comma or specified via multiple flags.

@@ -1129,13 +1129,13 @@ Two types of auth are supported:

 * sasl with username and password:

-```console
+```bash
 ./bin/vmagent -remoteWrite.url=kafka://localhost:9092/?topic=prom-rw&security.protocol=SASL_SSL&sasl.mechanisms=PLAIN -remoteWrite.basicAuth.username=user -remoteWrite.basicAuth.password=password
 ```

 * tls certificates:

-```console
+```bash
 ./bin/vmagent -remoteWrite.url=kafka://localhost:9092/?topic=prom-rw&security.protocol=SSL -remoteWrite.tlsCAFile=/opt/ca.pem -remoteWrite.tlsCertFile=/opt/cert.pem -remoteWrite.tlsKeyFile=/opt/key.pem
 ```

@@ -1166,7 +1166,7 @@ The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package-vmagent`.
 The base docker image is [alpine](https://hub.docker.com/_/alpine) but it is possible to use any other base image
 by setting it via `<ROOT_IMAGE>` environment variable. For example, the following command builds the image on top of [scratch](https://hub.docker.com/_/scratch) image:

-```console
+```bash
 ROOT_IMAGE=scratch make package-vmagent
 ```

@@ -1194,7 +1194,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b

 <div class="with-copy" markdown="1">

-```console
+```bash
 curl http://0.0.0.0:8429/debug/pprof/heap > mem.pprof
 ```

@@ -1204,7 +1204,7 @@ curl http://0.0.0.0:8429/debug/pprof/heap > mem.pprof

 <div class="with-copy" markdown="1">

-```console
+```bash
 curl http://0.0.0.0:8429/debug/pprof/profile > cpu.pprof
 ```

@@ -1220,7 +1220,7 @@ It is safe sharing the collected profiles from security point of view, since the

 `vmagent` can be fine-tuned with various command-line flags. Run `./vmagent -help` in order to see the full list of these flags with their descriptions and default values:

-```
+```text
 ./vmagent -help

 vmagent collects metrics data via popular data ingestion protocols and routes them to VictoriaMetrics.

@@ -1232,7 +1232,7 @@ The shortlist of configuration flags is the following:
   -remoteWrite.bearerTokenFile string
     	Optional path to bearer token file to use for -remoteWrite.url.
   -remoteWrite.concurrency int
-    	Defines number of writers for concurrent writing into remote querier (default 1)
+    	Defines number of writers for concurrent writing into remote write endpoint (default 1)
   -remoteWrite.disablePathAppend
     	Whether to disable automatic appending of '/api/v1/write' path to the configured -remoteWrite.url.
   -remoteWrite.flushInterval duration

@@ -3,6 +3,7 @@ package remotewrite
 import (
 	"bytes"
 	"context"
+	"errors"
 	"flag"
 	"fmt"
 	"io"

|
||||||
// Push adds timeseries into queue for writing into remote storage.
|
// Push adds timeseries into queue for writing into remote storage.
|
||||||
// Push returns and error if client is stopped or if queue is full.
|
// Push returns and error if client is stopped or if queue is full.
|
||||||
func (c *Client) Push(s prompbmarshal.TimeSeries) error {
|
func (c *Client) Push(s prompbmarshal.TimeSeries) error {
|
||||||
|
rwTotal.Inc()
|
||||||
select {
|
select {
|
||||||
case <-c.doneCh:
|
case <-c.doneCh:
|
||||||
|
rwErrors.Inc()
|
||||||
|
droppedRows.Add(len(s.Samples))
|
||||||
|
droppedBytes.Add(s.Size())
|
||||||
return fmt.Errorf("client is closed")
|
return fmt.Errorf("client is closed")
|
||||||
case c.input <- s:
|
case c.input <- s:
|
||||||
return nil
|
return nil
|
||||||
default:
|
default:
|
||||||
|
rwErrors.Inc()
|
||||||
|
droppedRows.Add(len(s.Samples))
|
||||||
|
droppedBytes.Add(s.Size())
|
||||||
return fmt.Errorf("failed to push timeseries - queue is full (%d entries). "+
|
return fmt.Errorf("failed to push timeseries - queue is full (%d entries). "+
|
||||||
"Queue size is controlled by -remoteWrite.maxQueueSize flag",
|
"Queue size is controlled by -remoteWrite.maxQueueSize flag",
|
||||||
c.maxQueueSize)
|
c.maxQueueSize)
|
||||||
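The `Push` path above is the classic non-blocking bounded-queue pattern: a `select` that either enqueues, reports shutdown, or rejects immediately when the buffer is full. A self-contained sketch of the same idea (the names here are illustrative, not the vmalert API):

```go
package main

import (
	"errors"
	"fmt"
)

// queue is a minimal sketch of a bounded, non-blocking producer queue.
type queue struct {
	input  chan int
	doneCh chan struct{}
}

var errClosed = errors.New("client is closed")

// push enqueues v without ever blocking the caller:
// it fails fast when the queue is stopped or full.
func (q *queue) push(v int) error {
	select {
	case <-q.doneCh:
		return errClosed
	case q.input <- v:
		return nil
	default:
		return fmt.Errorf("queue is full (%d entries)", cap(q.input))
	}
}

func main() {
	q := &queue{input: make(chan int, 2), doneCh: make(chan struct{})}
	fmt.Println(q.push(1), q.push(2)) // <nil> <nil>
	fmt.Println(q.push(3))            // queue is full (2 entries)
}
```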
@@ -181,11 +189,14 @@ func (c *Client) run(ctx context.Context) {
 }

 var (
+	rwErrors = metrics.NewCounter(`vmalert_remotewrite_errors_total`)
+	rwTotal  = metrics.NewCounter(`vmalert_remotewrite_total`)
+
 	sentRows  = metrics.NewCounter(`vmalert_remotewrite_sent_rows_total`)
 	sentBytes = metrics.NewCounter(`vmalert_remotewrite_sent_bytes_total`)
-	sendDuration = metrics.NewFloatCounter(`vmalert_remotewrite_send_duration_seconds_total`)
 	droppedRows  = metrics.NewCounter(`vmalert_remotewrite_dropped_rows_total`)
 	droppedBytes = metrics.NewCounter(`vmalert_remotewrite_dropped_bytes_total`)
+	sendDuration = metrics.NewFloatCounter(`vmalert_remotewrite_send_duration_seconds_total`)
 	bufferFlushDuration = metrics.NewHistogram(`vmalert_remotewrite_flush_duration_seconds`)

 	_ = metrics.NewGauge(`vmalert_remotewrite_concurrency`, func() float64 {
@@ -222,6 +233,11 @@ func (c *Client) flush(ctx context.Context, wr *prompbmarshal.WriteRequest) {
 L:
 	for attempts := 0; ; attempts++ {
 		err := c.send(ctx, b)
+		if errors.Is(err, io.EOF) {
+			// Something in the middle between client and destination might be closing
+			// the connection. So we do one more attempt in the hope the request will succeed.
+			err = c.send(ctx, b)
+		}
 		if err == nil {
 			sentRows.Add(len(wr.Timeseries))
 			sentBytes.Add(len(b))
@@ -259,6 +275,7 @@ L:

 		}

+		rwErrors.Inc()
 		droppedRows.Add(len(wr.Timeseries))
 		droppedBytes.Add(len(b))
 		logger.Errorf("attempts to send remote-write request failed - dropping %d time series",
@@ -303,7 +320,7 @@ func (c *Client) send(ctx context.Context, data []byte) error {
 	// Prometheus remote Write compatible receivers MUST
 	switch resp.StatusCode / 100 {
 	case 2:
-		// respond with a HTTP 2xx status code when the write is successful.
+		// respond with HTTP 2xx status code when write is successful.
 		return nil
 	case 4:
 		if resp.StatusCode != http.StatusTooManyRequests {
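Retrying exactly once on `io.EOF` is a pragmatic answer to idle keep-alive connections that an intermediary closed between requests; the second attempt opens a fresh connection and usually succeeds. A generic sketch of the pattern (the `send` callback is a stand-in, not the vmalert client):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// sendWithEOFRetry retries once when the transport reports io.EOF,
// which typically means an idle keep-alive connection was closed mid-flight.
func sendWithEOFRetry(send func() error) error {
	err := send()
	if errors.Is(err, io.EOF) {
		// A second attempt opens a fresh connection and usually succeeds.
		err = send()
	}
	return err
}

func main() {
	calls := 0
	send := func() error {
		calls++
		if calls == 1 {
			return io.EOF // first attempt hits a closed keep-alive connection
		}
		return nil
	}
	fmt.Println(sendWithEOFRetry(send), "attempts:", calls) // <nil> attempts: 2
}
```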
@@ -30,7 +30,7 @@ var (

 	maxQueueSize  = flag.Int("remoteWrite.maxQueueSize", 1e5, "Defines the max number of pending datapoints to remote write endpoint")
 	maxBatchSize  = flag.Int("remoteWrite.maxBatchSize", 1e3, "Defines max number of timeseries to be flushed at once")
-	concurrency   = flag.Int("remoteWrite.concurrency", 1, "Defines number of writers for concurrent writing into remote querier")
+	concurrency   = flag.Int("remoteWrite.concurrency", 1, "Defines number of writers for concurrent writing into remote write endpoint")
 	flushInterval = flag.Duration("remoteWrite.flushInterval", 5*time.Second, "Defines interval of flushes to remote write endpoint")

 	tlsInsecureSkipVerify = flag.Bool("remoteWrite.tlsInsecureSkipVerify", false, "Whether to skip tls verification when connecting to -remoteWrite.url")

@@ -663,9 +663,6 @@ var (

 	execTotal  = metrics.NewCounter(`vmalert_execution_total`)
 	execErrors = metrics.NewCounter(`vmalert_execution_errors_total`)

-	remoteWriteErrors = metrics.NewCounter(`vmalert_remotewrite_errors_total`)
-	remoteWriteTotal  = metrics.NewCounter(`vmalert_remotewrite_total`)
 )

 func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDuration time.Duration, limit int) error {
@@ -686,9 +683,7 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
 	pushToRW := func(tss []prompbmarshal.TimeSeries) error {
 		var lastErr error
 		for _, ts := range tss {
-			remoteWriteTotal.Inc()
 			if err := e.Rw.Push(ts); err != nil {
-				remoteWriteErrors.Inc()
 				lastErr = fmt.Errorf("rule %q: remote write failure: %w", r, err)
 			}
 		}

@@ -32,6 +32,38 @@ Pass `-help` to `vmauth` in order to see all the supported command-line flags wi
 Feel free to [contact us](mailto:info@victoriametrics.com) if you need a customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML,
 accounting and rate limiting such as [vmgateway](https://docs.victoriametrics.com/vmgateway.html).

+## Dropping request path prefix
+
+By default `vmauth` doesn't drop the path prefix from the original request when proxying the request to the matching backend.
+Sometimes it is needed to drop the path prefix before routing the request to the backend. This can be done by specifying the number of `/`-delimited
+prefix parts to drop from the request path via `drop_src_path_prefix_parts` option at `url_map` level or at `user` level.
+
+For example, if you need to serve requests to [vmalert](https://docs.victoriametrics.com/vmalert.html) at `/vmalert/` path prefix,
+while serving requests to [vmagent](https://docs.victoriametrics.com/vmagent.html) at `/vmagent/` path prefix for a particular user,
+then the following [-auth.config](#auth-config) can be used:
+
+```yml
+users:
+- username: foo
+  url_map:
+
+  # proxy all the requests, which start with `/vmagent/`, to vmagent backend
+  - src_paths:
+    - "/vmagent/.+"
+
+    # drop /vmagent/ path prefix from the original request before proxying it to url_prefix.
+    drop_src_path_prefix_parts: 1
+    url_prefix: "http://vmagent-backend:8429/"
+
+  # proxy all the requests, which start with `/vmalert`, to vmalert backend
+  - src_paths:
+    - "/vmalert/.+"
+
+    # drop /vmalert/ path prefix from the original request before proxying it to url_prefix.
+    drop_src_path_prefix_parts: 1
+    url_prefix: "http://vmalert-backend:8880/"
+```

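The transformation is purely positional: `drop_src_path_prefix_parts: N` removes the first N `/`-delimited components of the request path before the remainder is appended to `url_prefix`. A small sketch of the semantics (illustrative code, not the vmauth implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// dropParts removes the first n "/"-delimited components from path,
// mirroring the drop_src_path_prefix_parts semantics described above.
func dropParts(path string, n int) string {
	for ; n > 0; n-- {
		path = strings.TrimPrefix(path, "/")
		i := strings.IndexByte(path, '/')
		if i < 0 {
			return "" // fewer parts than requested - nothing is left
		}
		path = path[i:]
	}
	return path
}

func main() {
	// With drop_src_path_prefix_parts: 1, /vmagent/api/v1/write is proxied
	// to http://vmagent-backend:8429/api/v1/write.
	fmt.Println(dropParts("/vmagent/api/v1/write", 1)) // /api/v1/write
	fmt.Println(dropParts("/vmalert/-/reload", 1))     // /-/reload
}
```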
 ## Load balancing

 Each `url_prefix` in the [-auth.config](#auth-config) may contain either a single url or a list of urls.

@@ -101,6 +133,31 @@ The following [metrics](#monitoring) related to concurrency limits are exposed b
 - `vmauth_unauthorized_user_concurrent_requests_limit_reached_total` - the number of requests rejected with `429 Too Many Requests` error
   because the concurrency limit has been reached for unauthorized users (if `unauthorized_user` section is used).

+## Backend TLS setup
+
+By default `vmauth` uses system settings when performing requests to HTTPS backends specified via `url_prefix` option
+in the [`-auth.config`](https://docs.victoriametrics.com/vmauth.html#auth-config). These settings can be overridden with the following command-line flags:
+
+- `-backend.tlsInsecureSkipVerify` allows skipping TLS verification when connecting to HTTPS backends.
+  This global setting can be overridden at per-user level inside [`-auth.config`](https://docs.victoriametrics.com/vmauth.html#auth-config)
+  via `tls_insecure_skip_verify` option. For example:
+
+```yml
+- username: "foo"
+  url_prefix: "https://localhost"
+  tls_insecure_skip_verify: true
+```
+
+- `-backend.tlsCAFile` allows specifying the path to TLS Root CA, which will be used for TLS verification when connecting to HTTPS backends.
+  The `-backend.tlsCAFile` may point either to local file or to `http` / `https` url.
+  This global setting can be overridden at per-user level inside [`-auth.config`](https://docs.victoriametrics.com/vmauth.html#auth-config)
+  via `tls_ca_file` option. For example:
+
+```yml
+- username: "foo"
+  url_prefix: "https://localhost"
+  tls_ca_file: "/path/to/tls/root/ca"
+```
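Under the hood these options boil down to configuring the `tls.Config` of the backend `http.Transport`. A minimal, self-contained sketch of what such a setup looks like in Go (the CA path is a placeholder; this is not the vmauth code itself):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

// newBackendTransport builds an HTTP transport that trusts the given root CA
// and optionally skips TLS verification - the two knobs exposed by vmauth.
func newBackendTransport(caFile string, insecureSkipVerify bool) (*http.Transport, error) {
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tlsCfg := &tls.Config{InsecureSkipVerify: insecureSkipVerify}
	if caFile != "" {
		pem, err := os.ReadFile(caFile)
		if err != nil {
			return nil, fmt.Errorf("cannot read CA file: %w", err)
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(pem) {
			return nil, fmt.Errorf("cannot parse PEM data in %q", caFile)
		}
		tlsCfg.RootCAs = pool
	}
	tr.TLSClientConfig = tlsCfg
	return tr, nil
}

func main() {
	tr, err := newBackendTransport("/path/to/tls/root/ca", false)
	if err != nil {
		fmt.Println("setup failed:", err)
		return
	}
	client := &http.Client{Transport: tr}
	_ = client // use the client for requests to the HTTPS backend
}
```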

 ## IP filters

@@ -181,6 +238,15 @@ users:
   password: "***"
   url_prefix: "http://localhost:8428?extra_label=team=dev"

+  # All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
+  # are proxied to https://localhost:8428.
+  # For example, http://vmauth:8427/api/v1/query is routed to https://localhost/api/v1/query
+  # TLS verification is skipped for https://localhost.
+- username: "local-single-node-with-tls"
+  password: "***"
+  url_prefix: "https://localhost"
+  tls_insecure_skip_verify: true
+
 # All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
 # are load-balanced among http://vmselect1:8481/select/123/prometheus and http://vmselect2:8481/select/123/prometheus
 # For example, http://vmauth:8427/api/v1/query is proxied to the following urls in a round-robin manner:

@@ -5,6 +5,7 @@ import (
 	"encoding/base64"
 	"flag"
 	"fmt"
+	"net/http"
 	"net/url"
 	"os"
 	"regexp"
@@ -14,13 +15,14 @@ import (
 	"sync/atomic"
 	"time"

+	"github.com/VictoriaMetrics/metrics"
+	"gopkg.in/yaml.v2"
+
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
-	"github.com/VictoriaMetrics/metrics"
-	"gopkg.in/yaml.v2"
 )

 var (
@@ -38,20 +40,25 @@ type AuthConfig struct {

 // UserInfo is user information read from authConfigPath
 type UserInfo struct {
 	Name                  string      `yaml:"name,omitempty"`
 	BearerToken           string      `yaml:"bearer_token,omitempty"`
 	Username              string      `yaml:"username,omitempty"`
 	Password              string      `yaml:"password,omitempty"`
 	URLPrefix             *URLPrefix  `yaml:"url_prefix,omitempty"`
 	URLMaps               []URLMap    `yaml:"url_map,omitempty"`
 	HeadersConf           HeadersConf `yaml:",inline"`
 	MaxConcurrentRequests int         `yaml:"max_concurrent_requests,omitempty"`
 	DefaultURL            *URLPrefix  `yaml:"default_url,omitempty"`
 	RetryStatusCodes      []int       `yaml:"retry_status_codes,omitempty"`
+	DropSrcPathPrefixParts int        `yaml:"drop_src_path_prefix_parts,omitempty"`
+	TLSInsecureSkipVerify *bool       `yaml:"tls_insecure_skip_verify,omitempty"`
+	TLSCAFile             string      `yaml:"tls_ca_file,omitempty"`

 	concurrencyLimitCh      chan struct{}
 	concurrencyLimitReached *metrics.Counter

+	httpTransport *http.Transport
+
 	requests         *metrics.Counter
 	requestsDuration *metrics.Summary
 }
@@ -113,10 +120,11 @@ func (h *Header) MarshalYAML() (interface{}, error) {

 // URLMap is a mapping from source paths to target urls.
 type URLMap struct {
 	SrcPaths         []*SrcPath  `yaml:"src_paths,omitempty"`
 	URLPrefix        *URLPrefix  `yaml:"url_prefix,omitempty"`
 	HeadersConf      HeadersConf `yaml:",inline"`
 	RetryStatusCodes []int       `yaml:"retry_status_codes,omitempty"`
+	DropSrcPathPrefixParts int   `yaml:"drop_src_path_prefix_parts,omitempty"`
 }

 // SrcPath represents an src path
@@ -442,6 +450,11 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
 		_ = metrics.GetOrCreateGauge(`vmauth_unauthorized_user_concurrent_requests_current`, func() float64 {
 			return float64(len(ui.concurrencyLimitCh))
 		})
+		tr, err := getTransport(ui.TLSInsecureSkipVerify, ui.TLSCAFile)
+		if err != nil {
+			return nil, fmt.Errorf("cannot initialize HTTP transport: %w", err)
+		}
+		ui.httpTransport = tr
 	}
 	return &ac, nil
 }
@@ -512,6 +525,12 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
 		_ = metrics.GetOrCreateGauge(fmt.Sprintf(`vmauth_user_concurrent_requests_current{username=%q}`, name), func() float64 {
 			return float64(len(ui.concurrencyLimitCh))
 		})
+		tr, err := getTransport(ui.TLSInsecureSkipVerify, ui.TLSCAFile)
+		if err != nil {
+			return nil, fmt.Errorf("cannot initialize HTTP transport: %w", err)
+		}
+		ui.httpTransport = tr
+
 		byAuthToken[at1] = ui
 		byAuthToken[at2] = ui
 	}

@@ -221,22 +221,26 @@ func TestParseAuthConfigSuccess(t *testing.T) {
 	}

 	// Single user
+	insecureSkipVerifyTrue := true
 	f(`
 users:
 - username: foo
   password: bar
   url_prefix: http://aaa:343/bbb
   max_concurrent_requests: 5
+  tls_insecure_skip_verify: true
 `, map[string]*UserInfo{
 		getAuthToken("", "foo", "bar"): {
 			Username:              "foo",
 			Password:              "bar",
 			URLPrefix:             mustParseURL("http://aaa:343/bbb"),
 			MaxConcurrentRequests: 5,
+			TLSInsecureSkipVerify: &insecureSkipVerifyTrue,
 		},
 	})

 	// Multiple url_prefix entries
+	insecureSkipVerifyFalse := false
 	f(`
 users:
 - username: foo
@@ -244,6 +248,9 @@ users:
   url_prefix:
   - http://node1:343/bbb
   - http://node2:343/bbb
+  tls_insecure_skip_verify: false
+  retry_status_codes: [500, 501]
+  drop_src_path_prefix_parts: 1
 `, map[string]*UserInfo{
 		getAuthToken("", "foo", "bar"): {
 			Username: "foo",
@@ -252,6 +259,9 @@ users:
 				"http://node1:343/bbb",
 				"http://node2:343/bbb",
 			}),
+			TLSInsecureSkipVerify:  &insecureSkipVerifyFalse,
+			RetryStatusCodes:       []int{500, 501},
+			DropSrcPathPrefixParts: 1,
 		},
 	})

@@ -448,6 +458,47 @@ users:

 }

+func TestParseAuthConfigPassesTLSVerificationConfig(t *testing.T) {
+	c := `
+users:
+- username: foo
+  password: bar
+  url_prefix: https://aaa/bbb
+  max_concurrent_requests: 5
+  tls_insecure_skip_verify: true
+
+unauthorized_user:
+  url_prefix: http://aaa:343/bbb
+  max_concurrent_requests: 5
+  tls_insecure_skip_verify: false
+`
+
+	ac, err := parseAuthConfig([]byte(c))
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	m, err := parseAuthConfigUsers(ac)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+
+	ui := m[getAuthToken("", "foo", "bar")]
+	if !isSetBool(ui.TLSInsecureSkipVerify, true) || !ui.httpTransport.TLSClientConfig.InsecureSkipVerify {
+		t.Fatalf("unexpected TLSInsecureSkipVerify value for user foo")
+	}
+
+	if !isSetBool(ac.UnauthorizedUser.TLSInsecureSkipVerify, false) || ac.UnauthorizedUser.httpTransport.TLSClientConfig.InsecureSkipVerify {
+		t.Fatalf("unexpected TLSInsecureSkipVerify value for unauthorized_user")
+	}
+}
+
+func isSetBool(boolP *bool, expectedValue bool) bool {
+	if boolP == nil {
+		return false
+	}
+	return *boolP == expectedValue
+}
+
 func getSrcPaths(paths []string) []*SrcPath {
 	var sps []*SrcPath
 	for _, path := range paths {

@@ -42,6 +42,15 @@ users:
   password: "***"
   url_prefix: "http://localhost:8428?extra_label=team=dev"

+  # All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
+  # are proxied to https://localhost:8428
+  # For example, http://vmauth:8427/api/v1/query is routed to https://localhost/api/v1/query
+  # TLS verification is ignored for https://localhost.
+- username: "local-single-node-with-tls"
+  password: "***"
+  url_prefix: "https://localhost"
+  tls_insecure_skip_verify: true
+
 # All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
 # are load-balanced among http://vmselect1:8481/select/123/prometheus and http://vmselect2:8481/select/123/prometheus
 # For example, http://vmauth:8427/api/v1/query is proxied to the following urls in a round-robin manner:

@@ -2,6 +2,8 @@ package main

 import (
 	"context"
+	"crypto/tls"
+	"crypto/x509"
 	"errors"
 	"flag"
 	"fmt"
@@ -15,16 +17,19 @@ import (
 	"sync"
 	"time"

+	"github.com/VictoriaMetrics/metrics"
+
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
-	"github.com/VictoriaMetrics/metrics"
 )

 var (
@@ -46,6 +51,10 @@ var (
 	failTimeout = flag.Duration("failTimeout", 3*time.Second, "Sets a delay period for load balancing to skip a malfunctioning backend")
 	maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size, which can be cached and re-tried at other backends. "+
 		"Bigger values may require more memory")
+	backendTLSInsecureSkipVerify = flag.Bool("backend.tlsInsecureSkipVerify", false, "Whether to skip TLS verification when connecting to backends over HTTPS. "+
+		"See https://docs.victoriametrics.com/vmauth.html#backend-tls-setup")
+	backendTLSCAFile = flag.String("backend.TLSCAFile", "", "Optional path to TLS root CA file, which is used for TLS verification when connecting to backends over HTTPS. "+
+		"See https://docs.victoriametrics.com/vmauth.html#backend-tls-setup")
 )

 func main() {
@@ -155,7 +164,7 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {

 func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
 	u := normalizeURL(r.URL)
-	up, hc, retryStatusCodes := ui.getURLPrefixAndHeaders(u)
+	up, hc, retryStatusCodes, dropSrcPathPrefixParts := ui.getURLPrefixAndHeaders(u)
 	isDefault := false
 	if up == nil {
 		if ui.DefaultURL == nil {
@@ -189,9 +198,9 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
 		query.Set("request_path", u.Path)
 		targetURL.RawQuery = query.Encode()
 	} else { // Update path for regular routes.
-		targetURL = mergeURLs(targetURL, u)
+		targetURL = mergeURLs(targetURL, u, dropSrcPathPrefixParts)
 	}
-	ok := tryProcessingRequest(w, r, targetURL, hc, retryStatusCodes)
+	ok := tryProcessingRequest(w, r, targetURL, hc, retryStatusCodes, ui.httpTransport)
 	bu.put()
 	if ok {
 		return
@@ -205,12 +214,12 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
 	httpserver.Errorf(w, r, "%s", err)
 }

-func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url.URL, hc HeadersConf, retryStatusCodes []int) bool {
+func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url.URL, hc HeadersConf, retryStatusCodes []int, transport *http.Transport) bool {
 	// This code has been copied from net/http/httputil/reverseproxy.go
 	req := sanitizeRequestHeaders(r)
 	req.URL = targetURL
+	req.Host = targetURL.Host
 	updateHeadersByConfig(req.Header, hc.RequestHeaders)
-	transportOnce.Do(transportInit)
 	res, err := transport.RoundTrip(req)
 	rtb, rtbOK := req.Body.(*readTrackingBody)
 	if err != nil {
@@ -353,23 +362,77 @@ var (
 	missingRouteRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="missing_route"}`)
 )

-var (
-	transport     *http.Transport
-	transportOnce sync.Once
-)
-
-func transportInit() {
+func getTransport(insecureSkipVerifyP *bool, caFile string) (*http.Transport, error) {
+	if insecureSkipVerifyP == nil {
+		insecureSkipVerifyP = backendTLSInsecureSkipVerify
+	}
+	insecureSkipVerify := *insecureSkipVerifyP
+	if caFile == "" {
+		caFile = *backendTLSCAFile
+	}
+
+	bb := bbPool.Get()
+	defer bbPool.Put(bb)
+
+	bb.B = appendTransportKey(bb.B[:0], insecureSkipVerify, caFile)
+
+	transportMapLock.Lock()
+	defer transportMapLock.Unlock()
+
+	tr := transportMap[string(bb.B)]
+	if tr == nil {
+		trLocal, err := newTransport(insecureSkipVerify, caFile)
+		if err != nil {
+			return nil, err
+		}
+		transportMap[string(bb.B)] = trLocal
+		tr = trLocal
+	}
+
+	return tr, nil
+}
+
+var transportMap = make(map[string]*http.Transport)
+var transportMapLock sync.Mutex
+
+func appendTransportKey(dst []byte, insecureSkipVerify bool, caFile string) []byte {
+	dst = encoding.MarshalBool(dst, insecureSkipVerify)
+	dst = encoding.MarshalBytes(dst, bytesutil.ToUnsafeBytes(caFile))
+	return dst
+}
+
+var bbPool bytesutil.ByteBufferPool
+
+func newTransport(insecureSkipVerify bool, caFile string) (*http.Transport, error) {
 	tr := http.DefaultTransport.(*http.Transport).Clone()
 	tr.ResponseHeaderTimeout = *responseTimeout
 	// Automatic compression must be disabled in order to fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/535
 	tr.DisableCompression = true
-	// Disable HTTP/2.0, since VictoriaMetrics components don't support HTTP/2.0 (because there is no sense in this).
-	tr.ForceAttemptHTTP2 = false
 	tr.MaxIdleConnsPerHost = *maxIdleConnsPerBackend
 	if tr.MaxIdleConns != 0 && tr.MaxIdleConns < tr.MaxIdleConnsPerHost {
 		tr.MaxIdleConns = tr.MaxIdleConnsPerHost
 	}
-	transport = tr
+	tlsCfg := tr.TLSClientConfig
+	if tlsCfg == nil {
+		tlsCfg = &tls.Config{}
+		tr.TLSClientConfig = tlsCfg
+	}
+	if insecureSkipVerify || caFile != "" {
+		tlsCfg.ClientSessionCache = tls.NewLRUClientSessionCache(0)
+		tlsCfg.InsecureSkipVerify = insecureSkipVerify
+		if caFile != "" {
+			data, err := fs.ReadFileOrHTTP(caFile)
+			if err != nil {
+				return nil, fmt.Errorf("cannot read tls_ca_file: %w", err)
+			}
+			rootCA := x509.NewCertPool()
+			if !rootCA.AppendCertsFromPEM(data) {
+				return nil, fmt.Errorf("cannot parse data read from tls_ca_file %q", caFile)
+			}
+			tlsCfg.RootCAs = rootCA
+		}
+	}
+	return tr, nil
 }

 var (
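Replacing the single `sync.Once` transport with a map keyed by the (insecure-skip-verify, CA-file) pair lets every user carry its own TLS settings while users with identical settings still share one transport, preserving connection pooling. A generic sketch of this memoization pattern (illustrative, not the vmauth code):

```go
package main

import (
	"fmt"
	"net/http"
	"sync"
)

// transportCache hands out one shared *http.Transport per distinct TLS config,
// so callers with identical settings reuse the same connection pool.
type transportCache struct {
	mu sync.Mutex
	m  map[string]*http.Transport
}

func (c *transportCache) get(insecureSkipVerify bool, caFile string) *http.Transport {
	// The key encodes every field the transport depends on.
	key := fmt.Sprintf("%v\x00%s", insecureSkipVerify, caFile)
	c.mu.Lock()
	defer c.mu.Unlock()
	if tr, ok := c.m[key]; ok {
		return tr
	}
	tr := http.DefaultTransport.(*http.Transport).Clone()
	c.m[key] = tr
	return tr
}

func main() {
	c := &transportCache{m: make(map[string]*http.Transport)}
	a := c.get(false, "/etc/ca.pem")
	b := c.get(false, "/etc/ca.pem")
	fmt.Println(a == b) // true: the transport is shared
}
```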
@@ -6,12 +6,13 @@ import (
 	"strings"
 )

-func mergeURLs(uiURL, requestURI *url.URL) *url.URL {
+func mergeURLs(uiURL, requestURI *url.URL, dropSrcPathPrefixParts int) *url.URL {
 	targetURL := *uiURL
-	if strings.HasPrefix(requestURI.Path, "/") {
+	srcPath := dropPrefixParts(requestURI.Path, dropSrcPathPrefixParts)
+	if strings.HasPrefix(srcPath, "/") {
 		targetURL.Path = strings.TrimSuffix(targetURL.Path, "/")
 	}
-	targetURL.Path += requestURI.Path
+	targetURL.Path += srcPath
 	requestParams := requestURI.Query()
 	// fast path
 	if len(requestParams) == 0 {
@@ -32,18 +33,34 @@ func mergeURLs(uiURL, requestURI *url.URL) *url.URL {
 	return &targetURL
 }

-func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL) (*URLPrefix, HeadersConf, []int) {
+func dropPrefixParts(path string, parts int) string {
+	if parts <= 0 {
+		return path
+	}
+	for parts > 0 {
+		path = strings.TrimPrefix(path, "/")
+		n := strings.IndexByte(path, '/')
+		if n < 0 {
+			return ""
+		}
+		path = path[n:]
+		parts--
+	}
+	return path
+}
+
+func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL) (*URLPrefix, HeadersConf, []int, int) {
 	for _, e := range ui.URLMaps {
 		for _, sp := range e.SrcPaths {
 			if sp.match(u.Path) {
-				return e.URLPrefix, e.HeadersConf, e.RetryStatusCodes
+				return e.URLPrefix, e.HeadersConf, e.RetryStatusCodes, e.DropSrcPathPrefixParts
 			}
 		}
 	}
 	if ui.URLPrefix != nil {
-		return ui.URLPrefix, ui.HeadersConf, ui.RetryStatusCodes
+		return ui.URLPrefix, ui.HeadersConf, ui.RetryStatusCodes, ui.DropSrcPathPrefixParts
 	}
-	return nil, HeadersConf{}, nil
+	return nil, HeadersConf{}, nil, 0
 }

 func normalizeURL(uOrig *url.URL) *url.URL {
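To make the merge rules concrete, here is a small hedged illustration of `mergeURLs`-style behavior: the request path is appended to the configured prefix, and query args from the prefix override the request's query args (the code below mimics the semantics, it is not the vmauth implementation):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// merge mimics mergeURLs for the common case: append the request path to the
// configured prefix and let query args from the prefix override the request's.
func merge(prefix, request string) string {
	p, _ := url.Parse(prefix)
	r, _ := url.Parse(request)
	if strings.HasPrefix(r.Path, "/") {
		p.Path = strings.TrimSuffix(p.Path, "/")
	}
	p.Path += r.Path
	q := r.Query()
	for k, vs := range p.Query() {
		q[k] = vs // prefix params win, mirroring the extra_label test cases
	}
	p.RawQuery = q.Encode()
	return p.String()
}

func main() {
	fmt.Println(merge("https://sss:3894/x/y", "/z"))
	// https://sss:3894/x/y/z
	fmt.Println(merge("http://foo.bar?extra_label=team=mobile", "/api/v1/query?extra_label=team=dev"))
	// http://foo.bar/api/v1/query?extra_label=team%3Dmobile
}
```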
@@ -7,20 +7,91 @@ import (
 	"testing"
 )

+func TestDropPrefixParts(t *testing.T) {
+	f := func(path string, parts int, expectedResult string) {
+		t.Helper()
+
+		result := dropPrefixParts(path, parts)
+		if result != expectedResult {
+			t.Fatalf("unexpected result; got %q; want %q", result, expectedResult)
+		}
+	}
+
+	f("", 0, "")
+	f("", 1, "")
+	f("", 10, "")
+	f("foo", 0, "foo")
+	f("foo", -1, "foo")
+	f("foo", 1, "")
+
+	f("/foo", 0, "/foo")
+	f("/foo/bar", 0, "/foo/bar")
+	f("/foo/bar/baz", 0, "/foo/bar/baz")
+
+	f("foo", 0, "foo")
+	f("foo/bar", 0, "foo/bar")
+	f("foo/bar/baz", 0, "foo/bar/baz")
+
+	f("/foo/", 0, "/foo/")
+	f("/foo/bar/", 0, "/foo/bar/")
+	f("/foo/bar/baz/", 0, "/foo/bar/baz/")
+
+	f("/foo", 1, "")
+	f("/foo/bar", 1, "/bar")
+	f("/foo/bar/baz", 1, "/bar/baz")
+
+	f("foo", 1, "")
+	f("foo/bar", 1, "/bar")
+	f("foo/bar/baz", 1, "/bar/baz")
+
+	f("/foo/", 1, "/")
+	f("/foo/bar/", 1, "/bar/")
+	f("/foo/bar/baz/", 1, "/bar/baz/")
+
+	f("/foo", 2, "")
+	f("/foo/bar", 2, "")
+	f("/foo/bar/baz", 2, "/baz")
+
+	f("foo", 2, "")
+	f("foo/bar", 2, "")
+	f("foo/bar/baz", 2, "/baz")
+
+	f("/foo/", 2, "")
+	f("/foo/bar/", 2, "/")
+	f("/foo/bar/baz/", 2, "/baz/")
+
+	f("/foo", 3, "")
+	f("/foo/bar", 3, "")
+	f("/foo/bar/baz", 3, "")
+
+	f("foo", 3, "")
+	f("foo/bar", 3, "")
+	f("foo/bar/baz", 3, "")
+
+	f("/foo/", 3, "")
+	f("/foo/bar/", 3, "")
+	f("/foo/bar/baz/", 3, "/")
+
+	f("/foo/", 4, "")
+	f("/foo/bar/", 4, "")
+	f("/foo/bar/baz/", 4, "")
+}
+
 func TestCreateTargetURLSuccess(t *testing.T) {
-	f := func(ui *UserInfo, requestURI, expectedTarget, expectedRequestHeaders, expectedResponseHeaders string, expectedRetryStatusCodes []int) {
+	f := func(ui *UserInfo, requestURI, expectedTarget, expectedRequestHeaders, expectedResponseHeaders string,
+		expectedRetryStatusCodes []int, expectedDropSrcPathPrefixParts int) {
 		t.Helper()
 		u, err := url.Parse(requestURI)
 		if err != nil {
 			t.Fatalf("cannot parse %q: %s", requestURI, err)
 		}
 		u = normalizeURL(u)
-		up, hc, retryStatusCodes := ui.getURLPrefixAndHeaders(u)
+		up, hc, retryStatusCodes, dropSrcPathPrefixParts := ui.getURLPrefixAndHeaders(u)
 		if up == nil {
 			t.Fatalf("cannot determine backend: %s", err)
 		}
 		bu := up.getLeastLoadedBackendURL()
-		target := mergeURLs(bu.url, u)
+		target := mergeURLs(bu.url, u, dropSrcPathPrefixParts)
 		bu.put()
 		if target.String() != expectedTarget {
 			t.Fatalf("unexpected target; got %q; want %q", target, expectedTarget)
@@ -32,11 +103,14 @@ func TestCreateTargetURLSuccess(t *testing.T) {
 		if !reflect.DeepEqual(retryStatusCodes, expectedRetryStatusCodes) {
 			t.Fatalf("unexpected retryStatusCodes; got %d; want %d", retryStatusCodes, expectedRetryStatusCodes)
 		}
+		if dropSrcPathPrefixParts != expectedDropSrcPathPrefixParts {
+			t.Fatalf("unexpected dropSrcPathPrefixParts; got %d; want %d", dropSrcPathPrefixParts, expectedDropSrcPathPrefixParts)
+		}
 	}
 	// Simple routing with `url_prefix`
 	f(&UserInfo{
 		URLPrefix: mustParseURL("http://foo.bar"),
-	}, "", "http://foo.bar/.", "[]", "[]", nil)
+	}, "", "http://foo.bar/.", "[]", "[]", nil, 0)
 	f(&UserInfo{
 		URLPrefix: mustParseURL("http://foo.bar"),
 		HeadersConf: HeadersConf{
@@ -45,29 +119,30 @@ func TestCreateTargetURLSuccess(t *testing.T) {
 			Value: "aaa",
 		}},
 		},
 		RetryStatusCodes: []int{503, 501},
-	}, "/", "http://foo.bar", `[{"bb" "aaa"}]`, `[]`, []int{503, 501})
+		DropSrcPathPrefixParts: 2,
+	}, "/a/b/c", "http://foo.bar/c", `[{"bb" "aaa"}]`, `[]`, []int{503, 501}, 2)
 	f(&UserInfo{
 		URLPrefix: mustParseURL("http://foo.bar/federate"),
-	}, "/", "http://foo.bar/federate", "[]", "[]", nil)
+	}, "/", "http://foo.bar/federate", "[]", "[]", nil, 0)
 	f(&UserInfo{
 		URLPrefix: mustParseURL("http://foo.bar"),
-	}, "a/b?c=d", "http://foo.bar/a/b?c=d", "[]", "[]", nil)
+	}, "a/b?c=d", "http://foo.bar/a/b?c=d", "[]", "[]", nil, 0)
 	f(&UserInfo{
 		URLPrefix: mustParseURL("https://sss:3894/x/y"),
-	}, "/z", "https://sss:3894/x/y/z", "[]", "[]", nil)
+	}, "/z", "https://sss:3894/x/y/z", "[]", "[]", nil, 0)
 	f(&UserInfo{
 		URLPrefix: mustParseURL("https://sss:3894/x/y"),
-	}, "/../../aaa", "https://sss:3894/x/y/aaa", "[]", "[]", nil)
+	}, "/../../aaa", "https://sss:3894/x/y/aaa", "[]", "[]", nil, 0)
 	f(&UserInfo{
 		URLPrefix: mustParseURL("https://sss:3894/x/y"),
-	}, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s%2F..%2Fd", "[]", "[]", nil)
+	}, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s%2F..%2Fd", "[]", "[]", nil, 0)

 	// Complex routing with `url_map`
 	ui := &UserInfo{
 		URLMaps: []URLMap{
 			{
-				SrcPaths:  getSrcPaths([]string{"/api/v1/query"}),
+				SrcPaths:  getSrcPaths([]string{"/vmsingle/api/v1/query"}),
 				URLPrefix: mustParseURL("http://vmselect/0/prometheus"),
 				HeadersConf: HeadersConf{
 					RequestHeaders: []Header{
@@ -87,7 +162,8 @@ func TestCreateTargetURLSuccess(t *testing.T) {
 					},
 				},
 				RetryStatusCodes: []int{503, 500, 501},
+				DropSrcPathPrefixParts: 1,
 			},
 			{
 				SrcPaths: getSrcPaths([]string{"/api/v1/write"}),
@@ -105,11 +181,12 @@ func TestCreateTargetURLSuccess(t *testing.T) {
 			Value: "y",
 		}},
 		},
 		RetryStatusCodes: []int{502},
+		DropSrcPathPrefixParts: 2,
 	}
-	f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", `[{"xx" "aa"} {"yy" "asdf"}]`, `[{"qwe" "rty"}]`, []int{503, 500, 501})
+	f(ui, "/vmsingle/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", `[{"xx" "aa"} {"yy" "asdf"}]`, `[{"qwe" "rty"}]`, []int{503, 500, 501}, 1)
-	f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]", "[]", nil)
+	f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]", "[]", nil, 0)
-	f(ui, "/api/v1/query_range", "http://default-server/api/v1/query_range", `[{"bb" "aaa"}]`, `[{"x" "y"}]`, []int{502})
+	f(ui, "/foo/bar/api/v1/query_range", "http://default-server/api/v1/query_range", `[{"bb" "aaa"}]`, `[{"x" "y"}]`, []int{502}, 2)

 	// Complex routing regexp paths in `url_map`
 	ui = &UserInfo{
@@ -125,17 +202,17 @@ func TestCreateTargetURLSuccess(t *testing.T) {
 		},
 		URLPrefix: mustParseURL("http://default-server"),
 	}
-	f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", "[]", "[]", nil)
+	f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", "[]", "[]", nil, 0)
-	f(ui, "/api/v1/query_range?query=up", "http://vmselect/0/prometheus/api/v1/query_range?query=up", "[]", "[]", nil)
+	f(ui, "/api/v1/query_range?query=up", "http://vmselect/0/prometheus/api/v1/query_range?query=up", "[]", "[]", nil, 0)
-	f(ui, "/api/v1/label/foo/values", "http://vmselect/0/prometheus/api/v1/label/foo/values", "[]", "[]", nil)
+	f(ui, "/api/v1/label/foo/values", "http://vmselect/0/prometheus/api/v1/label/foo/values", "[]", "[]", nil, 0)
-	f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]", "[]", nil)
+	f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]", "[]", nil, 0)
-	f(ui, "/api/v1/foo/bar", "http://default-server/api/v1/foo/bar", "[]", "[]", nil)
+	f(ui, "/api/v1/foo/bar", "http://default-server/api/v1/foo/bar", "[]", "[]", nil, 0)
 	f(&UserInfo{
 		URLPrefix: mustParseURL("http://foo.bar?extra_label=team=dev"),
-	}, "/api/v1/query", "http://foo.bar/api/v1/query?extra_label=team=dev", "[]", "[]", nil)
+	}, "/api/v1/query", "http://foo.bar/api/v1/query?extra_label=team=dev", "[]", "[]", nil, 0)
 	f(&UserInfo{
 		URLPrefix: mustParseURL("http://foo.bar?extra_label=team=mobile"),
-	}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "[]", "[]", nil)
+	}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "[]", "[]", nil, 0)
 }

 func TestCreateTargetURLFailure(t *testing.T) {
@@ -146,7 +223,7 @@ func TestCreateTargetURLFailure(t *testing.T) {
 			t.Fatalf("cannot parse %q: %s", requestURI, err)
 		}
 		u = normalizeURL(u)
-		up, hc, retryStatusCodes := ui.getURLPrefixAndHeaders(u)
+		up, hc, retryStatusCodes, dropSrcPathPrefixParts := ui.getURLPrefixAndHeaders(u)
 		if up != nil {
 			t.Fatalf("unexpected non-empty up=%#v", up)
 		}
@@ -159,6 +236,9 @@ func TestCreateTargetURLFailure(t *testing.T) {
 		if retryStatusCodes != nil {
 			t.Fatalf("unexpected non-empty retryStatusCodes=%d", retryStatusCodes)
 		}
+		if dropSrcPathPrefixParts != 0 {
+			t.Fatalf("unexpected non-zero dropSrcPathPrefixParts=%d", dropSrcPathPrefixParts)
+		}
 	}
 	f(&UserInfo{}, "/foo/bar")
 	f(&UserInfo{

@@ -1,5 +1,4 @@
 //go:build aix || linux || solaris || zos
-// +build aix linux solaris zos

 package terminal

@@ -1,5 +1,4 @@
 //go:build darwin || freebsd || openbsd
-// +build darwin freebsd openbsd

 package terminal

@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows

 package terminal

@@ -23,6 +23,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
 	"github.com/VictoriaMetrics/metrics"
 	"github.com/VictoriaMetrics/metricsql"
 )
@@ -41,7 +42,7 @@ var (
 		"See also -search.logSlowQueryDuration and -search.maxMemoryPerQuery")
 	noStaleMarkers = flag.Bool("search.noStaleMarkers", false, "Set this flag to true if the database doesn't contain Prometheus stale markers, "+
 		"so there is no need in spending additional CPU time on its handling. Staleness markers may exist only in data obtained from Prometheus scrape targets")
-	minWindowForInstantRollupOptimization = flagutil.NewDuration("search.minWindowForInstantRollupOptimization", "6h", "Enable cache-based optimization for repeated queries "+
+	minWindowForInstantRollupOptimization = flagutil.NewDuration("search.minWindowForInstantRollupOptimization", "3h", "Enable cache-based optimization for repeated queries "+
 		"to /api/v1/query (aka instant queries), which contain rollup functions with lookbehind window exceeding the given value")
 )

@@ -259,7 +260,7 @@ func getTimestamps(start, end, step int64, maxPointsPerSeries int) []int64 {
 func evalExpr(qt *querytracer.Tracer, ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
 	if qt.Enabled() {
 		query := string(e.AppendString(nil))
-		query = bytesutil.LimitStringLen(query, 300)
+		query = stringsutil.LimitStringLen(query, 300)
 		mayCache := ec.mayCache()
 		qt = qt.NewChild("eval: query=%s, timeRange=%s, step=%d, mayCache=%v", query, ec.timeRangeString(), ec.Step, mayCache)
 	}
@@ -1084,6 +1085,59 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
 		}
 		return offset >= maxOffset
 	}
+	deleteCachedSeries := func(qt *querytracer.Tracer) {
+		rollupResultCacheV.DeleteInstantValues(qt, expr, window, ec.Step, ec.EnforcedTagFilterss)
+	}
+	getCachedSeries := func(qt *querytracer.Tracer) ([]*timeseries, int64, error) {
+	again:
+		offset := int64(0)
+		tssCached := rollupResultCacheV.GetInstantValues(qt, expr, window, ec.Step, ec.EnforcedTagFilterss)
+		ec.QueryStats.addSeriesFetched(len(tssCached))
+		if len(tssCached) == 0 {
+			// Cache miss. Re-populate the missing data.
+			start := int64(fasttime.UnixTimestamp()*1000) - cacheTimestampOffset.Milliseconds()
+			offset = timestamp - start
+			if offset < 0 {
+				start = timestamp
+				offset = 0
+			}
+			if tooBigOffset(offset) {
+				qt.Printf("cannot apply instant rollup optimization because the -search.cacheTimestampOffset=%s is too big "+
+					"for the requested time=%s and window=%d", cacheTimestampOffset, storage.TimestampToHumanReadableFormat(timestamp), window)
+				tss, err := evalAt(qt, timestamp, window)
+				return tss, 0, err
+			}
+			qt.Printf("calculating the rollup at time=%s, because it is missing in the cache", storage.TimestampToHumanReadableFormat(start))
+			tss, err := evalAt(qt, start, window)
+			if err != nil {
+				return nil, 0, err
+			}
+			if hasDuplicateSeries(tss) {
+				qt.Printf("cannot apply instant rollup optimization because the result contains duplicate series")
+				tss, err := evalAt(qt, timestamp, window)
+				return tss, 0, err
+			}
+			rollupResultCacheV.PutInstantValues(qt, expr, window, ec.Step, ec.EnforcedTagFilterss, tss)
+			return tss, offset, nil
+		}
+		// Cache hit. Verify whether it is OK to use the cached data.
+		offset = timestamp - tssCached[0].Timestamps[0]
+		if offset < 0 {
+			qt.Printf("do not apply instant rollup optimization because the cached values have bigger timestamp=%s than the requested one=%s",
+				storage.TimestampToHumanReadableFormat(tssCached[0].Timestamps[0]), storage.TimestampToHumanReadableFormat(timestamp))
+			// Delete the outdated cached values, so the cache could be re-populated with newer values.
+			deleteCachedSeries(qt)
+			goto again
+		}
+		if tooBigOffset(offset) {
+			qt.Printf("do not apply instant rollup optimization because the offset=%d between the requested timestamp "+
+				"and the cached values is too big compared to window=%d", offset, window)
+			// Delete the outdated cached values, so the cache could be re-populated with newer values.
+			deleteCachedSeries(qt)
+			goto again
+		}
+		return tssCached, offset, nil
+	}

 	if !ec.mayCache() {
 		qt.Printf("do not apply instant rollup optimization because of disabled cache")
|
@@ -1159,6 +1213,130 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
 		},
 	}
 	return evalExpr(qt, ec, be)
+case "max_over_time":
+	if iafc != nil {
+		if strings.ToLower(iafc.ae.Name) != "max" {
+			qt.Printf("do not apply instant rollup optimization for non-max incremental aggregate %s()", iafc.ae.Name)
+			return evalAt(qt, timestamp, window)
+		}
+	}
+
+	// Calculate
+	//
+	// max_over_time(m[window] @ timestamp)
+	//
+	// as the maximum of
+	//
+	// - max_over_time(m[window] @ (timestamp-offset))
+	// - max_over_time(m[offset] @ timestamp)
+	//
+	// if max_over_time(m[offset] @ (timestamp-window)) < max_over_time(m[window] @ (timestamp-offset))
+	// otherwise do not apply the optimization
+	//
+	// where
+	//
+	// - max_over_time(m[window] @ (timestamp-offset)) is obtained from cache
+	// - max_over_time(m[offset] @ timestamp) and max_over_time(m[offset] @ (timestamp-window)) are calculated from the storage
+	// These rollups are calculated faster than max_over_time(m[window]) because offset is smaller than window.
+	qtChild := qt.NewChild("optimized calculation for instant rollup %s at time=%s with lookbehind window=%d",
+		expr.AppendString(nil), storage.TimestampToHumanReadableFormat(timestamp), window)
+	defer qtChild.Done()
+
+	tssCached, offset, err := getCachedSeries(qtChild)
+	if err != nil {
+		return nil, err
+	}
+	if offset == 0 {
+		return tssCached, nil
+	}
+	// Calculate max_over_time(m[offset] @ timestamp)
+	tssStart, err := evalAt(qtChild, timestamp, offset)
+	if err != nil {
+		return nil, err
+	}
+	if hasDuplicateSeries(tssStart) {
+		qtChild.Printf("cannot apply instant rollup optimization, since tssStart contains duplicate series")
+		return evalAt(qtChild, timestamp, window)
+	}
+	// Calculate max_over_time(m[offset] @ (timestamp - window))
+	tssEnd, err := evalAt(qtChild, timestamp-window, offset)
+	if err != nil {
+		return nil, err
+	}
+	if hasDuplicateSeries(tssEnd) {
+		qtChild.Printf("cannot apply instant rollup optimization, since tssEnd contains duplicate series")
+		return evalAt(qtChild, timestamp, window)
+	}
+	// Calculate the result
+	tss, ok := getMaxInstantValues(qtChild, tssCached, tssStart, tssEnd)
+	if !ok {
+		qtChild.Printf("cannot apply instant rollup optimization, since tssEnd contains bigger values than tssCached")
+		deleteCachedSeries(qtChild)
+		return evalAt(qt, timestamp, window)
+	}
+	return tss, nil
+case "min_over_time":
+	if iafc != nil {
+		if strings.ToLower(iafc.ae.Name) != "min" {
+			qt.Printf("do not apply instant rollup optimization for non-min incremental aggregate %s()", iafc.ae.Name)
+			return evalAt(qt, timestamp, window)
+		}
+	}
+
+	// Calculate
+	//
+	// min_over_time(m[window] @ timestamp)
+	//
+	// as the minimum of
+	//
+	// - min_over_time(m[window] @ (timestamp-offset))
+	// - min_over_time(m[offset] @ timestamp)
+	//
+	// if min_over_time(m[offset] @ (timestamp-window)) > min_over_time(m[window] @ (timestamp-offset))
+	// otherwise do not apply the optimization
+	//
+	// where
+	//
+	// - min_over_time(m[window] @ (timestamp-offset)) is obtained from cache
+	// - min_over_time(m[offset] @ timestamp) and min_over_time(m[offset] @ (timestamp-window)) are calculated from the storage
+	// These rollups are calculated faster than min_over_time(m[window]) because offset is smaller than window.
+	qtChild := qt.NewChild("optimized calculation for instant rollup %s at time=%s with lookbehind window=%d",
+		expr.AppendString(nil), storage.TimestampToHumanReadableFormat(timestamp), window)
+	defer qtChild.Done()
+
+	tssCached, offset, err := getCachedSeries(qtChild)
+	if err != nil {
+		return nil, err
+	}
+	if offset == 0 {
+		return tssCached, nil
+	}
+	// Calculate min_over_time(m[offset] @ timestamp)
+	tssStart, err := evalAt(qtChild, timestamp, offset)
+	if err != nil {
+		return nil, err
+	}
+	if hasDuplicateSeries(tssStart) {
+		qtChild.Printf("cannot apply instant rollup optimization, since tssStart contains duplicate series")
+		return evalAt(qtChild, timestamp, window)
+	}
+	// Calculate min_over_time(m[offset] @ (timestamp - window))
+	tssEnd, err := evalAt(qtChild, timestamp-window, offset)
+	if err != nil {
+		return nil, err
+	}
+	if hasDuplicateSeries(tssEnd) {
+		qtChild.Printf("cannot apply instant rollup optimization, since tssEnd contains duplicate series")
+		return evalAt(qtChild, timestamp, window)
+	}
+	// Calculate the result
+	tss, ok := getMinInstantValues(qtChild, tssCached, tssStart, tssEnd)
+	if !ok {
+		qtChild.Printf("cannot apply instant rollup optimization, since tssEnd contains smaller values than tssCached")
+		deleteCachedSeries(qtChild)
+		return evalAt(qt, timestamp, window)
+	}
+	return tss, nil
 case
 	"count_eq_over_time",
 	"count_gt_over_time",
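The cache-reuse condition spelled out in the comments above is easiest to verify on plain numbers. Below is a minimal, self-contained Go sketch of the `max_over_time` decomposition (all sample values are invented, and `maxOf` is a local helper, not VictoriaMetrics code): if the offset-wide slice that leaves the window did not hold the cached maximum, the cached value can be combined with the entering slice instead of re-scanning the whole window.

```go
package main

import "fmt"

func maxOf(xs ...float64) float64 {
	m := xs[0]
	for _, x := range xs[1:] {
		if x > m {
			m = x
		}
	}
	return m
}

func main() {
	// Offset-wide slices of the lookbehind window, oldest to newest:
	leaving := []float64{3, 4} // (t-window-offset, t-window]: covered by tssEnd
	kept := []float64{9, 5}    // (t-window, t-offset]: inside both windows
	entering := []float64{6}   // (t-offset, t]: covered by tssStart

	cachedWin := append(append([]float64{}, leaving...), kept...)
	cached := maxOf(cachedWin...) // max_over_time(m[window] @ (t-offset)), from cache
	tssEnd := maxOf(leaving...)
	tssStart := maxOf(entering...)

	if tssEnd < cached {
		// The leaving slice didn't hold the cached maximum, so dropping it
		// cannot lower the maximum below the cached value.
		fast := maxOf(cached, tssStart)
		slow := maxOf(append(append([]float64{}, kept...), entering...)...)
		fmt.Println(fast == slow) // true
	}
}
```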
@@ -1191,65 +1369,33 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
 		expr.AppendString(nil), storage.TimestampToHumanReadableFormat(timestamp), window)
 	defer qtChild.Done()
 
-again:
-	offset := int64(0)
-	tssCached := rollupResultCacheV.GetInstantValues(qtChild, expr, window, ec.Step, ec.EnforcedTagFilterss)
-	ec.QueryStats.addSeriesFetched(len(tssCached))
-	if len(tssCached) == 0 {
-		// Cache miss. Re-populate it
-		start := int64(fasttime.UnixTimestamp()*1000) - cacheTimestampOffset.Milliseconds()
-		offset = timestamp - start
-		if offset < 0 {
-			start = timestamp
-			offset = 0
-		}
-		if tooBigOffset(offset) {
-			qtChild.Printf("cannot apply instant rollup optimization because the -search.cacheTimestampOffset=%s is too big "+
-				"for the requested time=%s and window=%d", cacheTimestampOffset, storage.TimestampToHumanReadableFormat(timestamp), window)
-			return evalAt(qtChild, timestamp, window)
-		}
-		qtChild.Printf("calculating the rollup at time=%s, because it is missing in the cache", storage.TimestampToHumanReadableFormat(start))
-		tss, err := evalAt(qtChild, start, window)
-		if err != nil {
-			return nil, err
-		}
-		rollupResultCacheV.PutInstantValues(qtChild, expr, window, ec.Step, ec.EnforcedTagFilterss, tss)
-		tssCached = tss
-	} else {
-		offset = timestamp - tssCached[0].Timestamps[0]
-		if offset < 0 {
-			qtChild.Printf("do not apply instant rollup optimization because the cached values have bigger timestamp=%s than the requested one=%s",
-				storage.TimestampToHumanReadableFormat(tssCached[0].Timestamps[0]), storage.TimestampToHumanReadableFormat(timestamp))
-			// Delete the outdated cached values, so the cache could be re-populated with newer values.
-			rollupResultCacheV.DeleteInstantValues(qtChild, expr, window, ec.Step, ec.EnforcedTagFilterss)
-			goto again
-		}
-		if tooBigOffset(offset) {
-			qtChild.Printf("do not apply instant rollup optimization because the offset=%d between the requested timestamp "+
-				"and the cached values is too big comparing to window=%d", offset, window)
-			// Delete the outdated cached values, so the cache could be re-populated with newer values.
-			rollupResultCacheV.DeleteInstantValues(qtChild, expr, window, ec.Step, ec.EnforcedTagFilterss)
-			goto again
-		}
+	tssCached, offset, err := getCachedSeries(qtChild)
+	if err != nil {
+		return nil, err
 	}
 	if offset == 0 {
-		qtChild.Printf("return cached values, since they have the requested timestamp=%s", storage.TimestampToHumanReadableFormat(timestamp))
 		return tssCached, nil
 	}
-	// Calculate count_over_time(m[offset] @ timestamp)
+	// Calculate rf(m[offset] @ timestamp)
 	tssStart, err := evalAt(qtChild, timestamp, offset)
 	if err != nil {
 		return nil, err
 	}
-	// Calculate count_over_time(m[offset] @ (timestamp - window))
+	if hasDuplicateSeries(tssStart) {
+		qtChild.Printf("cannot apply instant rollup optimization, since tssStart contains duplicate series")
+		return evalAt(qtChild, timestamp, window)
+	}
+	// Calculate rf(m[offset] @ (timestamp - window))
 	tssEnd, err := evalAt(qtChild, timestamp-window, offset)
 	if err != nil {
 		return nil, err
 	}
-	tss, err := mergeInstantValues(qtChild, tssCached, tssStart, tssEnd)
-	if err != nil {
-		return nil, fmt.Errorf("cannot merge instant series: %w", err)
+	if hasDuplicateSeries(tssEnd) {
+		qtChild.Printf("cannot apply instant rollup optimization, since tssEnd contains duplicate series")
+		return evalAt(qtChild, timestamp, window)
 	}
+	// Calculate the result
+	tss := getSumInstantValues(qtChild, tssCached, tssStart, tssEnd)
 	return tss, nil
 default:
 	qt.Printf("instant rollup optimization isn't implemented for %s()", funcName)
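For the additive rollups handled by this branch (the `count_*_over_time` family), the identity behind `getSumInstantValues` is simply: rollup over (t-w, t] = cached rollup over (t-w-o, t-o] + the slice entering the window - the slice leaving it. A toy check in Go (all values invented; `sumOf` is a local helper, not VictoriaMetrics code):

```go
package main

import "fmt"

func sumOf(xs []float64) float64 {
	s := 0.0
	for _, x := range xs {
		s += x
	}
	return s
}

func main() {
	leaving := []float64{1, 2} // (t-w-o, t-w]: covered by tssEnd
	kept := []float64{3, 4}    // (t-w, t-o]: inside both windows
	entering := []float64{5}   // (t-o, t]: covered by tssStart

	cached := sumOf(append(append([]float64{}, leaving...), kept...)) // from cache
	fast := cached + sumOf(entering) - sumOf(leaving)
	slow := sumOf(append(append([]float64{}, kept...), entering...)) // direct rollup
	fmt.Println(fast == slow)                                        // true
}
```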
@@ -1257,9 +1403,112 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
 	}
 }
 
-// mergeInstantValues calculates tssCached + tssStart - tssEnd
-func mergeInstantValues(qt *querytracer.Tracer, tssCached, tssStart, tssEnd []*timeseries) ([]*timeseries, error) {
-	qt = qt.NewChild("merge instant values across series; cached=%d, start=%d, end=%d", len(tssCached), len(tssStart), len(tssEnd))
+func hasDuplicateSeries(tss []*timeseries) bool {
+	if len(tss) <= 1 {
+		return false
+	}
+
+	m := make(map[string]struct{}, len(tss))
+	bb := bbPool.Get()
+	defer bbPool.Put(bb)
+
+	for _, ts := range tss {
+		bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
+		if _, ok := m[string(bb.B)]; ok {
+			return true
+		}
+		m[string(bb.B)] = struct{}{}
+	}
+	return false
+}
+
+func getMinInstantValues(qt *querytracer.Tracer, tssCached, tssStart, tssEnd []*timeseries) ([]*timeseries, bool) {
+	qt = qt.NewChild("calculate the minimum for instant values across series; cached=%d, start=%d, end=%d", len(tssCached), len(tssStart), len(tssEnd))
+	defer qt.Done()
+
+	getMin := func(a, b float64) float64 {
+		if a < b {
+			return a
+		}
+		return b
+	}
+	tss, ok := getMinMaxInstantValues(tssCached, tssStart, tssEnd, getMin)
+	qt.Printf("resulting series=%d; ok=%v", len(tss), ok)
+	return tss, ok
+}
+
+func getMaxInstantValues(qt *querytracer.Tracer, tssCached, tssStart, tssEnd []*timeseries) ([]*timeseries, bool) {
+	qt = qt.NewChild("calculate the maximum for instant values across series; cached=%d, start=%d, end=%d", len(tssCached), len(tssStart), len(tssEnd))
+	defer qt.Done()
+
+	getMax := func(a, b float64) float64 {
+		if a > b {
+			return a
+		}
+		return b
+	}
+	tss, ok := getMinMaxInstantValues(tssCached, tssStart, tssEnd, getMax)
+	qt.Printf("resulting series=%d", len(tss))
+	return tss, ok
+}
+
+func getMinMaxInstantValues(tssCached, tssStart, tssEnd []*timeseries, f func(a, b float64) float64) ([]*timeseries, bool) {
+	assertInstantValues(tssCached)
+	assertInstantValues(tssStart)
+	assertInstantValues(tssEnd)
+
+	bb := bbPool.Get()
+	defer bbPool.Put(bb)
+
+	m := make(map[string]*timeseries, len(tssCached))
+	for _, ts := range tssCached {
+		bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
+		if _, ok := m[string(bb.B)]; ok {
+			logger.Panicf("BUG: duplicate series found: %s", &ts.MetricName)
+		}
+		m[string(bb.B)] = ts
+	}
+
+	mStart := make(map[string]*timeseries, len(tssStart))
+	for _, ts := range tssStart {
+		bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
+		if _, ok := mStart[string(bb.B)]; ok {
+			logger.Panicf("BUG: duplicate series found: %s", &ts.MetricName)
+		}
+		mStart[string(bb.B)] = ts
+		tsCached := m[string(bb.B)]
+		if tsCached != nil && !math.IsNaN(tsCached.Values[0]) {
+			if !math.IsNaN(ts.Values[0]) {
+				tsCached.Values[0] = f(ts.Values[0], tsCached.Values[0])
+			}
+		} else {
+			m[string(bb.B)] = ts
+		}
+	}
+
+	for _, ts := range tssEnd {
+		bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
+		tsCached := m[string(bb.B)]
+		if tsCached != nil && !math.IsNaN(tsCached.Values[0]) && !math.IsNaN(ts.Values[0]) {
+			if ts.Values[0] == f(ts.Values[0], tsCached.Values[0]) {
+				tsStart := mStart[string(bb.B)]
+				if tsStart == nil || math.IsNaN(tsStart.Values[0]) || tsStart.Values[0] != f(ts.Values[0], tsStart.Values[0]) {
+					return nil, false
+				}
+			}
+		}
+	}
+
+	rvs := make([]*timeseries, 0, len(m))
+	for _, ts := range m {
+		rvs = append(rvs, ts)
+	}
+	return rvs, true
+}
+
+// getSumInstantValues calculates tssCached + tssStart - tssEnd
+func getSumInstantValues(qt *querytracer.Tracer, tssCached, tssStart, tssEnd []*timeseries) []*timeseries {
+	qt = qt.NewChild("calculate the sum for instant values across series; cached=%d, start=%d, end=%d", len(tssCached), len(tssStart), len(tssEnd))
 	defer qt.Done()
 
 	assertInstantValues(tssCached)
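Why the duplicate check matters: the merge helpers above match series across `tssCached`, `tssStart` and `tssEnd` by their canonical (sorted-label) metric name, so a duplicate name within one input makes the merge ambiguous and the fast path bails out. A minimal analogue in Go, with plain strings standing in for marshaled metric names (a sketch, not VictoriaMetrics code):

```go
package main

import "fmt"

// hasDuplicateKeys reports whether any canonical series key repeats.
func hasDuplicateKeys(keys []string) bool {
	seen := make(map[string]struct{}, len(keys))
	for _, k := range keys {
		if _, ok := seen[k]; ok {
			return true
		}
		seen[k] = struct{}{}
	}
	return false
}

func main() {
	fmt.Println(hasDuplicateKeys([]string{`{job="a"}`, `{job="b"}`})) // false
	fmt.Println(hasDuplicateKeys([]string{`{job="a"}`, `{job="a"}`})) // true: merge would be ambiguous
}
```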
@@ -1272,8 +1521,8 @@ func mergeInstantValues(qt *querytracer.Tracer, tssCached, tssStart, tssEnd []*t
 	for _, ts := range tssCached {
 		bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
-		if tsExisting := m[string(bb.B)]; tsExisting != nil {
-			return nil, fmt.Errorf("duplicate series found: %s", &ts.MetricName)
+		if _, ok := m[string(bb.B)]; ok {
+			logger.Panicf("BUG: duplicate series found: %s", &ts.MetricName)
 		}
 		m[string(bb.B)] = ts
 	}
@@ -1305,7 +1554,7 @@ func mergeInstantValues(qt *querytracer.Tracer, tssCached, tssStart, tssEnd []*t
 		rvs = append(rvs, ts)
 	}
 	qt.Printf("resulting series=%d", len(rvs))
-	return rvs, nil
+	return rvs
 }
 
 func assertInstantValues(tss []*timeseries) {
@@ -955,11 +955,11 @@ func newRollupHoltWinters(args []interface{}) (rollupFunc, error) {
 			return rfa.prevValue
 		}
 		sf := sfs[rfa.idx]
-		if sf <= 0 || sf >= 1 {
+		if sf < 0 || sf > 1 {
 			return nan
 		}
 		tf := tfs[rfa.idx]
-		if tf <= 0 || tf >= 1 {
+		if tf < 0 || tf > 1 {
 			return nan
 		}
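The relaxed bounds make the boundary smoothing factors legal: with double exponential smoothing, sf=1 tracks the raw samples and sf=0 freezes the initial level, both degenerate but well-defined, so only values outside [0, 1] yield NaN (which is what the updated test expectations below exercise). A simplified sketch of the recurrence in Go; initialization details differ in the real rollup, so treat this as illustrative only:

```go
package main

import (
	"fmt"
	"math"
)

// holtWinters applies double exponential smoothing:
//   s[i] = sf*v[i] + (1-sf)*(s[i-1]+t[i-1])
//   t[i] = tf*(s[i]-s[i-1]) + (1-tf)*t[i-1]
func holtWinters(values []float64, sf, tf float64) float64 {
	if len(values) < 2 || sf < 0 || sf > 1 || tf < 0 || tf > 1 {
		return math.NaN()
	}
	s := values[0]             // smoothed level
	t := values[1] - values[0] // initial trend estimate
	for _, v := range values[1:] {
		sPrev := s
		s = sf*v + (1-sf)*(s+t)
		t = tf*(s-sPrev) + (1-tf)*t
	}
	return s
}

func main() {
	vals := []float64{10, 12, 11, 13}
	fmt.Println(holtWinters(vals, 0.5, 0.5)) // finite smoothed value
	fmt.Println(holtWinters(vals, 1, 0.5))   // finite: sf=1 is now allowed
	fmt.Println(holtWinters(vals, 2, 0.5))   // NaN: sf out of range
}
```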
@@ -17,6 +17,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
 	"github.com/VictoriaMetrics/fastcache"
 	"github.com/VictoriaMetrics/metrics"
@@ -205,7 +206,7 @@ func ResetRollupResultCache() {
 func (rrc *rollupResultCache) GetInstantValues(qt *querytracer.Tracer, expr metricsql.Expr, window, step int64, etfss [][]storage.TagFilter) []*timeseries {
 	if qt.Enabled() {
 		query := string(expr.AppendString(nil))
-		query = bytesutil.LimitStringLen(query, 300)
+		query = stringsutil.LimitStringLen(query, 300)
 		qt = qt.NewChild("rollup cache get instant values: query=%s, window=%d, step=%d", query, window, step)
 		defer qt.Done()
 	}
@@ -227,7 +228,7 @@ func (rrc *rollupResultCache) GetInstantValues(qt *querytracer.Tracer, expr metr
 func (rrc *rollupResultCache) PutInstantValues(qt *querytracer.Tracer, expr metricsql.Expr, window, step int64, etfss [][]storage.TagFilter, tss []*timeseries) {
 	if qt.Enabled() {
 		query := string(expr.AppendString(nil))
-		query = bytesutil.LimitStringLen(query, 300)
+		query = stringsutil.LimitStringLen(query, 300)
 		startStr := ""
 		if len(tss) > 0 {
 			startStr = storage.TimestampToHumanReadableFormat(tss[0].Timestamps[0])
@@ -260,7 +261,7 @@ func (rrc *rollupResultCache) DeleteInstantValues(qt *querytracer.Tracer, expr m
 
 	if qt.Enabled() {
 		query := string(expr.AppendString(nil))
-		query = bytesutil.LimitStringLen(query, 300)
+		query = stringsutil.LimitStringLen(query, 300)
 		qt.Printf("rollup result cache delete instant values: query=%s, window=%d, step=%d", query, window, step)
 	}
 }
@@ -268,7 +269,7 @@ func (rrc *rollupResultCache) DeleteInstantValues(qt *querytracer.Tracer, expr m
 func (rrc *rollupResultCache) GetSeries(qt *querytracer.Tracer, ec *EvalConfig, expr metricsql.Expr, window int64) (tss []*timeseries, newStart int64) {
 	if qt.Enabled() {
 		query := string(expr.AppendString(nil))
-		query = bytesutil.LimitStringLen(query, 300)
+		query = stringsutil.LimitStringLen(query, 300)
 		qt = qt.NewChild("rollup cache get series: query=%s, timeRange=%s, window=%d, step=%d", query, ec.timeRangeString(), window, ec.Step)
 		defer qt.Done()
 	}
@@ -353,7 +354,7 @@ var resultBufPool bytesutil.ByteBufferPool
 func (rrc *rollupResultCache) PutSeries(qt *querytracer.Tracer, ec *EvalConfig, expr metricsql.Expr, window int64, tss []*timeseries) {
 	if qt.Enabled() {
 		query := string(expr.AppendString(nil))
-		query = bytesutil.LimitStringLen(query, 300)
+		query = stringsutil.LimitStringLen(query, 300)
 		qt = qt.NewChild("rollup cache put series: query=%s, timeRange=%s, step=%d, window=%d, series=%d", query, ec.timeRangeString(), ec.Step, window, len(tss))
 		defer qt.Done()
 	}
@@ -470,12 +470,12 @@ func TestRollupHoltWinters(t *testing.T) {
 	}
 
 	f(-1, 0.5, nan)
-	f(0, 0.5, nan)
+	f(0, 0.5, -856)
-	f(1, 0.5, nan)
+	f(1, 0.5, 34)
 	f(2, 0.5, nan)
 	f(0.5, -1, nan)
-	f(0.5, 0, nan)
+	f(0.5, 0, -54.1474609375)
-	f(0.5, 1, nan)
+	f(0.5, 1, 25.25)
 	f(0.5, 2, nan)
 	f(0.5, 0.5, 34.97794532775879)
 	f(0.1, 0.5, -131.30529492371622)
@@ -1,5 +1,4 @@
 //go:build go1.15
-// +build go1.15
 
 package promql
@@ -1,13 +1,13 @@
 {
   "files": {
-    "main.css": "./static/css/main.b863450b.css",
-    "main.js": "./static/js/main.5566464c.js",
+    "main.css": "./static/css/main.349e6522.css",
+    "main.js": "./static/js/main.c93073e5.js",
     "static/js/522.da77e7b3.chunk.js": "./static/js/522.da77e7b3.chunk.js",
     "static/media/MetricsQL.md": "./static/media/MetricsQL.8644fd7c964802dd34a9.md",
     "index.html": "./index.html"
   },
   "entrypoints": [
-    "static/css/main.b863450b.css",
-    "static/js/main.5566464c.js"
+    "static/css/main.349e6522.css",
+    "static/js/main.c93073e5.js"
   ]
 }
Binary file not shown (image, 15 KiB, removed).
@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.5566464c.js"></script><link href="./static/css/main.b863450b.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.c93073e5.js"></script><link href="./static/css/main.349e6522.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
app/vmselect/vmui/static/css/main.349e6522.css (new file; diff suppressed because one or more lines are too long)
app/vmselect/vmui/static/js/main.c93073e5.js (new file; diff suppressed because one or more lines are too long)
@@ -7,7 +7,7 @@
 /*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */
 
 /**
- * @remix-run/router v1.7.2
+ * @remix-run/router v1.10.0
  *
  * Copyright (c) Remix Software Inc.
  *
@@ -18,7 +18,7 @@
  */
 
 /**
- * React Router DOM v6.14.2
+ * React Router DOM v6.17.0
  *
  * Copyright (c) Remix Software Inc.
  *
@@ -29,7 +29,7 @@
  */
 
 /**
- * React Router v6.14.2
+ * React Router v6.17.0
  *
  * Copyright (c) Remix Software Inc.
  *
@@ -1,4 +1,4 @@
-FROM golang:1.21.3 as build-web-stage
+FROM golang:1.21.4 as build-web-stage
 COPY build /build
 
 WORKDIR /build
 
app/vmui/packages/vmui/package-lock.json (generated, 576 lines changed; diff suppressed because it is too large)
Binary file not shown (image, 15 KiB, removed).
@@ -9,6 +9,9 @@ import { TuneIcon } from "../../Main/Icons";
 import Button from "../../Main/Button/Button";
 import classNames from "classnames";
 import useBoolean from "../../../hooks/useBoolean";
+import useEventListener from "../../../hooks/useEventListener";
+import Tooltip from "../../Main/Tooltip/Tooltip";
+import { AUTOCOMPLETE_KEY } from "../../Main/ShortcutKeys/constants/keyList";
 
 const AdditionalSettingsControls: FC<{isMobile?: boolean}> = ({ isMobile }) => {
   const { autocomplete } = useQueryState();
@@ -29,6 +32,16 @@ const AdditionalSettingsControls: FC<{isMobile?: boolean}> = ({ isMobile }) => {
     queryDispatch({ type: "TOGGLE_AUTOCOMPLETE" });
   };
 
+  const handleKeyDown = (e: KeyboardEvent) => {
+    const { key, ctrlKey, metaKey, shiftKey } = e;
+    if (key === "a" && shiftKey && (ctrlKey || metaKey)) {
+      e.preventDefault();
+      onChangeAutocomplete();
+    }
+  };
+
+  useEventListener("keydown", handleKeyDown);
+
   return (
     <div
       className={classNames({
@@ -36,12 +49,14 @@ const AdditionalSettingsControls: FC<{isMobile?: boolean}> = ({ isMobile }) => {
         "vm-additional-settings_mobile": isMobile
       })}
     >
-      <Switch
-        label={"Autocomplete"}
-        value={autocomplete}
-        onChange={onChangeAutocomplete}
-        fullWidth={isMobile}
-      />
+      <Tooltip title={AUTOCOMPLETE_KEY}>
+        <Switch
+          label={"Autocomplete"}
+          value={autocomplete}
+          onChange={onChangeAutocomplete}
+          fullWidth={isMobile}
+        />
+      </Tooltip>
       <Switch
         label={"Disable cache"}
         value={nocache}
@@ -12,7 +12,8 @@
   }
 
   &__inputs {
-    display: flex;
+    display: grid;
+    grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
     flex-wrap: wrap;
     align-items: center;
     justify-content: space-between;
@@ -21,9 +22,5 @@
     &_mobile {
       gap: $padding-small;
     }
-
-    div {
-      flex-grow: 1;
-    }
   }
 }
@@ -70,7 +70,7 @@ with (q = ${queryBase}) (
 )`;
 }, [name, job, instance, rateEnabled, isBucket]);
 
-const { isLoading, graphData, error, warning, isHistogram } = useFetchQuery({
+const { isLoading, graphData, error, queryErrors, warning, isHistogram } = useFetchQuery({
   predefinedQuery: [query],
   visible: true,
   customStep: step,
@@ -98,6 +98,7 @@ with (q = ${queryBase}) (
 >
   {isLoading && <Spinner />}
   {error && <Alert variant="error">{error}</Alert>}
+  {queryErrors[0] && <Alert variant="error">{queryErrors[0]}</Alert>}
   {warning && <Alert variant="warning">
     <div className="vm-explore-metrics-graph__warning">
       <p>{warning}</p>
@@ -5,6 +5,8 @@ import GraphTips from "../../../Chart/GraphTips/GraphTips";
 
 const ctrlMeta = <code>{isMacOs() ? "Cmd" : "Ctrl"}</code>;
 
+export const AUTOCOMPLETE_KEY = <>{ctrlMeta} + <code>Shift</code> + <code>A</code></>;
+
 const keyList = [
   {
     title: "Query",
@@ -28,6 +30,10 @@ const keyList = [
       {
         keys: <>{ctrlMeta} + <code>click</code> by <VisibilityIcon/></>,
         description: "Toggle multiple queries"
+      },
+      {
+        keys: AUTOCOMPLETE_KEY,
+        description: "Toggle autocomplete"
       }
     ]
   },
@@ -9,11 +9,13 @@
 &_textarea:after {
   content: attr(data-replicated-value) " ";
   white-space: pre-wrap;
+  word-wrap: break-word;
   visibility: hidden;
 }
 
 &__input,
 &::after {
+  font-family: $font-family-monospace;
   width: 100%;
   padding: $padding-small $padding-global;
   border: $border-divider;
@@ -22,6 +24,7 @@
   line-height: 18px;
   grid-area: 1 / 1 / 2 / 2;
   overflow: hidden;
+  box-sizing: border-box;
 }
 
 &__label,
@@ -84,7 +87,6 @@
 }
 
 &__input {
-  font-family: $font-family-monospace;
   display: block;
   border-radius: $border-radius-small;
   transition: border 200ms ease;
@@ -1,10 +1,17 @@
 import { Order } from "../../pages/CardinalityPanel/Table/types";
+import dayjs from "dayjs";
+
+const dateColumns = ["date", "timestamp", "time"];
 
 export function descendingComparator<T>(a: T, b: T, orderBy: keyof T) {
-  if (b[orderBy] < a[orderBy]) {
+  const valueA = a[orderBy];
+  const valueB = b[orderBy];
+  const parsedValueA = dateColumns.includes(`${orderBy}`) ? dayjs(`${valueA}`).unix() : valueA;
+  const parsedValueB = dateColumns.includes(`${orderBy}`) ? dayjs(`${valueB}`).unix() : valueB;
+  if (parsedValueB < parsedValueA) {
     return -1;
   }
-  if (b[orderBy] > a[orderBy]) {
+  if (parsedValueB > parsedValueA) {
     return 1;
   }
   return 0;
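The motivation for parsing date columns before comparing is that formatted date strings do not sort chronologically. A quick illustration of the pitfall in Go (the layout and values below are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	a, b := "09 Feb 2023", "10 Jan 2023"
	ta, _ := time.Parse("02 Jan 2006", a)
	tb, _ := time.Parse("02 Jan 2006", b)
	fmt.Println(a < b)         // true: lexicographic order puts a first
	fmt.Println(ta.Before(tb)) // false: chronologically a is the later date
}
```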
@@ -1,8 +1,12 @@
-import React, { useEffect, useState } from "preact/compat";
+import React, { useEffect, useState, useRef } from "preact/compat";
 import { StateUpdater } from "preact/hooks";
 import { useAppState } from "../state/common/StateContext";
 import { AutocompleteOptions } from "../components/Main/Autocomplete/Autocomplete";
 import { LabelIcon, MetricIcon, ValueIcon } from "../components/Main/Icons";
+import { useTimeState } from "../state/time/TimeStateContext";
+import { useCallback } from "react";
+import qs from "qs";
+import dayjs from "dayjs";
 
 enum TypeData {
   metric,
@@ -11,9 +15,10 @@ enum TypeData {
 }
 
 type FetchDataArgs = {
-  url: string;
+  urlSuffix: string;
   setter: StateUpdater<AutocompleteOptions[]>;
   type: TypeData;
+  params?: URLSearchParams;
 }
 
 const icons = {
@@ -22,16 +27,39 @@ const icons = {
   [TypeData.value]: <ValueIcon />,
 };
 
+const QUERY_LIMIT = 1000;
+
 export const useFetchQueryOptions = ({ metric, label }: { metric: string; label: string }) => {
   const { serverUrl } = useAppState();
+  const { period: { start, end } } = useTimeState();
 
   const [metrics, setMetrics] = useState<AutocompleteOptions[]>([]);
   const [labels, setLabels] = useState<AutocompleteOptions[]>([]);
   const [values, setValues] = useState<AutocompleteOptions[]>([]);
 
-  const fetchData = async ({ url, setter, type, }: FetchDataArgs) => {
+  const prevParams = useRef<Record<string, URLSearchParams>>({});
+
+  const getQueryParams = useCallback((params?: Record<string, string>) => {
+    const roundedStart = dayjs(start).startOf("day").valueOf();
+    const roundedEnd = dayjs(end).endOf("day").valueOf();
+
+    return new URLSearchParams({
+      ...(params || {}),
+      limit: `${QUERY_LIMIT}`,
+      start: `${roundedStart}`,
+      end: `${roundedEnd}`
+    });
+  }, [start, end]);
+
+  const isParamsEqual = (prev: URLSearchParams, next: URLSearchParams) => {
+    const queryNext = qs.parse(next.toString());
+    const queryPrev = qs.parse(prev.toString());
+    return JSON.stringify(queryPrev) === JSON.stringify(queryNext);
+  };
+
+  const fetchData = async ({ urlSuffix, setter, type, params }: FetchDataArgs) => {
     try {
-      const response = await fetch(url);
+      const response = await fetch(`${serverUrl}/api/v1/${urlSuffix}?${params}`);
       if (response.ok) {
         const { data } = await response.json() as { data: string[] };
         setter(data.map(l => ({
@@ -51,12 +79,19 @@ export const useFetchQueryOptions = ({ metric, label }: { metric: string; label:
       return;
     }
 
+    const params = getQueryParams();
+    const prev = prevParams.current.metrics || new URLSearchParams({});
+    if (isParamsEqual(params, prev)) return;
+
     fetchData({
-      url: `${serverUrl}/api/v1/label/__name__/values`,
+      urlSuffix: "label/__name__/values",
       setter: setMetrics,
-      type: TypeData.metric
+      type: TypeData.metric,
+      params
     });
-  }, [serverUrl]);
+
+    prevParams.current = { ...prevParams.current, metrics: params };
+  }, [serverUrl, getQueryParams]);
 
   useEffect(() => {
     const notFoundMetric = !metrics.find(m => m.value === metric);
@@ -65,12 +100,19 @@ export const useFetchQueryOptions = ({ metric, label }: { metric: string; label:
       return;
     }
 
+    const params = getQueryParams({ "match[]": metric });
+    const prev = prevParams.current.labels || new URLSearchParams({});
+    if (isParamsEqual(params, prev)) return;
+
     fetchData({
-      url: `${serverUrl}/api/v1/labels?match[]=${metric}`,
+      urlSuffix: "labels",
      setter: setLabels,
-      type: TypeData.label
+      type: TypeData.label,
+      params
     });
-  }, [serverUrl, metric]);
+
+    prevParams.current = { ...prevParams.current, labels: params };
+  }, [serverUrl, metric, getQueryParams]);
 
   useEffect(() => {
     const notFoundMetric = !metrics.find(m => m.value === metric);
@@ -80,12 +122,19 @@ export const useFetchQueryOptions = ({ metric, label }: { metric: string; label:
       return;
     }
 
+    const params = getQueryParams({ "match[]": metric });
+    const prev = prevParams.current.values || new URLSearchParams({});
+    if (isParamsEqual(params, prev)) return;
+
     fetchData({
-      url: `${serverUrl}/api/v1/label/${label}/values?match[]=${metric}`,
+      urlSuffix: `label/${label}/values`,
       setter: setValues,
-      type: TypeData.value
+      type: TypeData.value,
+      params
     });
-  }, [serverUrl, metric, label]);
+
+    prevParams.current = { ...prevParams.current, values: params };
+  }, [serverUrl, metric, label, getQueryParams]);
 
   return {
     metrics,
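The `prevParams`/`isParamsEqual` pattern above is a refetch guard: the request is skipped whenever the serialized query params match the previous call for the same endpoint. The same guard expressed in Go terms (names are illustrative; `url.Values.Encode()` sorts keys, giving a canonical form to compare):

```go
package main

import (
	"fmt"
	"net/url"
)

// refetchGuard remembers the last serialized params per endpoint.
type refetchGuard struct {
	prev map[string]string
}

func (g *refetchGuard) shouldFetch(endpoint string, params url.Values) bool {
	enc := params.Encode() // canonical, key-sorted serialization
	if g.prev[endpoint] == enc {
		return false // identical request already issued; skip
	}
	g.prev[endpoint] = enc
	return true
}

func main() {
	g := &refetchGuard{prev: map[string]string{}}
	p := url.Values{"match[]": {"up"}, "limit": {"1000"}}
	fmt.Println(g.shouldFetch("labels", p)) // true: first request
	fmt.Println(g.shouldFetch("labels", p)) // false: params unchanged
}
```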
@@ -1,12 +1,9 @@
-import React, { FC } from "preact/compat";
-import dayjs from "dayjs";
-import "./style.scss";
+import React, { FC, memo } from "preact/compat";
 import { CodeIcon, IssueIcon, LogoShortIcon, WikiIcon } from "../../components/Main/Icons";
-import useDeviceDetect from "../../hooks/useDeviceDetect";
+import "./style.scss";
 
-const Footer: FC = () => {
-  const { isMobile } = useDeviceDetect();
-  const copyrightYears = `2019-${dayjs().format("YYYY")}`;
+const Footer: FC = memo(() => {
+  const copyrightYears = `2019-${new Date().getFullYear()}`;
 
   return <footer className="vm-footer">
     <a
@@ -34,7 +31,7 @@ const Footer: FC = () => {
       rel="help noreferrer"
     >
       <WikiIcon/>
-      {isMobile ? "Docs" : "Documentation"}
+      Documentation
     </a>
     <a
       className="vm-link vm-footer__link"
@@ -43,12 +40,12 @@ const Footer: FC = () => {
       rel="noreferrer"
     >
       <IssueIcon/>
-      {isMobile ? "New issue" : "Create an issue"}
+      Create an issue
     </a>
     <div className="vm-footer__copyright">
       © {copyrightYears} VictoriaMetrics
     </div>
   </footer>;
-};
+});
 
 export default Footer;
@@ -8,6 +8,7 @@ import { useSearchParams } from "react-router-dom";
 import dayjs from "dayjs";
 import { DATE_FORMAT } from "../../../constants/date";
 import { getTenantIdFromUrl } from "../../../utils/tenants";
+import usePrevious from "../../../hooks/usePrevious";
 
 export const useFetchQuery = (): {
   fetchUrl?: string[],
@@ -23,6 +24,7 @@ export const useFetchQuery = (): {
   const focusLabel = searchParams.get("focusLabel");
   const topN = +(searchParams.get("topN") || 10);
   const date = searchParams.get("date") || dayjs().tz().format(DATE_FORMAT);
+  const prevDate = usePrevious(date);
 
   const { serverUrl } = useAppState();
   const [isLoading, setIsLoading] = useState(false);
@@ -76,11 +78,14 @@ export const useFetchQuery = (): {
   const urls = [
     getCardinalityInfo(serverUrl, requestParams),
     getCardinalityInfo(serverUrl, prevDayParams),
-    getCardinalityInfo(serverUrl, totalParams),
   ];
 
+  if (prevDate !== date) {
+    urls.push(getCardinalityInfo(serverUrl, totalParams));
+  }
+
   try {
-    const [resp, respPrev, respTotals] = await Promise.all(urls.map(getResponseJson));
+    const [resp, respPrev, respTotals = {}] = await Promise.all(urls.map(getResponseJson));
 
     const prevResult = { ...respPrev.data };
     const { data: dataTotal } = respTotals;
@@ -90,7 +95,7 @@ export const useFetchQuery = (): {
       totalLabelValuePairs: resp.data?.totalLabelValuePairs || resp.data?.headStats?.numLabelValuePairs || 0,
       seriesCountByLabelName: resp.data?.seriesCountByLabelName || [],
       seriesCountByFocusLabelValue: resp.data?.seriesCountByFocusLabelValue || [],
-      totalSeriesByAll: dataTotal?.totalSeries || dataTotal?.headStats?.numSeries || 0,
+      totalSeriesByAll: dataTotal?.totalSeries || dataTotal?.headStats?.numSeries || tsdbStatus.totalSeriesByAll || 0,
       totalSeriesPrev: prevResult?.totalSeries || prevResult?.headStats?.numSeries || 0,
     };
@@ -112,6 +112,7 @@ const QueryHistory: FC<Props> = ({ handleSelectQuery }) => {
         variant="text"
         onClick={handleOpenModal}
         startIcon={<ClockIcon/>}
+        ariaLabel={"Show history"}
       />
     </Tooltip>
@@ -9,6 +9,8 @@ import Alert from "../../components/Main/Alert/Alert";
 import ExploreLogsHeader from "./ExploreLogsHeader/ExploreLogsHeader";
 import "./style.scss";
 import usePrevious from "../../hooks/usePrevious";
+import { ErrorTypes } from "../../types";
+import { useState } from "react";
 
 const ExploreLogs: FC = () => {
   const { serverUrl } = useAppState();
@@ -17,9 +19,18 @@ const ExploreLogs: FC = () => {
   const [query, setQuery] = useStateSearchParams("", "query");
   const prevQuery = usePrevious(query);
   const { logs, isLoading, error, fetchLogs } = useFetchLogs(serverUrl, query);
+  const [queryError, setQueryError] = useState<ErrorTypes | string>("");
+  const [loaded, isLoaded] = useState(false);
 
   const handleRunQuery = () => {
-    fetchLogs();
+    if (!query) {
+      setQueryError(ErrorTypes.validQuery);
+      return;
+    }
+
+    fetchLogs().then(() => {
+      isLoaded(true);
+    });
     const changedQuery = prevQuery && query !== prevQuery;
     const params: Record<string, string | number> = changedQuery ? { query, page: 1 } : { query };
     setSearchParamsFromKeys(params);
@@ -29,16 +40,24 @@ const ExploreLogs: FC = () => {
     if (query) handleRunQuery();
   }, []);
 
+  useEffect(() => {
+    setQueryError("");
+  }, [query]);
+
   return (
     <div className="vm-explore-logs">
       <ExploreLogsHeader
         query={query}
+        error={queryError}
         onChange={setQuery}
         onRun={handleRunQuery}
       />
       {isLoading && <Spinner />}
       {error && <Alert variant="error">{error}</Alert>}
-      <ExploreLogsBody data={logs}/>
+      <ExploreLogsBody
+        data={logs}
+        loaded={loaded}
+      />
     </div>
   );
 };
@@ -18,7 +18,8 @@ import TableLogs from "./TableLogs";
 import GroupLogs from "./GroupLogs";
 
 export interface ExploreLogBodyProps {
-  data: Logs[]
+  data: Logs[];
+  loaded?: boolean;
 }
 
 enum DisplayType {
@@ -33,7 +34,7 @@ const tabs = [
   { label: "JSON", value: DisplayType.json, icon: <CodeIcon /> },
 ];
 
-const ExploreLogsBody: FC<ExploreLogBodyProps> = ({ data }) => {
+const ExploreLogsBody: FC<ExploreLogBodyProps> = ({ data, loaded }) => {
   const { isMobile } = useDeviceDetect();
   const { timezone } = useTimeState();
   const { setSearchParamsFromKeys } = useSearchParamsFromObject();
@@ -117,6 +118,11 @@ const ExploreLogsBody: FC<ExploreLogBodyProps> = ({ data }) => {
         "vm-explore-logs-body__table_mobile": isMobile,
       })}
     >
+      {!data.length && (
+        <div className="vm-explore-logs-body__empty">
+          {loaded ? "No logs found" : "Run query to see logs"}
+        </div>
+      )}
       {!!data.length && (
         <>
           {activeTab === DisplayType.table && (
@@ -15,6 +15,15 @@
   }
 }
 
+&__empty {
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  min-height: 120px;
+  color: $color-text-disabled;
+  text-align: center;
+}
+
 &__table {
   padding-top: $padding-medium;
   width: calc(100vw - ($padding-medium * 4) - var(--scrollbar-width));
@@ -8,11 +8,12 @@ import QueryEditor from "../../../components/Configurators/QueryEditor/QueryEdit
 
 export interface ExploreLogHeaderProps {
   query: string;
+  error?: string;
   onChange: (val: string) => void;
   onRun: () => void;
 }
 
-const ExploreLogsHeader: FC<ExploreLogHeaderProps> = ({ query, onChange, onRun }) => {
+const ExploreLogsHeader: FC<ExploreLogHeaderProps> = ({ query, error, onChange, onRun }) => {
   const { isMobile } = useDeviceDetect();
 
   return (
@@ -32,6 +33,7 @@ const ExploreLogsHeader: FC<ExploreLogHeaderProps> = ({ query, onChange, onRun }
         onEnter={onRun}
         onChange={onChange}
         label={"Log query"}
+        error={error}
       />
     </div>
     <div className="vm-explore-logs-header-bottom">
@@ -3,6 +3,7 @@ import { getQueryStringValue } from "../../utils/query-string";
 import { getFromStorage, saveToStorage } from "../../utils/storage";
 import { Theme } from "../../types";
 import { isDarkTheme } from "../../utils/theme";
+import { removeTrailingSlash } from "../../utils/url";
 
 export interface AppState {
   serverUrl: string;
@@ -20,7 +21,7 @@ export type Action =
 const tenantId = getQueryStringValue("g0.tenantID", "") as string;
 
 export const initialState: AppState = {
-  serverUrl: getDefaultServer(tenantId),
+  serverUrl: removeTrailingSlash(getDefaultServer(tenantId)),
   tenantId,
   theme: (getFromStorage("THEME") || Theme.system) as Theme,
   isDarkTheme: null
@@ -31,7 +32,7 @@ export function reducer(state: AppState, action: Action): AppState {
   case "SET_SERVER":
     return {
       ...state,
-      serverUrl: action.payload
+      serverUrl: removeTrailingSlash(action.payload)
     };
   case "SET_TENANT_ID":
     return {
@@ -8,4 +8,6 @@ export const isValidHttpUrl = (str: string): boolean => {
   }
 
   return url.protocol === "http:" || url.protocol === "https:";
 };
+
+export const removeTrailingSlash = (url: string) => url.replace(/\/$/, "");
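The reason the server URL is normalized before use: naively joining a base URL that ends in "/" with an absolute API path yields a double slash. A Go equivalent of the helper, with a quick demonstration (base URL is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// removeTrailingSlash strips a single trailing "/" from a base URL.
func removeTrailingSlash(u string) string {
	return strings.TrimSuffix(u, "/")
}

func main() {
	base := "http://localhost:8428/"
	fmt.Println(base + "/api/v1/labels")                      // http://localhost:8428//api/v1/labels
	fmt.Println(removeTrailingSlash(base) + "/api/v1/labels") // http://localhost:8428/api/v1/labels
}
```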
@ -5,12 +5,14 @@ DOCKER_NAMESPACE ?= victoriametrics
|
||||||
ROOT_IMAGE ?= alpine:3.18.4
|
ROOT_IMAGE ?= alpine:3.18.4
|
||||||
CERTS_IMAGE := alpine:3.18.4
|
CERTS_IMAGE := alpine:3.18.4
|
||||||
|
|
||||||
GO_BUILDER_IMAGE := golang:1.21.3-alpine
|
GO_BUILDER_IMAGE := golang:1.21.4-alpine
|
||||||
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
|
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
|
||||||
BASE_IMAGE := local/base:1.1.4-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
|
BASE_IMAGE := local/base:1.1.4-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
|
||||||
DOCKER_BUILD ?= docker build
|
DOCKER ?= docker
|
||||||
DOCKER_COMPOSE ?= docker compose
|
DOCKER_RUN ?= $(DOCKER) run
|
||||||
DOCKER_IMAGE_LS ?= docker image ls --format '{{.Repository}}:{{.Tag}}'
|
DOCKER_BUILD ?= $(DOCKER) build
|
||||||
|
DOCKER_COMPOSE ?= $(DOCKER) compose
|
||||||
|
DOCKER_IMAGE_LS ?= $(DOCKER) image ls --format '{{.Repository}}:{{.Tag}}'
|
||||||
|
|
||||||
package-base:
|
package-base:
|
||||||
($(DOCKER_IMAGE_LS) | grep -q '$(BASE_IMAGE)$$') \
|
($(DOCKER_IMAGE_LS) | grep -q '$(BASE_IMAGE)$$') \
|
||||||
|
@ -29,7 +31,7 @@ package-builder:
|
||||||
|
|
||||||
app-via-docker: package-builder
|
app-via-docker: package-builder
|
||||||
mkdir -p gocache-for-docker
|
mkdir -p gocache-for-docker
|
||||||
docker run --rm \
|
$(DOCKER_RUN) --rm \
|
||||||
--user $(shell id -u):$(shell id -g) \
|
--user $(shell id -u):$(shell id -g) \
|
||||||
--mount type=bind,src="$(shell pwd)",dst=/VictoriaMetrics \
|
--mount type=bind,src="$(shell pwd)",dst=/VictoriaMetrics \
|
||||||
-w /VictoriaMetrics \
|
-w /VictoriaMetrics \
|
||||||
|
@ -44,7 +46,7 @@ app-via-docker: package-builder
|
||||||
|
|
||||||
app-via-docker-windows: package-builder
|
app-via-docker-windows: package-builder
|
||||||
mkdir -p gocache-for-docker
|
mkdir -p gocache-for-docker
|
||||||
docker run --rm \
|
$(DOCKER_RUN) --rm \
|
||||||
--user $(shell id -u):$(shell id -g) \
|
--user $(shell id -u):$(shell id -g) \
|
||||||
--mount type=bind,src="$(shell pwd)",dst=/VictoriaMetrics \
|
--mount type=bind,src="$(shell pwd)",dst=/VictoriaMetrics \
|
||||||
-w /VictoriaMetrics \
|
-w /VictoriaMetrics \
|
||||||
|
@@ -72,7 +74,7 @@ publish-via-docker: \
 	app-via-docker-linux-arm64 \
 	app-via-docker-linux-ppc64le \
 	app-via-docker-linux-386
-	docker buildx build \
+	$(DOCKER) buildx build \
 		--platform=linux/amd64,linux/arm,linux/arm64,linux/ppc64le,linux/386 \
 		--build-arg certs_image=$(CERTS_IMAGE) \
 		--build-arg root_image=$(ROOT_IMAGE) \
@@ -85,7 +87,7 @@ publish-via-docker: \
 	bin
 
 run-via-docker: package-via-docker
-	docker run -it --rm \
+	$(DOCKER_RUN) -it --rm \
 		--user $(shell id -u):$(shell id -g) \
 		--net host \
 		$(DOCKER_OPTS) \
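With the new `DOCKER ?= docker` indirection above, every build and run recipe can be pointed at an alternative container engine without editing the Makefiles; for example, `make DOCKER=podman package-base` would build the base image through a docker-compatible Podman CLI (an illustrative invocation; it assumes Podman is installed and accepts the same subcommands as Docker).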
@@ -18,7 +18,7 @@ services:
       - vlogs
 
   generator:
-    image: golang:1.21.3-alpine
+    image: golang:1.21.4-alpine
     restart: always
     working_dir: /go/src/app
     volumes:
@@ -2,7 +2,7 @@ version: '3'
 
 services:
   generator:
-    image: golang:1.21.3-alpine
+    image: golang:1.21.4-alpine
     restart: always
     working_dir: /go/src/app
     volumes:
@@ -16,35 +16,44 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
 
 ## Third-party articles and slides about VictoriaMetrics
 
-* [Why Roblox Picked VictoriaMetrics for Observability Data Overhaul](https://www.datanami.com/2023/05/30/why-roblox-picked-victoriametrics-for-observability-data-overhaul/)
-* [The (Almost) Infinitely Scalable Open Source Monitoring Dream](https://www.forbes.com/sites/adrianbridgwater/2022/08/16/the-almost-infinitely-scalable-open-source-monitoring-dream/)
-* [The Agility In Cloud Observability](https://www.forbes.com/sites/adrianbridgwater/2023/07/05/the-agility-in-cloud-observability/)
-* [Monitoring at scale with Victoria Metrics](https://tech.bedrockstreaming.com/2022/09/06/monitoring-at-scale-with-victoriametrics.html)
+* [Datanami: Why Roblox Picked VictoriaMetrics for Observability Data Overhaul](https://www.datanami.com/2023/05/30/why-roblox-picked-victoriametrics-for-observability-data-overhaul/)
+* [Cloudflare: Introducing notifications for HTTP Traffic Anomalies](https://blog.cloudflare.com/introducing-http-traffic-anomalies-notifications/)
+* [Grammarly: Better, Faster, Cheaper: How Grammarly Improved Monitoring by Over 10x with VictoriaMetrics](https://www.grammarly.com/blog/engineering/monitoring-with-victoriametrics/)
+* [CERN: CMS monitoring R&D: Real-time monitoring and alerts](https://indico.cern.ch/event/877333/contributions/3696707/attachments/1972189/3281133/CMS_mon_RD_for_opInt.pdf)
+* [CERN: The CMS monitoring infrastructure and applications](https://arxiv.org/pdf/2007.03630.pdf)
+* [Forbes: The (Almost) Infinitely Scalable Open Source Monitoring Dream](https://www.forbes.com/sites/adrianbridgwater/2022/08/16/the-almost-infinitely-scalable-open-source-monitoring-dream/)
+* [Forbes: The Agility In Cloud Observability](https://www.forbes.com/sites/adrianbridgwater/2023/07/05/the-agility-in-cloud-observability/)
+* [Bedrock: Monitoring at scale with Victoria Metrics](https://tech.bedrockstreaming.com/2022/09/06/monitoring-at-scale-with-victoriametrics.html)
+* [Percona: Optimizing the Storage of Large Volumes of Metrics for a Long Time in VictoriaMetrics](https://percona.community/blog/2022/06/02/long-time-keeping-metrics-victoriametrics/)
+* [Percona: Foiled by the Firewall: A Tale of Transition From Prometheus to VictoriaMetrics](https://www.percona.com/blog/2020/12/01/foiled-by-the-firewall-a-tale-of-transition-from-prometheus-to-victoriametrics/)
+* [Percona: Observations on Better Resource Usage with Percona Monitoring and Management v2.12.0](https://www.percona.com/blog/2020/12/23/observations-on-better-resource-usage-with-percona-monitoring-and-management-v2-12-0/)
+* [Percona: How do We Keep Metrics for a Long Time in VictoriaMetrics](https://www.youtube.com/watch?v=SGZjY7xgDwE)
+* [Miro: Prometheus High Availability and Fault Tolerance strategy, long term storage with VictoriaMetrics](https://medium.com/miro-engineering/prometheus-high-availability-and-fault-tolerance-strategy-long-term-storage-with-victoriametrics-82f6f3f0409e)
+* [ZERODHA: Infrastructure monitoring with Prometheus at Zerodha](https://zerodha.tech/blog/infra-monitoring-at-zerodha/)
+* [ZERODHA: Monitoring K8S with VictoriaMetrics](https://docs.google.com/presentation/d/1g7yUyVEaAp4tPuRy-MZbPXKqJ1z78_5VKuV841aQfsg/edit)
+* [Criteo: VictoriaMetrics, a stress-free Prometheus Remote Storage for 1 Billion metrics](https://medium.com/criteo-engineering/victoriametrics-a-prometheus-remote-storage-solution-57081a3d8e61)
+* [Abios Gaming: Choosing a Time Series Database for High Cardinality Aggregations](https://abiosgaming.com/press/high-cardinality-aggregations/)
+* [Cybozu: Monitoring Kubernetes clusters with VictoriaMetrics and Grafana](https://blog.cybozu.io/entry/2021/03/18/115743)
+* [Razorpay: Scaling to trillions of metric data points](https://engineering.razorpay.com/scaling-to-trillions-of-metric-data-points-f569a5b654f2)
+* [Fly.io: Fly's Prometheus Metrics](https://fly.io/blog/measuring-fly/)
+* [Sismology: Iguana Solutions’ Monitoring System](https://medium.com/nerd-for-tech/sismology-iguana-solutions-monitoring-system-f46e4170447f)
+* [Nordic APIs: Monitoring with Prometheus, Grafana, AlertManager and VictoriaMetrics](https://nordicapis.com/api-monitoring-with-prometheus-grafana-alertmanager-and-victoriametrics/)
+* [Smarkets: How we improved our Kubernetes monitoring at Smarkets, and how you could too](https://smarketshq.com/monitoring-kubernetes-clusters-41a4b24c19e3)
+* [Mist: Kubernetes and VictoriaMetrics in Mist v4.6](https://mist.io/blog/2021-11-26-kubernetes-and-victoriametrics-in-Mist-v4-6)
+* [Kintone: Multi-tenancy monitoring system for Kubernetes cluster using VictoriaMetrics and operators](https://blog.kintone.io/entry/2021/03/31/175256)
+* [Alteos: Observability, Availability & DORA’s Research Program](https://medium.com/alteos-tech-blog/observability-availability-and-doras-research-program-85deb6680e78)
+* [Brewblox: InfluxDB to Victoria Metrics](https://www.brewblox.com/dev/decisions/20210718_victoria_metrics.html)
+* [Techetio: Evaluating Backend Options For Prometheus Metrics](https://www.techetio.com/2022/08/21/evaluating-backend-options-for-prometheus-metrics/)
+* [Asserts: Announcing Asserts](https://www.asserts.ai/blog/announcing-asserts/)
 * [Optimizing Linkerd metrics in Prometheus](https://aatarasoff.medium.com/optimizing-linkerd-metrics-in-prometheus-de607ec10f6b)
-* [Optimizing the Storage of Large Volumes of Metrics for a Long Time in VictoriaMetrics](https://percona.community/blog/2022/06/02/long-time-keeping-metrics-victoriametrics/)
-* [How do We Keep Metrics for a Long Time in VictoriaMetrics](https://www.youtube.com/watch?v=SGZjY7xgDwE)
-* [Announcing Asserts](https://www.asserts.ai/blog/announcing-asserts/)
-* [Choosing a Time Series Database for High Cardinality Aggregations](https://abiosgaming.com/press/high-cardinality-aggregations/)
-* [Scaling to trillions of metric data points](https://engineering.razorpay.com/scaling-to-trillions-of-metric-data-points-f569a5b654f2)
 * [VictoriaMetrics vs. OpenTSDB](https://blg.robot-house.us/posts/tsdbs-grow/)
 * [Monitoring of multiple OpenShift clusters with VictoriaMetrics](https://medium.com/ibm-garage/monitoring-of-multiple-openshift-clusters-with-victoriametrics-d4f0979e2544)
-* [Fly's Prometheus Metrics](https://fly.io/blog/measuring-fly/)
 * [Ultra Monitoring with Victoria Metrics](https://dev.to/aws-builders/ultra-monitoring-with-victoria-metrics-1p2)
-* [Infrastructure monitoring with Prometheus at Zerodha](https://zerodha.tech/blog/infra-monitoring-at-zerodha/)
-* [Sismology: Iguana Solutions’ Monitoring System](https://medium.com/nerd-for-tech/sismology-iguana-solutions-monitoring-system-f46e4170447f)
-* [Prometheus High Availability and Fault Tolerance strategy, long term storage with VictoriaMetrics](https://medium.com/miro-engineering/prometheus-high-availability-and-fault-tolerance-strategy-long-term-storage-with-victoriametrics-82f6f3f0409e)
-* [Monitoring with Prometheus, Grafana, AlertManager and VictoriaMetrics](https://www.sensedia.com/post/monitoring-with-prometheus-grafana-alertmanager-and-victoriametrics)
-* [How we improved our Kubernetes monitoring at Smarkets, and how you could too](https://smarketshq.com/monitoring-kubernetes-clusters-41a4b24c19e3)
-* [Kubernetes and VictoriaMetrics in Mist v4.6](https://mist.io/blog/2021-11-26-kubernetes-and-victoriametrics-in-Mist-v4-6)
-* [Foiled by the Firewall: A Tale of Transition From Prometheus to VictoriaMetrics](https://www.percona.com/blog/2020/12/01/foiled-by-the-firewall-a-tale-of-transition-from-prometheus-to-victoriametrics/)
-* [Observations on Better Resource Usage with Percona Monitoring and Management v2.12.0](https://www.percona.com/blog/2020/12/23/observations-on-better-resource-usage-with-percona-monitoring-and-management-v2-12-0/)
-* [Better Prometheus rate() function with VictoriaMetrics](https://www.percona.com/blog/2020/02/28/better-prometheus-rate-function-with-victoriametrics/)
-* [Percona monitoring and management migration from Prometheus to VictoriaMetrics FAQ](https://www.percona.com/blog/2020/12/16/percona-monitoring-and-management-migration-from-prometheus-to-victoriametrics-faq/)
-* [Compiling a Percona Monitoring and Management v2 Client in ARM: Raspberry Pi 3 Reprise](https://www.percona.com/blog/2021/05/26/compiling-a-percona-monitoring-and-management-v2-client-in-arm-raspberry-pi-3/)
+* [Percona: Better Prometheus rate() function with VictoriaMetrics](https://www.percona.com/blog/2020/02/28/better-prometheus-rate-function-with-victoriametrics/)
+* [Percona: Percona monitoring and management migration from Prometheus to VictoriaMetrics FAQ](https://www.percona.com/blog/2020/12/16/percona-monitoring-and-management-migration-from-prometheus-to-victoriametrics-faq/)
+* [Percona: Compiling a Percona Monitoring and Management v2 Client in ARM: Raspberry Pi 3 Reprise](https://www.percona.com/blog/2021/05/26/compiling-a-percona-monitoring-and-management-v2-client-in-arm-raspberry-pi-3/)
+* [Percona: Tame Kubernetes Costs with Percona Monitoring and Management and Prometheus Operator](https://www.percona.com/blog/2021/02/12/tame-kubernetes-costs-with-percona-monitoring-and-management-and-prometheus-operator/)
 * [Making peace with Prometheus rate()](https://blog.doit-intl.com/making-peace-with-prometheus-rate-43a3ea75c4cf)
-* [Monitoring K8S with VictoriaMetrics](https://docs.google.com/presentation/d/1g7yUyVEaAp4tPuRy-MZbPXKqJ1z78_5VKuV841aQfsg/edit)
-* [CMS monitoring R&D: Real-time monitoring and alerts](https://indico.cern.ch/event/877333/contributions/3696707/attachments/1972189/3281133/CMS_mon_RD_for_opInt.pdf)
-* [The CMS monitoring infrastructure and applications](https://arxiv.org/pdf/2007.03630.pdf)
 * [Disk usage: VictoriaMetrics vs Prometheus](https://stas.starikevich.com/posts/disk-usage-for-vm-versus-prometheus/)
 * [Benchmarking time series workloads on Apache Kudu using TSBS](https://blog.cloudera.com/benchmarking-time-series-workloads-on-apache-kudu-using-tsbs/)
 * [What are Open Source Time Series Databases?](https://www.iunera.com/kraken/fabric/time-series-database/)
@@ -55,13 +64,8 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
 * [Monitoring Kubernetes with VictoriaMetrics+Prometheus](https://speakerdeck.com/bo0km4n/victoriametrics-plus-prometheusdegou-zhu-surufu-shu-kubernetesfalsejian-shi-ji-pan)
 * [High-performance Graphite storage solution on top of VictoriaMetrics](https://golangexample.com/a-high-performance-graphite-storage-solution/)
 * [Cloud Native Model Driven Telemetry Stack on OpenShift](https://cer6erus.medium.com/cloud-native-model-driven-telemetry-stack-on-openshift-80712621f5bc)
-* [Observability, Availability & DORA’s Research Program](https://medium.com/alteos-tech-blog/observability-availability-and-doras-research-program-85deb6680e78)
-* [Tame Kubernetes Costs with Percona Monitoring and Management and Prometheus Operator](https://www.percona.com/blog/2021/02/12/tame-kubernetes-costs-with-percona-monitoring-and-management-and-prometheus-operator/)
 * [Prometheus VictoriaMetrics On AWS ECS](https://dalefro.medium.com/prometheus-victoria-metrics-on-aws-ecs-62448e266090)
-* [API Monitoring With Prometheus, Grafana, AlertManager and VictoriaMetrics](https://nordicapis.com/api-monitoring-with-prometheus-grafana-alertmanager-and-victoriametrics/)
 * [Solving Metrics at scale with VictoriaMetrics](https://www.youtube.com/watch?v=QgLMztnj7-8)
-* [Monitoring Kubernetes clusters with VictoriaMetrics and Grafana](https://blog.cybozu.io/entry/2021/03/18/115743)
-* [Multi-tenancy monitoring system for Kubernetes cluster using VictoriaMetrics and operators](https://blog.kintone.io/entry/2021/03/31/175256)
 * [Monitoring as Code на базе VictoriaMetrics и Grafana](https://habr.com/ru/post/568090/)
 * [Push Prometheus metrics to VictoriaMetrics or other exporters](https://pythonawesome.com/push-prometheus-metrics-to-victoriametrics-or-other-exporters/)
 * [Install and configure VictoriaMetrics on Debian](https://www.vultr.com/docs/install-and-configure-victoriametrics-on-debian)
@@ -70,16 +74,12 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
 * [How we tried using VictoriaMetrics and Thanos at the same time](https://medium.com/@uburro/how-we-tried-using-victoriametrics-and-thanos-at-the-same-time-48803d2a638b)
 * [Prometheus, Grafana, and Kubernetes, Oh My!](https://www.groundcover.com/blog/prometheus-grafana-kubernetes)
 * [Explaining modern server monitoring stacks for self-hosting](https://dataswamp.org/~solene/2022-09-11-exploring-monitoring-stacks.html)
-* [Brewblox: InfluxDB to Victoria Metrics](https://www.brewblox.com/dev/decisions/20210718_victoria_metrics.html)
 * [VictoriaMetrics static scraper](https://blog.differentpla.net/blog/2022/10/16/victoria-metrics-static-scraper/)
 * [VictoriaMetrics and Open Cosmos boldly takes edge computing to the edge of space](https://www.iot-now.com/2022/07/19/122423-victoriametrics-and-open-cosmos-boldly-takes-edge-computing-to-the-edge-of-space/)
-* [Evaluating Backend Options For Prometheus Metrics](https://www.techetio.com/2022/08/21/evaluating-backend-options-for-prometheus-metrics/)
 * [Time Series in the Multiverse of Madness (in Korean)](https://www.youtube.com/watch?v=OUyXPgVcdw4), plus [these slides](https://deview.kr/data/deview/session/attach/%5B2B4%5DVictoriaMetrics_%E1%84%89%E1%85%B5%E1%84%80%E1%85%A8%E1%84%8B%E1%85%A7%E1%86%AF_%E1%84%83%E1%85%A6%E1%84%8B%E1%85%B5%E1%84%90%E1%85%A5_%E1%84%83%E1%85%A2%E1%84%92%E1%85%A9%E1%86%AB%E1%84%83%E1%85%A9%E1%86%AB%E1%84%8B%E1%85%B4_%E1%84%86%E1%85%A5%E1%86%AF%E1%84%90%E1%85%B5%E1%84%87%E1%85%A5%E1%84%89%E1%85%B3_Kor+Eng.pdf)
 * [VictoriaMetrics: an overview and its use instead of Prometheus](https://rtfm.co.ua/en/victoriametrics-an-overview-and-its-use-instead-of-prometheus/)
 * [VictoriaMetrics: deploying a Kubernetes monitoring stack](https://rtfm.co.ua/en/victoriametrics-deploying-a-kubernetes-monitoring-stack/)
 * [VictoriaMetrics: VMAuth – Proxy, Authentication, and Authorization](https://rtfm.co.ua/en/victoriametrics-vmauth-proxy-authentication-and-authorization/)
-* [Better, Faster, Cheaper: How Grammarly Improved Monitoring by Over 10x with VictoriaMetrics](https://www.grammarly.com/blog/engineering/monitoring-with-victoriametrics/)
-* [VictoriaMetrics, a stress-free Prometheus Remote Storage for 1 Billion metrics](https://medium.com/criteo-engineering/victoriametrics-a-prometheus-remote-storage-solution-57081a3d8e61)
 * [Solving metrics at scale with VictoriaMetrics](https://sarthak-acoustic.medium.com/solving-metrics-at-scale-with-victoriametrics-ac9c306826c3)
 * [VictoriaMetrics: a comprehensive guide](https://medium.com/@seifeddinerajhi/victoriametrics-a-comprehensive-guide-comparing-it-to-prometheus-and-implementing-kubernetes-03eb8feb0cc2)
 
|
@ -136,6 +136,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
|
||||||
* [How ClickHouse inspired us to build a high performance time series database](https://www.youtube.com/watch?v=p9qjb_yoBro). See also [slides](https://docs.google.com/presentation/d/1SdFrwsyR-HMXfbzrY8xfDZH_Dg6E7E5NJ84tQozMn3w/edit?usp=sharing)
|
* [How ClickHouse inspired us to build a high performance time series database](https://www.youtube.com/watch?v=p9qjb_yoBro). See also [slides](https://docs.google.com/presentation/d/1SdFrwsyR-HMXfbzrY8xfDZH_Dg6E7E5NJ84tQozMn3w/edit?usp=sharing)
|
||||||
* [OSA Con 2022: Specifics of data analysis in Time Series Databases](https://www.youtube.com/watch?v=_zORxrgLtec)
|
* [OSA Con 2022: Specifics of data analysis in Time Series Databases](https://www.youtube.com/watch?v=_zORxrgLtec)
|
||||||
* [OSMC 2022. VictoriaMetrics: scaling to 100 million metrics per second](https://www.youtube.com/watch?v=xfed9_Q0_qU). See also [slides](https://www.slideshare.net/NETWAYS/osmc-2022-victoriametrics-scaling-to-100-million-metrics-per-second-by-aliaksandr-valialkin)
|
* [OSMC 2022. VictoriaMetrics: scaling to 100 million metrics per second](https://www.youtube.com/watch?v=xfed9_Q0_qU). See also [slides](https://www.slideshare.net/NETWAYS/osmc-2022-victoriametrics-scaling-to-100-million-metrics-per-second-by-aliaksandr-valialkin)
|
||||||
|
* [OSMC 2023. Large-scale logging made easy](https://docs.google.com/presentation/d/e/2PACX-1vTDs-Ggs89Xp-Q6s6JAuututM-j43ohssght5ar2egb_uCoxibb2VkrrqG931Tt4eRWfy9JJRJ39Qyy/pub?start=false&loop=false&delayms=3000)
|
||||||
* [CNCF Paris Meetup 2022-09-15 - VictoriaMetrics - The cost of scale in Prometheus ecosystem](https://www.youtube.com/watch?v=gcZYHpri2Hw). See also [slides](https://docs.google.com/presentation/d/1jhZuKnAXi15M-mdBP5a4ZAiyrMeHhYmzO8xcZ6pMyLc/edit?usp=sharing)
|
* [CNCF Paris Meetup 2022-09-15 - VictoriaMetrics - The cost of scale in Prometheus ecosystem](https://www.youtube.com/watch?v=gcZYHpri2Hw). See also [slides](https://docs.google.com/presentation/d/1jhZuKnAXi15M-mdBP5a4ZAiyrMeHhYmzO8xcZ6pMyLc/edit?usp=sharing)
|
||||||
* [VictoriaMetrics Meetup December 2022](https://www.youtube.com/watch?v=Mesc6JBFNhQ). See also [slides for "VictoriaMetrics 2022: new features" talk](https://docs.google.com/presentation/d/1jI7XZoodmuzLymdu4MToG9onAKQjzCNwMO2NDupyUkQ/edit?usp=sharing).
|
* [VictoriaMetrics Meetup December 2022](https://www.youtube.com/watch?v=Mesc6JBFNhQ). See also [slides for "VictoriaMetrics 2022: new features" talk](https://docs.google.com/presentation/d/1jI7XZoodmuzLymdu4MToG9onAKQjzCNwMO2NDupyUkQ/edit?usp=sharing).
|
||||||
* [Comparing Thanos to VictoriaMetrics cluster](https://faun.pub/comparing-thanos-to-victoriametrics-cluster-b193bea1683)
|
* [Comparing Thanos to VictoriaMetrics cluster](https://faun.pub/comparing-thanos-to-victoriametrics-cluster-b193bea1683)
|
||||||
|
|
|
@@ -31,18 +31,20 @@ The sandbox cluster installation is running under the constant load generated by
 **vmalert's cmd-line flag `datasource.queryTimeAlignment` was deprecated and will have no effect anymore. It will be completely removed in next releases. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5049) and more detailed changes below.**
 **vmalert's cmd-line flag `datasource.lookback` will be deprecated soon. Please use `-rule.evalDelay` command-line flag instead. It will have no effect in next release and be removed in future releases. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155).**
 
-* SECURITY: upgrade Go builder from Go1.21.1 to Go1.21.3. See [the list of issues addressed in Go1.21.2](https://github.com/golang/go/issues?q=milestone%3AGo1.21.2+label%3ACherryPickApproved) and [the list of issues addressed in Go1.21.3](https://github.com/golang/go/issues?q=milestone%3AGo1.21.3+label%3ACherryPickApproved).
+* SECURITY: upgrade Go builder from Go1.21.1 to Go1.21.4. See [the list of issues addressed in Go1.21.2](https://github.com/golang/go/issues?q=milestone%3AGo1.21.2+label%3ACherryPickApproved), [the list of issues addressed in Go1.21.3](https://github.com/golang/go/issues?q=milestone%3AGo1.21.3+label%3ACherryPickApproved) and [the list of issues addressed in Go1.21.4](https://github.com/golang/go/issues?q=milestone%3AGo1.21.4+label%3ACherryPickApproved).
 
 * FEATURE: `vmselect`: improve performance for repeated [instant queries](https://docs.victoriametrics.com/keyConcepts.html#instant-query) if they contain one of the following [rollup functions](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions):
-  - [avg_over_time](https://docs.victoriametrics.com/MetricsQL.html#avg_over_time)
-  - [sum_over_time](https://docs.victoriametrics.com/MetricsQL.html#sum_over_time)
-  - [count_eq_over_time](https://docs.victoriametrics.com/MetricsQL.html#count_eq_over_time)
-  - [count_gt_over_time](https://docs.victoriametrics.com/MetricsQL.html#count_gt_over_time)
-  - [count_le_over_time](https://docs.victoriametrics.com/MetricsQL.html#count_le_over_time)
-  - [count_ne_over_time](https://docs.victoriametrics.com/MetricsQL.html#count_ne_over_time)
-  - [count_over_time](https://docs.victoriametrics.com/MetricsQL.html#count_over_time)
-  - [increase](https://docs.victoriametrics.com/MetricsQL.html#increase)
-  - [rate](https://docs.victoriametrics.com/MetricsQL.html#rate)
+  - [`avg_over_time`](https://docs.victoriametrics.com/MetricsQL.html#avg_over_time)
+  - [`sum_over_time`](https://docs.victoriametrics.com/MetricsQL.html#sum_over_time)
+  - [`count_eq_over_time`](https://docs.victoriametrics.com/MetricsQL.html#count_eq_over_time)
+  - [`count_gt_over_time`](https://docs.victoriametrics.com/MetricsQL.html#count_gt_over_time)
+  - [`count_le_over_time`](https://docs.victoriametrics.com/MetricsQL.html#count_le_over_time)
+  - [`count_ne_over_time`](https://docs.victoriametrics.com/MetricsQL.html#count_ne_over_time)
+  - [`count_over_time`](https://docs.victoriametrics.com/MetricsQL.html#count_over_time)
+  - [`increase`](https://docs.victoriametrics.com/MetricsQL.html#increase)
+  - [`max_over_time`](https://docs.victoriametrics.com/MetricsQL.html#max_over_time)
+  - [`min_over_time`](https://docs.victoriametrics.com/MetricsQL.html#min_over_time)
+  - [`rate`](https://docs.victoriametrics.com/MetricsQL.html#rate)
   The optimization is enabled when these functions contain lookbehind window in square brackets bigger or equal to `6h` (the threshold can be changed via `-search.minWindowForInstantRollupOptimization` command-line flag). The optimization improves performance for SLO/SLI-like queries such as `avg_over_time(up[30d])` or `sum(rate(http_request_errors_total[3d])) / sum(rate(http_requests_total[3d]))`, which can be generated by [sloth](https://github.com/slok/sloth) or similar projects.
 * FEATURE: `vmselect`: improve query performance on systems with big number of CPU cores (`>=32`). Add `-search.maxWorkersPerQuery` command-line flag, which can be used for fine-tuning query performance on systems with big number of CPU cores. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5195).
 * FEATURE: `vmselect`: expose `vm_memory_intensive_queries_total` counter metric which gets increased each time `-search.logQueryMemoryUsage` memory limit is exceeded by a query. This metric should help to identify expensive and heavy queries without inspecting the logs.
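Concretely, under the default threshold the instant query `avg_over_time(up[30d])` qualifies for this optimization because its `30d` lookbehind window exceeds `6h`, while `rate(http_requests_total[5m])` does not; lowering the bar is a matter of passing e.g. `-search.minWindowForInstantRollupOptimization=1h` to `vmselect`.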
@@ -56,6 +58,7 @@ The sandbox cluster installation is running under the constant load generated by
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow specifying full url in notifier static_configs target address, like `http://alertmanager:9093/test/api/v2/alerts`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5184).
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): reduce the number of queries for restoring alerts state on start-up. The change should speed up the restore process and reduce pressure on `remoteRead.url`. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5265).
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add label `file` pointing to the group's filename to metrics `vmalert_recording_.*` and `vmalert_alerts_.*`. The filename should help identifying alerting rules belonging to specific groups with identical names but different filenames. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5267).
+* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): automatically retry remote-write requests on closed connections. The change should reduce the amount of logs produced in environments with short-living connections or environments without support of keep-alive on network balancers.
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): support data ingestion from [NewRelic infrastructure agent](https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-newrelic-agent), [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3520) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4712).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `-remoteWrite.shardByURL.labels` command-line flag, which can be used for specifying a list of labels for sharding outgoing samples among the configured `-remoteWrite.url` destinations if `-remoteWrite.shardByURL` command-line flag is set. See [these docs](https://docs.victoriametrics.com/vmagent.html#sharding-among-remote-storages) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4942) for details.
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not exit on startup when [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) refer to non-existing or invalid files with auth configs, since these files may appear / updated later. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4959) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5153).
@@ -76,11 +79,18 @@ The sandbox cluster installation is running under the constant load generated by
 * FEATURE: [vmalert-tool](https://docs.victoriametrics.com/#vmalert-tool): add `unittest` command to run unittest for alerting and recording rules. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4789) for details.
 * FEATURE: dashboards/vmalert: add new panel `Missed evaluations` for indicating alerting groups that miss their evaluations.
 * FEATURE: all: track requests with wrong auth key and wrong basic auth at `vm_http_request_errors_total` [metric](https://docs.victoriametrics.com/#monitoring) with `reason="wrong_auth_key"` and `reason="wrong_basic_auth"`. See [this issue](https://github.com/victoriaMetrics/victoriaMetrics/issues/4590). Thanks to @venkatbvc for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5166).
+* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): add ability to drop the specified number of `/`-delimited prefix parts from the request path before proxying the request to the matching backend. See [these docs](https://docs.victoriametrics.com/vmauth.html#dropping-request-path-prefix).
+* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): add ability to skip TLS verification and to specify TLS Root CA when connecting to backends. See [these docs](https://docs.victoriametrics.com/vmauth.html#backend-tls-setup) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5240).
+* FEATURE: `vmstorage`: gradually close `vminsert` connections during 25 seconds at [graceful shutdown](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#updating--reconfiguring-cluster-nodes). This should reduce data ingestion slowdown during rolling restarts. The duration for gradual closing of `vminsert` connections can be configured via `-storage.vminsertConnsShutdownDuration` command-line flag. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4922) and [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#improving-re-routing-performance-during-restart) for details.
+* FEATURE: `vmstorage`: add `-blockcache.missesBeforeCaching` command-line flag, which can be used for fine-tuning RAM usage for `indexdb/dataBlocks` cache when queries touching big number of time series are executed.
+* FEATURE: add `-loggerMaxArgLen` command-line flag for fine-tuning the maximum lengths of logged args.
 
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): strip sensitive information such as auth headers or passwords from datasource, remote-read, remote-write or notifier URLs in log messages or UI. This behavior is by default and is controlled via `-datasource.showURL`, `-remoteRead.showURL`, `remoteWrite.showURL` or `-notifier.showURL` cmd-line flags. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5044).
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): fix vmalert web UI when running on 32-bit architectures machine.
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): do not send requests to configured remote systems when `-datasource.*`, `-remoteWrite.*`, `-remoteRead.*` or `-notifier.*` command-line flags refer files with invalid auth configs. Previously such requests were sent without properly set auth headers. Now the requests are sent only after the files are updated with valid auth configs. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5153).
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly maintain alerts state in [replay mode](https://docs.victoriametrics.com/vmalert.html#rules-backfilling) if alert's `for` param was bigger than replay request range (usually a couple of hours). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5186) for details.
+* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): increment `vmalert_remotewrite_errors_total` metric if all retries to send remote-write request failed. Before, this metric was incremented only if remote-write client's buffer is overloaded.
+* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): increment `vmalert_remotewrite_dropped_rows_total` and `vmalert_remotewrite_dropped_bytes_total` metrics if remote-write client's buffer is overloaded. Before, these metrics were incremented only after unsuccessful HTTP calls.
 * BUGFIX: `vmselect`: improve performance and memory usage during query processing on machines with big number of CPU cores. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5087).
 * BUGFIX: dashboards: fix vminsert/vmstorage/vmselect metrics filtering when dashboard is used to display data from many sub-clusters with unique job names. Before, only one specific job could have been accounted for component-specific panels, instead of all available jobs for the component.
 * BUGFIX: dashboards/vmalert: apply `desc` sorting in tooltips for vmalert dashboard in order to improve visibility of the outliers on graph.
@@ -89,16 +99,23 @@ The sandbox cluster installation is running under the constant load generated by
 * BUGFIX: dashboards/cluster: fix description about `max` threshold for `Concurrent selects` panel. Before, it was mistakenly implying that `max` is equal to the double of available CPUs.
 * BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): bump hard-coded limit for search query size at `vmstorage` from 1MB to 5MB. The change should be more suitable for real-world scenarios and protect vmstorage from excessive memory usage. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5154) for details
 * BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): fix error when creating an incremental backup with the `-origin` command-line flag. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5144) for details.
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly apply [relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling) with `regex`, which start and end with `.+` or `.*` and which contain alternate sub-regexps. For example, `.+;|;.+` or `.*foo|bar|baz.*`. Previously such regexps were improperly parsed, which could result in unexpected relabeling results. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5297).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly discover Kubernetes targets via [kubernetes_sd_configs](https://docs.victoriametrics.com/sd_configs.html#kubernetes_sd_configs). Previously some targets and some labels could be skipped during service discovery because of the bug introduced in [v1.93.5](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.5) when implementing [this feature](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4850). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5216) for more details.
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): fix vmagent ignoring configuration reload for streaming aggregation if it was started with empty streaming aggregation config. Thanks to @aluode99 for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5178).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not scrape targets if the corresponding [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) refer to files with invalid auth configs. Previously the targets were scraped without properly set auth headers in this case. Now targets are scraped only after the files are updated with valid auth configs. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5153).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly parse `ca`, `cert` and `key` options at `tls_config` section inside [http client settings](https://docs.victoriametrics.com/sd_configs.html#http-api-client-options). Previously string values couldn't be parsed for these options, since the parser was mistakenly expecting a list of `uint8` values instead.
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly drop samples if `-streamAggr.dropInput` command-line flag is set and `-remoteWrite.streamAggr.config` contains an empty file. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5207).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not print redundant error logs when failed to scrape consul or nomad target. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5239).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): generate proper link to the main page and to `favicon.ico` at http pages served by `vmagent` such as `/targets` or `/service-discovery` when `vmagent` sits behind an http proxy with custom http path prefixes. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5306).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly decode Snappy-encoded data blocks received via [VictoriaMetrics remote_write protocol](https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5301).
 * BUGFIX: [vmstorage](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): prevent deleted series to be searchable via `/api/v1/series` API if they were re-ingested with staleness markers. This situation could happen if user deletes the series from the target and from VM, and then vmagent sends stale markers for absent series. Thanks to @ilyatrefilov for the [issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5069) and [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5174).
 * BUGFIX: [vmstorage](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): log warning about switching to ReadOnly mode only on state change. Before, vmstorage would log this warning every 1s. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5159) for details.
 * BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): show browser authorization window for unauthorized requests to unsupported paths if the `unauthorized_user` section is specified. This allows properly authorizing the user. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5236) for details.
+* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): properly proxy requests to HTTP/2.0 backends and properly pass `Host` header to backends.
 * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the `Disable cache` toggle at `JSON` and `Table` views. Previously response caching was always enabled and couldn't be disabled at these views.
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): correctly display query errors on [Explore Prometheus Metrics](https://docs.victoriametrics.com/#metrics-explorer) page. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5202) for details.
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly handle trailing slash in the server URL. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5203).
+* BUGFIX: [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html): correctly print error in logs when copying backup fails. Previously, error was displayed in metrics but was missing in logs.
 
 ## [v1.94.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.94.0)
 
@@ -323,6 +323,10 @@ See more details about cardinality limiter in [these docs](https://docs.victoria
 
 ## Troubleshooting
 
+- If your VictoriaMetrics cluster experiences data ingestion delays during
+  [rolling restarts and configuration updates](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#updating--reconfiguring-cluster-nodes),
+  then see [these docs](#improving-re-routing-performance-during-restart).
+
 [Troubleshooting docs for single-node VictoriaMetrics](https://docs.victoriametrics.com/Troubleshooting.html) apply to VictoriaMetrics cluster as well.
 
 ## Readonly mode
@ -470,8 +474,18 @@ This strategy allows upgrading the cluster without downtime if the following con

- The updated config / upgraded binary is compatible with the remaining components in the cluster.
  See the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) for compatibility notes between different releases.

If at least a single condition isn't met, then the rolling restart may result in cluster unavailability
during the config update / version upgrade. In this case the following strategy is recommended.

#### Improving re-routing performance during restart

`vmstorage` nodes may experience increased CPU, RAM and disk IO usage during
[rolling restarts](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#no-downtime-strategy),
since they need to process a higher load while some of the `vmstorage` nodes are temporarily unavailable in the cluster.
It is possible to reduce resource usage spikes by running more `vminsert` nodes and by passing bigger values
to the `-storage.vminsertConnsShutdownDuration` command-line flag at `vmstorage` nodes.
Make sure that the `-storage.vminsertConnsShutdownDuration` is smaller than the graceful shutdown timeout configured at the system which manages `vmstorage`
(e.g. Docker, Kubernetes, systemd, etc.). Otherwise the system may kill the `vmstorage` node before it finishes gradually closing `vminsert` connections.
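As a hedged sketch of aligning these settings for a systemd-managed `vmstorage` (the 60s and 90s values are assumptions; pick values matching your environment):

```console
# Let vmstorage spend 60 seconds gradually closing incoming vminsert
# connections during graceful shutdown, smoothing the load spike
# on the remaining vmstorage nodes.
/path/to/vmstorage -storage.vminsertConnsShutdownDuration=60s ...

# The stop timeout of the process manager must exceed that duration,
# otherwise vmstorage may be killed mid-shutdown. For systemd:
#   [Service]
#   TimeoutStopSec=90s
```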

### Minimum downtime strategy

@ -866,6 +880,8 @@ Below is the output for `/path/to/vminsert -help`:
     Whether to skip verification of TLS certificates provided by -storageNode nodes if -cluster.tls flag is set. Note that disabled TLS certificate verification breaks security. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
  -cluster.tlsKeyFile string
     Path to client-side TLS key file to use when connecting to -storageNode if -cluster.tls flag is set. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
  -clusternative.vminsertConnsShutdownDuration duration
     The time needed for gradual closing of upstream vminsert connections during graceful shutdown. Bigger duration reduces spikes in CPU, RAM and disk IO load on the remaining lower-level clusters during rolling restart. Smaller duration reduces the time needed to close all the upstream vminsert connections, thus reducing the time for graceful shutdown. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#improving-re-routing-performance-during-restart (default 25s)
  -clusternativeListenAddr string
     TCP address to listen for data from other vminsert nodes in multi-level cluster setup. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup . Usually :8400 should be set to match default vmstorage port for vminsert. Disabled work if empty
  -csvTrimTimestamp duration
@ -974,6 +990,8 @@ Below is the output for `/path/to/vminsert -help`:
     Allows renaming fields in JSON formatted logs. Example: "ts:timestamp,msg:message" renames "ts" to "timestamp" and "msg" to "message". Supported fields: ts, level, caller, msg
  -loggerLevel string
     Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "INFO")
  -loggerMaxArgLen int
     The maximum length of a single logged argument. Longer arguments are replaced with 'arg_start..arg_end', where 'arg_start' and 'arg_end' is prefix and suffix of the arg with the length not exceeding -loggerMaxArgLen / 2 (default 500)
  -loggerOutput string
     Output for the logs. Supported values: stderr, stdout (default "stderr")
  -loggerTimezone string
@ -1177,6 +1195,8 @@ Below is the output for `/path/to/vmselect -help`:
     Allows renaming fields in JSON formatted logs. Example: "ts:timestamp,msg:message" renames "ts" to "timestamp" and "msg" to "message". Supported fields: ts, level, caller, msg
  -loggerLevel string
     Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "INFO")
  -loggerMaxArgLen int
     The maximum length of a single logged argument. Longer arguments are replaced with 'arg_start..arg_end', where 'arg_start' and 'arg_end' is prefix and suffix of the arg with the length not exceeding -loggerMaxArgLen / 2 (default 500)
  -loggerOutput string
     Output for the logs. Supported values: stderr, stdout (default "stderr")
  -loggerTimezone string
@ -1332,6 +1352,8 @@ Below is the output for `/path/to/vmstorage -help`:

```
  -bigMergeConcurrency int
     Deprecated: this flag does nothing. Please use -smallMergeConcurrency for controlling the concurrency of background merges. See https://docs.victoriametrics.com/#storage
  -blockcache.missesBeforeCaching int
     The number of cache misses before putting the block into cache. Higher values may reduce indexdb/dataBlocks cache size at the cost of higher CPU and disk read usage (default 2)
  -cacheExpireDuration duration
     Items are removed from in-memory caches after they aren't accessed for this duration. Lower values may reduce memory usage at the cost of higher CPU usage. See also -prevCacheRemovalPercent (default 30m0s)
  -cluster.tls
@ -1428,6 +1450,8 @@ Below is the output for `/path/to/vmstorage -help`:
     Allows renaming fields in JSON formatted logs. Example: "ts:timestamp,msg:message" renames "ts" to "timestamp" and "msg" to "message". Supported fields: ts, level, caller, msg
  -loggerLevel string
     Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "INFO")
  -loggerMaxArgLen int
     The maximum length of a single logged argument. Longer arguments are replaced with 'arg_start..arg_end', where 'arg_start' and 'arg_end' is prefix and suffix of the arg with the length not exceeding -loggerMaxArgLen / 2 (default 500)
  -loggerOutput string
     Output for the logs. Supported values: stderr, stdout (default "stderr")
  -loggerTimezone string
@ -1507,6 +1531,8 @@ Below is the output for `/path/to/vmstorage -help`:
  -storage.minFreeDiskSpaceBytes size
     The minimum free disk space at -storageDataPath after which the storage stops accepting new data
     Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 10000000)
  -storage.vminsertConnsShutdownDuration duration
     The time needed for gradual closing of vminsert connections during graceful shutdown. Bigger duration reduces spikes in CPU, RAM and disk IO load on the remaining vmstorage nodes during rolling restart. Smaller duration reduces the time needed to close all the vminsert connections, thus reducing the time for graceful shutdown. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#improving-re-routing-performance-during-restart (default 25s)
  -storageDataPath string
     Path to storage data (default "vmstorage-data")
  -tls
@ -445,7 +445,7 @@ This information is obtained from the `/api/v1/status/active_queries` HTTP endpo

[VMUI](#vmui) provides the ability to explore metrics exported by a particular `job` / `instance` in the following way:

1. Open the `vmui` at `http://victoriametrics:8428/vmui/`.
1. Click the `Explore metrics` tab.
1. Click the `Explore Prometheus metrics` tab.
1. Select the `job` you want to explore.
1. Optionally select the `instance` for the selected job to explore.
1. Select metrics you want to explore and compare.
@ -1129,6 +1129,18 @@ For example, the following command builds the image on top of [scratch](https://
|
||||||
ROOT_IMAGE=scratch make package-victoria-metrics
|
ROOT_IMAGE=scratch make package-victoria-metrics
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Building VictoriaMetrics with Podman
|
||||||
|
|
||||||
|
VictoriaMetrics can be built with Podman in either rootful or rootless mode.
|
||||||
|
|
||||||
|
When building via rootlful Podman, simply add `DOCKER=podman` to the relevant `make` commandline. To build
|
||||||
|
via rootless Podman, add `DOCKER=podman DOCKER_RUN="podman run --userns=keep-id"` to the `make`
|
||||||
|
commandline.
|
||||||
|
|
||||||
|
For example: `make victoria-metrics-pure DOCKER=podman DOCKER_RUN="podman run --userns=keep-id"`
|
||||||
|
|
||||||
|
Note that `production` builds are not supported via Podman becuase Podman does not support `buildx`.
|
||||||
|
|
||||||
## Start with docker-compose
|
## Start with docker-compose
|
||||||
|
|
||||||
[Docker-compose](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/docker-compose.yml)
|
[Docker-compose](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/docker-compose.yml)
|
||||||
|
@ -1686,43 +1698,44 @@ See also [cardinality limiter](#cardinality-limiter) and [capacity planning docs

## High availability

* Install multiple VictoriaMetrics instances in distinct datacenters (availability zones).
* Pass addresses of these instances to [vmagent](https://docs.victoriametrics.com/vmagent.html) via `-remoteWrite.url` command-line flag:

```console
/path/to/vmagent -remoteWrite.url=http://<victoriametrics-addr-1>:8428/api/v1/write -remoteWrite.url=http://<victoriametrics-addr-2>:8428/api/v1/write
```

Alternatively these addresses may be passed to `remote_write` section in Prometheus config:

```yml
remote_write:
  - url: http://<victoriametrics-addr-1>:8428/api/v1/write
    queue_config:
      max_samples_per_send: 10000
  # ...
  - url: http://<victoriametrics-addr-N>:8428/api/v1/write
    queue_config:
      max_samples_per_send: 10000
```

* Apply the updated config:

```console
kill -HUP `pidof prometheus`
```

It is recommended to use [vmagent](https://docs.victoriametrics.com/vmagent.html) instead of Prometheus for highly loaded setups.

* Now Prometheus should write data into all the configured `remote_write` urls in parallel.
* Set up [Promxy](https://github.com/jacksontj/promxy) in front of all the VictoriaMetrics replicas.
* Set up Prometheus datasource in Grafana that points to Promxy.

If you have Prometheus HA pairs with replicas `r1` and `r2` in each pair, then configure each `r1`
to write data to `victoriametrics-addr-1`, while each `r2` should write data to `victoriametrics-addr-2`.

Another option is to write data simultaneously from Prometheus HA pair to a pair of VictoriaMetrics instances
with the enabled de-duplication. See [this section](#deduplication) for details.

The general approach for achieving high availability is the following:

- to run two identically configured VictoriaMetrics instances in distinct datacenters (availability zones)
- to store the collected data simultaneously into these instances via [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus
- to query the first VictoriaMetrics instance and to fail over to the second instance when the first instance becomes temporarily unavailable.

Such a setup guarantees that the collected data isn't lost when one of the VictoriaMetrics instances becomes unavailable.
The collected data continues to be written to the available VictoriaMetrics instance, so it should be available for querying.
Both [vmagent](https://docs.victoriametrics.com/vmagent.html) and Prometheus buffer the collected data locally if they cannot send it
to the configured remote storage. So the collected data will be written to the temporarily unavailable VictoriaMetrics instance
after it becomes available.

If you use [vmagent](https://docs.victoriametrics.com/vmagent.html) for storing the data into VictoriaMetrics,
then it can be configured with multiple `-remoteWrite.url` command-line flags, where every flag points to the VictoriaMetrics
instance in a particular availability zone, in order to replicate the collected data to all the VictoriaMetrics instances.
For example, the following command instructs `vmagent` to replicate data to `vm-az1` and `vm-az2` instances of VictoriaMetrics:

```console
/path/to/vmagent \
  -remoteWrite.url=http://<vm-az1>:8428/api/v1/write \
  -remoteWrite.url=http://<vm-az2>:8428/api/v1/write
```

If you use Prometheus for collecting and writing the data to VictoriaMetrics,
then the following [`remote_write`](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write) section
in Prometheus config can be used for replicating the collected data to `vm-az1` and `vm-az2` VictoriaMetrics instances:

```yml
remote_write:
  - url: http://<vm-az1>:8428/api/v1/write
  - url: http://<vm-az2>:8428/api/v1/write
```

It is recommended to use [vmagent](https://docs.victoriametrics.com/vmagent.html) instead of Prometheus for highly loaded setups,
since it uses lower amounts of RAM, CPU and network bandwidth than Prometheus.

If you use identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) instances for collecting the same data
and sending it to VictoriaMetrics, then do not forget to enable [deduplication](#deduplication) at the VictoriaMetrics side.
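As a minimal sketch of that deduplication setup (assuming both vmagent instances scrape targets every 30 seconds; use your actual scrape interval):

```console
# -dedup.minScrapeInterval leaves a single sample per 30s interval
# per time series, collapsing the doubled writes coming from the two
# identically configured vmagent instances.
/path/to/victoria-metrics -dedup.minScrapeInterval=30s ...
```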

## Deduplication

@ -2516,6 +2529,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li

```
  -bigMergeConcurrency int
     Deprecated: this flag does nothing. Please use -smallMergeConcurrency for controlling the concurrency of background merges. See https://docs.victoriametrics.com/#storage
  -blockcache.missesBeforeCaching int
     The number of cache misses before putting the block into cache. Higher values may reduce indexdb/dataBlocks cache size at the cost of higher CPU and disk read usage (default 2)
  -cacheExpireDuration duration
     Items are removed from in-memory caches after they aren't accessed for this duration. Lower values may reduce memory usage at the cost of higher CPU usage. See also -prevCacheRemovalPercent (default 30m0s)
  -configAuthKey string
@ -2643,6 +2658,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     Allows renaming fields in JSON formatted logs. Example: "ts:timestamp,msg:message" renames "ts" to "timestamp" and "msg" to "message". Supported fields: ts, level, caller, msg
  -loggerLevel string
     Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "INFO")
  -loggerMaxArgLen int
     The maximum length of a single logged argument. Longer arguments are replaced with 'arg_start..arg_end', where 'arg_start' and 'arg_end' is prefix and suffix of the arg with the length not exceeding -loggerMaxArgLen / 2 (default 500)
  -loggerOutput string
     Output for the logs. Supported values: stderr, stdout (default "stderr")
  -loggerTimezone string
@ -453,7 +453,7 @@ This information is obtained from the `/api/v1/status/active_queries` HTTP endpo

[VMUI](#vmui) provides the ability to explore metrics exported by a particular `job` / `instance` in the following way:

1. Open the `vmui` at `http://victoriametrics:8428/vmui/`.
1. Click the `Explore metrics` tab.
1. Click the `Explore Prometheus metrics` tab.
1. Select the `job` you want to explore.
1. Optionally select the `instance` for the selected job to explore.
1. Select metrics you want to explore and compare.
@ -1137,6 +1137,18 @@ For example, the following command builds the image on top of [scratch](https://
|
||||||
ROOT_IMAGE=scratch make package-victoria-metrics
|
ROOT_IMAGE=scratch make package-victoria-metrics
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Building VictoriaMetrics with Podman
|
||||||
|
|
||||||
|
VictoriaMetrics can be built with Podman in either rootful or rootless mode.
|
||||||
|
|
||||||
|
When building via rootlful Podman, simply add `DOCKER=podman` to the relevant `make` commandline. To build
|
||||||
|
via rootless Podman, add `DOCKER=podman DOCKER_RUN="podman run --userns=keep-id"` to the `make`
|
||||||
|
commandline.
|
||||||
|
|
||||||
|
For example: `make victoria-metrics-pure DOCKER=podman DOCKER_RUN="podman run --userns=keep-id"`
|
||||||
|
|
||||||
|
Note that `production` builds are not supported via Podman becuase Podman does not support `buildx`.
|
||||||
|
|
||||||
## Start with docker-compose
|
## Start with docker-compose
|
||||||
|
|
||||||
[Docker-compose](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/docker-compose.yml)
|
[Docker-compose](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/docker-compose.yml)
|
||||||
|
@ -1694,43 +1706,44 @@ See also [cardinality limiter](#cardinality-limiter) and [capacity planning docs

## High availability

* Install multiple VictoriaMetrics instances in distinct datacenters (availability zones).
* Pass addresses of these instances to [vmagent](https://docs.victoriametrics.com/vmagent.html) via `-remoteWrite.url` command-line flag:

```console
/path/to/vmagent -remoteWrite.url=http://<victoriametrics-addr-1>:8428/api/v1/write -remoteWrite.url=http://<victoriametrics-addr-2>:8428/api/v1/write
```

Alternatively these addresses may be passed to `remote_write` section in Prometheus config:

```yml
remote_write:
  - url: http://<victoriametrics-addr-1>:8428/api/v1/write
    queue_config:
      max_samples_per_send: 10000
  # ...
  - url: http://<victoriametrics-addr-N>:8428/api/v1/write
    queue_config:
      max_samples_per_send: 10000
```

* Apply the updated config:

```console
kill -HUP `pidof prometheus`
```

It is recommended to use [vmagent](https://docs.victoriametrics.com/vmagent.html) instead of Prometheus for highly loaded setups.

* Now Prometheus should write data into all the configured `remote_write` urls in parallel.
* Set up [Promxy](https://github.com/jacksontj/promxy) in front of all the VictoriaMetrics replicas.
* Set up Prometheus datasource in Grafana that points to Promxy.

If you have Prometheus HA pairs with replicas `r1` and `r2` in each pair, then configure each `r1`
to write data to `victoriametrics-addr-1`, while each `r2` should write data to `victoriametrics-addr-2`.

Another option is to write data simultaneously from Prometheus HA pair to a pair of VictoriaMetrics instances
with the enabled de-duplication. See [this section](#deduplication) for details.

The general approach for achieving high availability is the following:

- to run two identically configured VictoriaMetrics instances in distinct datacenters (availability zones)
- to store the collected data simultaneously into these instances via [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus
- to query the first VictoriaMetrics instance and to fail over to the second instance when the first instance becomes temporarily unavailable.

Such a setup guarantees that the collected data isn't lost when one of the VictoriaMetrics instances becomes unavailable.
The collected data continues to be written to the available VictoriaMetrics instance, so it should be available for querying.
Both [vmagent](https://docs.victoriametrics.com/vmagent.html) and Prometheus buffer the collected data locally if they cannot send it
to the configured remote storage. So the collected data will be written to the temporarily unavailable VictoriaMetrics instance
after it becomes available.

If you use [vmagent](https://docs.victoriametrics.com/vmagent.html) for storing the data into VictoriaMetrics,
then it can be configured with multiple `-remoteWrite.url` command-line flags, where every flag points to the VictoriaMetrics
instance in a particular availability zone, in order to replicate the collected data to all the VictoriaMetrics instances.
For example, the following command instructs `vmagent` to replicate data to `vm-az1` and `vm-az2` instances of VictoriaMetrics:

```console
/path/to/vmagent \
  -remoteWrite.url=http://<vm-az1>:8428/api/v1/write \
  -remoteWrite.url=http://<vm-az2>:8428/api/v1/write
```

If you use Prometheus for collecting and writing the data to VictoriaMetrics,
then the following [`remote_write`](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write) section
in Prometheus config can be used for replicating the collected data to `vm-az1` and `vm-az2` VictoriaMetrics instances:

```yml
remote_write:
  - url: http://<vm-az1>:8428/api/v1/write
  - url: http://<vm-az2>:8428/api/v1/write
```

It is recommended to use [vmagent](https://docs.victoriametrics.com/vmagent.html) instead of Prometheus for highly loaded setups,
since it uses lower amounts of RAM, CPU and network bandwidth than Prometheus.

If you use identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) instances for collecting the same data
and sending it to VictoriaMetrics, then do not forget to enable [deduplication](#deduplication) at the VictoriaMetrics side.
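As a minimal sketch of that deduplication setup (assuming both vmagent instances scrape targets every 30 seconds; use your actual scrape interval):

```console
# -dedup.minScrapeInterval leaves a single sample per 30s interval
# per time series, collapsing the doubled writes coming from the two
# identically configured vmagent instances.
/path/to/victoria-metrics -dedup.minScrapeInterval=30s ...
```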

## Deduplication

@ -2524,6 +2537,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li

```
  -bigMergeConcurrency int
     Deprecated: this flag does nothing. Please use -smallMergeConcurrency for controlling the concurrency of background merges. See https://docs.victoriametrics.com/#storage
  -blockcache.missesBeforeCaching int
     The number of cache misses before putting the block into cache. Higher values may reduce indexdb/dataBlocks cache size at the cost of higher CPU and disk read usage (default 2)
  -cacheExpireDuration duration
     Items are removed from in-memory caches after they aren't accessed for this duration. Lower values may reduce memory usage at the cost of higher CPU usage. See also -prevCacheRemovalPercent (default 30m0s)
  -configAuthKey string
@ -2651,6 +2666,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     Allows renaming fields in JSON formatted logs. Example: "ts:timestamp,msg:message" renames "ts" to "timestamp" and "msg" to "message". Supported fields: ts, level, caller, msg
  -loggerLevel string
     Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "INFO")
  -loggerMaxArgLen int
     The maximum length of a single logged argument. Longer arguments are replaced with 'arg_start..arg_end', where 'arg_start' and 'arg_end' is prefix and suffix of the arg with the length not exceeding -loggerMaxArgLen / 2 (default 500)
  -loggerOutput string
     Output for the logs. Supported values: stderr, stdout (default "stderr")
  -loggerTimezone string
@ -18,6 +18,10 @@ according to [these docs](https://docs.victoriametrics.com/VictoriaLogs/QuickSta

## tip

* BUGFIX: properly locate logs for the [requested streams](https://docs.victoriametrics.com/VictoriaLogs/LogsQL.html#stream-filter). Previously logs for some streams could be missing in query results. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4856). Thanks to @XLONG96 for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5295)!
* BUGFIX: [web UI](https://docs.victoriametrics.com/VictoriaLogs/querying/#web-ui): properly sort found logs by time. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5300).

## [v0.4.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.4.1-victorialogs)

Released at 2023-10-04
390
docs/grafana-datasource.md
Normal file

@ -0,0 +1,390 @@
---
sort: 38
weight: 38
title: Grafana datasource
menu:
  docs:
    parent: victoriametrics
    weight: 38
aliases:
  - /grafana-datasource.html
---

# VictoriaMetrics datasource for Grafana

The [VictoriaMetrics](http://docs.victoriametrics.com/) datasource plugin allows you to query and visualize
data from VictoriaMetrics in Grafana.

* [Motivation](#motivation)
* [Installation](#installation)
* [How to use WITH templates](#how-to-use-with-templates)
* [How to make new release](#how-to-make-new-release)
* [Frequently Asked Questions](#faq)
* [License](#license)

## Motivation

Thanks to VictoriaMetrics' compatibility with the Prometheus API, users can use the
[Prometheus datasource](https://docs.victoriametrics.com/#grafana-setup) for Grafana to query data from VictoriaMetrics.
But with time, Prometheus and VictoriaMetrics diverge more and more. After some unexpected changes to the Prometheus datasource
we decided to create a datasource plugin specifically for VictoriaMetrics.
The benefits of using the VictoriaMetrics plugin are the following:

* [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) functions support;
* Supports [query tracing](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#query-tracing) in Explore mode or right in panel's expressions;
* Supports [WITH expressions](https://github.com/VictoriaMetrics/grafana-datasource#how-to-use-with-templates);
* Plugin fixes [label names validation](https://github.com/grafana/grafana/issues/42615) issue;
* Integration with [vmui](https://docs.victoriametrics.com/#vmui).

## Installation

Installing the VictoriaMetrics Grafana datasource [requires](https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#allow_loading_unsigned_plugins)
the following changes to Grafana's `grafana.ini` config:

``` ini
[plugins]
allow_loading_unsigned_plugins = victoriametrics-datasource
```

See [why VictoriaMetrics datasource is unsigned](#why-victoriametrics-datasource-is-unsigned).

For detailed instructions on how to install the plugin on Grafana Cloud or
locally, please check out the [Plugin installation docs](https://grafana.com/docs/grafana/latest/plugins/installation/).
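As a hedged illustration for a typical Linux package install (the `grafana.ini` location and service name are assumptions; adjust for your setup):

```console
# In /etc/grafana/grafana.ini, under the [plugins] section, set:
#   allow_loading_unsigned_plugins = victoriametrics-datasource
# then restart Grafana so the change takes effect:
sudo systemctl restart grafana-server
```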

### Grafana Provisioning

Provisioning the Grafana plugin requires creating a
[datasource config file](http://docs.grafana.org/administration/provisioning/#datasources).

An example config file for provisioning the VictoriaMetrics datasource is the following:

```yaml
apiVersion: 1

# List of data sources to insert/update depending on what's
# available in the database.
datasources:
  # <string, required> Name of the VictoriaMetrics datasource
  # displayed in Grafana panels and queries.
  - name: VictoriaMetrics
    # <string, required> Sets the data source type.
    type: victoriametrics-datasource
    # <string, required> Sets the access mode, either
    # proxy or direct (Server or Browser in the UI).
    # Some data sources are incompatible with any setting
    # but proxy (Server).
    access: proxy
    # <string> Sets default URL of the single node version of VictoriaMetrics
    url: http://victoriametrics:8428
    # <string> Sets the pre-selected datasource for new panels.
    # You can set only one default data source per organization.
    isDefault: true

  # <string, required> Name of the VictoriaMetrics datasource
  # displayed in Grafana panels and queries.
  - name: VictoriaMetrics - cluster
    # <string, required> Sets the data source type.
    type: victoriametrics-datasource
    # <string, required> Sets the access mode, either
    # proxy or direct (Server or Browser in the UI).
    # Some data sources are incompatible with any setting
    # but proxy (Server).
    access: proxy
    # <string> Sets default URL of the cluster version of VictoriaMetrics
    url: http://vmselect:8481/select/0/prometheus
    # <string> Sets the pre-selected datasource for new panels.
    # You can set only one default data source per organization.
    isDefault: false
```

Please find the example of provisioning a Grafana instance with the VictoriaMetrics datasource below:

1. Create folder `./provisioning/datasource` with the datasource example file.

1. Download the latest release:

   ``` bash
   ver=$(curl -s https://api.github.com/repos/VictoriaMetrics/grafana-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
   curl -L https://github.com/VictoriaMetrics/grafana-datasource/releases/download/$ver/victoriametrics-datasource-$ver.tar.gz -o plugin.tar.gz
   tar -xf plugin.tar.gz -C ./victoriametrics-datasource
   rm plugin.tar.gz
   ```

1. Define Grafana installation via docker-compose:

   ```yaml
   version: '3.0'
   services:
     grafana:
       container_name: 'grafana-datasource'
       build:
         context: ./.config
         args:
           grafana_version: ${GRAFANA_VERSION:-9.1.2}
       ports:
         - 3000:3000/tcp
       volumes:
         - ./victoriametrics-datasource:/var/lib/grafana/plugins/grafana-datasource
         - ./provisioning:/etc/grafana/provisioning
   ```

1. Run the docker-compose file:

   ```
   docker-compose -f docker-compose.yaml up
   ```

When Grafana starts successfully, the datasources should be present on the datasources tab.

<p>
  <img src="provision_datasources.png" width="800" alt="Configuration">
</p>

### Install in Kubernetes

#### Grafana helm chart

Example with the Grafana [helm chart](https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md):

``` yaml
extraInitContainers:
  - name: "load-vm-ds-plugin"
    image: "curlimages/curl:7.85.0"
    command: [ "/bin/sh" ]
    workingDir: "/var/lib/grafana"
    securityContext:
      runAsUser: 472
      runAsNonRoot: true
      runAsGroup: 472
    args:
      - "-c"
      - |
        set -ex
        mkdir -p /var/lib/grafana/plugins/
        ver=$(curl -s https://api.github.com/repos/VictoriaMetrics/grafana-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
        curl -L https://github.com/VictoriaMetrics/grafana-datasource/releases/download/$ver/victoriametrics-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/plugin.tar.gz
        tar -xf /var/lib/grafana/plugins/plugin.tar.gz -C /var/lib/grafana/plugins/
        rm /var/lib/grafana/plugins/plugin.tar.gz
    volumeMounts:
      - name: storage
        mountPath: /var/lib/grafana
```

This example uses an init container to download and install the plugin. To allow Grafana to use this container as a sidecar,
set the following config:

```yaml
sidecar:
  datasources:
    initDatasources: true
    enabled: true
```

See more about chart settings [here](https://github.com/grafana/helm-charts/blob/541d97051de87a309362e02d08741ffc868cfcd6/charts/grafana/values.yaml).

Another option would be to build a custom Grafana image with the plugin, based on the same installation instructions; see the sketch below.
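A hedged Dockerfile sketch of such a custom image (the Grafana version is an example, and it is assumed the base image provides `curl` and `tar`; otherwise download the plugin outside the build and `COPY` it in):

```
FROM grafana/grafana:9.1.2
# Allow loading the unsigned plugin via Grafana's env-var config override.
ENV GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-datasource
# Bake the plugin into the image using the same download steps as above.
RUN mkdir -p /var/lib/grafana/plugins/ && \
    ver=$(curl -s https://api.github.com/repos/VictoriaMetrics/grafana-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1) && \
    curl -L https://github.com/VictoriaMetrics/grafana-datasource/releases/download/$ver/victoriametrics-datasource-$ver.tar.gz | \
    tar -xz -C /var/lib/grafana/plugins/
```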

#### Grafana operator

Example with the Grafana [operator](https://github.com/grafana-operator/grafana-operator):

```yaml
apiVersion: grafana.integreatly.org/v1beta1
kind: Grafana
metadata:
  name: grafana-vm
spec:
  persistentVolumeClaim:
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 200Mi
  deployment:
    spec:
      template:
        spec:
          initContainers:
            - name: "load-vm-ds-plugin"
              image: "curlimages/curl:7.85.0"
              command: [ "/bin/sh" ]
              workingDir: "/var/lib/grafana"
              securityContext:
                runAsUser: 10001
                runAsNonRoot: true
                runAsGroup: 10001
              args:
                - "-c"
                - |
                  set -ex
                  mkdir -p /var/lib/grafana/plugins/
                  ver=$(curl -s https://api.github.com/repos/VictoriaMetrics/grafana-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
                  curl -L https://github.com/VictoriaMetrics/grafana-datasource/releases/download/$ver/victoriametrics-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/plugin.tar.gz
                  tar -xf /var/lib/grafana/plugins/plugin.tar.gz -C /var/lib/grafana/plugins/
                  rm /var/lib/grafana/plugins/plugin.tar.gz
              volumeMounts:
                - name: grafana-data
                  mountPath: /var/lib/grafana
  config:
    plugins:
      allow_loading_unsigned_plugins: victoriametrics-datasource
```

See the [Grafana operator reference](https://grafana-operator.github.io/grafana-operator/docs/grafana/) to find out more about the
Grafana operator.
This example uses an init container to download and install the plugin.

### Dev release installation

1. Download the plugin build and move its contents into the Grafana plugins directory:

   ``` bash
   ver=$(curl -s https://api.github.com/repos/VictoriaMetrics/grafana-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
   curl -L https://github.com/VictoriaMetrics/grafana-datasource/releases/download/$ver/victoriametrics-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/plugin.tar.gz
   tar -xf /var/lib/grafana/plugins/plugin.tar.gz -C /var/lib/grafana/plugins/
   rm /var/lib/grafana/plugins/plugin.tar.gz
   ```

1. Restart Grafana, as shown in the sketch below.
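A hedged sketch of the restart step (assuming Grafana is managed by systemd; use your environment's equivalent otherwise):

```console
# Verify the plugin files landed in the plugins directory, then restart.
ls /var/lib/grafana/plugins/
sudo systemctl restart grafana-server
```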

## Getting started development

### 1. Configure Grafana

Installing a dev version of the Grafana plugin requires changing the `grafana.ini` config to allow loading unsigned plugins:

``` ini
# Directory where Grafana will automatically scan and look for plugins
plugins = {{path to directory with plugin}}
```

``` ini
[plugins]
allow_loading_unsigned_plugins = victoriametrics-datasource
```
### 2. Run the plugin

In the project directory, you can run:

```
# install dependencies
yarn install

# run the app in the development mode
yarn dev

# build the plugin for production to the `dist` folder and zip build
yarn build:zip
```

### 3. How to build backend plugin

From the root folder of the project run the following command:

```
make victoriametrics-backend-plugin-build
```

This command will build executable multi-platform files to the `dist` folder for the following platforms:

* linux/amd64
* linux/arm64
* linux/arm
* linux/386
* amd64
* arm64
* windows

### 4. How to build frontend plugin

From the root folder of the project run the following command:

```
make victorimetrics-frontend-plugin-build
```

This command will build the frontend app into the `dist` folder.

### 5. How to build frontend and backend parts of the plugin

When both frontend and backend parts of the plugin are required, run the following command from
the root folder of the project:

```
make victoriametrics-datasource-plugin-build
```

This command will build the frontend and backend parts of the plugin and place both
parts into the `dist` folder.

## How to use WITH templates

The `WITH` templates feature simplifies the construction and management of complex queries.
You can try this feature in the [WITH templates playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/expand-with-exprs).

The "WITH templates" section allows you to create expressions with templates that can be used in dashboards.

WITH expressions are stored in the datasource object. If the dashboard gets exported, the associated WITH templates will not be included in the resulting JSON (due to technical limitations) and need to be migrated separately.

### Defining WITH Expressions

1. Navigate to the dashboard where you want to add a template.<br/>
   *Note: templates are available within the dashboard scope.*
1. Click the `WITH templates` button.
1. Enter the expression in the input field. Once done, press the `Save` button to apply the changes. For example:

   ```
   commonFilters = {instance=~"$node:$port",job=~"$job"},

   # `cpuCount` is the number of CPUs on the node
   cpuCount = count(count(node_cpu_seconds_total{commonFilters}) by (cpu)),

   # `cpuIdle` is the sum of idle CPU cores
   cpuIdle = sum(rate(node_cpu_seconds_total{mode='idle',commonFilters}[5m]))
   ```

You can specify a comment before the variable and use markdown in it. The comment will be displayed as a hint during auto-completion. The comment can span multiple lines.

### Using WITH Expressions

After saving the template, you can enter it into the query editor field:

```
((cpuCount - cpuIdle) * 100) / cpuCount
```

Thus, the entire query will look as follows:

```
WITH (
  commonFilters = {instance=~"$node:$port",job=~"$job"},
  cpuCount = count(count(node_cpu_seconds_total{commonFilters}) by (cpu)),
  cpuIdle = sum(rate(node_cpu_seconds_total{mode='idle',commonFilters}[5m]))
)
((cpuCount - cpuIdle) * 100) / cpuCount
```

To view the raw query in the interface, enable the `Raw` toggle.

## How to make new release

1. Make sure there are no open security issues.
1. Create a release tag:
   * `git tag -s v1.xx.y` in the `master` branch
1. Run `TAG=v1.xx.y make build-release` to build and package binaries in `*.tar.gz` release archives.
1. Run `git push origin v1.xx.y` to push the `v1.xx.y` tag created at step 2 to the public GitHub repository.
1. Go to <https://github.com/VictoriaMetrics/grafana-datasource/releases> and verify that a draft release with the name `TAG` has been created
   and that this release contains all the needed binaries and checksums.
1. Remove the `draft` checkbox for the `TAG` release and manually publish it.

The sketch below collects these commands in one place.
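A hedged consolidation of the release commands above (the tag value is an example):

```console
git tag -s v1.2.3                 # in the master branch
TAG=v1.2.3 make build-release     # builds and packages *.tar.gz archives
git push origin v1.2.3            # a draft release should then appear on GitHub
```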

## FAQ

### Why VictoriaMetrics datasource is unsigned?

Based on our previous experience of [developing Grafana plugins](https://grafana.com/grafana/plugins/vertamedia-clickhouse-datasource/)
the signing procedure was a formal act. But when we tried [to sign the plugin](https://grafana.com/docs/grafana/latest/developers/plugins/publish-a-plugin/sign-a-plugin/)
we were told by a GrafanaLabs representative that the plugin falls into the Commercial signature level. It doesn't matter
whether the plugin or VictoriaMetrics itself is open source. The announced cost of the Commercial signature level was much higher
than expected, so we interrupted the procedure.

### How to convert dashboard from Prometheus to VictoriaMetrics datasource?

Make sure that the VictoriaMetrics datasource plugin is [installed](#installation), and a new datasource is created from the plugin.

Each panel in a Grafana dashboard has a datasource dropdown in Edit mode. Just choose the VictoriaMetrics datasource
instead of the Prometheus datasource in the dropdown.

If the datasource is configured via a Grafana variable, then change the variable to the VictoriaMetrics datasource type.

### Why VictoriaMetrics datasource doesn't support alerting?

Grafana doesn't allow forwarding Alert requests to alerting API /api/v1/rules for plugins which are not of Prometheus
or Loki type. See more details [here](https://github.com/VictoriaMetrics/grafana-datasource/issues/59#issuecomment-1541456768).
## License

This project is licensed under the [AGPL-3.0-only](https://github.com/VictoriaMetrics/grafana-datasource/blob/main/LICENSE).
@ -13,7 +13,11 @@ menu:

## Next release

- TODO
### Features

- [vmoperator](./README.md): properly compare difference for `statefulSet` claimTemplate metadata. See [this commit](https://github.com/VictoriaMetrics/operator/commit/49f9c72b504582b06f72eda94055fd964a11d342) for details.
- [vmoperator](./README.md): sort `statefulSet` pods by id for rolling update order. See [this commit](https://github.com/VictoriaMetrics/operator/commit/e73b03acd073ec3eda34231083a48c6f79a6757b) for details.
- [vmoperator](./README.md): optimize statefulset update logic, which should reduce some unneeded operations. See [this PR](https://github.com/VictoriaMetrics/operator/pull/801) for details.

<a name="v0.39.1"></a>
## [v0.39.1](https://github.com/VictoriaMetrics/operator/releases/tag/v0.39.1) - 1 Nov 2023
@ -10,7 +10,7 @@ menu:

<!-- this doc autogenerated - don't edit it manually -->
# Auto Generated vars for package config
updated at Wed Nov 1 15:52:40 UTC 2023
updated at Mon Nov 6 07:44:56 UTC 2023

| variable name | variable default value | variable required | variable description |
BIN
docs/provision_datasources.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 524 KiB
@ -137,7 +137,7 @@ to one sample per 5 minutes per each input time series (this operation is also k

The aggregated output metrics have the following names according to [output metric naming](#output-metric-names):

```
```text
# For input metrics ending with _total
some_metric_total:5m_total
@ -169,7 +169,7 @@ See [these docs](#aggregating-by-labels) for more details.

The aggregated output metric has the following name according to [output metric naming](#output-metric-names):

```
```text
http_requests_total:30s_without_path_user_total
```
@ -185,7 +185,7 @@ For example, if an advertising server generates `hits{some="labels"} 1` and `cli
per each incoming hit and click, then the following [stream aggregation config](#stream-aggregation-config)
can be used for counting these metrics per every 30 second interval:

```yml
```yaml
- match: '{__name__=~"hits|clicks"}'
  interval: 30s
  outputs: [count_samples]
@ -194,7 +194,7 @@ can be used for counting these metrics per every 30 second interval:
This config generates the following output metrics for `hits` and `clicks` input metrics
according to [output metric naming](#output-metric-names):

```
```text
hits:30s_count_samples count1
clicks:30s_count_samples count2
```
@ -221,7 +221,7 @@ can be used for summing these metrics per every minute:

This config generates the following output metrics according to [output metric naming](#output-metric-names):

```
```text
hits:1m_sum_samples sum1
clicks:1m_sum_samples sum2
```
@ -249,7 +249,7 @@ can be used for calculating 50th and 99th percentiles for these metrics every 30

This config generates the following output metrics according to [output metric naming](#output-metric-names):

```
```text
request_duration_seconds:30s_quantiles{quantile="0.50"} value1
request_duration_seconds:30s_quantiles{quantile="0.99"} value2
@ -280,7 +280,7 @@ for these metrics every 60 seconds:

This config generates the following output metrics according to [output metric naming](#output-metric-names).

```
```text
request_duration_seconds:60s_histogram_bucket{vmrange="start1...end1"} count1
request_duration_seconds:60s_histogram_bucket{vmrange="start2...end2"} count2
...
@ -326,6 +326,7 @@ See also [quantiles over input metrics](#quantiles-over-input-metrics) and [aggr

[Histogram](https://docs.victoriametrics.com/keyConcepts.html#histogram) is a set of [counter](https://docs.victoriametrics.com/keyConcepts.html#counter)
metrics with different `vmrange` or `le` labels. As they're counters, the applicable aggregation output is
[total](https://docs.victoriametrics.com/stream-aggregation.html#total):

```yaml
- match: 'http_request_duration_seconds_bucket'
  interval: 1m
@ -337,7 +338,8 @@ metrics with different `vmrange` or `le` labels. As they're counters, the applic
```

This config generates the following output metrics according to [output metric naming](#output-metric-names):
```
```text
http_request_duration_seconds_bucket:1m_without_instance_total{le="0.1"} value1
http_request_duration_seconds_bucket:1m_without_instance_total{le="0.2"} value2
http_request_duration_seconds_bucket:1m_without_instance_total{le="0.4"} value3
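For reference, an aggregation config of roughly this shape would produce the series above (a sketch: the `without` and `outputs` values are inferred from the `:1m_without_instance_total` names, not taken from the truncated hunk):

```yaml
- match: 'http_request_duration_seconds_bucket'
  interval: 1m
  # inferred from the output names above: drop `instance`, keep `le`
  without: [instance]
  outputs: [total]
```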
@ -368,7 +370,7 @@ See also [histograms over input metrics](#histograms-over-input-metrics) and [qu

Output metric names for stream aggregation are constructed according to the following pattern:

```
```text
<metric_name>:<interval>[_by_<by_labels>][_without_<without_labels>]_<output>
```

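To make the pattern concrete, here is a sketch of how the pieces map to a name (the `foo` metric and the `by: [job]` grouping are hypothetical, chosen only for illustration):

```yaml
# hypothetical aggregation config
- match: 'foo'
  interval: 5m
  by: [job]
  outputs: [avg]
# resulting output metric name, following the pattern above:
#   foo:5m_by_job_avg
```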
@ -391,7 +393,7 @@ during stream aggregation via `input_relabel_configs` and `output_relabel_config

For example, the following config removes the `:1m_sum_samples` suffix added [to the output metric name](#output-metric-names):

```yml
```yaml
- interval: 1m
  outputs: [sum_samples]
  output_relabel_configs:
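The hunk above is cut off before the relabeling rule itself. One rule that would strip such a suffix (a sketch assuming standard Prometheus-style relabeling semantics, not necessarily the exact rule from the docs):

```yaml
output_relabel_configs:
  # capture everything before the `:1m_sum_samples` suffix
  # and write it back into the metric name
- source_labels: [__name__]
  regex: '(.+):1m_sum_samples'
  target_label: __name__
```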
@ -677,7 +679,7 @@ support the following approaches for hot reloading stream aggregation configs fr

* By sending `SIGHUP` signal to `vmagent` or `victoria-metrics` process:

```console
```bash
kill -SIGHUP `pidof vmagent`
```

@ -10,7 +10,7 @@ menu:

# VictoriaMetrics API examples

## /api/v1/admin/tsdb/delete_series
### /api/v1/admin/tsdb/delete_series

**Deletes time series from VictoriaMetrics**

@ -81,7 +81,7 @@ Additional information:
* [How to delete time series](https://docs.victoriametrics.com/#how-to-delete-time-series)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/export
### /api/v1/export

**Exports raw samples from VictoriaMetrics in JSON line format**

@ -110,7 +110,7 @@ Additional information:
* [How to export data in JSON line format](https://docs.victoriametrics.com/#how-to-export-data-in-json-line-format)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/export/csv
### /api/v1/export/csv

**Exports raw samples from VictoriaMetrics in CSV format**

@ -138,7 +138,7 @@ Additional information:
* [How to import time series](https://docs.victoriametrics.com/#how-to-import-time-series-data)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/export/native
### /api/v1/export/native

**Exports raw samples from VictoriaMetrics in native format**

@ -166,7 +166,7 @@ More information:
* [How to import time series](https://docs.victoriametrics.com/#how-to-import-time-series-data)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/import
### /api/v1/import

**Imports data to VictoriaMetrics in JSON line format**

@ -194,7 +194,7 @@ More information:
* [How to export time series](https://docs.victoriametrics.com/#how-to-export-time-series)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/import/csv
### /api/v1/import/csv

**Imports CSV data to VictoriaMetrics**

@ -222,7 +222,7 @@ Additional information:
* [How to export time series](https://docs.victoriametrics.com/#how-to-export-time-series)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/import/native
### /api/v1/import/native

**Imports data to VictoriaMetrics in native format**

@ -248,7 +248,7 @@ Additional information:
* [How to export time series](https://docs.victoriametrics.com/#how-to-export-time-series)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/import/prometheus
### /api/v1/import/prometheus

**Imports data to VictoriaMetrics in Prometheus text exposition format**

@ -305,7 +305,7 @@ Additional information:
* [Querying label values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/label/.../values
### /api/v1/label/.../values

**Get a list of values for a particular label on the given time range**

@ -335,7 +335,7 @@ Additional information:
* [Getting label names](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/query
### /api/v1/query

**Performs PromQL/MetricsQL instant query**

@ -363,7 +363,7 @@ Additional information:
* [Query language](https://docs.victoriametrics.com/keyConcepts.html#metricsql)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/query_range
### /api/v1/query_range

**Performs PromQL/MetricsQL range query**

@ -391,7 +391,7 @@ Additional information:
* [Query language](https://docs.victoriametrics.com/keyConcepts.html#metricsql)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /api/v1/series
### /api/v1/series

**Returns series names with their labels on the given time range**

@ -422,7 +422,7 @@ Additional information:
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
VictoriaMetrics accepts a `limit` query arg for the `/api/v1/series` handler for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag value, then the limit set via the command-line flag is used.

## /api/v1/status/tsdb
### /api/v1/status/tsdb

**Cardinality statistics**

@ -449,7 +449,7 @@ Additional information:
* [TSDB Stats](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /datadog
### /datadog

**DataDog URL for Single-node VictoriaMetrics**

@ -471,7 +471,7 @@ http://vminsert:8480/insert/0/datadog

</div>

## /datadog/api/v1/series
### /datadog/api/v1/series

**Imports data in DataDog format into VictoriaMetrics**

@ -534,7 +534,7 @@ Additional information:
* [How to send data from datadog agent](https://docs.victoriametrics.com/#how-to-send-data-from-datadog-agent)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /federate
### /federate

**Returns federated metrics**

@ -562,7 +562,7 @@ Additional information:
* [Prometheus-compatible federation data](https://prometheus.io/docs/prometheus/latest/federation/#configuring-federation)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /graphite/metrics/find
### /graphite/metrics/find

**Searches Graphite metrics in VictoriaMetrics**

@ -591,7 +591,7 @@ Additional information:
* [How to send Graphite data to VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd)
* [URL Format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /influx/write
### /influx/write

**Writes data with InfluxDB line protocol to VictoriaMetrics**

@ -618,7 +618,7 @@ Additional information:
* [How to send Influx data to VictoriaMetrics](https://docs.victoriametrics.com/#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
* [URL Format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /internal/resetRollupResultCache
### /internal/resetRollupResultCache

**Resets the response cache for previously served queries. It is recommended to invoke it after the [backfilling](https://docs.victoriametrics.com/#backfilling) procedure.**

@ -641,9 +641,9 @@ curl -Is http://<vmselect>:8481/select/internal/resetRollupResultCache

</div>

## TCP and UDP
### TCP and UDP

### How to send data from OpenTSDB-compatible agents to VictoriaMetrics
#### How to send data from OpenTSDB-compatible agents to VictoriaMetrics

Turned off by default. Enable the OpenTSDB receiver in VictoriaMetrics by setting the `-opentsdbListenAddr` command-line flag.
*If run from Docker, the '-opentsdbListenAddr' port should be exposed.*

@ -691,7 +691,7 @@ Additional information:
* [OpenTSDB http put API](http://opentsdb.net/docs/build/html/api_http/put.html)
* [How to send OpenTSDB data to VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents)

### How to send Graphite data to VictoriaMetrics
#### How to send Graphite data to VictoriaMetrics

Enable the Graphite receiver in VictoriaMetrics by setting the `-graphiteListenAddr` command-line flag.

@ -71,7 +71,7 @@ and sending the data to the Prometheus-compatible remote storage:
Example command for writing the data received via [supported push-based protocols](#how-to-push-data-to-vmagent)
to [single-node VictoriaMetrics](https://docs.victoriametrics.com/) located at `victoria-metrics-host:8428`:

```console
```bash
/path/to/vmagent -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
```

@ -80,7 +80,7 @@ the data to [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-V

Example command for scraping Prometheus targets and writing the data to single-node VictoriaMetrics:

```console
```bash
/path/to/vmagent -promscrape.config=/path/to/prometheus.yml -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
```

@ -121,7 +121,7 @@ additionally to pull-based Prometheus-compatible targets' scraping:

* Sending `SIGHUP` signal to `vmagent` process:

```console
```bash
kill -SIGHUP `pidof vmagent`
```

@ -336,7 +336,7 @@ in the `scrape_config_files` section of `-promscrape.config` file. For example,
loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file
and from `https://config-server/scrape_config.yml` url:

```yml
```yaml
scrape_config_files:
- configs/*.yml
- single_scrape_config.yml

@ -346,7 +346,7 @@ scrape_config_files:
Every referred file can contain an arbitrary number of [supported scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
There is no need to specify the top-level `scrape_configs` section in these files. For example:

```yml
```yaml
- job_name: foo
  static_configs:
  - targets: ["vmagent:8429"]
@ -386,7 +386,7 @@ Extra labels can be added to metrics collected by `vmagent` via the following me
For example, the following command starts `vmagent`, which adds `{datacenter="foobar"}` label to all the metrics pushed
to all the configured remote storage systems (all the `-remoteWrite.url` flag values):

```
```bash
/path/to/vmagent -remoteWrite.label=datacenter=foobar ...
```

@ -751,7 +751,7 @@ stream parsing mode can be explicitly enabled in the following places:

Examples:

```yml
```yaml
scrape_configs:
- job_name: 'big-federate'
  stream_parse: true
@ -778,7 +778,7 @@ Each `vmagent` instance in the cluster must use identical `-promscrape.config` f
in the range `0 ... N-1`, where `N` is the number of `vmagent` instances in the cluster specified via `-promscrape.cluster.membersCount`.
For example, the following commands spread scrape targets among a cluster of two `vmagent` instances:

```
```text
/path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=0 -promscrape.config=/path/to/config.yml ...
/path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=1 -promscrape.config=/path/to/config.yml ...
```

@ -790,7 +790,7 @@ By default, each scrape target is scraped only by a single `vmagent` instance in
then `-promscrape.cluster.replicationFactor` command-line flag must be set to the desired number of replicas. For example, the following commands
start a cluster of three `vmagent` instances, where each target is scraped by two `vmagent` instances:

```
```text
/path/to/vmagent -promscrape.cluster.membersCount=3 -promscrape.cluster.replicationFactor=2 -promscrape.cluster.memberNum=0 -promscrape.config=/path/to/config.yml ...
/path/to/vmagent -promscrape.cluster.membersCount=3 -promscrape.cluster.replicationFactor=2 -promscrape.cluster.memberNum=1 -promscrape.config=/path/to/config.yml ...
/path/to/vmagent -promscrape.cluster.membersCount=3 -promscrape.cluster.replicationFactor=2 -promscrape.cluster.memberNum=2 -promscrape.config=/path/to/config.yml ...

@ -804,7 +804,7 @@ The `-promscrape.cluster.memberLabel` command-line flag allows specifying a name
The value of the `member num` label is set to `-promscrape.cluster.memberNum`. For example, the following config instructs adding `vmagent_instance="0"` label
to all the metrics scraped by the given `vmagent` instance:

```
```text
/path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=0 -promscrape.cluster.memberLabel=vmagent_instance
```

@ -831,7 +831,7 @@ See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679)
`vmagent` supports scraping targets via http, https and socks5 proxies. Proxy address must be specified in `proxy_url` option. For example, the following scrape config instructs
target scraping via https proxy at `https://proxy-addr:1234`:

```yml
```yaml
scrape_configs:
- job_name: foo
  proxy_url: https://proxy-addr:1234

@ -848,7 +848,7 @@ Proxy can be configured with the following optional settings:

For example:

```yml
```yaml
scrape_configs:
- job_name: foo
  proxy_url: https://proxy-addr:1234
@ -998,7 +998,7 @@ If you have suggestions for improvements or have found a bug - please open an is
* By default `vmagent` evenly spreads scrape load in time. If a particular scrape target must be scraped at the beginning of some interval,
  then `scrape_align_interval` option must be used. For example, the following config aligns hourly scrapes to the beginning of the hour:

```yml
```yaml
scrape_configs:
- job_name: foo
  scrape_interval: 1h
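The config above is truncated by the hunk boundary; a completed sketch (the target address is hypothetical) could look like this:

```yaml
scrape_configs:
- job_name: foo
  scrape_interval: 1h
  # align each scrape to the start of the hour
  scrape_align_interval: 1h
  static_configs:
  - targets: ["host123:8080"]
```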
@ -1008,7 +1008,7 @@ If you have suggestions for improvements or have found a bug - please open an is
* By default `vmagent` evenly spreads scrape load in time. If a particular scrape target must be scraped at a specific offset, then `scrape_offset` option must be used.
  For example, the following config instructs `vmagent` to scrape the target 10 seconds after the start of every minute:

```yml
```yaml
scrape_configs:
- job_name: foo
  scrape_interval: 1m
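Again the hunk cuts the example short; a completed sketch (hypothetical target) could look like this:

```yaml
scrape_configs:
- job_name: foo
  scrape_interval: 1m
  # scrape 10 seconds after the start of every minute
  scrape_offset: 10s
  static_configs:
  - targets: ["host123:8080"]
```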
@ -1021,14 +1021,14 @@ If you have suggestions for improvements or have found a bug - please open an is

The following relabeling rule may be added to `relabel_configs` section in order to filter out pods with unneeded ports:

```yml
```yaml
- action: keep_if_equal
  source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_container_port_number]
```

The following relabeling rule may be added to `relabel_configs` section in order to filter out init container pods:

```yml
```yaml
- action: drop
  source_labels: [__meta_kubernetes_pod_container_init]
  regex: true

@ -1072,7 +1072,7 @@ For example, `-kafka.consumer.topic.brokers=host1:9092;host2:9092`.
The following command starts `vmagent`, which reads metrics in InfluxDB line protocol format from Kafka broker at `localhost:9092`
from the topic `metrics-by-telegraf` and sends them to remote storage at `http://localhost:8428/api/v1/write`:

```console
```bash
./bin/vmagent -remoteWrite.url=http://localhost:8428/api/v1/write \
  -kafka.consumer.topic.brokers=localhost:9092 \
  -kafka.consumer.topic.format=influx \

@ -1095,7 +1095,7 @@ These command-line flags are available only in [enterprise](https://docs.victori
which can be downloaded for evaluation from [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) page
(see `vmutils-...-enterprise.tar.gz` archives) and from [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.

```
```text
  -kafka.consumer.topic array
     Kafka topic names for data consumption.
     Supports an array of values separated by comma or specified via multiple flags.
@ -1140,13 +1140,13 @@ Two types of auth are supported:

* sasl with username and password:

```console
```bash
./bin/vmagent -remoteWrite.url=kafka://localhost:9092/?topic=prom-rw&security.protocol=SASL_SSL&sasl.mechanisms=PLAIN -remoteWrite.basicAuth.username=user -remoteWrite.basicAuth.password=password
```

* tls certificates:

```console
```bash
./bin/vmagent -remoteWrite.url=kafka://localhost:9092/?topic=prom-rw&security.protocol=SSL -remoteWrite.tlsCAFile=/opt/ca.pem -remoteWrite.tlsCertFile=/opt/cert.pem -remoteWrite.tlsKeyFile=/opt/key.pem
```

@ -1177,7 +1177,7 @@ The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package-vmagent`.
The base docker image is [alpine](https://hub.docker.com/_/alpine) but it is possible to use any other base image
by setting it via `<ROOT_IMAGE>` environment variable. For example, the following command builds the image on top of [scratch](https://hub.docker.com/_/scratch) image:

```console
```bash
ROOT_IMAGE=scratch make package-vmagent
```

@ -1205,7 +1205,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b

<div class="with-copy" markdown="1">

```console
```bash
curl http://0.0.0.0:8429/debug/pprof/heap > mem.pprof
```

@ -1215,7 +1215,7 @@ curl http://0.0.0.0:8429/debug/pprof/heap > mem.pprof

<div class="with-copy" markdown="1">

```console
```bash
curl http://0.0.0.0:8429/debug/pprof/profile > cpu.pprof
```

@ -1231,7 +1231,7 @@ It is safe sharing the collected profiles from security point of view, since the

`vmagent` can be fine-tuned with various command-line flags. Run `./vmagent -help` in order to see the full list of these flags with their descriptions and default values:

```
```text
./vmagent -help

vmagent collects metrics data via popular data ingestion protocols and routes them to VictoriaMetrics.

@ -1243,7 +1243,7 @@ The shortlist of configuration flags is the following:
  -remoteWrite.bearerTokenFile string
     Optional path to bearer token file to use for -remoteWrite.url.
  -remoteWrite.concurrency int
     Defines number of writers for concurrent writing into remote querier (default 1)
     Defines number of writers for concurrent writing into remote write endpoint (default 1)
  -remoteWrite.disablePathAppend
     Whether to disable automatic appending of '/api/v1/write' path to the configured -remoteWrite.url.
  -remoteWrite.flushInterval duration

@ -43,6 +43,38 @@ Pass `-help` to `vmauth` in order to see all the supported command-line flags wi
Feel free to [contact us](mailto:info@victoriametrics.com) if you need a customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML,
accounting and rate limiting such as [vmgateway](https://docs.victoriametrics.com/vmgateway.html).

## Dropping request path prefix

By default `vmauth` doesn't drop the path prefix from the original request when proxying the request to the matching backend.
Sometimes it is needed to drop the path prefix before routing the request to the backend. This can be done by specifying the number of `/`-delimited
prefix parts to drop from the request path via `drop_src_path_prefix_parts` option at `url_map` level or at `user` level.

For example, if you need to serve requests to [vmalert](https://docs.victoriametrics.com/vmalert.html) at `/vmalert/` path prefix,
while serving requests to [vmagent](https://docs.victoriametrics.com/vmagent.html) at `/vmagent/` path prefix for a particular user,
then the following [-auth.config](#auth-config) can be used:

```yml
users:
- username: foo
  url_map:

    # proxy all the requests, which start with `/vmagent/`, to vmagent backend
  - src_paths:
    - "/vmagent/.+"

    # drop /vmagent/ path prefix from the original request before proxying it to url_prefix.
    drop_src_path_prefix_parts: 1
    url_prefix: "http://vmagent-backend:8429/"

    # proxy all the requests, which start with `/vmalert`, to vmalert backend
  - src_paths:
    - "/vmalert/.+"

    # drop /vmalert/ path prefix from the original request before proxying it to url_prefix.
    drop_src_path_prefix_parts: 1
    url_prefix: "http://vmalert-backend:8880/"
```

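The parts count refers to `/`-delimited segments of the request path. A sketch of the effect for the config above (the request paths are illustrative):

```yaml
# with drop_src_path_prefix_parts: 1
#   /vmagent/api/v1/targets -> /api/v1/targets
#   /vmalert/api/v1/alerts  -> /api/v1/alerts
# with drop_src_path_prefix_parts: 2, two leading parts would be dropped:
#   /vmagent/api/v1/targets -> /v1/targets
```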
## Load balancing

Each `url_prefix` in the [-auth.config](#auth-config) may contain either a single url or a list of urls.
@ -112,6 +144,31 @@ The following [metrics](#monitoring) related to concurrency limits are exposed b
- `vmauth_unauthorized_user_concurrent_requests_limit_reached_total` - the number of requests rejected with `429 Too Many Requests` error
  because the concurrency limit has been reached for unauthorized users (if `unauthorized_user` section is used).

## Backend TLS setup

By default `vmauth` uses system settings when performing requests to HTTPS backends specified via `url_prefix` option
in the [`-auth.config`](https://docs.victoriametrics.com/vmauth.html#auth-config). These settings can be overridden with the following command-line flags:

- `-backend.tlsInsecureSkipVerify` allows skipping TLS verification when connecting to HTTPS backends.
  This global setting can be overridden at per-user level inside [`-auth.config`](https://docs.victoriametrics.com/vmauth.html#auth-config)
  via `tls_insecure_skip_verify` option. For example:

```yml
- username: "foo"
  url_prefix: "https://localhost"
  tls_insecure_skip_verify: true
```

- `-backend.tlsCAFile` allows specifying the path to TLS Root CA, which will be used for TLS verification when connecting to HTTPS backends.
  The `-backend.tlsCAFile` may point either to local file or to `http` / `https` url.
  This global setting can be overridden at per-user level inside [`-auth.config`](https://docs.victoriametrics.com/vmauth.html#auth-config)
  via `tls_ca_file` option. For example:

```yml
- username: "foo"
  url_prefix: "https://localhost"
  tls_ca_file: "/path/to/tls/root/ca"
```

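Both options can be combined in a single user entry; a sketch (the backend url is illustrative, and it is an assumption that the per-user options simply sit side by side):

```yml
- username: "foo"
  url_prefix: "https://backend-host"
  # do not skip verification; verify against a custom root CA instead
  tls_insecure_skip_verify: false
  tls_ca_file: "/path/to/tls/root/ca"
```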
## IP filters

@ -192,6 +249,15 @@ users:
  password: "***"
  url_prefix: "http://localhost:8428?extra_label=team=dev"

# All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
# are proxied to https://localhost.
# For example, http://vmauth:8427/api/v1/query is routed to https://localhost/api/v1/query
# TLS verification is skipped for https://localhost.
- username: "local-single-node-with-tls"
  password: "***"
  url_prefix: "https://localhost"
  tls_insecure_skip_verify: true

# All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
# are load-balanced among http://vmselect1:8481/select/123/prometheus and http://vmselect2:8481/select/123/prometheus
# For example, http://vmauth:8427/api/v1/query is proxied to the following urls in a round-robin manner:

76
go.mod
@ -6,8 +6,8 @@ go 1.20
replace golang.org/x/exp => golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1

require (
	cloud.google.com/go/storage v1.34.1
	cloud.google.com/go/storage v1.35.1
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
	github.com/VictoriaMetrics/fastcache v1.12.1

@ -16,15 +16,15 @@ require (
	github.com/VictoriaMetrics/fasthttp v1.2.0
	github.com/VictoriaMetrics/metrics v1.24.0
	github.com/VictoriaMetrics/metricsql v0.69.0
	github.com/aws/aws-sdk-go-v2 v1.22.1
	github.com/aws/aws-sdk-go-v2 v1.22.2
	github.com/aws/aws-sdk-go-v2/config v1.22.0
	github.com/aws/aws-sdk-go-v2/config v1.24.0
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.1
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6
	github.com/aws/aws-sdk-go-v2/service/s3 v1.42.0
	github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1
	github.com/bmatcuk/doublestar/v4 v4.6.1
	github.com/cespare/xxhash/v2 v2.2.0
	github.com/cheggaaa/pb/v3 v3.1.4
	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
	github.com/fatih/color v1.15.0 // indirect
	github.com/fatih/color v1.16.0 // indirect
	github.com/go-kit/kit v0.13.0
	github.com/gogo/protobuf v1.3.2
	github.com/golang/snappy v0.0.4

|
||||||
github.com/valyala/gozstd v1.20.1
|
github.com/valyala/gozstd v1.20.1
|
||||||
github.com/valyala/histogram v1.2.0
|
github.com/valyala/histogram v1.2.0
|
||||||
github.com/valyala/quicktemplate v1.7.0
|
github.com/valyala/quicktemplate v1.7.0
|
||||||
golang.org/x/net v0.17.0
|
golang.org/x/net v0.18.0
|
||||||
golang.org/x/oauth2 v0.13.0
|
golang.org/x/oauth2 v0.14.0
|
||||||
golang.org/x/sys v0.13.0
|
golang.org/x/sys v0.14.0
|
||||||
google.golang.org/api v0.149.0
|
google.golang.org/api v0.150.0
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -60,31 +60,31 @@ require (
	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect
	github.com/VividCortex/ewma v1.2.0 // indirect
	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
	github.com/aws/aws-sdk-go v1.47.2 // indirect
	github.com/aws/aws-sdk-go v1.47.10 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 // indirect
	github.com/aws/aws-sdk-go-v2/credentials v1.15.1 // indirect
	github.com/aws/aws-sdk-go-v2/credentials v1.15.2 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.5.0 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.1 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.2 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.2 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.2 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.17.0 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 // indirect
	github.com/aws/smithy-go v1.16.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/dennwc/varint v1.0.0 // indirect
	github.com/felixge/httpsnoop v1.0.3 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/go-kit/log v0.2.1 // indirect
	github.com/go-logfmt/logfmt v0.6.0 // indirect
	github.com/go-logr/logr v1.3.0 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
	github.com/golang-jwt/jwt/v5 v5.1.0 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/google/s2a-go v0.1.7 // indirect

|
||||||
go.opencensus.io v0.24.0 // indirect
|
go.opencensus.io v0.24.0 // indirect
|
||||||
go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 // indirect
|
go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 // indirect
|
||||||
go.opentelemetry.io/collector/semconv v0.88.0 // indirect
|
go.opentelemetry.io/collector/semconv v0.88.0 // indirect
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect
|
||||||
go.opentelemetry.io/otel v1.19.0 // indirect
|
go.opentelemetry.io/otel v1.20.0 // indirect
|
||||||
go.opentelemetry.io/otel/metric v1.19.0 // indirect
|
go.opentelemetry.io/otel/metric v1.20.0 // indirect
|
||||||
go.opentelemetry.io/otel/trace v1.19.0 // indirect
|
go.opentelemetry.io/otel/trace v1.20.0 // indirect
|
||||||
go.uber.org/atomic v1.11.0 // indirect
|
go.uber.org/atomic v1.11.0 // indirect
|
||||||
go.uber.org/goleak v1.3.0 // indirect
|
go.uber.org/goleak v1.3.0 // indirect
|
||||||
go.uber.org/multierr v1.11.0 // indirect
|
go.uber.org/multierr v1.11.0 // indirect
|
||||||
golang.org/x/crypto v0.14.0 // indirect
|
golang.org/x/crypto v0.15.0 // indirect
|
||||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
|
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
|
||||||
golang.org/x/sync v0.4.0 // indirect
|
golang.org/x/sync v0.5.0 // indirect
|
||||||
golang.org/x/text v0.13.0 // indirect
|
golang.org/x/text v0.14.0 // indirect
|
||||||
golang.org/x/time v0.3.0 // indirect
|
golang.org/x/time v0.4.0 // indirect
|
||||||
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
|
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
|
||||||
google.golang.org/appengine v1.6.8 // indirect
|
google.golang.org/appengine v1.6.8 // indirect
|
||||||
google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect
|
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405 // indirect
|
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect
|
||||||
google.golang.org/grpc v1.59.0 // indirect
|
google.golang.org/grpc v1.59.0 // indirect
|
||||||
google.golang.org/protobuf v1.31.0 // indirect
|
google.golang.org/protobuf v1.31.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
|
|
150
go.sum
150
go.sum
|
@ -38,11 +38,11 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
|
||||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||||
cloud.google.com/go/storage v1.34.1 h1:H2Af2dU5J0PF7A5B+ECFIce+RqxVnrVilO+cu0TS3MI=
|
cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w=
|
||||||
cloud.google.com/go/storage v1.34.1/go.mod h1:VN1ElqqvR9adg1k9xlkUJ55cMOP1/QjnNNuT5xQL6dY=
|
cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
|
||||||
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
|
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA=
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
|
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
|
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs=
|
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs=
|
||||||
|
@ -85,44 +85,44 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.47.2 h1:KEdO2PbjfEBmHvnEwbYEpr65ZIkmwK5aB85Gj19ASuA=
github.com/aws/aws-sdk-go v1.47.10 h1:cvufN7WkD1nlOgpRopsmxKQlFp5X1MfyAw4r7BBORQc=
github.com/aws/aws-sdk-go v1.47.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.47.10/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.22.1 h1:sjnni/AuoTXxHitsIdT0FwmqUuNUuHtufcVDErVFT9U=
github.com/aws/aws-sdk-go-v2 v1.22.2 h1:lV0U8fnhAnPz8YcdmZVV60+tr6CakHzqA6P8T46ExJI=
github.com/aws/aws-sdk-go-v2 v1.22.1/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c=
github.com/aws/aws-sdk-go-v2 v1.22.2/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 h1:hHgLiIrTRtddC0AKcJr5s7i/hLgcpTt+q/FKxf1Zayk=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0/go.mod h1:w4I/v3NOWgD+qvs1NPEwhd++1h3XPHFaVxasfY6HlYQ=
github.com/aws/aws-sdk-go-v2/config v1.22.0 h1:9Mm99OalzZRz0ab5fpodMoHBApHS6pqRNp3M9NmzvDg=
github.com/aws/aws-sdk-go-v2/config v1.24.0 h1:4LEk29JO3w+y9dEo/5Tq5QTP7uIEw+KQrKiHOs4xlu4=
github.com/aws/aws-sdk-go-v2/config v1.22.0/go.mod h1:2eWgw5lps8fKI7LZVTrRTYP6HE6k/uEFUuTSHfXwqP0=
github.com/aws/aws-sdk-go-v2/config v1.24.0/go.mod h1:11nNDAuK86kOUHeuEQo8f3CkcV5xuUxvPwFjTZE/PnQ=
github.com/aws/aws-sdk-go-v2/credentials v1.15.1 h1:hmf6lAm9hk7uLCfapZn/jL05lm6Uwdbn1B0fgjyuf4M=
github.com/aws/aws-sdk-go-v2/credentials v1.15.2 h1:rKH7khRMxPdD0u3dHecd0Q7NOVw3EUe7AqdkUOkiOGI=
github.com/aws/aws-sdk-go-v2/credentials v1.15.1/go.mod h1:QTcHga3ZbQOneJuxmGBOCxiClxmp+TlvmjFexAnJ790=
github.com/aws/aws-sdk-go-v2/credentials v1.15.2/go.mod h1:tXM8wmaeAhfC7nZoCxb0FzM/aRaB1m1WQ7x0qlBLq80=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2 h1:gIeH4+o1MN/caGBWjoGQTUTIu94xD6fI5B2+TcwBf70=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 h1:G5KawTAkyHH6WyKQCdHiW4h3PmAXNJpOgwKg3H7sDRE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2/go.mod h1:wLyMIo/zPOhQhPXTddpfdkSleyigtFi8iMnC+2m/SK4=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3/go.mod h1:hugKmSFnZB+HgNI1sYGT14BUPZkO6alC/e0AWu+0IAQ=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.1 h1:ULswbgGNVrW8zEhkCNwrwXrs1mUvy2JTqWaCRsD2ZZw=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6 h1:IpQbitxCZeC64C1ALz9QZu6AHHWundnU2evQ9xbp5k8=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.1/go.mod h1:pAXgsDPk1rRwwfkz8/9ISO75vXEHqTGIgbLhGqqQ1GY=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6/go.mod h1:27jIVQK+al9s0yTo3pkMdahRinbscqSC6zNGfNWXPZc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1 h1:fi1ga6WysOyYb5PAf3Exd6B5GiSNpnZim4h1rhlBqx0=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 h1:AaQsr5vvGR7rmeSWBtTCcw16tT9r51mWijuCQhzLnq8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1/go.mod h1:V5CY8wNurvPUibTi9mwqUqpiFZ5LnioKWIFUDtIzdI8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2/go.mod h1:o1IiRn7CWocIFTXJjGKJDOwxv1ibL53NpcvcqGWyRBA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1 h1:ZpaV/j48RlPc4AmOZuPv22pJliXjXq8/reL63YzyFnw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 h1:UZx8SXZ0YtzRiALzYAWcjb9Y9hZUR7MBKaBQ5ouOjPs=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1/go.mod h1:R8aXraabD2e3qv1csxM14/X9WF4wFMIY0kH4YEtYD5M=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2/go.mod h1:ipuRpcSaklmxR6C39G187TpBAO132gUfleTGccUPs8c=
github.com/aws/aws-sdk-go-v2/internal/ini v1.5.0 h1:DqOQvIfmGkXZUVJnl9VRk0AnxyS59tCtX9k1Pyss4Ak=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 h1:usgqiJtamuGIBj+OvYmMq89+Z1hIKkMJToz1WpoeNUY=
github.com/aws/aws-sdk-go-v2/internal/ini v1.5.0/go.mod h1:VV/Kbw9Mg1GWJOT9WK+oTL3cWZiXtapnNvDSRqTZLsg=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.1 h1:vzYLDkwTw4CY0vUk84MeSufRf8XIsC/GsoIFXD60sTg=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.2 h1:pyVrNAf7Hwz0u39dLKN5t+n0+K/3rMYKuiOoIum3AsU=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.1/go.mod h1:ToBFBnjeGR2ruMx8IWp/y7vSK3Irj5/oPwifruiqoOM=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.2/go.mod h1:mydrfOb9uiOYCxuCPR8YHQNQyGQwUQ7gPMZGBKbH8NY=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0 h1:CJxo7ZBbaIzmXfV3hjcx36n9V87gJsIUPJflwqEHl3Q=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0/go.mod h1:yjVfjuY4nD1EW9i387Kau+I6V5cBA5YnC/mWNopjZrI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.1 h1:15FUCJzAP9Y25nioTqTrGlZmhOtthaXBWlt4pS+d3Xo=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.2 h1:f2LhPofnjcdOQKRtumKjMvIHkfSQ8aH/rwKUDEQ/SB4=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.1/go.mod h1:5655NW53Un6l7JzkI6AA3rZvf0m532cSnLThA1fVXcA=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.2/go.mod h1:q+xX0H4OfuWDuBy7y/LDi4v8IBOWuF+vtp8Z6ex+lw4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1 h1:2OXw3ppu1XsB6rqKEMV4tnecTjIY3PRV2U6IP6KPJQo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 h1:h7j73yuAVVjic8pqswh+L/7r2IHP43QwRyOu6zcCDDE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1/go.mod h1:FZB4AdakIqW/yERVdGJA6Z9jraax1beXfhBBnK2wwR8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2/go.mod h1:H07AHdK5LSy8F7EJUQhoxyiCNkePoHj2D8P2yGTWafo=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.1 h1:dnl0klXYX9EKpzZbWlH5LJL+YTcEZcJEMPFFr/rAHUQ=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.2 h1:gbIaOzpXixUpoPK+js/bCBK1QBDXM22SigsnzGZio0U=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.1/go.mod h1:Mfk/9Joso4tCQYzM4q4HRUIqwln8lnIIMB/OE8Zebdc=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.2/go.mod h1:p+S7RNbdGN8qgHDSg2SCQJ9FeMAmvcETQiVpeGhYnNM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.0 h1:u0YoSrxjr3Lm+IqIlRAV+4YTFwkXjyB9db9CfUFge2w=
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1 h1:o6MCcX1rJW8Y3g+hvg2xpjF6JR6DftuYhfl3Nc1WV9Q=
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.0/go.mod h1:98EIdRu+BNsdqITsXfy+57TZfwlUQC9aDn9a9qoo90U=
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1/go.mod h1:UDtxEWbREX6y4KREapT+jjtjoH0TiVSS6f5nfaY1UaM=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.0 h1:I/Oh3IxGPfHXiGnwM54TD6hNr/8TlUrBXAtTyGhR+zw=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 h1:km+ZNjtLtpXYf42RdaDZnNHm9s7SYAuDGTafy6nd89A=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.0/go.mod h1:H6NCMvDBqA+CvIaXzaSqM6LWtzv9BzZrqBOqz+PzRF8=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.1/go.mod h1:aHBr3pvBSD5MbzOvQtYutyPLLRPbl/y9x86XyJJnUXQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0 h1:irbXQkfVYIRaewYSXcu4yVk0m2T+JzZd0dkop7FjmO0=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 h1:iRFNqZH4a67IqPvK8xxtyQYnyrlsvwmpHOe9r55ggBA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0/go.mod h1:4wPNCkM22+oRe71oydP66K50ojDUC33XutSMi2pEF/M=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1/go.mod h1:pTy5WM+6sNv2tB24JNKFtn6EvciQ5k40ZJ0pq/Iaxj0=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 h1:sYIFy8tm1xQwRvVQ4CRuBGXKIg9sHNuG6+3UAQuoujk=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 h1:txgVXIXWPXyqdiVn92BV6a/rgtpX31HYdsOYj0sVQQQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.0/go.mod h1:S/LOQUeYDfJeJpFCIJDMjy7dwL4aA33HUdVi+i7uH8k=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.1/go.mod h1:VAiJiNaoP1L89STFlEMgmHX1bKixY+FaP+TpRFrmyZ4=
github.com/aws/smithy-go v1.16.0 h1:gJZEH/Fqh+RsvlJ1Zt4tVAtV6bKkp3cC+R6FCZMNzik=
github.com/aws/smithy-go v1.16.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -165,10 +165,10 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@ -198,8 +198,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
github.com/golang-jwt/jwt/v5 v5.1.0 h1:UGKbA/IPjtS6zLcdB7i5TyACMgSbOTiR8qzXgw8HWQU=
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.1.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -463,14 +463,14 @@ go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaB
go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4=
go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE=
go.opentelemetry.io/collector/semconv v0.88.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@ -486,8 +486,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -543,16 +543,16 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -564,8 +564,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -611,12 +611,12 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -626,13 +626,13 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@ -696,8 +696,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY=
google.golang.org/api v0.150.0 h1:Z9k22qD289SZ8gCJrk4DrWXkNjtfvKAUo/l1ma8eBYE=
google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI=
google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -735,12 +735,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405 h1:HJMDndgxest5n2y77fnErkM62iUsptE/H8p0dC2Huo4=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -2,6 +2,7 @@ package blockcache

import (
    "container/heap"
    "flag"
    "sync"
    "sync/atomic"
    "time"
@ -12,6 +13,9 @@ import (
    "github.com/cespare/xxhash/v2"
)

var missesBeforeCaching = flag.Int("blockcache.missesBeforeCaching", 2, "The number of cache misses before putting the block into cache. "+
    "Higher values may reduce indexdb/dataBlocks cache size at the cost of higher CPU and disk read usage")

// Cache caches Block entries.
//
// Call NewCache() for creating new Cache.
@ -184,7 +188,7 @@ type cache struct {

    // perKeyMisses contains per-block cache misses.
    //
    // Blocks with less than 2 cache misses aren't stored in the cache in order to prevent from eviction for frequently accessed items.
    // Blocks with up to *missesBeforeCaching cache misses aren't stored in the cache, in order to prevent eviction of frequently accessed items.
    perKeyMisses map[Key]int

    // The heap for removing the least recently used entries from m.
@ -300,13 +304,14 @@ func (c *cache) GetBlock(k Key) Block {
func (c *cache) PutBlock(k Key, b Block) {
    c.mu.Lock()
    defer c.mu.Unlock()
    // If the entry wasn't accessed yet (e.g. c.perKeyMisses[k] == 0), then cache it, since it is likely it will be accessed soon.
    // Do not cache the entry only if there was only a single unsuccessful attempt to access it.
    // This may be one-time-wonders entry, which won't be accessed more, so there is no need in caching it.
    doNotCache := c.perKeyMisses[k] == 1
    if doNotCache {
        // Do not cache b if it has been requested only once (aka one-time-wonders items).
        // This should reduce memory usage for the cache.
        return
    }
    misses := c.perKeyMisses[k]
    if misses > 0 && misses <= *missesBeforeCaching {
        // If the entry wasn't accessed yet (e.g. misses == 0), then cache it,
        // since it has been just created without consulting the cache and will be accessed soon.
        //
        // Do not cache the entry if there were up to *missesBeforeCaching unsuccessful attempts to access it.
        // This may be a one-time-wonder entry, which won't be accessed again, so do not cache it
        // in order to save memory for frequently accessed items.
        return
    }
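The change above generalizes the old hard-coded rule (skip caching after exactly one miss) into a configurable threshold. Below is a minimal, self-contained sketch of the same admission policy; the names (miniCache, the string-keyed map) are illustrative only and not the upstream API:

package main

import "fmt"

const missesBeforeCaching = 2

type miniCache struct {
    m      map[string]string
    misses map[string]int
}

func newMiniCache() *miniCache {
    return &miniCache{m: map[string]string{}, misses: map[string]int{}}
}

func (c *miniCache) Get(k string) (string, bool) {
    v, ok := c.m[k]
    if !ok {
        c.misses[k]++ // count per-key misses for the admission decision in Put
    }
    return v, ok
}

func (c *miniCache) Put(k, v string) {
    misses := c.misses[k]
    // Skip caching while 0 < misses <= missesBeforeCaching, so that
    // one-time-wonder entries do not evict frequently accessed ones.
    if misses > 0 && misses <= missesBeforeCaching {
        return
    }
    c.m[k] = v
    delete(c.misses, k)
}

func main() {
    c := newMiniCache()
    c.Get("block1")         // miss #1
    c.Put("block1", "data") // misses=1: not admitted
    c.Get("block1")         // miss #2
    c.Put("block1", "data") // misses=2: still not admitted
    c.Get("block1")         // miss #3
    c.Put("block1", "data") // misses=3 > missesBeforeCaching: admitted
    _, ok := c.Get("block1")
    fmt.Println("cached:", ok) // true
}

Requiring repeated misses before admission keeps one-off scans from flushing frequently accessed blocks out of a size-bounded cache, at the cost of re-reading a block a few extra times before it becomes cache-resident.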
@ -83,20 +83,22 @@ func TestCache(t *testing.T) {
    if n := c.Misses(); n != 2 {
        t.Fatalf("unexpected number of misses; got %d; want %d", n, 2)
    }
    // Store the missed entry to the cache. It shouldn't be stored because of the previous cache miss
    c.PutBlock(k, &b)
    if n := c.SizeBytes(); n != 0 {
        t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, 0)
    }
    // Verify that the entry wasn't stored to the cache.
    if b1 := c.GetBlock(k); b1 != nil {
        t.Fatalf("unexpected non-nil block obtained after removing all the blocks for the part; got %v", b1)
    }
    if n := c.Requests(); n != 4 {
        t.Fatalf("unexpected number of requests; got %d; want %d", n, 4)
    }
    if n := c.Misses(); n != 3 {
        t.Fatalf("unexpected number of misses; got %d; want %d", n, 3)
    }
    for i := 0; i < *missesBeforeCaching; i++ {
        // Store the missed entry to the cache. It shouldn't be stored because of the previous cache miss
        c.PutBlock(k, &b)
        if n := c.SizeBytes(); n != 0 {
            t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, 0)
        }
        // Verify that the entry wasn't stored to the cache.
        if b1 := c.GetBlock(k); b1 != nil {
            t.Fatalf("unexpected non-nil block obtained after removing all the blocks for the part; got %v", b1)
        }
        if n := c.Requests(); n != uint64(4+i) {
            t.Fatalf("unexpected number of requests; got %d; want %d", n, 4+i)
        }
        if n := c.Misses(); n != uint64(3+i) {
            t.Fatalf("unexpected number of misses; got %d; want %d", n, 3+i)
        }
    }
    // Store the entry again. Now it must be stored because of the second cache miss.
    c.PutBlock(k, &b)
@ -106,11 +108,11 @@ func TestCache(t *testing.T) {
    if b1 := c.GetBlock(k); b1 != &b {
        t.Fatalf("unexpected block obtained; got %v; want %v", b1, &b)
    }
    if n := c.Requests(); n != 5 {
    if n := c.Requests(); n != uint64(4+*missesBeforeCaching) {
        t.Fatalf("unexpected number of requests; got %d; want %d", n, 5)
        t.Fatalf("unexpected number of requests; got %d; want %d", n, 4+*missesBeforeCaching)
    }
    if n := c.Misses(); n != 3 {
    if n := c.Misses(); n != uint64(2+*missesBeforeCaching) {
        t.Fatalf("unexpected number of misses; got %d; want %d", n, 3)
        t.Fatalf("unexpected number of misses; got %d; want %d", n, 2+*missesBeforeCaching)
    }

    // Manually clean the cache. The entry shouldn't be deleted because it was recently accessed.
@ -79,14 +79,3 @@ func ToUnsafeBytes(s string) (b []byte) {
    slh.Cap = sh.Len
    return b
}

// LimitStringLen limits the length of s to maxLen.
//
// If len(s) > maxLen, then the function concatenates s prefix with s suffix.
func LimitStringLen(s string, maxLen int) string {
    if maxLen <= 4 || len(s) <= maxLen {
        return s
    }
    n := maxLen/2 - 1
    return s[:n] + ".." + s[len(s)-n:]
}
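The helper removed above is small and self-contained; for reference, an equivalent standalone version with the same middle-truncation semantics (shown purely to illustrate what was deleted, under a hypothetical lowercase name) is:

package main

import "fmt"

// limitStringLen keeps a prefix and a suffix of s joined with "..",
// mirroring the behavior of the LimitStringLen helper deleted above.
func limitStringLen(s string, maxLen int) string {
    if maxLen <= 4 || len(s) <= maxLen {
        return s
    }
    n := maxLen/2 - 1
    return s[:n] + ".." + s[len(s)-n:]
}

func main() {
    fmt.Println(limitStringLen("metric_with_a_very_long_name", 10)) // metr..name
}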
@ -1,5 +1,4 @@
//go:build cgo
// +build cgo

package encoding

@ -1,5 +1,4 @@
//go:build !cgo
// +build !cgo

package encoding

@ -1,5 +1,4 @@
//go:build cgo
// +build cgo

package zstd

@ -1,5 +1,4 @@
//go:build !cgo
// +build !cgo

package zstd

@ -1,5 +1,4 @@
//go:build cgo
// +build cgo

package zstd

@ -1,5 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd

package fs

@ -1,5 +1,4 @@
//go:build linux || darwin || freebsd
// +build linux darwin freebsd

package fs

@ -1,5 +1,4 @@
//go:build linux || darwin || freebsd || openbsd
// +build linux darwin freebsd openbsd

package fs
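All of the hunks above perform the same mechanical cleanup: since Go 1.17 the single //go:build line is the authoritative build constraint, and once a module no longer supports pre-1.17 toolchains the legacy // +build mirror line can be dropped. A minimal sketch of a constrained file in the new style (a hypothetical file, reusing a package name from the hunks above):

//go:build linux || freebsd

// This file compiles only on Linux and FreeBSD. One //go:build line
// replaces the old //go:build plus // +build pair that gofmt used to
// keep in sync.
package fs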
@ -5,13 +5,14 @@
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <link href="static/css/bootstrap.min.css" rel="stylesheet" />
    <link rel="icon" href="favicon.ico"/>
{% endfunc %}

// Navbar writes navigation bar for /targets-like pages
{% func Navbar() %}
<div class="navbar navbar-dark bg-dark box-shadow">
  <div class="d-flex justify-content-between">
    <a href="/" class="navbar-brand d-flex align-items-center ms-3" title="The High Performance Open Source Time Series Database & Monitoring Solution ">
    <a href="." class="navbar-brand d-flex align-items-center ms-3" title="The High Performance Open Source Time Series Database & Monitoring Solution ">
<svg xmlns="http://www.w3.org/2000/svg" id="VM_logo" viewBox="0 0 464.61 533.89" width="20" height="20" class="me-1"><defs><style>.cls-1{fill:#fff;}</style></defs><path class="cls-1" d="M459.86,467.77c9,7.67,24.12,13.49,39.3,13.69v0h1.68v0c15.18-.2,30.31-6,39.3-13.69,47.43-40.45,184.65-166.24,184.65-166.24,36.84-34.27-65.64-68.28-223.95-68.47h-1.68c-158.31.19-260.79,34.2-224,68.47C275.21,301.53,412.43,427.32,459.86,467.77Z" transform="translate(-267.7 -233.05)"/><path class="cls-1" d="M540.1,535.88c-9,7.67-24.12,13.5-39.3,13.7h-1.6c-15.18-.2-30.31-6-39.3-13.7-32.81-28-148.56-132.93-192.16-172.7v60.74c0,6.67,2.55,15.52,7.09,19.68,29.64,27.18,143.94,131.8,185.07,166.88,9,7.67,24.12,13.49,39.3,13.69v0h1.6v0c15.18-.2,30.31-6,39.3-13.69,41.13-35.08,155.43-139.7,185.07-166.88,4.54-4.16,7.09-13,7.09-19.68V363.18C688.66,403,572.91,507.9,540.1,535.88Z" transform="translate(-267.7 -233.05)"/><path class="cls-1" d="M540.1,678.64c-9,7.67-24.12,13.49-39.3,13.69v0h-1.6v0c-15.18-.2-30.31-6-39.3-13.69-32.81-28-148.56-132.94-192.16-172.7v60.73c0,6.67,2.55,15.53,7.09,19.69,29.64,27.17,143.94,131.8,185.07,166.87,9,7.67,24.12,13.5,39.3,13.7h1.6c15.18-.2,30.31-6,39.3-13.7,41.13-35.07,155.43-139.7,185.07-166.87,4.54-4.16,7.09-13,7.09-19.69V505.94C688.66,545.7,572.91,650.66,540.1,678.64Z" transform="translate(-267.7 -233.05)"/></svg>
      <strong>VictoriaMetrics</strong>
    </a>
@ -22,106 +22,106 @@ var (
//line lib/htmlcomponents/components.qtpl:4
func StreamCommonHeader(qw422016 *qt422016.Writer) {
//line lib/htmlcomponents/components.qtpl:4
	qw422016.N().S(`<meta charset="utf-8" /><meta name="viewport" content="width=device-width, initial-scale=1" /><link href="static/css/bootstrap.min.css" rel="stylesheet" />`)
	qw422016.N().S(`<meta charset="utf-8" /><meta name="viewport" content="width=device-width, initial-scale=1" /><link href="static/css/bootstrap.min.css" rel="stylesheet" /><link rel="icon" href="favicon.ico"/>`)
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
}

//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
func WriteCommonHeader(qq422016 qtio422016.Writer) {
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
	qw422016 := qt422016.AcquireWriter(qq422016)
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
	StreamCommonHeader(qw422016)
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
	qt422016.ReleaseWriter(qw422016)
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
}

//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
func CommonHeader() string {
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
	qb422016 := qt422016.AcquireByteBuffer()
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
	WriteCommonHeader(qb422016)
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
	qs422016 := string(qb422016.B)
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
	qt422016.ReleaseByteBuffer(qb422016)
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
	return qs422016
//line lib/htmlcomponents/components.qtpl:8
//line lib/htmlcomponents/components.qtpl:9
}

// Navbar writes navigation bar for /targets-like pages

//line lib/htmlcomponents/components.qtpl:11
//line lib/htmlcomponents/components.qtpl:12
func StreamNavbar(qw422016 *qt422016.Writer) {
//line lib/htmlcomponents/components.qtpl:11
//line lib/htmlcomponents/components.qtpl:12
qw422016.N().S(`<div class="navbar navbar-dark bg-dark box-shadow"><div class="d-flex justify-content-between"><a href="/" class="navbar-brand d-flex align-items-center ms-3" title="The High Performance Open Source Time Series Database & Monitoring Solution "><svg xmlns="http://www.w3.org/2000/svg" id="VM_logo" viewBox="0 0 464.61 533.89" width="20" height="20" class="me-1"><defs><style>.cls-1{fill:#fff;}</style></defs><path class="cls-1" d="M459.86,467.77c9,7.67,24.12,13.49,39.3,13.69v0h1.68v0c15.18-.2,30.31-6,39.3-13.69,47.43-40.45,184.65-166.24,184.65-166.24,36.84-34.27-65.64-68.28-223.95-68.47h-1.68c-158.31.19-260.79,34.2-224,68.47C275.21,301.53,412.43,427.32,459.86,467.77Z" transform="translate(-267.7 -233.05)"/><path class="cls-1" d="M540.1,535.88c-9,7.67-24.12,13.5-39.3,13.7h-1.6c-15.18-.2-30.31-6-39.3-13.7-32.81-28-148.56-132.93-192.16-172.7v60.74c0,6.67,2.55,15.52,7.09,19.68,29.64,27.18,143.94,131.8,185.07,166.88,9,7.67,24.12,13.49,39.3,13.69v0h1.6v0c15.18-.2,30.31-6,39.3-13.69,41.13-35.08,155.43-139.7,185.07-166.88,4.54-4.16,7.09-13,7.09-19.68V363.18C688.66,403,572.91,507.9,540.1,535.88Z" transform="translate(-267.7 -233.05)"/><path class="cls-1" d="M540.1,678.64c-9,7.67-24.12,13.49-39.3,13.69v0h-1.6v0c-15.18-.2-30.31-6-39.3-13.69-32.81-28-148.56-132.94-192.16-172.7v60.73c0,6.67,2.55,15.53,7.09,19.69,29.64,27.17,143.94,131.8,185.07,166.87,9,7.67,24.12,13.5,39.3,13.7h1.6c15.18-.2,30.31-6,39.3-13.7,41.13-35.07,155.43-139.7,185.07-166.87,4.54-4.16,7.09-13,7.09-19.69V505.94C688.66,545.7,572.91,650.66,540.1,678.64Z" transform="translate(-267.7 -233.05)"/></svg><strong>VictoriaMetrics</strong></a></div></div>`)
qw422016.N().S(`<div class="navbar navbar-dark bg-dark box-shadow"><div class="d-flex justify-content-between"><a href="." class="navbar-brand d-flex align-items-center ms-3" title="The High Performance Open Source Time Series Database & Monitoring Solution "><svg xmlns="http://www.w3.org/2000/svg" id="VM_logo" viewBox="0 0 464.61 533.89" width="20" height="20" class="me-1"><defs><style>.cls-1{fill:#fff;}</style></defs><path class="cls-1" d="M459.86,467.77c9,7.67,24.12,13.49,39.3,13.69v0h1.68v0c15.18-.2,30.31-6,39.3-13.69,47.43-40.45,184.65-166.24,184.65-166.24,36.84-34.27-65.64-68.28-223.95-68.47h-1.68c-158.31.19-260.79,34.2-224,68.47C275.21,301.53,412.43,427.32,459.86,467.77Z" transform="translate(-267.7 -233.05)"/><path class="cls-1" d="M540.1,535.88c-9,7.67-24.12,13.5-39.3,13.7h-1.6c-15.18-.2-30.31-6-39.3-13.7-32.81-28-148.56-132.93-192.16-172.7v60.74c0,6.67,2.55,15.52,7.09,19.68,29.64,27.18,143.94,131.8,185.07,166.88,9,7.67,24.12,13.49,39.3,13.69v0h1.6v0c15.18-.2,30.31-6,39.3-13.69,41.13-35.08,155.43-139.7,185.07-166.88,4.54-4.16,7.09-13,7.09-19.68V363.18C688.66,403,572.91,507.9,540.1,535.88Z" transform="translate(-267.7 -233.05)"/><path class="cls-1" d="M540.1,678.64c-9,7.67-24.12,13.49-39.3,13.69v0h-1.6v0c-15.18-.2-30.31-6-39.3-13.69-32.81-28-148.56-132.94-192.16-172.7v60.73c0,6.67,2.55,15.53,7.09,19.69,29.64,27.17,143.94,131.8,185.07,166.87,9,7.67,24.12,13.5,39.3,13.7h1.6c15.18-.2,30.31-6,39.3-13.7,41.13-35.07,155.43-139.7,185.07-166.87,4.54-4.16,7.09-13,7.09-19.69V505.94C688.66,545.7,572.91,650.66,540.1,678.64Z" transform="translate(-267.7 -233.05)"/></svg><strong>VictoriaMetrics</strong></a></div></div>`)
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
}

//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
func WriteNavbar(qq422016 qtio422016.Writer) {
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
	qw422016 := qt422016.AcquireWriter(qq422016)
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
	StreamNavbar(qw422016)
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
	qt422016.ReleaseWriter(qw422016)
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
}

//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
func Navbar() string {
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
	qb422016 := qt422016.AcquireByteBuffer()
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
	WriteNavbar(qb422016)
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
	qs422016 := string(qb422016.B)
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
	qt422016.ReleaseByteBuffer(qb422016)
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
	return qs422016
//line lib/htmlcomponents/components.qtpl:20
//line lib/htmlcomponents/components.qtpl:21
}

// ErrorNotification writes the given err as error notification

//line lib/htmlcomponents/components.qtpl:23
//line lib/htmlcomponents/components.qtpl:24
func StreamErrorNotification(qw422016 *qt422016.Writer, err error) {
//line lib/htmlcomponents/components.qtpl:23
//line lib/htmlcomponents/components.qtpl:24
	qw422016.N().S(`<div class="alert alert-danger d-flex align-items-center" role="alert"><svg class="bi flex-shrink-0 me-2" width="24" height="24" role="img" aria-label="Danger:"><use xlink:href="#exclamation-triangle-fill"/></svg><div>`)
//line lib/htmlcomponents/components.qtpl:28
//line lib/htmlcomponents/components.qtpl:29
	qw422016.E().S(err.Error())
//line lib/htmlcomponents/components.qtpl:28
//line lib/htmlcomponents/components.qtpl:29
	qw422016.N().S(`</div></div>`)
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
}

//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
func WriteErrorNotification(qq422016 qtio422016.Writer, err error) {
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
	qw422016 := qt422016.AcquireWriter(qq422016)
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
	StreamErrorNotification(qw422016, err)
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
	qt422016.ReleaseWriter(qw422016)
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
}

//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
func ErrorNotification(err error) string {
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
	qb422016 := qt422016.AcquireByteBuffer()
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
	WriteErrorNotification(qb422016, err)
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
	qs422016 := string(qb422016.B)
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
	qt422016.ReleaseByteBuffer(qb422016)
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
	return qs422016
//line lib/htmlcomponents/components.qtpl:31
//line lib/htmlcomponents/components.qtpl:32
}
(binary image diff: 15 KiB before, 15 KiB after)
@ -3,6 +3,7 @@ package httpserver

import (
    "context"
    "crypto/tls"
    _ "embed"
    "errors"
    "flag"
    "fmt"
@ -259,6 +260,12 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
        h.Set("Connection", "close")
    }
    path := r.URL.Path
    if strings.HasSuffix(path, "/favicon.ico") {
        w.Header().Set("Cache-Control", "max-age=3600")
        faviconRequests.Inc()
        w.Write(faviconData)
        return
    }
    prefix := GetPathPrefix()
    if prefix != "" {
        // Trim -http.pathPrefix from path
@ -306,10 +313,6 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
        }
        w.WriteHeader(status)
        return
    case "/favicon.ico":
        faviconRequests.Inc()
        w.WriteHeader(http.StatusNoContent)
        return
    case "/metrics":
        metricsRequests.Inc()
        if !CheckAuthFlag(w, r, *metricsAuthKey, "metricsAuthKey") {
@ -446,7 +449,7 @@ var (
    pprofTraceRequests   = metrics.NewCounter(`vm_http_requests_total{path="/debug/pprof/trace"}`)
    pprofMutexRequests   = metrics.NewCounter(`vm_http_requests_total{path="/debug/pprof/mutex"}`)
    pprofDefaultRequests = metrics.NewCounter(`vm_http_requests_total{path="/debug/pprof/default"}`)
    faviconRequests = metrics.NewCounter(`vm_http_requests_total{path="/favicon.ico"}`)
    faviconRequests = metrics.NewCounter(`vm_http_requests_total{path="*/favicon.ico"}`)

    authBasicRequestErrors = metrics.NewCounter(`vm_http_request_errors_total{path="*", reason="wrong_basic_auth"}`)
    authKeyRequestErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="*", reason="wrong_auth_key"}`)
@ -455,6 +458,9 @@ var (
|
||||||
requestsTotal = metrics.NewCounter(`vm_http_requests_all_total`)
|
requestsTotal = metrics.NewCounter(`vm_http_requests_all_total`)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
//go:embed favicon.ico
|
||||||
|
var faviconData []byte
|
||||||
|
|
||||||
// GetQuotedRemoteAddr returns quoted remote address.
|
// GetQuotedRemoteAddr returns quoted remote address.
|
||||||
func GetQuotedRemoteAddr(r *http.Request) string {
|
func GetQuotedRemoteAddr(r *http.Request) string {
|
||||||
remoteAddr := r.RemoteAddr
|
remoteAddr := r.RemoteAddr
|
||||||
|
|
|
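Taken together, these hunks replace the old per-path `/favicon.ico` case (which answered 204 No Content) with a suffix match that serves a real icon embedded at build time via `//go:embed`, counted under the widened `*/favicon.ico` metric label so prefixed paths are included too. A standalone sketch of the same embed-and-serve pattern (the server below is illustrative, not the VictoriaMetrics wiring; a favicon.ico file must sit next to the source at build time):

package main

import (
	_ "embed" // blank import enables the //go:embed directive below
	"net/http"
	"strings"
)

//go:embed favicon.ico
var faviconData []byte // icon bytes are baked into the binary at compile time

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if strings.HasSuffix(r.URL.Path, "/favicon.ico") {
			// Let browsers cache the icon for an hour, as in the diff above.
			w.Header().Set("Cache-Control", "max-age=3600")
			w.Write(faviconData)
			return
		}
		w.Write([]byte("hello"))
	})
	http.ListenAndServe(":8080", nil)
}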
@@ -2,18 +2,25 @@ package ingestserver

 import (
 	"net"
+	"sort"
 	"sync"
+	"time"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 )

 // ConnsMap is used for tracking active connections.
 type ConnsMap struct {
+	clientName string
+
 	mu       sync.Mutex
 	m        map[net.Conn]struct{}
 	isClosed bool
 }

 // Init initializes cm.
-func (cm *ConnsMap) Init() {
+func (cm *ConnsMap) Init(clientName string) {
+	cm.clientName = clientName
 	cm.m = make(map[net.Conn]struct{})
 	cm.isClosed = false
 }
@@ -36,12 +43,51 @@ func (cm *ConnsMap) Delete(c net.Conn) {
 	cm.mu.Unlock()
 }

-// CloseAll closes all the added conns.
-func (cm *ConnsMap) CloseAll() {
+// CloseAll gradually closes all the cm conns during the given shutdownDuration.
+func (cm *ConnsMap) CloseAll(shutdownDuration time.Duration) {
 	cm.mu.Lock()
+	conns := make([]net.Conn, 0, len(cm.m))
 	for c := range cm.m {
-		_ = c.Close()
+		conns = append(conns, c)
+		delete(cm.m, c)
 	}
 	cm.isClosed = true
 	cm.mu.Unlock()
+
+	if shutdownDuration <= 0 {
+		// Close all the connections at once.
+		for _, c := range conns {
+			_ = c.Close()
+		}
+		return
+	}
+	if len(conns) == 0 {
+		return
+	}
+	if len(conns) == 1 {
+		// Simple case - just close a single connection and that's it!
+		_ = conns[0].Close()
+		return
+	}
+
+	// Sort vminsert conns in order to make the order of closing connections deterministic across vmstorage nodes.
+	// This should reduce resource usage spikes at vmstorage nodes during rolling restarts.
+	sort.Slice(conns, func(i, j int) bool {
+		return conns[i].RemoteAddr().String() < conns[j].RemoteAddr().String()
+	})
+
+	shutdownInterval := shutdownDuration / time.Duration(len(conns)-1)
+	startTime := time.Now()
+	logger.Infof("closing %d %s connections with %dms interval between them", len(conns), cm.clientName, shutdownInterval.Milliseconds())
+	remoteAddr := conns[0].RemoteAddr().String()
+	_ = conns[0].Close()
+	logger.Infof("closed %s connection %s", cm.clientName, remoteAddr)
+	conns = conns[1:]
+	for _, c := range conns {
+		time.Sleep(shutdownInterval)
+		remoteAddr := c.RemoteAddr().String()
+		_ = c.Close()
+		logger.Infof("closed %s connection %s", cm.clientName, remoteAddr)
+	}
+	logger.Infof("closed %d %s connections in %s", len(conns), cm.clientName, time.Since(startTime))
 }
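The rewritten CloseAll drains the map under the lock, then closes outside it: all at once when shutdownDuration <= 0, otherwise spread evenly, closing the first connection immediately and each subsequent one after shutdownDuration / (N-1), so the last close lands at roughly the full duration. A self-contained sketch of just that staggering arithmetic (standalone toy code, not the VictoriaMetrics types):

package main

import (
	"fmt"
	"time"
)

// closeStaggered spreads n closes over d: with n items the interval is
// d/(n-1), so item 0 closes at t=0 and item n-1 at roughly t=d.
func closeStaggered(n int, d time.Duration, close func(i int)) {
	if n <= 1 || d <= 0 {
		for i := 0; i < n; i++ {
			close(i)
		}
		return
	}
	interval := d / time.Duration(n-1)
	close(0) // the first one goes immediately
	for i := 1; i < n; i++ {
		time.Sleep(interval)
		close(i)
	}
}

func main() {
	start := time.Now()
	// 4 connections over 300ms -> closes at ~0ms, 100ms, 200ms, 300ms.
	closeStaggered(4, 300*time.Millisecond, func(i int) {
		fmt.Printf("closed #%d at %v\n", i, time.Since(start).Round(10*time.Millisecond))
	})
}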
@@ -59,7 +59,7 @@ func MustStart(addr string, useProxyProtocol bool, insertHandler func(r io.Reade
 		lnTCP: lnTCP,
 		lnUDP: lnUDP,
 	}
-	s.cm.Init()
+	s.cm.Init("graphite")
 	s.wg.Add(1)
 	go func() {
 		defer s.wg.Done()
@@ -85,7 +85,7 @@ func (s *Server) MustStop() {
 	if err := s.lnUDP.Close(); err != nil {
 		logger.Errorf("cannot close UDP Graphite server: %s", err)
 	}
-	s.cm.CloseAll()
+	s.cm.CloseAll(0)
 	s.wg.Wait()
 	logger.Infof("TCP and UDP Graphite servers at %q have been stopped", s.addr)
 }
@@ -59,7 +59,7 @@ func MustStart(addr string, useProxyProtocol bool, insertHandler func(r io.Reade
 		lnTCP: lnTCP,
 		lnUDP: lnUDP,
 	}
-	s.cm.Init()
+	s.cm.Init("influx")
 	s.wg.Add(1)
 	go func() {
 		defer s.wg.Done()
@@ -85,7 +85,7 @@ func (s *Server) MustStop() {
 	if err := s.lnUDP.Close(); err != nil {
 		logger.Errorf("cannot close UDP InfluxDB server: %s", err)
 	}
-	s.cm.CloseAll()
+	s.cm.CloseAll(0)
 	s.wg.Wait()
 	logger.Infof("TCP and UDP InfluxDB servers at %q have been stopped", s.addr)
 }
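The Graphite and InfluxDB call sites change identically: Init now takes the client name that CloseAll prints in its shutdown logs, and passing 0 keeps the old close-everything-at-once behavior for these ingestion servers. A hedged sketch of the same lifecycle in a hypothetical ingest server (the struct fields, function names, and accept-loop bookkeeping are assumptions; only Init(string) and CloseAll(time.Duration) come from the diff):

package main

import (
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver"
)

type server struct {
	cm ingestserver.ConnsMap
	wg sync.WaitGroup
}

func mustStart() *server {
	s := &server{}
	s.cm.Init("myproto") // client name shows up in CloseAll's log lines
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		// accept loop: register each accepted conn with the ConnsMap
		// before serving it, and remove it when the conn is done.
	}()
	return s
}

func (s *server) mustStop() {
	// 0 means "close every tracked connection immediately",
	// preserving the pre-change behavior of these servers.
	s.cm.CloseAll(0)
	s.wg.Wait()
}

func main() {
	s := mustStart()
	s.mustStop()
}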
Some files were not shown because too many files have changed in this diff.