Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Aliaksandr Valialkin 2023-09-09 06:18:18 +02:00
commit af85055f3a
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
781 changed files with 74538 additions and 5873 deletions
.github/workflows
Makefile
README.md
SECURITY.md
app
vlinsert/loki
vlselect/vmui
vlstorage
vmagent
vmalert
vmauth
vmbackup
vmbackupmanager
vmctl
vmgateway
vminsert/relabel
vmrestore
vmselect
vmstorage
vmui
Dockerfile-web
packages/vmui
public
src/components
Chart
Configurators/AdditionalSettings


@@ -17,7 +17,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@main
         with:
-          go-version: 1.21.0
+          go-version: 1.21.1
         id: go
       - name: Code checkout
         uses: actions/checkout@master


@@ -33,7 +33,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Initialize CodeQL
         uses: github/codeql-action/init@v2


@@ -52,12 +52,12 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Set up Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.21.0
+          go-version: 1.21.1
           check-latest: true
           cache: true
         if: ${{ matrix.language == 'go' }}


@@ -27,12 +27,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Code checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Setup Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.21.0
+          go-version: 1.21.1
           check-latest: true
           cache: true
@@ -51,12 +51,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Code checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Setup Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.21.0
+          go-version: 1.21.1
           check-latest: true
           cache: true
@@ -75,13 +75,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Code checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Setup Go
         id: go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.21.0
+          go-version: 1.21.1
           check-latest: true
           cache: true


@@ -15,11 +15,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Code checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           path: main
       - name: Checkout private code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           repository: VictoriaMetrics/vmdocs
           token: ${{ secrets.VM_BOT_GH_TOKEN }}


@@ -13,7 +13,7 @@ jobs:
         run: exit 1
       - name: Check out code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           repository: VictoriaMetrics/ops
           token: ${{ secrets.VM_BOT_GH_TOKEN }}


@@ -16,6 +16,7 @@ GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TA
 include app/*/Makefile
 include deployment/*/Makefile
+include dashboards/Makefile
 include snap/local/Makefile
 include package/release/Makefile
@@ -34,7 +35,6 @@ clean:
 publish: package-base \
 	publish-victoria-metrics \
-	publish-victoria-logs \
 	publish-vmagent \
 	publish-vmalert \
 	publish-vmauth \
@@ -174,6 +174,7 @@ vmutils-crossbuild: \
 	vmutils-windows-amd64
 
 publish-release:
+	rm -rf bin/*
 	git checkout $(TAG) && LATEST_TAG=stable $(MAKE) release publish && \
 	git checkout $(TAG)-cluster && LATEST_TAG=cluster-stable $(MAKE) release publish && \
 	git checkout $(TAG)-enterprise && LATEST_TAG=enterprise-stable $(MAKE) release publish && \
@@ -181,7 +182,6 @@ publish-release:
 release: \
 	release-victoria-metrics \
-	release-victoria-logs \
 	release-vmutils
 
 release-victoria-metrics: \
@@ -437,7 +437,7 @@ benchmark-pure:
 vendor-update:
 	go get -u -d ./lib/...
 	go get -u -d ./app/...
-	go mod tidy -compat=1.19
+	go mod tidy -compat=1.20
 	go mod vendor
 
 app-local:
@@ -463,7 +463,7 @@ golangci-lint: install-golangci-lint
 	golangci-lint run
 
 install-golangci-lint:
-	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.51.2
+	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.54.2
 
 govulncheck: install-govulncheck
 	govulncheck ./...


@@ -152,7 +152,7 @@ VictoriaMetrics can also be installed via these installation methods:
 The following command-line flags are used the most:
 
 * `-storageDataPath` - VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory.
-* `-retentionPeriod` - retention for stored data. Older data is automatically deleted. Default retention is 1 month. The minimum retention period is 24h or 1d. See [the Retention section](#retention) for more details.
+* `-retentionPeriod` - retention for stored data. Older data is automatically deleted. Default retention is 1 month (31 days). The minimum retention period is 24h or 1d. See [these docs](#retention) for more details.
 
 Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).
@@ -173,7 +173,8 @@ VictoriaMetrics is developed at a fast pace, so it is recommended periodically c
 ### Environment variables
 
-All the VictoriaMetrics components allow referring environment variables in command-line flags via `%{ENV_VAR}` syntax.
+All the VictoriaMetrics components allow referring environment variables in `yaml` configuration files (such as `-promscrape.config`)
+and in command-line flags via `%{ENV_VAR}` syntax.
 For example, `-metricsAuthKey=%{METRICS_AUTH_KEY}` is automatically expanded to `-metricsAuthKey=top-secret`
 if `METRICS_AUTH_KEY=top-secret` environment variable exists at VictoriaMetrics startup.
 This expansion is performed by VictoriaMetrics itself.
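The `%{ENV_VAR}` expansion described in this hunk can be illustrated with a minimal sketch; `expandEnvVars` is a hypothetical helper that only mirrors the documented behavior, not the actual VictoriaMetrics implementation:

```go
package main

import (
	"fmt"
	"os"
	"regexp"
)

// envVarRe matches %{NAME} references inside a flag value.
var envVarRe = regexp.MustCompile(`%\{([^}]+)\}`)

// expandEnvVars replaces each %{NAME} with the NAME environment variable,
// leaving the reference untouched when the variable is not set.
func expandEnvVars(s string) string {
	return envVarRe.ReplaceAllStringFunc(s, func(m string) string {
		if v, ok := os.LookupEnv(m[2 : len(m)-1]); ok {
			return v
		}
		return m
	})
}

func main() {
	os.Setenv("METRICS_AUTH_KEY", "top-secret")
	// Prints: -metricsAuthKey=top-secret
	fmt.Println(expandEnvVars("-metricsAuthKey=%{METRICS_AUTH_KEY}"))
}
```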
@@ -914,7 +915,7 @@ to your needs or when testing bugfixes.
 ### Development build
 
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
 1. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `victoria-metrics` binary and puts it into the `bin` folder.
@@ -930,7 +931,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
 ### Development ARM build
 
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
 1. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.
@@ -944,7 +945,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
 `Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
 
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
 1. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
@@ -1609,8 +1610,8 @@ See also [how to work with snapshots](#how-to-work-with-snapshots).
 ## Retention
 
 Retention is configured with the `-retentionPeriod` command-line flag, which takes a number followed by a time unit
-character - `h(ours)`, `d(ays)`, `w(eeks)`, `y(ears)`. If the time unit is not specified, a month is assumed.
-For instance, `-retentionPeriod=3` means that the data will be stored for 3 months and then deleted.
+character - `h(ours)`, `d(ays)`, `w(eeks)`, `y(ears)`. If the time unit is not specified, a month (31 days) is assumed.
+For instance, `-retentionPeriod=3` means that the data will be stored for 3 months (93 days) and then deleted.
 The default retention period is one month. The **minimum retention** period is 24h or 1d.
 
 Data is split in per-month partitions inside `<-storageDataPath>/data/{small,big}` folders.
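A sketch of the duration semantics documented in this hunk (unit suffixes plus the 31-day month default); `parseRetention` is an illustrative helper under those stated assumptions, not the actual `lib/flagutil` code:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseRetention converts a -retentionPeriod value into a time.Duration,
// treating a bare number as months of 31 days each.
func parseRetention(s string) (time.Duration, error) {
	if s == "" {
		return 0, fmt.Errorf("empty retention")
	}
	day := 24 * time.Hour
	units := map[byte]time.Duration{'h': time.Hour, 'd': day, 'w': 7 * day, 'y': 365 * day}
	if u, ok := units[s[len(s)-1]]; ok {
		n, err := strconv.ParseFloat(s[:len(s)-1], 64)
		if err != nil {
			return 0, err
		}
		return time.Duration(n * float64(u)), nil
	}
	n, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	return time.Duration(n * float64(31*day)), nil // a month is assumed to be 31 days
}

func main() {
	d, _ := parseRetention("3")
	fmt.Println(d.Hours() / 24) // 93 days for 3 months
}
```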
@@ -2293,7 +2294,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -envflag.prefix string
     	Prefix for environment variables if -envflag.enable is set
   -eula
-    	By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+    	Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
   -finalMergeDelay duration
     	The delay before starting final merge for per-month partition after no new data is ingested into it. Final merge may require additional disk IO and CPU resources. Final merge may increase query speed and reduce disk space usage in some cases. Zero value disables final merge
   -flagsAuthKey string
@@ -2363,6 +2364,12 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     	Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
   -internStringMaxLen int
     	The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
+  -license string
+    	See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+  -license.forceOffline
+    	See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+  -licenseFile string
+    	See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
   -logNewSeries
     	Whether to log new series. This option is for debug purposes only. It can lead to performance issues when big number of new series are ingested into VictoriaMetrics
   -loggerDisableTimestamps


@@ -5,8 +5,8 @@
 | Version | Supported          |
 |---------|--------------------|
 | [latest release](https://docs.victoriametrics.com/CHANGELOG.html) | :white_check_mark: |
+| v1.93.x LTS release | :white_check_mark: |
 | v1.87.x LTS release | :white_check_mark: |
-| v1.79.x LTS release | :white_check_mark: |
 | other releases | :x: |
 
 ## Reporting a Vulnerability


@@ -3,9 +3,10 @@ package loki
 import (
 	"net/http"
 
-	"github.com/VictoriaMetrics/metrics"
-
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
+	"github.com/VictoriaMetrics/metrics"
 )
 
 var (
@@ -14,12 +15,22 @@ var (
 )
 
 // RequestHandler processes Loki insert requests
+//
+// See https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki
 func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
-	if path != "/api/v1/push" {
+	switch path {
+	case "/api/v1/push":
+		return handleInsert(r, w)
+	case "/ready":
+		// See https://grafana.com/docs/loki/latest/api/#identify-ready-loki-instance
+		w.WriteHeader(http.StatusOK)
+		w.Write([]byte("ready"))
+		return true
+	default:
 		return false
 	}
+}
 
+// See https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki
+func handleInsert(r *http.Request, w http.ResponseWriter) bool {
 	contentType := r.Header.Get("Content-Type")
 	switch contentType {
 	case "application/json":


@ -1,14 +1,14 @@
{ {
"files": { "files": {
"main.css": "./static/css/main.5f91b1c5.css", "main.css": "./static/css/main.17914339.css",
"main.js": "./static/js/main.7226aaff.js", "main.js": "./static/js/main.b6509627.js",
"static/js/522.b5ae4365.chunk.js": "./static/js/522.b5ae4365.chunk.js", "static/js/522.b5ae4365.chunk.js": "./static/js/522.b5ae4365.chunk.js",
"static/media/Lato-Regular.ttf": "./static/media/Lato-Regular.d714fec1633b69a9c2e9.ttf", "static/media/Lato-Regular.ttf": "./static/media/Lato-Regular.d714fec1633b69a9c2e9.ttf",
"static/media/Lato-Bold.ttf": "./static/media/Lato-Bold.32360ba4b57802daa4d6.ttf", "static/media/Lato-Bold.ttf": "./static/media/Lato-Bold.32360ba4b57802daa4d6.ttf",
"index.html": "./index.html" "index.html": "./index.html"
}, },
"entrypoints": [ "entrypoints": [
"static/css/main.5f91b1c5.css", "static/css/main.17914339.css",
"static/js/main.7226aaff.js" "static/js/main.b6509627.js"
] ]
} }


@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.7226aaff.js"></script><link href="./static/css/main.5f91b1c5.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.b6509627.js"></script><link href="./static/css/main.17914339.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -39,13 +39,13 @@ func Init() {
 		logger.Panicf("BUG: Init() has been already called")
 	}
-	if retentionPeriod.Msecs < 24*3600*1000 {
+	if retentionPeriod.Duration() < 24*time.Hour {
 		logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod)
 	}
 	cfg := &logstorage.StorageConfig{
-		Retention:       time.Millisecond * time.Duration(retentionPeriod.Msecs),
+		Retention:       retentionPeriod.Duration(),
 		FlushInterval:   *inmemoryDataFlushInterval,
-		FutureRetention: time.Millisecond * time.Duration(futureRetention.Msecs),
+		FutureRetention: futureRetention.Duration(),
 		LogNewStreams:   *logNewStreams,
 		LogIngestedRows: *logIngestedRows,
 	}


@@ -754,10 +754,10 @@ as soon as it is parsed in stream parsing mode.
 A single `vmagent` instance can scrape tens of thousands of scrape targets. Sometimes this isn't enough due to limitations on CPU, network, RAM, etc.
 In this case scrape targets can be split among multiple `vmagent` instances (aka `vmagent` horizontal scaling, sharding and clustering).
 
-Each `vmagent` instance in the cluster must use identical `-promscrape.config` files with distinct `-promscrape.cluster.memberNum` values.
-The flag value must be in the range `0 ... N-1`, where `N` is the number of `vmagent` instances in the cluster.
-The number of `vmagent` instances in the cluster must be passed to `-promscrape.cluster.membersCount` command-line flag. For example, the following commands
-spread scrape targets among a cluster of two `vmagent` instances:
+Each `vmagent` instance in the cluster must use identical `-promscrape.config` files with distinct `-promscrape.cluster.memberNum` values
+in the range `0 ... N-1`, where `N` is the number of `vmagent` instances in the cluster specified via `-promscrape.cluster.membersCount`.
+
+For example, the following commands spread scrape targets among a cluster of two `vmagent` instances:
 
 ```
 /path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=0 -promscrape.config=/path/to/config.yml ...
@@ -765,7 +765,7 @@ spread scrape targets among a cluster of two `vmagent` instances:
 ```
 
 The `-promscrape.cluster.memberNum` can be set to a StatefulSet pod name when `vmagent` runs in Kubernetes.
-The pod name must end with a number in the range `0 ... promscrape.cluster.memberNum-1`. For example, `-promscrape.cluster.memberNum=vmagent-0`.
+The pod name must end with a number in the range `0 ... promscrape.cluster.membersCount-1`. For example, `-promscrape.cluster.memberNum=vmagent-0`.
 
 By default, each scrape target is scraped only by a single `vmagent` instance in the cluster. If there is a need for replicating scrape targets among multiple `vmagent` instances,
 then `-promscrape.cluster.replicationFactor` command-line flag must be set to the desired number of replicas. For example, the following commands
@@ -781,6 +781,14 @@ If each target is scraped by multiple `vmagent` instances, then data deduplicati
 The `-dedup.minScrapeInterval` must be set to the `scrape_interval` configured at `-promscrape.config`.
 See [these docs](https://docs.victoriametrics.com/#deduplication) for details.
 
+The `-promscrape.cluster.memberLabel` command-line flag allows specifying a name for `member num` label to add to all the scraped metrics.
+The value of the `member num` label is set to `-promscrape.cluster.memberNum`. For example, the following config instructs adding `vmagent_instance="0"` label
+to all the metrics scraped by the given `vmagent` instance:
+
+```
+/path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=0 -promscrape.cluster.memberLabel=vmagent_instance
+```
+
 See also [how to shard data among multiple remote storage systems](#sharding-among-remote-storages).
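The coordination-free sharding described above can be sketched as follows; the hash function and helper name are illustrative (vmagent's actual implementation differs), but the idea is the same: every instance sees all targets and keeps only those that map to its member number:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// keepTarget reports whether this vmagent member should scrape the target.
// All members run the same code over the same target list, so each target
// lands on exactly one member without any coordination.
func keepTarget(targetKey string, memberNum, membersCount uint64) bool {
	h := fnv.New64a()
	h.Write([]byte(targetKey))
	return h.Sum64()%membersCount == memberNum
}

func main() {
	targets := []string{"host-a:9100", "host-b:9100", "host-c:9100"}
	for member := uint64(0); member < 2; member++ {
		for _, t := range targets {
			if keepTarget(t, member, 2) {
				fmt.Printf("member %d scrapes %s\n", member, t)
			}
		}
	}
}
```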
@@ -1130,7 +1138,7 @@ It may be needed to build `vmagent` from source code when developing or testing
 ### Development build
 
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
 1. Run `make vmagent` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds the `vmagent` binary and puts it into the `bin` folder.
@@ -1159,7 +1167,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
 ### Development ARM build
 
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
 1. Run `make vmagent-linux-arm` or `make vmagent-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
    It builds `vmagent-linux-arm` or `vmagent-linux-arm64` binary respectively and puts it into the `bin` folder.
@@ -1236,7 +1244,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
   -envflag.prefix string
     	Prefix for environment variables if -envflag.enable is set
   -eula
-    	By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+    	Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
   -flagsAuthKey string
     	Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
   -fs.disableMmap
@@ -1327,6 +1335,12 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
   -kafka.consumer.topic.options array
     	Optional key=value;key1=value2 settings for topic consumer. See full configuration options at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
     	Supports an array of values separated by comma or specified via multiple flags.
+  -license string
+    	See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+  -license.forceOffline
+    	See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+  -licenseFile string
+    	See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
   -loggerDisableTimestamps
     	Whether to disable writing timestamps in logs
   -loggerErrorsPerSecondLimit int
@@ -1376,8 +1390,10 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
     	Items in the previous caches are removed when the percent of requests it serves becomes lower than this value. Higher values reduce memory usage at the cost of higher CPU usage. See also -cacheExpireDuration (default 0.1)
   -promscrape.azureSDCheckInterval duration
     	Interval for checking for changes in Azure. This works only if azure_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#azure_sd_configs for details (default 1m0s)
+  -promscrape.cluster.memberLabel string
+    	If non-empty, then the label with this name and the -promscrape.cluster.memberNum value is added to all the scraped metrics
   -promscrape.cluster.memberNum string
-    	The number of number in the cluster of scrapers. It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster. Can be specified as pod name of Kubernetes StatefulSet - pod-name-Num, where Num is a numeric part of pod name (default "0")
+    	The number of vmagent instance in the cluster of scrapers. It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster. Can be specified as pod name of Kubernetes StatefulSet - pod-name-Num, where Num is a numeric part of pod name. See also -promscrape.cluster.memberLabel (default "0")
   -promscrape.cluster.membersCount int
     	The number of members in a cluster of scrapers. Each member must have a unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . Each member then scrapes roughly 1/N of all the targets. By default, cluster scraping is disabled, i.e. a single scraper scrapes all the targets
   -promscrape.cluster.name string


@@ -2,6 +2,7 @@ package remotewrite
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -322,6 +323,20 @@ func (c *client) runWorker() {
 }
 
 func (c *client) doRequest(url string, body []byte) (*http.Response, error) {
+	req := c.newRequest(url, body)
+	resp, err := c.hc.Do(req)
+	if err != nil && errors.Is(err, io.EOF) {
+		// it is likely connection became stale.
+		// So we do one more attempt in hope request will succeed.
+		// If not, the error should be handled by the caller as usual.
+		// This should help with https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4139
+		req = c.newRequest(url, body)
+		resp, err = c.hc.Do(req)
+	}
+	return resp, err
+}
+
+func (c *client) newRequest(url string, body []byte) *http.Request {
 	reqBody := bytes.NewBuffer(body)
 	req, err := http.NewRequest(http.MethodPost, url, reqBody)
 	if err != nil {
@@ -345,7 +360,7 @@ func (c *client) doRequest(url string, body []byte) (*http.Response, error) {
 			logger.Warnf("cannot sign remoteWrite request with AWS sigv4: %s", err)
 		}
 	}
-	return c.hc.Do(req)
+	return req
 }
 
 // sendBlockHTTP sends the given block to c.remoteWriteURL.
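One subtlety worth calling out: the retry builds a fresh `*http.Request` because the body reader of the first request has already been consumed by `Do`. A generic sketch of the same retry-once-on-EOF pattern (the `doOnce` name is made up for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

// doOnce retries a request once when the transport returns io.EOF, which
// typically signals a stale keep-alive connection. A fresh *http.Request is
// built per attempt, because the first attempt has already drained the body.
func doOnce(hc *http.Client, build func() *http.Request) (*http.Response, error) {
	resp, err := hc.Do(build())
	if err != nil && errors.Is(err, io.EOF) {
		resp, err = hc.Do(build())
	}
	return resp, err
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.Copy(io.Discard, r.Body)
		w.WriteHeader(http.StatusNoContent)
	}))
	defer srv.Close()

	build := func() *http.Request {
		req, _ := http.NewRequest(http.MethodPost, srv.URL, strings.NewReader("payload"))
		return req
	}
	if resp, err := doOnce(srv.Client(), build); err == nil {
		fmt.Println(resp.StatusCode) // 204
		resp.Body.Close()
	}
}
```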


@@ -87,8 +87,8 @@ func initLabelsGlobal() {
 	}
 }
 
-func (rctx *relabelCtx) applyRelabeling(tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Label, pcs *promrelabel.ParsedConfigs) []prompbmarshal.TimeSeries {
-	if len(extraLabels) == 0 && pcs.Len() == 0 && !*usePromCompatibleNaming {
+func (rctx *relabelCtx) applyRelabeling(tss []prompbmarshal.TimeSeries, pcs *promrelabel.ParsedConfigs) []prompbmarshal.TimeSeries {
+	if pcs.Len() == 0 && !*usePromCompatibleNaming {
 		// Nothing to change.
 		return tss
 	}
@@ -98,34 +98,15 @@ func (rctx *relabelCtx) applyRelabeling(tss []prompbmarshal.TimeSeries, extraLab
 		ts := &tss[i]
 		labelsLen := len(labels)
 		labels = append(labels, ts.Labels...)
-		// extraLabels must be added before applying relabeling according to https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write
-		for j := range extraLabels {
-			extraLabel := &extraLabels[j]
-			tmp := promrelabel.GetLabelByName(labels[labelsLen:], extraLabel.Name)
-			if tmp != nil {
-				tmp.Value = extraLabel.Value
-			} else {
-				labels = append(labels, *extraLabel)
-			}
-		}
-		if *usePromCompatibleNaming {
-			// Replace unsupported Prometheus chars in label names and metric names with underscores.
-			tmpLabels := labels[labelsLen:]
-			for j := range tmpLabels {
-				label := &tmpLabels[j]
-				if label.Name == "__name__" {
-					label.Value = promrelabel.SanitizeName(label.Value)
-				} else {
-					label.Name = promrelabel.SanitizeName(label.Name)
-				}
-			}
-		}
 		labels = pcs.Apply(labels, labelsLen)
 		labels = promrelabel.FinalizeLabels(labels[:labelsLen], labels[labelsLen:])
 		if len(labels) == labelsLen {
 			// Drop the current time series, since relabeling removed all the labels.
 			continue
 		}
+		if *usePromCompatibleNaming {
+			fixPromCompatibleNaming(labels[labelsLen:])
+		}
 		tssDst = append(tssDst, prompbmarshal.TimeSeries{
 			Labels:  labels[labelsLen:],
 			Samples: ts.Samples,
@@ -135,6 +116,29 @@ func (rctx *relabelCtx) applyRelabeling(tss []prompbmarshal.TimeSeries, extraLab
 	return tssDst
 }
 
+func (rctx *relabelCtx) appendExtraLabels(tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Label) {
+	if len(extraLabels) == 0 {
+		return
+	}
+	labels := rctx.labels[:0]
+	for i := range tss {
+		ts := &tss[i]
+		labelsLen := len(labels)
+		labels = append(labels, ts.Labels...)
+		for j := range extraLabels {
+			extraLabel := extraLabels[j]
+			tmp := promrelabel.GetLabelByName(labels[labelsLen:], extraLabel.Name)
+			if tmp != nil {
+				tmp.Value = extraLabel.Value
+			} else {
+				labels = append(labels, extraLabel)
+			}
+		}
+		ts.Labels = labels[labelsLen:]
+	}
+	rctx.labels = labels
+}
+
 type relabelCtx struct {
 	// pool for labels, which are used during the relabeling.
 	labels []prompbmarshal.Label
@@ -159,3 +163,15 @@ func putRelabelCtx(rctx *relabelCtx) {
 	rctx.labels = rctx.labels[:0]
 	relabelCtxPool.Put(rctx)
 }
+
+func fixPromCompatibleNaming(labels []prompbmarshal.Label) {
+	// Replace unsupported Prometheus chars in label names and metric names with underscores.
+	for i := range labels {
+		label := &labels[i]
+		if label.Name == "__name__" {
+			label.Value = promrelabel.SanitizeMetricName(label.Value)
+		} else {
+			label.Name = promrelabel.SanitizeLabelName(label.Name)
+		}
+	}
+}
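The override-or-append semantics of the new `appendExtraLabels` can be shown with a stripped-down sketch (local `Label` type and `addExtraLabels` helper are illustrative stand-ins, not the real `prompbmarshal` API):

```go
package main

import "fmt"

// Label is a simplified stand-in for prompbmarshal.Label.
type Label struct {
	Name, Value string
}

// addExtraLabels overrides an existing label with the same name,
// otherwise appends the extra label, mirroring appendExtraLabels above.
func addExtraLabels(labels, extra []Label) []Label {
	for _, e := range extra {
		found := false
		for i := range labels {
			if labels[i].Name == e.Name {
				labels[i].Value = e.Value
				found = true
				break
			}
		}
		if !found {
			labels = append(labels, e)
		}
	}
	return labels
}

func main() {
	labels := []Label{{"__name__", "up"}, {"foo", "baz"}}
	extra := []Label{{"foo", "bar"}, {"baz", "qux"}}
	// Prints: [{__name__ up} {foo bar} {baz qux}]
	fmt.Println(addExtraLabels(labels, extra))
}
```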


@@ -10,18 +10,16 @@ import (
 )
 
 func TestApplyRelabeling(t *testing.T) {
-	f := func(extraLabels []prompbmarshal.Label, pcs *promrelabel.ParsedConfigs, sTss, sExpTss string) {
+	f := func(pcs *promrelabel.ParsedConfigs, sTss, sExpTss string) {
 		rctx := &relabelCtx{}
 		tss, expTss := parseSeries(sTss), parseSeries(sExpTss)
-		gotTss := rctx.applyRelabeling(tss, extraLabels, pcs)
+		gotTss := rctx.applyRelabeling(tss, pcs)
 		if !reflect.DeepEqual(gotTss, expTss) {
 			t.Fatalf("expected to have: \n%v;\ngot: \n%v", expTss, gotTss)
 		}
 	}
 
-	f(nil, nil, "up", "up")
-	f([]prompbmarshal.Label{{Name: "foo", Value: "bar"}}, nil, "up", `up{foo="bar"}`)
-	f([]prompbmarshal.Label{{Name: "foo", Value: "bar"}}, nil, `up{foo="baz"}`, `up{foo="bar"}`)
+	f(nil, "up", "up")
 
 	pcs, err := promrelabel.ParseRelabelConfigsData([]byte(`
 - target_label: "foo"
@@ -32,11 +30,33 @@ func TestApplyRelabeling(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected error: %s", err)
 	}
-	f(nil, pcs, `up{foo="baz", env="prod"}`, `up{foo="aaa"}`)
+	f(pcs, `up{foo="baz", env="prod"}`, `up{foo="aaa"}`)
 
 	oldVal := *usePromCompatibleNaming
 	*usePromCompatibleNaming = true
-	f(nil, nil, `foo.bar`, `foo_bar`)
+	f(nil, `foo.bar`, `foo_bar`)
+	*usePromCompatibleNaming = oldVal
+}
+
+func TestAppendExtraLabels(t *testing.T) {
+	f := func(extraLabels []prompbmarshal.Label, sTss, sExpTss string) {
+		t.Helper()
+		rctx := &relabelCtx{}
+		tss, expTss := parseSeries(sTss), parseSeries(sExpTss)
+		rctx.appendExtraLabels(tss, extraLabels)
+		if !reflect.DeepEqual(tss, expTss) {
+			t.Fatalf("expected to have: \n%v;\ngot: \n%v", expTss, tss)
+		}
+	}
+
+	f(nil, "up", "up")
+	f([]prompbmarshal.Label{{Name: "foo", Value: "bar"}}, "up", `up{foo="bar"}`)
+	f([]prompbmarshal.Label{{Name: "foo", Value: "bar"}}, `up{foo="baz"}`, `up{foo="bar"}`)
+	f([]prompbmarshal.Label{{Name: "baz", Value: "qux"}}, `up{foo="baz"}`, `up{foo="baz",baz="qux"}`)
+
+	oldVal := *usePromCompatibleNaming
+	*usePromCompatibleNaming = true
+	f([]prompbmarshal.Label{{Name: "foo.bar", Value: "baz"}}, "up", `up{foo.bar="baz"}`)
 	*usePromCompatibleNaming = oldVal
 }


@@ -258,7 +258,7 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
 		if *showRemoteWriteURL {
 			sanitizedURL = fmt.Sprintf("%d:%s", i+1, remoteWriteURL)
 		}
-		rwctxs[i] = newRemoteWriteCtx(i, at, remoteWriteURL, maxInmemoryBlocks, sanitizedURL)
+		rwctxs[i] = newRemoteWriteCtx(i, remoteWriteURL, maxInmemoryBlocks, sanitizedURL)
 	}
 
 	if !*keepDanglingQueues {
@@ -355,7 +355,7 @@ func Push(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 	var rctx *relabelCtx
 	rcs := allRelabelConfigs.Load()
 	pcsGlobal := rcs.global
-	if pcsGlobal.Len() > 0 || len(labelsGlobal) > 0 {
+	if pcsGlobal.Len() > 0 {
 		rctx = getRelabelCtx()
 	}
 	tss := wr.Timeseries
@@ -386,7 +386,7 @@ func Push(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 		}
 		if rctx != nil {
 			rowsCountBeforeRelabel := getRowsCount(tssBlock)
-			tssBlock = rctx.applyRelabeling(tssBlock, labelsGlobal, pcsGlobal)
+			tssBlock = rctx.applyRelabeling(tssBlock, pcsGlobal)
 			rowsCountAfterRelabel := getRowsCount(tssBlock)
 			rowsDroppedByGlobalRelabel.Add(rowsCountBeforeRelabel - rowsCountAfterRelabel)
 		}
@@ -559,7 +559,7 @@ type remoteWriteCtx struct {
 	rowsDroppedByRelabel *metrics.Counter
 }
 
-func newRemoteWriteCtx(argIdx int, at *auth.Token, remoteWriteURL *url.URL, maxInmemoryBlocks int, sanitizedURL string) *remoteWriteCtx {
+func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks int, sanitizedURL string) *remoteWriteCtx {
 	// strip query params, otherwise changing params resets pq
 	pqURL := *remoteWriteURL
 	pqURL.RawQuery = ""
@@ -668,7 +668,7 @@ func (rwctx *remoteWriteCtx) Push(tss []prompbmarshal.TimeSeries) {
 		v = tssPool.Get().(*[]prompbmarshal.TimeSeries)
 		tss = append(*v, tss...)
 		rowsCountBeforeRelabel := getRowsCount(tss)
-		tss = rctx.applyRelabeling(tss, nil, pcs)
+		tss = rctx.applyRelabeling(tss, pcs)
 		rowsCountAfterRelabel := getRowsCount(tss)
 		rwctx.rowsDroppedByRelabel.Add(rowsCountBeforeRelabel - rowsCountAfterRelabel)
 	}
@@ -719,9 +719,26 @@ func dropAggregatedSeries(src []prompbmarshal.TimeSeries, matchIdxs []byte, drop
 }
 
 func (rwctx *remoteWriteCtx) pushInternal(tss []prompbmarshal.TimeSeries) {
+	var rctx *relabelCtx
+	var v *[]prompbmarshal.TimeSeries
+	if len(labelsGlobal) > 0 {
+		// Make a copy of tss before adding extra labels in order to prevent
+		// from affecting time series for other remoteWrite.url configs.
+		rctx = getRelabelCtx()
+		v = tssPool.Get().(*[]prompbmarshal.TimeSeries)
+		tss = append(*v, tss...)
+		rctx.appendExtraLabels(tss, labelsGlobal)
+	}
+
 	pss := rwctx.pss
 	idx := atomic.AddUint64(&rwctx.pssNextIdx, 1) % uint64(len(pss))
 	pss[idx].Push(tss)
+
+	if rctx != nil {
+		*v = prompbmarshal.ResetTimeSeries(tss)
+		tssPool.Put(v)
+		putRelabelCtx(rctx)
+	}
 }
 
 func (rwctx *remoteWriteCtx) reinitStreamAggr() {

@@ -27,7 +27,7 @@ var (
 	stdDialerOnce sync.Once
 )
 
-func statDial(ctx context.Context, networkUnused, addr string) (conn net.Conn, err error) {
+func statDial(ctx context.Context, _, addr string) (conn net.Conn, err error) {
 	network := netutil.GetTCPNetwork()
 	d := getStdDialer()
 	conn, err = d.DialContext(ctx, network, addr)


@@ -112,6 +112,13 @@ name: <string>
 # How often rules in the group are evaluated.
 [ interval: <duration> | default = -evaluationInterval flag ]
 
+# Optional
+# Group will be evaluated at the exact offset in the range of [0...interval].
+# E.g. for Group with `interval: 1h` and `eval_offset: 5m` the evaluation will
+# start at 5th minute of the hour. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3409
+# `eval_offset` can't be bigger than `interval`.
+[ eval_offset: <duration> ]
+
 # Limit the number of alerts an alerting rule and series a recording
 # rule can produce. 0 is no limit.
 [ limit: <int> | default = 0 ]
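Under the documented semantics ("evaluated at the exact offset in the range of [0...interval]"), the first evaluation time can be sketched like this; `nextEval` is an illustrative helper, not vmalert's actual scheduler code:

```go
package main

import (
	"fmt"
	"time"
)

// nextEval aligns the evaluation to the start of the current interval
// plus the configured offset, rolling over to the next interval when
// that moment has already passed.
func nextEval(now time.Time, interval, offset time.Duration) time.Time {
	start := now.Truncate(interval).Add(offset)
	if !start.After(now) {
		start = start.Add(interval)
	}
	return start
}

func main() {
	now := time.Date(2023, 9, 9, 6, 18, 0, 0, time.UTC)
	// interval: 1h, eval_offset: 5m -> evaluation at the 5th minute of the next hour
	fmt.Println(nextEval(now, time.Hour, 5*time.Minute)) // 2023-09-09 07:05:00 +0000 UTC
}
```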
@@ -983,7 +990,7 @@ The shortlist of configuration flags is the following:
   -envflag.prefix string
     	Prefix for environment variables if -envflag.enable is set
   -eula
-    	By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+    	Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
   -evaluationInterval duration
     	How often to evaluate the rules (default 1m0s)
   -external.alert.source string
@@ -1023,6 +1030,12 @@ The shortlist of configuration flags is the following:
     	Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
   -internStringMaxLen int
     	The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
+  -license string
+    	See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+  -license.forceOffline
+    	See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+  -licenseFile string
+    	See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
   -loggerDisableTimestamps
     	Whether to disable writing timestamps in logs
   -loggerErrorsPerSecondLimit int
@@ -1494,7 +1507,7 @@ spec:
 ### Development build
 
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
 1. Run `make vmalert` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `vmalert` binary and puts it into the `bin` folder.
@@ -1510,7 +1523,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
 ### Development ARM build
 
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
 1. Run `make vmalert-linux-arm` or `make vmalert-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `vmalert-linux-arm` or `vmalert-linux-arm64` binary respectively and puts it into the `bin` folder.


@@ -72,6 +72,7 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
 		q: qb.BuildWithParams(datasource.QuerierParams{
 			DataSourceType:     group.Type.String(),
 			EvaluationInterval: group.Interval,
+			EvalOffset:         group.EvalOffset,
 			QueryParams:        group.Params,
 			Headers:            group.Headers,
 			Debug:              cfg.Debug,


@@ -427,7 +427,8 @@ func TestAlertingRule_ExecRange(t *testing.T) {
 			newTestAlertingRule("multi-series-for=>pending=>pending=>firing", 3*time.Second),
 			[]datasource.Metric{
 				{Values: []float64{1, 1, 1}, Timestamps: []int64{1, 3, 5}},
-				{Values: []float64{1, 1}, Timestamps: []int64{1, 5},
+				{
+					Values: []float64{1, 1}, Timestamps: []int64{1, 5},
 					Labels: []datasource.Label{{Name: "foo", Value: "bar"}},
 				},
 			},
@@ -436,21 +437,26 @@ func TestAlertingRule_ExecRange(t *testing.T) {
 				{State: notifier.StatePending, ActiveAt: time.Unix(1, 0)},
 				{State: notifier.StateFiring, ActiveAt: time.Unix(1, 0)},
 				//
-				{State: notifier.StatePending, ActiveAt: time.Unix(1, 0),
+				{
+					State: notifier.StatePending, ActiveAt: time.Unix(1, 0),
 					Labels: map[string]string{
 						"foo": "bar",
-					}},
-				{State: notifier.StatePending, ActiveAt: time.Unix(5, 0),
+					},
+				},
+				{
+					State: notifier.StatePending, ActiveAt: time.Unix(5, 0),
 					Labels: map[string]string{
 						"foo": "bar",
-					}},
+					},
+				},
 			},
 		},
 		{
 			newTestRuleWithLabels("multi-series-firing", "source", "vm"),
 			[]datasource.Metric{
 				{Values: []float64{1, 1}, Timestamps: []int64{1, 100}},
-				{Values: []float64{1, 1}, Timestamps: []int64{1, 5},
+				{
+					Values: []float64{1, 1}, Timestamps: []int64{1, 5},
 					Labels: []datasource.Label{{Name: "foo", Value: "bar"}},
 				},
 			},
@@ -586,7 +592,8 @@ func TestGroup_Restore(t *testing.T) {
 		[]config.Rule{{Alert: "foo", Expr: "foo", For: promutils.NewDuration(time.Second)}},
 		map[uint64]*notifier.Alert{
 			hash(map[string]string{alertNameLabel: "foo", alertGroupNameLabel: "TestRestore"}): {
-				ActiveAt: ts},
+				ActiveAt: ts,
+			},
 		})
 
 	// two rules, two active alerts, one with state restored
@@ -603,7 +610,8 @@ func TestGroup_Restore(t *testing.T) {
 				ActiveAt: defaultTS,
 			},
 			hash(map[string]string{alertNameLabel: "bar", alertGroupNameLabel: "TestRestore"}): {
-				ActiveAt: ts},
+				ActiveAt: ts,
+			},
 		})
 
 	// two rules, two active alerts, two with state restored
@@ -622,7 +630,8 @@ func TestGroup_Restore(t *testing.T) {
 				ActiveAt: ts,
 			},
 			hash(map[string]string{alertNameLabel: "bar", alertGroupNameLabel: "TestRestore"}): {
-				ActiveAt: ts},
+				ActiveAt: ts,
+			},
 		})
 
 	// one active alert but wrong state restore
@@ -844,7 +853,8 @@ func TestAlertingRule_Template(t *testing.T) {
 			hash(map[string]string{
 				alertNameLabel:      "OriginLabels",
 				alertGroupNameLabel: "Testing",
-				"instance":          "foo"}): {
+				"instance":          "foo",
+			}): {
 				Labels: map[string]string{
 					alertNameLabel:      "OriginLabels",
 					alertGroupNameLabel: "Testing",
@@ -872,7 +882,6 @@ func TestAlertingRule_Template(t *testing.T) {
 			gotAlert := tc.rule.alerts[hash]
 			if gotAlert == nil {
 				t.Fatalf("alert %d is missing; labels: %v; annotations: %v", hash, expAlert.Labels, expAlert.Annotations)
-				break
 			}
 			if !reflect.DeepEqual(expAlert.Annotations, gotAlert.Annotations) {
 				t.Fatalf("expected to have annotations %#v; got %#v", expAlert.Annotations, gotAlert.Annotations)


@@ -23,6 +23,7 @@ type Group struct {
     File        string
     Name        string               `yaml:"name"`
     Interval    *promutils.Duration  `yaml:"interval,omitempty"`
+    EvalOffset  *promutils.Duration  `yaml:"eval_offset,omitempty"`
     Limit       int                  `yaml:"limit,omitempty"`
     Rules       []Rule               `yaml:"rules"`
     Concurrency int                  `yaml:"concurrency"`
@@ -63,11 +64,27 @@ func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
     return nil
 }

-// Validate check for internal Group or Rule configuration errors
+// Validate checks configuration errors for group and internal rules
 func (g *Group) Validate(validateTplFn ValidateTplFn, validateExpressions bool) error {
     if g.Name == "" {
         return fmt.Errorf("group name must be set")
     }
+    if g.Interval.Duration() < 0 {
+        return fmt.Errorf("interval shouldn't be lower than 0")
+    }
+    if g.EvalOffset.Duration() < 0 {
+        return fmt.Errorf("eval_offset shouldn't be lower than 0")
+    }
+    // if `eval_offset` is set, the interval won't use the global evaluationInterval flag and must be bigger than the offset
+    if g.EvalOffset.Duration() > g.Interval.Duration() {
+        return fmt.Errorf("eval_offset should be smaller than interval; now eval_offset: %v, interval: %v", g.EvalOffset.Duration(), g.Interval.Duration())
+    }
+    if g.Limit < 0 {
+        return fmt.Errorf("invalid limit %d, shouldn't be less than 0", g.Limit)
+    }
+    if g.Concurrency < 0 {
+        return fmt.Errorf("invalid concurrency %d, shouldn't be less than 0", g.Concurrency)
+    }
     uniqueRules := map[uint64]struct{}{}
     for _, r := range g.Rules {
@@ -76,26 +93,26 @@ func (g *Group) Validate(validateTplFn ValidateTplFn, validateExpressions bool)
             ruleName = r.Alert
         }
         if _, ok := uniqueRules[r.ID]; ok {
-            return fmt.Errorf("%q is a duplicate within the group %q", r.String(), g.Name)
+            return fmt.Errorf("%q is a duplicate in group", r.String())
         }
         uniqueRules[r.ID] = struct{}{}
         if err := r.Validate(); err != nil {
-            return fmt.Errorf("invalid rule %q.%q: %w", g.Name, ruleName, err)
+            return fmt.Errorf("invalid rule %q: %w", ruleName, err)
         }
         if validateExpressions {
             // it's needed only for tests,
             // because correct types must be inherited after unmarshalling
             exprValidator := g.Type.ValidateExpr
             if err := exprValidator(r.Expr); err != nil {
-                return fmt.Errorf("invalid expression for rule %q.%q: %w", g.Name, ruleName, err)
+                return fmt.Errorf("invalid expression for rule %q: %w", ruleName, err)
             }
         }
         if validateTplFn != nil {
             if err := validateTplFn(r.Annotations); err != nil {
-                return fmt.Errorf("invalid annotations for rule %q.%q: %w", g.Name, ruleName, err)
+                return fmt.Errorf("invalid annotations for rule %q: %w", ruleName, err)
             }
             if err := validateTplFn(r.Labels); err != nil {
-                return fmt.Errorf("invalid labels for rule %q.%q: %w", g.Name, ruleName, err)
+                return fmt.Errorf("invalid labels for rule %q: %w", ruleName, err)
             }
         }
     }
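The interplay of the new `eval_offset` and `interval` checks is easiest to see by feeding Validate a deliberately broken group. A minimal sketch (hypothetical standalone snippet; it assumes compilation inside the VictoriaMetrics repository so the vmalert `config` and `promutils` packages are importable):

```go
package main

import (
	"fmt"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

func main() {
	// eval_offset (2m) exceeds interval (1m), so Validate must fail
	g := &config.Group{
		Name:       "groupTest",
		Interval:   promutils.NewDuration(time.Minute),
		EvalOffset: promutils.NewDuration(2 * time.Minute),
	}
	// a nil template validator and disabled expression validation keep the
	// sketch focused on the new interval/offset checks
	if err := g.Validate(nil, false); err != nil {
		fmt.Println(err) // eval_offset should be smaller than interval; ...
	}
}
```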


@@ -68,6 +68,10 @@ func TestParseBad(t *testing.T) {
         path   []string
         expErr string
     }{
+        {
+            []string{"testdata/rules/rules_interval_bad.rules"},
+            "eval_offset should be smaller than interval",
+        },
         {
             []string{"testdata/rules/rules0-bad.rules"},
             "unexpected token",
@@ -141,6 +145,35 @@ func TestGroup_Validate(t *testing.T) {
             group:  &Group{},
             expErr: "group name must be set",
         },
+        {
+            group: &Group{
+                Name:     "negative interval",
+                Interval: promutils.NewDuration(-1),
+            },
+            expErr: "interval shouldn't be lower than 0",
+        },
+        {
+            group: &Group{
+                Name:       "wrong eval_offset",
+                Interval:   promutils.NewDuration(time.Minute),
+                EvalOffset: promutils.NewDuration(2 * time.Minute),
+            },
+            expErr: "eval_offset should be smaller than interval",
+        },
+        {
+            group: &Group{
+                Name:  "wrong limit",
+                Limit: -1,
+            },
+            expErr: "invalid limit",
+        },
+        {
+            group: &Group{
+                Name:        "wrong concurrency",
+                Concurrency: -1,
+            },
+            expErr: "invalid concurrency",
+        },
         {
             group: &Group{
                 Name: "test",


@@ -0,0 +1,13 @@
+groups:
+  - name: groupTest
+    ## default interval is 1min, eval_offset shouldn't be greater than interval
+    eval_offset: 2m
+    rules:
+      - alert: VMRows
+        for: 2s
+        expr: sum(rate(vm_http_request_errors_total[2s])) > 0
+        labels:
+          label: bar
+          host: "{{ $labels.instance }}"
+        annotations:
+          summary: "{{ $value }}"


@@ -44,6 +44,7 @@ type QuerierBuilder interface {
 type QuerierParams struct {
     DataSourceType     string
     EvaluationInterval time.Duration
+    EvalOffset         *time.Duration
     QueryParams        url.Values
     Headers            map[string]string
     Debug              bool


@@ -37,11 +37,20 @@ type VMStorage struct {
     appendTypePrefix bool
     lookBack         time.Duration
     queryStep        time.Duration

     dataSourceType datasourceType
+    // evaluationInterval will align the request's timestamp
+    // if `datasource.queryTimeAlignment` is enabled,
+    // and will set the request's `step` param as well.
     evaluationInterval time.Duration
+    // evaluationOffset shifts the request's timestamp; it will be equal
+    // to the offset specified for the evaluationInterval.
+    // See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4693
+    evaluationOffset *time.Duration
+    // extraParams contains params to be attached to each HTTP request
     extraParams url.Values
+    // extraHeaders are headers to be attached to each HTTP request
     extraHeaders []keyValue

     // whether to print additional log messages
     // for each sent request
@@ -86,13 +95,21 @@ func (s *VMStorage) Clone() *VMStorage {
 func (s *VMStorage) ApplyParams(params QuerierParams) *VMStorage {
     s.dataSourceType = toDatasourceType(params.DataSourceType)
     s.evaluationInterval = params.EvaluationInterval
+    s.evaluationOffset = params.EvalOffset
     if params.QueryParams != nil {
         if s.extraParams == nil {
             s.extraParams = url.Values{}
         }
         for k, vl := range params.QueryParams {
+            // custom query params take priority over default ones
+            if s.extraParams.Has(k) {
+                s.extraParams.Del(k)
+            }
             for _, v := range vl {
-                s.extraParams.Set(k, v)
+                // don't use .Set() instead of Del/Add since it is allowed
+                // for GET params to be duplicated
+                // see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4908
+                s.extraParams.Add(k, v)
             }
         }
     }
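The Del/Add dance above exists because `url.Values.Set` collapses a key to a single value, silently dropping duplicated GET params such as a repeated `extra_labels`. A small standalone illustration using only the standard library:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := url.Values{}
	v.Set("extra_labels", "env=dev")
	v.Set("extra_labels", "foo=bar") // Set overwrites the previous value
	fmt.Println(v.Encode())          // extra_labels=foo%3Dbar

	v = url.Values{}
	v.Del("extra_labels") // reset the key once, then append every value
	v.Add("extra_labels", "env=dev")
	v.Add("extra_labels", "foo=bar")
	fmt.Println(v.Encode()) // extra_labels=env%3Ddev&extra_labels=foo%3Dbar
}
```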
@@ -127,21 +144,14 @@ func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Durati
 // Query executes the given query and returns parsed response
 func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) (Result, *http.Request, error) {
-    req, err := s.newRequestPOST()
-    if err != nil {
-        return Result{}, nil, err
-    }
-    switch s.dataSourceType {
-    case "", datasourcePrometheus:
-        s.setPrometheusInstantReqParams(req, query, ts)
-    case datasourceGraphite:
-        s.setGraphiteReqParams(req, query, ts)
-    default:
-        return Result{}, nil, fmt.Errorf("engine not found: %q", s.dataSourceType)
-    }
+    req := s.newQueryRequest(query, ts)
     resp, err := s.do(ctx, req)
+    if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
+        // something in the middle between the client and the datasource might be
+        // closing the connection, so we do one more attempt in hope the request will succeed
+        req = s.newQueryRequest(query, ts)
+        resp, err = s.do(ctx, req)
+    }
     if err != nil {
         return Result{}, req, err
     }
@@ -164,18 +174,20 @@ func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end tim
     if s.dataSourceType != datasourcePrometheus {
         return res, fmt.Errorf("%q is not supported for QueryRange", s.dataSourceType)
     }
-    req, err := s.newRequestPOST()
-    if err != nil {
-        return res, err
-    }
     if start.IsZero() {
         return res, fmt.Errorf("start param is missing")
     }
     if end.IsZero() {
         return res, fmt.Errorf("end param is missing")
     }
-    s.setPrometheusRangeReqParams(req, query, start, end)
+    req := s.newQueryRangeRequest(query, start, end)
     resp, err := s.do(ctx, req)
+    if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
+        // something in the middle between the client and the datasource might be
+        // closing the connection, so we do one more attempt in hope the request will succeed
+        req = s.newQueryRangeRequest(query, start, end)
+        resp, err = s.do(ctx, req)
+    }
     if err != nil {
         return res, err
     }
@@ -190,11 +202,6 @@ func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response,
         logger.Infof("DEBUG datasource request: executing %s request with params %q", req.Method, req.URL.RawQuery)
     }
     resp, err := s.c.Do(req.WithContext(ctx))
-    if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
-        // something in the middle between client and datasource might be closing
-        // the connection. So we do a one more attempt in hope request will succeed.
-        resp, err = s.c.Do(req.WithContext(ctx))
-    }
     if err != nil {
         return nil, fmt.Errorf("error getting response from %s: %w", req.URL.Redacted(), err)
     }
@@ -206,10 +213,29 @@ func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response,
     return resp, nil
 }

-func (s *VMStorage) newRequestPOST() (*http.Request, error) {
+func (s *VMStorage) newQueryRangeRequest(query string, start, end time.Time) *http.Request {
+    req := s.newRequest()
+    s.setPrometheusRangeReqParams(req, query, start, end)
+    return req
+}
+
+func (s *VMStorage) newQueryRequest(query string, ts time.Time) *http.Request {
+    req := s.newRequest()
+    switch s.dataSourceType {
+    case "", datasourcePrometheus:
+        s.setPrometheusInstantReqParams(req, query, ts)
+    case datasourceGraphite:
+        s.setGraphiteReqParams(req, query, ts)
+    default:
+        logger.Panicf("BUG: engine not found: %q", s.dataSourceType)
+    }
+    return req
+}
+
+func (s *VMStorage) newRequest() *http.Request {
     req, err := http.NewRequest(http.MethodPost, s.datasourceURL, nil)
     if err != nil {
-        return nil, err
+        logger.Panicf("BUG: unexpected error from http.NewRequest(%q): %s", s.datasourceURL, err)
     }
     req.Header.Set("Content-Type", "application/json")
     if s.authCfg != nil {
@@ -218,5 +244,5 @@ func (s *VMStorage) newRequestPOST() (*http.Request, error) {
     for _, h := range s.extraHeaders {
         req.Header.Set(h.key, h.value)
     }
-    return req, nil
+    return req
 }
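The retry now lives in Query/QueryRange rather than in do(), and the request is rebuilt before the second attempt. A minimal sketch of the pattern with a hypothetical helper (names are illustrative, not part of the diff):

```go
package main

import (
	"context"
	"errors"
	"io"
	"net/http"
)

// doWithOneRetry mirrors the pattern above: retry exactly once on EOF-like
// transport errors, rebuilding the request before the second attempt.
func doWithOneRetry(ctx context.Context, c *http.Client, newReq func() *http.Request) (*http.Response, error) {
	resp, err := c.Do(newReq().WithContext(ctx))
	if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
		// something between the client and the backend may have closed an
		// idle connection; try once more with a freshly built request
		resp, err = c.Do(newReq().WithContext(ctx))
	}
	return resp, err
}

func main() {
	build := func() *http.Request {
		r, _ := http.NewRequest(http.MethodGet, "http://localhost:8428/api/v1/query?query=up", nil)
		return r
	}
	if resp, err := doWithOneRetry(context.Background(), http.DefaultClient, build); err == nil {
		resp.Body.Close()
	}
}
```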


@@ -161,13 +161,8 @@ func (s *VMStorage) setPrometheusInstantReqParams(r *http.Request, query string,
         r.URL.Path += "/api/v1/query"
     }
     q := r.URL.Query()
-    if s.lookBack > 0 {
-        timestamp = timestamp.Add(-s.lookBack)
-    }
-    if *queryTimeAlignment && s.evaluationInterval > 0 {
-        // see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1232
-        timestamp = timestamp.Truncate(s.evaluationInterval)
-    }
+    timestamp = s.adjustReqTimestamp(timestamp)
     q.Set("time", timestamp.Format(time.RFC3339))
     if !*disableStepParam && s.evaluationInterval > 0 { // set step as evaluationInterval by default
         // always convert to seconds to keep compatibility with older
@@ -191,6 +186,9 @@ func (s *VMStorage) setPrometheusRangeReqParams(r *http.Request, query string, s
         r.URL.Path += "/api/v1/query_range"
     }
     q := r.URL.Query()
+    if s.evaluationOffset != nil {
+        start = start.Truncate(s.evaluationInterval).Add(*s.evaluationOffset)
+    }
     q.Add("start", start.Format(time.RFC3339))
     q.Add("end", end.Format(time.RFC3339))
     if s.evaluationInterval > 0 { // set step as evaluationInterval by default
@@ -215,3 +213,30 @@ func (s *VMStorage) setPrometheusReqParams(r *http.Request, query string) {
     q.Set("query", query)
     r.URL.RawQuery = q.Encode()
 }
+
+func (s *VMStorage) adjustReqTimestamp(timestamp time.Time) time.Time {
+    if s.evaluationOffset != nil {
+        // calculate the min timestamp on the evaluationInterval
+        intervalStart := timestamp.Truncate(s.evaluationInterval)
+        ts := intervalStart.Add(*s.evaluationOffset)
+        if timestamp.Before(ts) {
+            // if the passed timestamp is before the expected evaluation offset,
+            // then we should adjust it to the previous evaluation round.
+            // E.g. a request with evaluationInterval=1h and evaluationOffset=30m
+            // was evaluated at 11:20. Then the timestamp should be adjusted
+            // to 10:30, to the previous evaluationInterval.
+            return ts.Add(-s.evaluationInterval)
+        }
+        // evaluationOffset shouldn't interfere with queryTimeAlignment or lookBack,
+        // so we return it immediately
+        return ts
+    }
+    if *queryTimeAlignment {
+        // see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1232
+        timestamp = timestamp.Truncate(s.evaluationInterval)
+    }
+    if s.lookBack > 0 {
+        timestamp = timestamp.Add(-s.lookBack)
+    }
+    return timestamp
+}
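To make the offset arithmetic concrete, here is a standalone re-implementation of the `eval_offset` branch of adjustReqTimestamp (illustration only; the printed values match the expectations in the test below):

```go
package main

import (
	"fmt"
	"time"
)

// adjust mirrors the eval_offset branch of adjustReqTimestamp above.
func adjust(ts time.Time, interval, offset time.Duration) time.Time {
	intervalStart := ts.Truncate(interval)
	t := intervalStart.Add(offset)
	if ts.Before(t) {
		// fall back to the previous evaluation round
		return t.Add(-interval)
	}
	return t
}

func main() {
	interval, offset := time.Hour, 30*time.Minute
	ts, _ := time.Parse(time.RFC3339, "2023-08-28T11:20:00Z")
	fmt.Println(adjust(ts, interval, offset)) // 2023-08-28 10:30:00 +0000 UTC
	ts, _ = time.Parse(time.RFC3339, "2023-08-28T11:41:00Z")
	fmt.Println(adjust(ts, interval, offset)) // 2023-08-28 11:30:00 +0000 UTC
}
```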


@@ -3,6 +3,7 @@ package datasource
 import (
     "encoding/json"
     "testing"
+    "time"
 )

 func BenchmarkMetrics(b *testing.B) {
@@ -18,3 +19,74 @@ func BenchmarkMetrics(b *testing.B) {
         }
     })
 }
+
+func TestGetPrometheusReqTimestamp(t *testing.T) {
+    offset := 30 * time.Minute
+    testCases := []struct {
+        name               string
+        s                  *VMStorage
+        queryTimeAlignment bool
+        originTS, expTS    string
+    }{
+        {
+            "with eval_offset, find previous offset point",
+            &VMStorage{
+                evaluationOffset:   &offset,
+                evaluationInterval: time.Hour,
+                lookBack:           1 * time.Minute,
+            },
+            false,
+            "2023-08-28T11:11:00+00:00",
+            "2023-08-28T10:30:00+00:00",
+        },
+        {
+            "with eval_offset",
+            &VMStorage{
+                evaluationOffset:   &offset,
+                evaluationInterval: time.Hour,
+            },
+            true,
+            "2023-08-28T11:41:00+00:00",
+            "2023-08-28T11:30:00+00:00",
+        },
+        {
+            "with query align",
+            &VMStorage{
+                evaluationInterval: time.Hour,
+            },
+            true,
+            "2023-08-28T11:11:00+00:00",
+            "2023-08-28T11:00:00+00:00",
+        },
+        {
+            "with query align and lookback",
+            &VMStorage{
+                evaluationInterval: time.Hour,
+                lookBack:           1 * time.Minute,
+            },
+            true,
+            "2023-08-28T11:11:00+00:00",
+            "2023-08-28T10:59:00+00:00",
+        },
+        {
+            "without query align",
+            &VMStorage{
+                evaluationInterval: time.Hour,
+            },
+            false,
+            "2023-08-28T11:11:00+00:00",
+            "2023-08-28T11:11:00+00:00",
+        },
+    }
+    for _, tc := range testCases {
+        oldAlignPara := *queryTimeAlignment
+        *queryTimeAlignment = tc.queryTimeAlignment
+        originT, _ := time.Parse(time.RFC3339, tc.originTS)
+        expT, _ := time.Parse(time.RFC3339, tc.expTS)
+        gotTS := tc.s.adjustReqTimestamp(originT)
+        if !gotTS.Equal(expT) {
+            t.Fatalf("get wrong prometheus request timestamp, expect %s, got %s", expT, gotTS)
+        }
+        *queryTimeAlignment = oldAlignPara
+    }
+}


@@ -596,6 +596,17 @@ func TestRequestParams(t *testing.T) {
             checkEqualString(t, exp.Encode(), r.URL.RawQuery)
         },
     },
+    {
+        "allow duplicates in query params",
+        false,
+        storage.Clone().ApplyParams(QuerierParams{
+            QueryParams: url.Values{"extra_labels": {"env=dev", "foo=bar"}},
+        }),
+        func(t *testing.T, r *http.Request) {
+            exp := url.Values{"query": {query}, "round_digits": {"10"}, "extra_labels": {"env=dev", "foo=bar"}, "time": {timestamp.Format(time.RFC3339)}}
+            checkEqualString(t, exp.Encode(), r.URL.RawQuery)
+        },
+    },
     {
         "graphite extra params",
         false,
@@ -629,10 +640,7 @@ func TestRequestParams(t *testing.T) {
     for _, tc := range testCases {
         t.Run(tc.name, func(t *testing.T) {
-            req, err := tc.vm.newRequestPOST()
-            if err != nil {
-                t.Fatalf("unexpected error: %s", err)
-            }
+            req := tc.vm.newRequest()
             switch tc.vm.dataSourceType {
             case "", datasourcePrometheus:
                 if tc.queryRange {
@@ -727,10 +735,7 @@ func TestHeaders(t *testing.T) {
     for _, tt := range testCases {
         t.Run(tt.name, func(t *testing.T) {
             vm := tt.vmFn()
-            req, err := vm.newRequestPOST()
-            if err != nil {
-                t.Fatalf("unexpected error: %s", err)
-            }
+            req := vm.newQueryRequest("foo", time.Now())
             tt.checkFn(t, req)
         })
     }


@@ -31,6 +31,7 @@ type Group struct {
     Rules       []Rule
     Type        config.Type
     Interval    time.Duration
+    EvalOffset  *time.Duration
     Limit       int
     Concurrency int
     Checksum    string
@@ -116,6 +117,9 @@ func newGroup(cfg config.Group, qb datasource.QuerierBuilder, defaultInterval ti
     if g.Concurrency < 1 {
         g.Concurrency = 1
     }
+    if cfg.EvalOffset != nil {
+        g.EvalOffset = &cfg.EvalOffset.D
+    }
     for _, h := range cfg.Headers {
         g.Headers[h.Key] = h.Value
     }
@@ -163,6 +167,10 @@ func (g *Group) ID() uint64 {
     hash.Write([]byte("\xff"))
     hash.Write([]byte(g.Name))
     hash.Write([]byte(g.Type.Get()))
+    hash.Write([]byte(g.Interval.String()))
+    if g.EvalOffset != nil {
+        hash.Write([]byte(g.EvalOffset.String()))
+    }
     return hash.Sum64()
 }
@@ -277,15 +285,13 @@ var skipRandSleepOnGroupStart bool
 func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *remotewrite.Client, rr datasource.QuerierBuilder) {
     defer func() { close(g.finishedCh) }()

-    // Spread group rules evaluation over time in order to reduce load on VictoriaMetrics.
+    // sleep for a random duration to spread group rules evaluation
+    // over time in order to reduce load on the datasource
     if !skipRandSleepOnGroupStart {
-        randSleep := uint64(float64(g.Interval) * (float64(g.ID()) / (1 << 64)))
-        sleepOffset := uint64(time.Now().UnixNano()) % uint64(g.Interval)
-        if randSleep < sleepOffset {
-            randSleep += uint64(g.Interval)
-        }
-        randSleep -= sleepOffset
-        sleepTimer := time.NewTimer(time.Duration(randSleep))
+        sleepBeforeStart := delayBeforeStart(time.Now(), g.ID(), g.Interval, g.EvalOffset)
+        g.infof("will start in %v", sleepBeforeStart)
+
+        sleepTimer := time.NewTimer(sleepBeforeStart)
         select {
         case <-ctx.Done():
             sleepTimer.Stop()
@@ -297,6 +303,8 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
         }
     }

+    evalTS := time.Now()
+
     e := &executor{
         rw:        rw,
         notifiers: nts,
@@ -304,9 +312,7 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
         previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label),
     }

-    evalTS := time.Now()
-
-    logger.Infof("group %q started; interval=%v; concurrency=%d", g.Name, g.Interval, g.Concurrency)
+    g.infof("started")

     eval := func(ctx context.Context, ts time.Time) {
         g.metrics.iterationTotal.Inc()
@@ -375,19 +381,12 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
                 continue
             }

-            // ensure that staleness is tracked or existing rules only
+            // ensure that staleness is tracked for existing rules only
             e.purgeStaleSeries(g.Rules)
             e.notifierHeaders = g.NotifierHeaders
-            if g.Interval != ng.Interval {
-                g.Interval = ng.Interval
-                t.Stop()
-                t = time.NewTicker(g.Interval)
-                evalTS = time.Now()
-            }
             g.mu.Unlock()
-            logger.Infof("group %q re-started; interval=%v; concurrency=%d", g.Name, g.Interval, g.Concurrency)
+            g.infof("re-started")
         case <-t.C:
             missed := (time.Since(evalTS) / g.Interval) - 1
             if missed < 0 {
@@ -405,6 +404,35 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
     }
 }

+// delayBeforeStart returns a duration in the range [ts..ts+interval].
+// delayBeforeStart accounts for `offset`, so the returned duration should
+// always be bigger than the `offset`.
+func delayBeforeStart(ts time.Time, key uint64, interval time.Duration, offset *time.Duration) time.Duration {
+    var randSleep time.Duration
+    randSleep = time.Duration(float64(interval) * (float64(key) / (1 << 64)))
+    sleepOffset := time.Duration(ts.UnixNano() % interval.Nanoseconds())
+    if randSleep < sleepOffset {
+        randSleep += interval
+    }
+    randSleep -= sleepOffset
+    // check if `ts` after randSleep is before `offset`;
+    // if it is, add an extra eval_offset to randSleep.
+    // see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3409
+    if offset != nil {
+        tmpEvalTS := ts.Add(randSleep)
+        if tmpEvalTS.Before(tmpEvalTS.Truncate(interval).Add(*offset)) {
+            randSleep += *offset
+        }
+    }
+    return randSleep.Truncate(time.Second)
+}
+
+func (g *Group) infof(format string, args ...interface{}) {
+    msg := fmt.Sprintf(format, args...)
+    logger.Infof("group %q %s; interval=%v; eval_offset=%v; concurrency=%d",
+        g.Name, msg, g.Interval, g.EvalOffset, g.Concurrency)
+}
+
 // getResolveDuration returns the duration after which firing alert
 // can be considered as resolved.
 func getResolveDuration(groupInterval, delta, maxDuration time.Duration) time.Duration {
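The deterministic part of the start delay comes from mapping the group ID onto [0, interval). A rough standalone illustration of why the tests below speak of a "static delay of 30s" for a 5m interval (assumes the same float arithmetic as delayBeforeStart; the exact value depends on float64 rounding):

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// a key of MaxUint64/10 occupies ~1/10 of the uint64 space, so with a
	// 5m interval the deterministic share of the delay is ~30s
	key := uint64(math.MaxUint64 / 10)
	interval := 5 * time.Minute
	randSleep := time.Duration(float64(interval) * (float64(key) / (1 << 64)))
	fmt.Println(randSleep.Truncate(time.Second)) // ~30s
}
```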


@@ -3,6 +3,7 @@ package main
 import (
     "context"
     "fmt"
+    "math"
     "reflect"
     "sort"
     "testing"
@@ -35,18 +36,19 @@ func TestUpdateWith(t *testing.T) {
     },
     {
         "update alerting rule",
-        []config.Rule{{
-            Alert: "foo",
-            Expr:  "up > 0",
-            For:   promutils.NewDuration(time.Second),
-            Labels: map[string]string{
-                "bar": "baz",
+        []config.Rule{
+            {
+                Alert: "foo",
+                Expr:  "up > 0",
+                For:   promutils.NewDuration(time.Second),
+                Labels: map[string]string{
+                    "bar": "baz",
+                },
+                Annotations: map[string]string{
+                    "summary":     "{{ $value|humanize }}",
+                    "description": "{{$labels}}",
+                },
             },
-            Annotations: map[string]string{
-                "summary":     "{{ $value|humanize }}",
-                "description": "{{$labels}}",
-            },
-        },
             {
                 Alert: "bar",
                 Expr:  "up > 0",
@@ -54,7 +56,8 @@ func TestUpdateWith(t *testing.T) {
                 Labels: map[string]string{
                     "bar": "baz",
                 },
-        }},
+            },
+        },
         []config.Rule{
             {
                 Alert: "foo",
@@ -75,7 +78,8 @@ func TestUpdateWith(t *testing.T) {
                 Labels: map[string]string{
                     "bar": "baz",
                 },
-        }},
+            },
+        },
     },
     {
         "update recording rule",
@@ -520,3 +524,62 @@ func TestCloseWithEvalInterruption(t *testing.T) {
     case <-g.finishedCh:
     }
 }
+
+func TestGroupStartDelay(t *testing.T) {
+    g := &Group{}
+    // interval of 5min and key generate a static delay of 30s
+    g.Interval = time.Minute * 5
+    key := uint64(math.MaxUint64 / 10)
+
+    f := func(atS, expS string) {
+        t.Helper()
+        at, err := time.Parse(time.DateTime, atS)
+        if err != nil {
+            t.Fatal(err)
+        }
+        expTS, err := time.Parse(time.DateTime, expS)
+        if err != nil {
+            t.Fatal(err)
+        }
+        delay := delayBeforeStart(at, key, g.Interval, g.EvalOffset)
+        gotStart := at.Add(delay)
+        if expTS != gotStart {
+            t.Errorf("expected to get %v; got %v instead", expTS, gotStart)
+        }
+    }
+
+    // test group without offset
+    f("2023-01-01 00:00:00", "2023-01-01 00:00:30")
+    f("2023-01-01 00:00:29", "2023-01-01 00:00:30")
+    f("2023-01-01 00:00:31", "2023-01-01 00:05:30")
+
+    // test group with offset smaller than the fixed randSleep above,
+    // this way randSleep will always be enough
+    offset := 20 * time.Second
+    g.EvalOffset = &offset
+    f("2023-01-01 00:00:00", "2023-01-01 00:00:30")
+    f("2023-01-01 00:00:29", "2023-01-01 00:00:30")
+    f("2023-01-01 00:00:31", "2023-01-01 00:05:30")
+
+    // test group with offset bigger than the fixed randSleep above,
+    // this way the offset will be added to the delay
+    offset = 3 * time.Minute
+    g.EvalOffset = &offset
+    f("2023-01-01 00:00:00", "2023-01-01 00:03:30")
+    f("2023-01-01 00:00:29", "2023-01-01 00:03:30")
+    f("2023-01-01 00:01:00", "2023-01-01 00:08:30")
+    f("2023-01-01 00:03:30", "2023-01-01 00:08:30")
+    f("2023-01-01 00:07:30", "2023-01-01 00:13:30")
+
+    offset = 10 * time.Minute
+    g.EvalOffset = &offset
+    // interval of 1h and key generate a static delay of 6m
+    g.Interval = time.Hour
+    f("2023-01-01 00:00:00", "2023-01-01 00:16:00")
+    f("2023-01-01 00:05:00", "2023-01-01 00:16:00")
+    f("2023-01-01 00:30:00", "2023-01-01 01:16:00")
+}


@@ -168,7 +168,8 @@ func TestManagerUpdate(t *testing.T) {
             Name: "TestGroup", Rules: []Rule{
                 Conns,
                 ExampleAlertAlwaysFiring,
-            }},
+            },
+        },
     },
 },
 {
@@ -191,7 +192,8 @@ func TestManagerUpdate(t *testing.T) {
             Rules: []Rule{
                 Conns,
                 ExampleAlertAlwaysFiring,
-            }},
+            },
+        },
     },
 },
 {
@@ -264,7 +266,8 @@ func TestManagerUpdateNegative(t *testing.T) {
     {
         nil,
         nil,
-        config.Group{Name: "Recording rule only",
+        config.Group{
+            Name: "Recording rule only",
             Rules: []config.Rule{
                 {Record: "record", Expr: "max(up)"},
             },
@@ -274,7 +277,8 @@ func TestManagerUpdateNegative(t *testing.T) {
     {
         nil,
         nil,
-        config.Group{Name: "Alerting rule only",
+        config.Group{
+            Name: "Alerting rule only",
             Rules: []config.Rule{
                 {Alert: "alert", Expr: "up > 0"},
             },
@@ -284,7 +288,8 @@ func TestManagerUpdateNegative(t *testing.T) {
     {
         []notifier.Notifier{&fakeNotifier{}},
         nil,
-        config.Group{Name: "Recording and alerting rules",
+        config.Group{
+            Name: "Recording and alerting rules",
            Rules: []config.Rule{
                {Alert: "alert1", Expr: "up > 0"},
                {Alert: "alert2", Expr: "up > 0"},
@@ -296,7 +301,8 @@ func TestManagerUpdateNegative(t *testing.T) {
     {
         nil,
         &remotewrite.Client{},
-        config.Group{Name: "Recording and alerting rules",
+        config.Group{
+            Name: "Recording and alerting rules",
             Rules: []config.Rule{
                 {Record: "record1", Expr: "max(up)"},
                 {Record: "record2", Expr: "max(up)"},


@@ -61,6 +61,7 @@ func newRecordingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rul
     q: qb.BuildWithParams(datasource.QuerierParams{
         DataSourceType:     group.Type.String(),
         EvaluationInterval: group.Interval,
+        EvalOffset:         group.EvalOffset,
         QueryParams:        group.Params,
         Headers:            group.Headers,
     }),


@@ -79,7 +79,7 @@ func TestRule_state(t *testing.T) {
 // TestRule_stateConcurrent supposed to test concurrent
 // execution of state updates.
 // Should be executed with -race flag
-func TestRule_stateConcurrent(t *testing.T) {
+func TestRule_stateConcurrent(_ *testing.T) {
     state := newRuleState(20)
     const workers = 50


@@ -41,7 +41,7 @@ func TestErrGroup(t *testing.T) {
 // TestErrGroupConcurrent supposed to test concurrent
 // use of error group.
 // Should be executed with -race flag
-func TestErrGroupConcurrent(t *testing.T) {
+func TestErrGroupConcurrent(_ *testing.T) {
     eg := new(ErrGroup)
     const writersN = 4


@@ -35,9 +35,42 @@ accounting and rate limiting such as [vmgateway](https://docs.victoriametrics.co
 Each `url_prefix` in the [-auth.config](#auth-config) may contain either a single url or a list of urls.
 In the latter case `vmauth` balances load among the configured urls in least-loaded round-robin manner.

-`vmauth` retries failing `GET` requests across the configured list of urls.
-This feature is useful for balancing the load among multiple `vmselect` and/or `vminsert` nodes
-in [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
+If the backend at the configured url isn't available, then `vmauth` tries sending the request to the remaining configured urls.
+
+It is possible to configure automatic retry of requests if the backend responds with a status code from the optional `retry_status_codes` list.
+
+The load balancing feature can be used in the following cases:
+
+- Balancing the load among multiple `vmselect` and/or `vminsert` nodes in [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
+  The following `-auth.config` file can be used for spreading incoming requests among 3 vmselect nodes and re-trying failed requests
+  or requests with 500 and 502 response status codes:
+
+  ```yml
+  unauthorized_user:
+    url_prefix:
+    - http://vmselect1:8481/
+    - http://vmselect2:8481/
+    - http://vmselect3:8481/
+    retry_status_codes: [500, 502]
+  ```
+
+- Spreading select queries among multiple availability zones (AZs) with identical data. For example, the following config spreads select queries
+  among 3 AZs. Requests are re-tried if some AZs are temporarily unavailable or if some `vmstorage` nodes in some AZs are temporarily unavailable.
+  `vmauth` adds the `deny_partial_response=1` query arg to all the queries in order to guarantee a full response from every AZ.
+  See [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-availability) for details.
+
+  ```yml
+  unauthorized_user:
+    url_prefix:
+    - https://vmselect-az1/?deny_partial_response=1
+    - https://vmselect-az2/?deny_partial_response=1
+    - https://vmselect-az3/?deny_partial_response=1
+    retry_status_codes: [500, 502, 503]
+  ```
+
+Load balancing can also be configured independently per each user and per each `url_map` entry.
+See [auth config docs](#auth-config) for more details.

 ## Concurrency limiting

@@ -117,11 +150,16 @@ users:
 # Requests with the 'Authorization: Bearer YYY' header are proxied to http://localhost:8428 ,
 # The `X-Scope-OrgID: foobar` http header is added to every proxied request.
+# The `X-Server-Hostname` http header is removed from the proxied response.
 # For example, http://vmauth:8427/api/v1/query is proxied to http://localhost:8428/api/v1/query
 - bearer_token: "YYY"
   url_prefix: "http://localhost:8428"
+  # extra headers to add to the request or remove from the request (if header value is empty)
   headers:
   - "X-Scope-OrgID: foobar"
+  # extra headers to add to the response or remove from the response (if header value is empty)
+  response_headers:
+  - "X-Server-Hostname:" # empty value means the header will be removed from the response

 # All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
 # are proxied to http://localhost:8428 .
@@ -172,9 +210,11 @@ users:
 #   - http://vmselect2:8481/select/42/prometheus
 # For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect1:8480/select/42/prometheus/api/v1/query
 # or to http://vmselect2:8480/select/42/prometheus/api/v1/query .
+# Requests are re-tried at other url_prefix backends if response status codes match 500 or 502.
 #
 # - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write .
 #   The "X-Scope-OrgID: abc" http header is added to these requests.
+#   The "X-Server-Hostname" http header is removed from the proxied response.
 #
 # Requests which do not match `src_paths` from the `url_map` are proxied to the urls from `default_url`
 # in a round-robin manner. The original request path is passed in `request_path` query arg.
@@ -190,10 +230,13 @@ users:
     url_prefix:
     - "http://vmselect1:8481/select/42/prometheus"
     - "http://vmselect2:8481/select/42/prometheus"
+    retry_status_codes: [500, 502]
   - src_paths: ["/api/v1/write"]
     url_prefix: "http://vminsert:8480/insert/42/prometheus"
     headers:
     - "X-Scope-OrgID: abc"
+    response_headers:
+    - "X-Server-Hostname:" # empty value means the header will be removed from the response
   ip_filters:
     deny_list: [127.0.0.1]
   default_url:
@@ -201,16 +244,14 @@ users:
   - "http://default2:8888/unsupported_url_handler"

 # Requests without Authorization header are routed according to `unauthorized_user` section.
+# Requests are routed in round-robin fashion between `url_prefix` backends.
+# The deny_partial_response query arg is added to all the routed requests.
+# The requests are re-tried if url_prefix backends send 500 or 503 response status codes.
 unauthorized_user:
-  url_map:
-  - src_paths:
-    - /api/v1/query
-    - /api/v1/query_range
-    url_prefix:
-    - http://vmselect1:8481/select/0/prometheus
-    - http://vmselect2:8481/select/0/prometheus
-    ip_filters:
-      allow_list: [8.8.8.8]
+  url_prefix:
+  - http://vmselect-az1/?deny_partial_response=1
+  - http://vmselect-az2/?deny_partial_response=1
+  retry_status_codes: [503, 500]

 ip_filters:
   allow_list: ["1.2.3.0/24", "127.0.0.1"]
@@ -221,6 +262,9 @@ ip_filters:
 The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
 This may be useful for passing secrets to the config.

+Please note, vmauth doesn't follow redirects. If the destination redirects a request to a new location, make sure this
+location is supported in the vmauth `url_map` config.
+
 ## Security

 It is expected that all the backend services protected by `vmauth` are located in an isolated private network, so they can be accessed by external users only via `vmauth`.
@@ -240,11 +284,11 @@ Alternatively, [https termination proxy](https://en.wikipedia.org/wiki/TLS_termi
 It is recommended protecting the following endpoints with authKeys:

 * `/-/reload` with `-reloadAuthKey` command-line flag, so external users couldn't trigger config reload.
-* `/flags` with `-flagsAuthkey` command-line flag, so unauthorized users couldn't get application command-line flags.
-* `/metrics` with `metricsAuthkey` command-line flag, so unauthorized users couldn't get access to [vmauth metrics](#monitoring).
-* `/debug/pprof` with `pprofAuthKey` command-line flag, so unauthorized users couldn't get access to [profiling information](#profiling).
+* `/flags` with `-flagsAuthKey` command-line flag, so unauthorized users couldn't get application command-line flags.
+* `/metrics` with `-metricsAuthKey` command-line flag, so unauthorized users couldn't get access to [vmauth metrics](#monitoring).
+* `/debug/pprof` with `-pprofAuthKey` command-line flag, so unauthorized users couldn't get access to [profiling information](#profiling).

-`vmauth` also supports the ability to restict access by IP - see [these docs](#ip-filters). See also [concurrency limiting docs](#concurrency-limiting).
+`vmauth` also supports the ability to restrict access by IP - see [these docs](#ip-filters). See also [concurrency limiting docs](#concurrency-limiting).

 ## Monitoring

@@ -276,7 +320,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
 ### Development build

-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
 1. Run `make vmauth` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds the `vmauth` binary and puts it into the `bin` folder.

@@ -350,7 +394,7 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
   -envflag.prefix string
     Prefix for environment variables if -envflag.enable is set
   -eula
-    By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+    Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
   -failTimeout duration
     Sets a delay period for load balancing to skip a malfunctioning backend. (defaults 3s)
   -flagsAuthKey string
@@ -383,6 +427,12 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
     Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
   -internStringMaxLen int
     The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
+  -license string
+    See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+  -license.forceOffline
+    See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+  -licenseFile string
+    See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
   -logInvalidAuthTokens
     Whether to log requests with invalid auth tokens. Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
   -loggerDisableTimestamps
@@ -407,6 +457,9 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
     The maximum number of concurrent requests vmauth can process. Other requests are rejected with '429 Too Many Requests' http status code. See also -maxConcurrentPerUserRequests and -maxIdleConnsPerBackend command-line options (default 1000)
   -maxIdleConnsPerBackend int
     The maximum number of idle connections vmauth can open per each backend host. See also -maxConcurrentRequests (default 100)
+  -maxRequestBodySizeToRetry size
+    The maximum request body size, which can be cached and re-tried at other backends. Bigger values may require more memory
+    Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 16384)
   -memory.allowedBytes size
     Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from the OS page cache resulting in higher disk IO usage
     Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)


@@ -37,15 +37,16 @@ type AuthConfig struct {
 // UserInfo is user information read from authConfigPath
 type UserInfo struct {
     Name                  string      `yaml:"name,omitempty"`
     BearerToken           string      `yaml:"bearer_token,omitempty"`
     Username              string      `yaml:"username,omitempty"`
     Password              string      `yaml:"password,omitempty"`
     URLPrefix             *URLPrefix  `yaml:"url_prefix,omitempty"`
     URLMaps               []URLMap    `yaml:"url_map,omitempty"`
-    Headers               []Header    `yaml:"headers,omitempty"`
+    HeadersConf           HeadersConf `yaml:",inline"`
     MaxConcurrentRequests int         `yaml:"max_concurrent_requests,omitempty"`
     DefaultURL            *URLPrefix  `yaml:"default_url,omitempty"`
+    RetryStatusCodes      []int       `yaml:"retry_status_codes,omitempty"`

     concurrencyLimitCh      chan struct{}
     concurrencyLimitReached *metrics.Counter
@@ -54,6 +55,12 @@ type UserInfo struct {
     requestsDuration *metrics.Summary
 }

+// HeadersConf represents config for request and response headers.
+type HeadersConf struct {
+    RequestHeaders  []Header `yaml:"headers,omitempty"`
+    ResponseHeaders []Header `yaml:"response_headers,omitempty"`
+}
+
 func (ui *UserInfo) beginConcurrencyLimit() error {
     select {
     case ui.concurrencyLimitCh <- struct{}{}:
@@ -105,9 +112,10 @@ func (h *Header) MarshalYAML() (interface{}, error) {
 // URLMap is a mapping from source paths to target urls.
 type URLMap struct {
     SrcPaths         []*SrcPath  `yaml:"src_paths,omitempty"`
     URLPrefix        *URLPrefix  `yaml:"url_prefix,omitempty"`
-    Headers          []Header    `yaml:"headers,omitempty"`
+    HeadersConf      HeadersConf `yaml:",inline"`
+    RetryStatusCodes []int       `yaml:"retry_status_codes,omitempty"`
 }

 // SrcPath represents an src path
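Because HeadersConf is embedded with `yaml:",inline"`, the `headers` and `response_headers` keys stay at the top level of a user entry instead of being nested under a separate key. A hedged sketch using gopkg.in/yaml.v2 with simplified stand-in types (the real Header type has a custom string unmarshaler, so plain strings are used here):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// simplified stand-ins for the structs above, for illustration only
type headersConf struct {
	RequestHeaders  []string `yaml:"headers,omitempty"`
	ResponseHeaders []string `yaml:"response_headers,omitempty"`
}

type userInfo struct {
	BearerToken string      `yaml:"bearer_token,omitempty"`
	HeadersConf headersConf `yaml:",inline"`
}

func main() {
	data := []byte(`
bearer_token: YYY
headers:
- "X-Scope-OrgID: foobar"
response_headers:
- "X-Server-Hostname:"
`)
	var ui userInfo
	if err := yaml.Unmarshal(data, &ui); err != nil {
		panic(err)
	}
	fmt.Println(ui.HeadersConf.RequestHeaders)  // [X-Scope-OrgID: foobar]
	fmt.Println(ui.HeadersConf.ResponseHeaders) // [X-Server-Hostname:]
}
```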


@@ -299,14 +299,16 @@ users:
                 "http://vminsert1/insert/0/prometheus",
                 "http://vminsert2/insert/0/prometheus",
             }),
-            Headers: []Header{
-                {
-                    Name:  "foo",
-                    Value: "bar",
-                },
-                {
-                    Name:  "xxx",
-                    Value: "y",
+            HeadersConf: HeadersConf{
+                RequestHeaders: []Header{
+                    {
+                        Name:  "foo",
+                        Value: "bar",
+                    },
+                    {
+                        Name:  "xxx",
+                        Value: "y",
+                    },
                 },
             },
         },
@@ -325,14 +327,16 @@ users:
                 "http://vminsert1/insert/0/prometheus",
                 "http://vminsert2/insert/0/prometheus",
             }),
-            Headers: []Header{
-                {
-                    Name:  "foo",
-                    Value: "bar",
-                },
-                {
-                    Name:  "xxx",
-                    Value: "y",
+            HeadersConf: HeadersConf{
+                RequestHeaders: []Header{
+                    {
+                        Name:  "foo",
+                        Value: "bar",
+                    },
+                    {
+                        Name:  "xxx",
+                        Value: "y",
+                    },
                 },
             },
         },
@@ -389,14 +393,16 @@ users:
                 "http://vminsert1/insert/0/prometheus",
                 "http://vminsert2/insert/0/prometheus",
             }),
-            Headers: []Header{
-                {
-                    Name:  "foo",
-                    Value: "bar",
-                },
-                {
-                    Name:  "xxx",
-                    Value: "y",
+            HeadersConf: HeadersConf{
+                RequestHeaders: []Header{
+                    {
+                        Name:  "foo",
+                        Value: "bar",
+                    },
+                    {
+                        Name:  "xxx",
+                        Value: "y",
+                    },
                 },
             },
         },
@@ -419,14 +425,16 @@ users:
                 "http://vminsert1/insert/0/prometheus",
                 "http://vminsert2/insert/0/prometheus",
             }),
-            Headers: []Header{
-                {
-                    Name:  "foo",
-                    Value: "bar",
-                },
-                {
-                    Name:  "xxx",
-                    Value: "y",
+            HeadersConf: HeadersConf{
+                RequestHeaders: []Header{
+                    {
+                        Name:  "foo",
+                        Value: "bar",
+                    },
+                    {
+                        Name:  "xxx",
+                        Value: "y",
+                    },
                 },
             },
         },

@@ -12,11 +12,16 @@ users:
 # Requests with the 'Authorization: Bearer YYY' header are proxied to http://localhost:8428 ,
 # The `X-Scope-OrgID: foobar` http header is added to every proxied request.
+# The `X-Server-Hostname:` http header is removed from the proxied response.
 # For example, http://vmauth:8427/api/v1/query is proxied to http://localhost:8428/api/v1/query
 - bearer_token: "YYY"
   url_prefix: "http://localhost:8428"
+  # extra headers to add to the request or remove from the request (if header value is empty)
   headers:
   - "X-Scope-OrgID: foobar"
+  # extra headers to add to the response or remove from the response (if header value is empty)
+  response_headers:
+  - "X-Server-Hostname:" # empty value means the header will be removed from the response

 # All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
 # are proxied to http://localhost:8428 .
@@ -67,6 +72,7 @@ users:
 #   - http://vmselect2:8481/select/42/prometheus
 # For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect1:8480/select/42/prometheus/api/v1/query
 # or to http://vmselect2:8480/select/42/prometheus/api/v1/query .
+# Requests are re-tried at other url_prefix backends if response status codes match 500 or 502.
 #
 # - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write .
 #   The "X-Scope-OrgID: abc" http header is added to these requests.
@@ -85,6 +91,7 @@ users:
     url_prefix:
     - "http://vmselect1:8481/select/42/prometheus"
     - "http://vmselect2:8481/select/42/prometheus"
+    retry_status_codes: [500, 502]
   - src_paths: ["/api/v1/write"]
     url_prefix: "http://vminsert:8480/insert/42/prometheus"
     headers:
@@ -94,11 +101,11 @@ users:
   - "http://default2:8888/unsupported_url_handler"

 # Requests without Authorization header are routed according to `unauthorized_user` section.
+# Requests are routed in round-robin fashion between `url_prefix` backends.
+# The deny_partial_response query arg is added to all the routed requests.
+# The requests are re-tried if url_prefix backends send 500 or 503 response status codes.
 unauthorized_user:
-  url_map:
-  - src_paths:
-    - /api/v1/query
-    - /api/v1/query_range
-    url_prefix:
-    - http://vmselect1:8481/select/0/prometheus
-    - http://vmselect2:8481/select/0/prometheus
+  url_prefix:
+  - http://vmselect-az1/?deny_partial_response=1
+  - http://vmselect-az2/?deny_partial_response=1
+  retry_status_codes: [503, 500]


@@ -39,18 +39,6 @@ users:
   - "http://default1:8888/unsupported_url_handler"
   - "http://default2:8888/unsupported_url_handler"

-# Requests without Authorization header are routed according to `unauthorized_user` section.
-unauthorized_user:
-  url_map:
-  - src_paths:
-    - /api/v1/query
-    - /api/v1/query_range
-    url_prefix:
-    - http://vmselect1:8481/select/0/prometheus
-    - http://vmselect2:8481/select/0/prometheus
-    ip_filters:
-      allow_list: [8.8.8.8]
-
 ip_filters:
   allow_list: ["1.2.3.0/24", "127.0.0.1"]
   deny_list:


@@ -1,6 +1,8 @@
 package main

 import (
+    "context"
+    "errors"
     "flag"
     "fmt"
     "io"
@@ -41,7 +43,9 @@ var (
     reloadAuthKey        = flag.String("reloadAuthKey", "", "Auth key for /-/reload http endpoint. It must be passed as authKey=...")
     logInvalidAuthTokens = flag.Bool("logInvalidAuthTokens", false, "Whether to log requests with invalid auth tokens. "+
         `Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page`)
-    failTimeout = flag.Duration("failTimeout", 3*time.Second, "Sets a delay period for load balancing to skip a malfunctioning backend.")
+    failTimeout               = flag.Duration("failTimeout", 3*time.Second, "Sets a delay period for load balancing to skip a malfunctioning backend")
+    maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size, which can be cached and re-tried at other backends. "+
+        "Bigger values may require more memory")
 )

 func main() {
@@ -151,7 +155,7 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
 func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
     u := normalizeURL(r.URL)
-    up, headers := ui.getURLPrefixAndHeaders(u)
+    up, hc, retryStatusCodes := ui.getURLPrefixAndHeaders(u)
     isDefault := false
     if up == nil {
         missingRouteRequests.Inc()
@@ -159,14 +163,15 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
             httpserver.Errorf(w, r, "missing route for %q", u.String())
             return
         }
-        up, headers = ui.DefaultURL, ui.Headers
+        up, hc, retryStatusCodes = ui.DefaultURL, ui.HeadersConf, ui.RetryStatusCodes
         isDefault = true
     }
+    r.Body = &readTrackingBody{
+        r: r.Body,
+    }

     maxAttempts := up.getBackendsCount()
-    if maxAttempts > 1 {
-        r.Body = &readTrackingBody{
-            r: r.Body,
-        }
-    }
     for i := 0; i < maxAttempts; i++ {
         bu := up.getLeastLoadedBackendURL()
         targetURL := bu.url
@@ -178,7 +183,7 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
         } else { // Update path for regular routes.
             targetURL = mergeURLs(targetURL, u)
         }
-        ok := tryProcessingRequest(w, r, targetURL, headers)
+        ok := tryProcessingRequest(w, r, targetURL, hc, retryStatusCodes)
         bu.put()
         if ok {
             return
@@ -192,20 +197,24 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
     httpserver.Errorf(w, r, "%s", err)
 }

-func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url.URL, headers []Header) bool {
+func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url.URL, hc HeadersConf, retryStatusCodes []int) bool {
     // This code has been copied from net/http/httputil/reverseproxy.go
     req := sanitizeRequestHeaders(r)
     req.URL = targetURL
-    for _, h := range headers {
-        req.Header.Set(h.Name, h.Value)
-    }
+    updateHeadersByConfig(req.Header, hc.RequestHeaders)
     transportOnce.Do(transportInit)
     res, err := transport.RoundTrip(req)
+    rtb, rtbOK := req.Body.(*readTrackingBody)
     if err != nil {
-        rtb := req.Body.(*readTrackingBody)
-        if rtb.readStarted {
-            // Request body has been already read, so it is impossible to retry the request.
-            // Return the error to the client then.
+        if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+            // Do not retry canceled or timed out requests
+            remoteAddr := httpserver.GetQuotedRemoteAddr(r)
+            requestURI := httpserver.GetRequestURI(r)
+            logger.Warnf("remoteAddr: %s; requestURI: %s; error when proxying response body from %s: %s", remoteAddr, requestURI, targetURL, err)
+            return true
+        }
+        if !rtbOK || !rtb.canRetry() {
+            // Request body cannot be re-sent to another backend. Return the error to the client then.
             err = &httpserver.ErrorWithStatusCode{
                 Err:        fmt.Errorf("cannot proxy the request to %q: %w", targetURL, err),
                 StatusCode: http.StatusServiceUnavailable,
@@ -216,12 +225,23 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
         // Retry the request if its body wasn't read yet. This usually means that the backend isn't reachable.
         remoteAddr := httpserver.GetQuotedRemoteAddr(r)
         // NOTE: do not use httpserver.GetRequestURI
-        // it explicitly reads request body and fails retries.
-        logger.Warnf("remoteAddr: %s; requestURI: %s; error when proxying the request to %q: %s", remoteAddr, req.URL, targetURL, err)
+        // it explicitly reads request body, which may fail retries.
+        logger.Warnf("remoteAddr: %s; requestURI: %s; retrying the request to %s because of response error: %s", remoteAddr, req.URL, targetURL, err)
+        return false
+    }
+    if (rtbOK && rtb.canRetry()) && hasInt(retryStatusCodes, res.StatusCode) {
+        // Retry requests at other backends if the response status code matches retryStatusCodes.
+        // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4893
+        remoteAddr := httpserver.GetQuotedRemoteAddr(r)
+        // NOTE: do not use httpserver.GetRequestURI
+        // it explicitly reads request body, which may fail retries.
+        logger.Warnf("remoteAddr: %s; requestURI: %s; retrying the request to %s because response status code=%d belongs to retry_status_codes=%d",
+            remoteAddr, req.URL, targetURL, res.StatusCode, retryStatusCodes)
         return false
     }
     removeHopHeaders(res.Header)
     copyHeader(w.Header(), res.Header)
+    updateHeadersByConfig(w.Header(), hc.ResponseHeaders)
     w.WriteHeader(res.StatusCode)

     copyBuf := copyBufPool.Get()
@@ -237,6 +257,15 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
     return true
 }

+func hasInt(a []int, n int) bool {
+    for _, x := range a {
+        if x == n {
+            return true
+        }
+    }
+    return false
+}
+
 var copyBufPool bytesutil.ByteBufferPool

 func copyHeader(dst, src http.Header) {
@@ -247,6 +276,16 @@ func copyHeader(dst, src http.Header) {
     }
 }

+func updateHeadersByConfig(headers http.Header, config []Header) {
+    for _, h := range config {
+        if h.Value == "" {
+            headers.Del(h.Name)
+        } else {
+            headers.Set(h.Name, h.Value)
+        }
+    }
+}
+
 func sanitizeRequestHeaders(r *http.Request) *http.Request {
// This code has been copied from net/http/httputil/reverseproxy.go // This code has been copied from net/http/httputil/reverseproxy.go
req := r.Clone(r.Context()) req := r.Clone(r.Context())
@ -361,26 +400,86 @@ func handleConcurrencyLimitError(w http.ResponseWriter, r *http.Request, err err
} }
type readTrackingBody struct { type readTrackingBody struct {
r io.ReadCloser // r contains reader for initial data reading
readStarted bool r io.ReadCloser
// buf is a buffer for data read from r. Buf size is limited by maxRequestBodySizeToRetry.
// If more than maxRequestBodySizeToRetry bytes are read from r, then cannotRetry is set to true.
buf []byte
// cannotRetry is set to true when more than maxRequestBodySizeToRetry bytes are read from r.
// In this case the read data cannot fit into buf, so it cannot be re-read from buf.
cannotRetry bool
// bufComplete is set to true when buf contains complete request body read from r.
bufComplete bool
// needReadBuf is set to true when Read() must be performed from buf instead of r.
needReadBuf bool
// offset is an offset at buf for the next data read if needReadBuf is set to true.
offset int
} }
// Read implements io.Reader interface // Read implements io.Reader interface
// tracks body reading requests // tracks body reading requests
func (rtb *readTrackingBody) Read(p []byte) (int, error) { func (rtb *readTrackingBody) Read(p []byte) (int, error) {
if len(p) > 0 { if rtb.needReadBuf {
rtb.readStarted = true if rtb.offset >= len(rtb.buf) {
return 0, io.EOF
}
n := copy(p, rtb.buf[rtb.offset:])
rtb.offset += n
return n, nil
} }
return rtb.r.Read(p)
if rtb.r == nil {
return 0, fmt.Errorf("cannot read data after closing the reader")
}
n, err := rtb.r.Read(p)
if rtb.cannotRetry {
return n, err
}
if len(rtb.buf)+n > maxRequestBodySizeToRetry.IntN() {
rtb.cannotRetry = true
return n, err
}
rtb.buf = append(rtb.buf, p[:n]...)
if err == io.EOF {
rtb.bufComplete = true
}
return n, err
}
func (rtb *readTrackingBody) canRetry() bool {
if rtb.cannotRetry {
return false
}
if len(rtb.buf) > 0 && !rtb.needReadBuf {
return false
}
return true
} }
// Close implements io.Closer interface. // Close implements io.Closer interface.
func (rtb *readTrackingBody) Close() error { func (rtb *readTrackingBody) Close() error {
// Close rtb.r only if at least a single Read call was performed. rtb.offset = 0
// http.Roundtrip performs body.Close call even without any Read calls if rtb.bufComplete {
// so this hack allows us to reuse request body rtb.needReadBuf = true
if rtb.readStarted {
return rtb.r.Close()
} }
// Close rtb.r only if the request body is completely read or if it is too big.
// http.Transport.RoundTrip performs a body.Close call even without any Read calls,
// so this hack allows us to reuse the request body.
if rtb.bufComplete || rtb.cannotRetry {
if rtb.r == nil {
return nil
}
err := rtb.r.Close()
rtb.r = nil
return err
}
return nil return nil
} }
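Taken together, the changes above add per-route response headers and status-code based retries. Below is a minimal `-auth.config` sketch; the YAML key names `headers`, `response_headers` and `retry_status_codes` are inferred from the Go structs in this diff, so check the vmauth docs before relying on them:

```yaml
users:
  - username: "foo"
    url_prefix:
      - "http://backend1:8428"
      - "http://backend2:8428"
    # request headers; an empty value deletes the header (see updateHeadersByConfig)
    headers:
      - "X-Scope-OrgID: tenant-1"
    # headers applied to the response before it is returned to the client
    response_headers:
      - "Server:"
    # responses with these status codes are retried at the remaining backends
    retry_status_codes: [500, 502, 503]
```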

app/vmauth/main_test.go Normal file
View file

@ -0,0 +1,90 @@
package main
import (
"bytes"
"io"
"testing"
)
func TestReadTrackingBodyRetrySuccess(t *testing.T) {
f := func(s string) {
t.Helper()
rtb := &readTrackingBody{
r: io.NopCloser(bytes.NewBufferString(s)),
}
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true")
}
for i := 0; i < 5; i++ {
data, err := io.ReadAll(rtb)
if err != nil {
t.Fatalf("unexpected error when reading all the data at iteration %d: %s", i, err)
}
if string(data) != s {
t.Fatalf("unexpected data read at iteration %d\ngot\n%s\nwant\n%s", i, data, s)
}
if err := rtb.Close(); err != nil {
t.Fatalf("unexpected error when closing readTrackingBody at iteration %d: %s", i, err)
}
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true at iteration %d", i)
}
}
}
f("")
f("foo")
f("foobar")
f(newTestString(maxRequestBodySizeToRetry.IntN()))
}
func TestReadTrackingBodyRetryFailure(t *testing.T) {
f := func(s string) {
t.Helper()
rtb := &readTrackingBody{
r: io.NopCloser(bytes.NewBufferString(s)),
}
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true")
}
buf := make([]byte, 1)
n, err := rtb.Read(buf)
if err != nil {
t.Fatalf("unexpected error when reading a single byte: %s", err)
}
if n != 1 {
t.Fatalf("unexpected number of bytes read; got %d; want 1", n)
}
if rtb.canRetry() {
t.Fatalf("canRetry() must return false")
}
data, err := io.ReadAll(rtb)
if err != nil {
t.Fatalf("unexpected error when reading all the data: %s", err)
}
if string(buf)+string(data) != s {
t.Fatalf("unexpected data read\ngot\n%s\nwant\n%s", string(buf)+string(data), s)
}
if err := rtb.Close(); err != nil {
t.Fatalf("unexpected error when closing readTrackingBody: %s", err)
}
if rtb.canRetry() {
t.Fatalf("canRetry() must return false")
}
data, err = io.ReadAll(rtb)
if err == nil {
t.Fatalf("expecting non-nil error")
}
if len(data) != 0 {
t.Fatalf("unexpected non-empty data read: %q", data)
}
}
f(newTestString(maxRequestBodySizeToRetry.IntN() + 1))
f(newTestString(2 * maxRequestBodySizeToRetry.IntN()))
}
func newTestString(sLen int) string {
return string(make([]byte, sLen))
}

View file

@ -8,6 +8,9 @@ import (
func mergeURLs(uiURL, requestURI *url.URL) *url.URL { func mergeURLs(uiURL, requestURI *url.URL) *url.URL {
targetURL := *uiURL targetURL := *uiURL
if strings.HasPrefix(requestURI.Path, "/") {
targetURL.Path = strings.TrimSuffix(targetURL.Path, "/")
}
targetURL.Path += requestURI.Path targetURL.Path += requestURI.Path
requestParams := requestURI.Query() requestParams := requestURI.Query()
// fast path // fast path
@ -29,18 +32,18 @@ func mergeURLs(uiURL, requestURI *url.URL) *url.URL {
return &targetURL return &targetURL
} }
func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL) (*URLPrefix, []Header) { func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL) (*URLPrefix, HeadersConf, []int) {
for _, e := range ui.URLMaps { for _, e := range ui.URLMaps {
for _, sp := range e.SrcPaths { for _, sp := range e.SrcPaths {
if sp.match(u.Path) { if sp.match(u.Path) {
return e.URLPrefix, e.Headers return e.URLPrefix, e.HeadersConf, e.RetryStatusCodes
} }
} }
} }
if ui.URLPrefix != nil { if ui.URLPrefix != nil {
return ui.URLPrefix, ui.Headers return ui.URLPrefix, ui.HeadersConf, ui.RetryStatusCodes
} }
return nil, nil return nil, HeadersConf{}, nil
} }
func normalizeURL(uOrig *url.URL) *url.URL { func normalizeURL(uOrig *url.URL) *url.URL {

View file

@ -3,18 +3,19 @@ package main
import ( import (
"fmt" "fmt"
"net/url" "net/url"
"reflect"
"testing" "testing"
) )
func TestCreateTargetURLSuccess(t *testing.T) { func TestCreateTargetURLSuccess(t *testing.T) {
f := func(ui *UserInfo, requestURI, expectedTarget, expectedHeaders string) { f := func(ui *UserInfo, requestURI, expectedTarget, expectedRequestHeaders, expectedResponseHeaders string, expectedRetryStatusCodes []int) {
t.Helper() t.Helper()
u, err := url.Parse(requestURI) u, err := url.Parse(requestURI)
if err != nil { if err != nil {
t.Fatalf("cannot parse %q: %s", requestURI, err) t.Fatalf("cannot parse %q: %s", requestURI, err)
} }
u = normalizeURL(u) u = normalizeURL(u)
up, headers := ui.getURLPrefixAndHeaders(u) up, hc, retryStatusCodes := ui.getURLPrefixAndHeaders(u)
if up == nil { if up == nil {
t.Fatalf("cannot determie backend: %s", err) t.Fatalf("cannot determie backend: %s", err)
} }
@ -24,37 +25,43 @@ func TestCreateTargetURLSuccess(t *testing.T) {
if target.String() != expectedTarget { if target.String() != expectedTarget {
t.Fatalf("unexpected target; got %q; want %q", target, expectedTarget) t.Fatalf("unexpected target; got %q; want %q", target, expectedTarget)
} }
headersStr := fmt.Sprintf("%q", headers) headersStr := fmt.Sprintf("%q", hc.RequestHeaders)
if headersStr != expectedHeaders { if headersStr != expectedRequestHeaders {
t.Fatalf("unexpected headers; got %s; want %s", headersStr, expectedHeaders) t.Fatalf("unexpected request headers; got %s; want %s", headersStr, expectedRequestHeaders)
}
if !reflect.DeepEqual(retryStatusCodes, expectedRetryStatusCodes) {
t.Fatalf("unexpected retryStatusCodes; got %d; want %d", retryStatusCodes, expectedRetryStatusCodes)
} }
} }
// Simple routing with `url_prefix` // Simple routing with `url_prefix`
f(&UserInfo{ f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"), URLPrefix: mustParseURL("http://foo.bar"),
}, "", "http://foo.bar/.", "[]") }, "", "http://foo.bar/.", "[]", "[]", nil)
f(&UserInfo{ f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"), URLPrefix: mustParseURL("http://foo.bar"),
Headers: []Header{{ HeadersConf: HeadersConf{
Name: "bb", RequestHeaders: []Header{{
Value: "aaa", Name: "bb",
}}, Value: "aaa",
}, "/", "http://foo.bar", `[{"bb" "aaa"}]`) }},
},
RetryStatusCodes: []int{503, 501},
}, "/", "http://foo.bar", `[{"bb" "aaa"}]`, `[]`, []int{503, 501})
f(&UserInfo{ f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/federate"), URLPrefix: mustParseURL("http://foo.bar/federate"),
}, "/", "http://foo.bar/federate", "[]") }, "/", "http://foo.bar/federate", "[]", "[]", nil)
f(&UserInfo{ f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"), URLPrefix: mustParseURL("http://foo.bar"),
}, "a/b?c=d", "http://foo.bar/a/b?c=d", "[]") }, "a/b?c=d", "http://foo.bar/a/b?c=d", "[]", "[]", nil)
f(&UserInfo{ f(&UserInfo{
URLPrefix: mustParseURL("https://sss:3894/x/y"), URLPrefix: mustParseURL("https://sss:3894/x/y"),
}, "/z", "https://sss:3894/x/y/z", "[]") }, "/z", "https://sss:3894/x/y/z", "[]", "[]", nil)
f(&UserInfo{ f(&UserInfo{
URLPrefix: mustParseURL("https://sss:3894/x/y"), URLPrefix: mustParseURL("https://sss:3894/x/y"),
}, "/../../aaa", "https://sss:3894/x/y/aaa", "[]") }, "/../../aaa", "https://sss:3894/x/y/aaa", "[]", "[]", nil)
f(&UserInfo{ f(&UserInfo{
URLPrefix: mustParseURL("https://sss:3894/x/y"), URLPrefix: mustParseURL("https://sss:3894/x/y"),
}, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s%2F..%2Fd", "[]") }, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s%2F..%2Fd", "[]", "[]", nil)
// Complex routing with `url_map` // Complex routing with `url_map`
ui := &UserInfo{ ui := &UserInfo{
@ -62,16 +69,25 @@ func TestCreateTargetURLSuccess(t *testing.T) {
{ {
SrcPaths: getSrcPaths([]string{"/api/v1/query"}), SrcPaths: getSrcPaths([]string{"/api/v1/query"}),
URLPrefix: mustParseURL("http://vmselect/0/prometheus"), URLPrefix: mustParseURL("http://vmselect/0/prometheus"),
Headers: []Header{ HeadersConf: HeadersConf{
{ RequestHeaders: []Header{
Name: "xx", {
Value: "aa", Name: "xx",
Value: "aa",
},
{
Name: "yy",
Value: "asdf",
},
}, },
{ ResponseHeaders: []Header{
Name: "yy", {
Value: "asdf", Name: "qwe",
Value: "rty",
},
}, },
}, },
RetryStatusCodes: []int{503, 500, 501},
}, },
{ {
SrcPaths: getSrcPaths([]string{"/api/v1/write"}), SrcPaths: getSrcPaths([]string{"/api/v1/write"}),
@ -79,14 +95,21 @@ func TestCreateTargetURLSuccess(t *testing.T) {
}, },
}, },
URLPrefix: mustParseURL("http://default-server"), URLPrefix: mustParseURL("http://default-server"),
Headers: []Header{{ HeadersConf: HeadersConf{
Name: "bb", RequestHeaders: []Header{{
Value: "aaa", Name: "bb",
}}, Value: "aaa",
}},
ResponseHeaders: []Header{{
Name: "x",
Value: "y",
}},
},
RetryStatusCodes: []int{502},
} }
f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", `[{"xx" "aa"} {"yy" "asdf"}]`) f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", `[{"xx" "aa"} {"yy" "asdf"}]`, `[{"qwe" "rty"}]`, []int{503, 500, 501})
f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]") f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]", "[]", nil)
f(ui, "/api/v1/query_range", "http://default-server/api/v1/query_range", `[{"bb" "aaa"}]`) f(ui, "/api/v1/query_range", "http://default-server/api/v1/query_range", `[{"bb" "aaa"}]`, `[{"x" "y"}]`, []int{502})
// Complex routing regexp paths in `url_map` // Complex routing regexp paths in `url_map`
ui = &UserInfo{ ui = &UserInfo{
@ -102,18 +125,17 @@ func TestCreateTargetURLSuccess(t *testing.T) {
}, },
URLPrefix: mustParseURL("http://default-server"), URLPrefix: mustParseURL("http://default-server"),
} }
f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", "[]") f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", "[]", "[]", nil)
f(ui, "/api/v1/query_range?query=up", "http://vmselect/0/prometheus/api/v1/query_range?query=up", "[]") f(ui, "/api/v1/query_range?query=up", "http://vmselect/0/prometheus/api/v1/query_range?query=up", "[]", "[]", nil)
f(ui, "/api/v1/label/foo/values", "http://vmselect/0/prometheus/api/v1/label/foo/values", "[]") f(ui, "/api/v1/label/foo/values", "http://vmselect/0/prometheus/api/v1/label/foo/values", "[]", "[]", nil)
f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]") f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]", "[]", nil)
f(ui, "/api/v1/foo/bar", "http://default-server/api/v1/foo/bar", "[]") f(ui, "/api/v1/foo/bar", "http://default-server/api/v1/foo/bar", "[]", "[]", nil)
f(&UserInfo{ f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar?extra_label=team=dev"), URLPrefix: mustParseURL("http://foo.bar?extra_label=team=dev"),
}, "/api/v1/query", "http://foo.bar/api/v1/query?extra_label=team=dev", "[]") }, "/api/v1/query", "http://foo.bar/api/v1/query?extra_label=team=dev", "[]", "[]", nil)
f(&UserInfo{ f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar?extra_label=team=mobile"), URLPrefix: mustParseURL("http://foo.bar?extra_label=team=mobile"),
}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "[]") }, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "[]", "[]", nil)
} }
func TestCreateTargetURLFailure(t *testing.T) { func TestCreateTargetURLFailure(t *testing.T) {
@ -124,12 +146,18 @@ func TestCreateTargetURLFailure(t *testing.T) {
t.Fatalf("cannot parse %q: %s", requestURI, err) t.Fatalf("cannot parse %q: %s", requestURI, err)
} }
u = normalizeURL(u) u = normalizeURL(u)
up, headers := ui.getURLPrefixAndHeaders(u) up, hc, retryStatusCodes := ui.getURLPrefixAndHeaders(u)
if up != nil { if up != nil {
t.Fatalf("unexpected non-empty up=%#v", up) t.Fatalf("unexpected non-empty up=%#v", up)
} }
if headers != nil { if hc.RequestHeaders != nil {
t.Fatalf("unexpected non-empty headers=%q", headers) t.Fatalf("unexpected non-empty request headers=%q", hc.RequestHeaders)
}
if hc.ResponseHeaders != nil {
t.Fatalf("unexpected non-empty response headers=%q", hc.ResponseHeaders)
}
if retryStatusCodes != nil {
t.Fatalf("unexpected non-empty retryStatusCodes=%d", retryStatusCodes)
} }
} }
f(&UserInfo{}, "/foo/bar") f(&UserInfo{}, "/foo/bar")

View file

@ -89,6 +89,23 @@ Do not forget to remove old backups when they are no longer needed in order to s
See also [vmbackupmanager tool](https://docs.victoriametrics.com/vmbackupmanager.html) for automating smart backups. See also [vmbackupmanager tool](https://docs.victoriametrics.com/vmbackupmanager.html) for automating smart backups.
### Server-side copy of the existing backup
Sometimes it is necessary to make a server-side copy of an existing backup. This can be done by specifying the source backup path via the `-origin` command-line flag,
while the destination path for the backup copy must be specified via the `-dst` command-line flag. For example, the following command copies a backup
from `gs://bucket/foo` to `gs://bucket/bar`:
```console
./vmbackup -origin=gs://bucket/foo -dst=gs://bucket/bar
```
The `-origin` and `-dst` paths must point to the same object storage bucket or to the same filesystem.
A server-side backup copy is usually performed much faster than a regular backup, since the backup data isn't transferred
between the remote storage and the locally running `vmbackup` tool.
If `-dst` already contains some data, then its contents are synced with the `-origin` data. This allows making incremental server-side copies of backups.
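The same server-side copy works for filesystem-based backups, e.g. (a sketch; the directory layout is hypothetical):

```console
./vmbackup -origin=fs:///mnt/backups/daily -dst=fs:///mnt/backups/weekly
```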
## How does it work? ## How does it work?
The backup algorithm is the following: The backup algorithm is the following:
@ -196,7 +213,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
-envflag.prefix string -envflag.prefix string
Prefix for environment variables if -envflag.enable is set Prefix for environment variables if -envflag.enable is set
-eula -eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-flagsAuthKey string -flagsAuthKey string
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
-fs.disableMmap -fs.disableMmap
@ -225,6 +242,12 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
-internStringMaxLen int -internStringMaxLen int
The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500) The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
-license string
See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-license.forceOffline
See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-licenseFile string
See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-loggerDisableTimestamps -loggerDisableTimestamps
Whether to disable writing timestamps in logs Whether to disable writing timestamps in logs
-loggerErrorsPerSecondLimit int -loggerErrorsPerSecondLimit int
@ -301,7 +324,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
### Development build ### Development build
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19. 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
1. Run `make vmbackup` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). 1. Run `make vmbackup` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `vmbackup` binary and puts it into the `bin` folder. It builds `vmbackup` binary and puts it into the `bin` folder.

View file

@ -92,8 +92,6 @@ func main() {
logger.Fatalf("cannot delete snapshot: %s", err) logger.Fatalf("cannot delete snapshot: %s", err)
} }
} }
} else if len(*snapshotName) == 0 {
logger.Fatalf("`-snapshotName` or `-snapshot.createURL` must be provided")
} }
go httpserver.Serve(*httpListenAddr, false, nil) go httpserver.Serve(*httpListenAddr, false, nil)
@ -113,34 +111,48 @@ func main() {
} }
func makeBackup() error { func makeBackup() error {
if err := snapshot.Validate(*snapshotName); err != nil {
return fmt.Errorf("invalid -snapshotName=%q: %s", *snapshotName, err)
}
srcFS, err := newSrcFS()
if err != nil {
return err
}
dstFS, err := newDstFS() dstFS, err := newDstFS()
if err != nil { if err != nil {
return err return err
} }
originFS, err := newOriginFS() if *snapshotName == "" {
if err != nil { // Make server-side copy from -origin to -dst
return err originFS, err := newRemoteOriginFS()
if err != nil {
return err
}
a := &actions.RemoteBackupCopy{
Concurrency: *concurrency,
Src: originFS,
Dst: dstFS,
}
if err := a.Run(); err != nil {
return err
}
originFS.MustStop()
} else {
// Make backup from srcFS to -dst
srcFS, err := newSrcFS()
if err != nil {
return err
}
originFS, err := newOriginFS()
if err != nil {
return err
}
a := &actions.Backup{
Concurrency: *concurrency,
Src: srcFS,
Dst: dstFS,
Origin: originFS,
}
if err := a.Run(); err != nil {
return err
}
srcFS.MustStop()
originFS.MustStop()
} }
a := &actions.Backup{
Concurrency: *concurrency,
Src: srcFS,
Dst: dstFS,
Origin: originFS,
}
if err := a.Run(); err != nil {
return err
}
srcFS.MustStop()
dstFS.MustStop() dstFS.MustStop()
originFS.MustStop()
return nil return nil
} }
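The branching above corresponds to two invocation modes of `vmbackup` (a sketch; bucket and snapshot names are placeholders):

```console
# Regular backup: -snapshotName is set, so the local snapshot is uploaded to -dst.
./vmbackup -storageDataPath=/victoria-metrics-data -snapshotName=<snapshot-name> -dst=gs://bucket/backup

# Server-side copy: -snapshotName is empty, so -origin is copied to -dst remotely.
./vmbackup -origin=gs://bucket/foo -dst=gs://bucket/bar
```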
@ -155,6 +167,9 @@ See the docs at https://docs.victoriametrics.com/vmbackup.html .
} }
func newSrcFS() (*fslocal.FS, error) { func newSrcFS() (*fslocal.FS, error) {
if err := snapshot.Validate(*snapshotName); err != nil {
return nil, fmt.Errorf("invalid -snapshotName=%q: %s", *snapshotName, err)
}
snapshotPath := filepath.Join(*storageDataPath, "snapshots", *snapshotName) snapshotPath := filepath.Join(*storageDataPath, "snapshots", *snapshotName)
// Verify the snapshot exists. // Verify the snapshot exists.
@ -205,7 +220,20 @@ func hasFilepathPrefix(path, prefix string) bool {
if err != nil { if err != nil {
return false return false
} }
return strings.HasPrefix(pathAbs, prefixAbs) if prefixAbs == pathAbs {
return true
}
rel, err := filepath.Rel(prefixAbs, pathAbs)
if err != nil {
// if paths can't be related - they don't match
return false
}
if rel == ".." || strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
// the relative path escapes the prefix - they don't match
return false
}
// pathAbs is located at or under prefixAbs - it is a match
return true
} }
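For intuition, here is a small standalone sketch (not the repo's code) of what `filepath.Rel` returns for the cases handled above; only the `..`-prefixed results indicate that the path escapes the prefix, which is why a plain string-prefix check would wrongly treat `/data1` as being under `/data`:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	pairs := [][2]string{
		{"/data", "/data"},     // equal paths -> "."
		{"/data", "/data/foo"}, // under the prefix -> "foo"
		{"/data", "/data1"},    // common string prefix only -> "../data1"
		{"/data/foo", "/data"}, // above the prefix -> ".."
	}
	for _, p := range pairs {
		rel, err := filepath.Rel(p[0], p[1])
		fmt.Printf("Rel(%q, %q) = %q, err=%v\n", p[0], p[1], rel, err)
	}
}
```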
func newOriginFS() (common.OriginFS, error) { func newOriginFS() (common.OriginFS, error) {
@ -218,3 +246,14 @@ func newOriginFS() (common.OriginFS, error) {
} }
return fs, nil return fs, nil
} }
func newRemoteOriginFS() (common.RemoteFS, error) {
if len(*origin) == 0 {
return nil, fmt.Errorf("-origin cannot be empty when -snapshotName and -snapshot.createURL aren't set")
}
fs, err := actions.NewRemoteFS(*origin)
if err != nil {
return nil, fmt.Errorf("cannot parse `-origin`=%q: %w", *origin, err)
}
return fs, nil
}

View file

@ -26,4 +26,9 @@ func TestHasFilepathPrefix(t *testing.T) {
f("fs://"+pwd+"/foo", pwd+"/foo/bar", false) f("fs://"+pwd+"/foo", pwd+"/foo/bar", false)
f("fs://"+pwd+"/foo/bar", pwd+"/foo", true) f("fs://"+pwd+"/foo/bar", pwd+"/foo", true)
f("fs://"+pwd+"/foo", pwd+"/bar", false) f("fs://"+pwd+"/foo", pwd+"/bar", false)
f("fs:///data1", "/data", false)
f("fs:///data", "/data1", false)
f("fs:///data", "/data/foo", false)
f("fs:///data/foo", "/data", true)
f("fs:///data/foo/", "/data/", true)
} }

View file

@ -429,7 +429,7 @@ command-line flags:
-envflag.prefix string -envflag.prefix string
Prefix for environment variables if -envflag.enable is set Prefix for environment variables if -envflag.enable is set
-eula -eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-flagsAuthKey string -flagsAuthKey string
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
-fs.disableMmap -fs.disableMmap
@ -466,6 +466,12 @@ command-line flags:
Keep last N monthly backups. If 0 is specified next retention cycle removes all backups for given time period. (default -1) Keep last N monthly backups. If 0 is specified next retention cycle removes all backups for given time period. (default -1)
-keepLastWeekly int -keepLastWeekly int
Keep last N weekly backups. If 0 is specified next retention cycle removes all backups for given time period. (default -1) Keep last N weekly backups. If 0 is specified next retention cycle removes all backups for given time period. (default -1)
-license string
See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-license.forceOffline
See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-licenseFile string
See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-loggerDisableTimestamps -loggerDisableTimestamps
Whether to disable writing timestamps in logs Whether to disable writing timestamps in logs
-loggerErrorsPerSecondLimit int -loggerErrorsPerSecondLimit int

View file

@ -11,6 +11,7 @@ Features:
- migrate data from [Mimir](#migrating-data-from-mimir) to VictoriaMetrics - migrate data from [Mimir](#migrating-data-from-mimir) to VictoriaMetrics
- migrate data from [InfluxDB](#migrating-data-from-influxdb-1x) to VictoriaMetrics - migrate data from [InfluxDB](#migrating-data-from-influxdb-1x) to VictoriaMetrics
- migrate data from [OpenTSDB](#migrating-data-from-opentsdb) to VictoriaMetrics - migrate data from [OpenTSDB](#migrating-data-from-opentsdb) to VictoriaMetrics
- migrate data from [Promscale](#migrating-data-from-promscale)
- migrate data between [VictoriaMetrics](#migrating-data-from-victoriametrics) single or cluster version. - migrate data between [VictoriaMetrics](#migrating-data-from-victoriametrics) single or cluster version.
- migrate data by [Prometheus remote read protocol](#migrating-data-by-remote-read-protocol) to VictoriaMetrics - migrate data by [Prometheus remote read protocol](#migrating-data-by-remote-read-protocol) to VictoriaMetrics
- [verify](#verifying-exported-blocks-from-victoriametrics) exported blocks from VictoriaMetrics single or cluster version. - [verify](#verifying-exported-blocks-from-victoriametrics) exported blocks from VictoriaMetrics single or cluster version.
@ -306,6 +307,46 @@ Please see more about time filtering [here](https://docs.influxdata.com/influxdb
Migrating data from InfluxDB v2.x is not supported yet ([#32](https://github.com/VictoriaMetrics/vmctl/issues/32)). Migrating data from InfluxDB v2.x is not supported yet ([#32](https://github.com/VictoriaMetrics/vmctl/issues/32)).
You may find a 3rd-party solution useful for this - <https://github.com/jonppe/influx_to_victoriametrics>. You may find a 3rd-party solution useful for this - <https://github.com/jonppe/influx_to_victoriametrics>.
## Migrating data from Promscale
[Promscale](https://github.com/timescale/promscale) supports [Prometheus Remote Read API](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/).
To migrate historical data from Promscale to VictoriaMetrics we recommend using `vmctl`
in [remote-read](https://docs.victoriametrics.com/vmctl.html#migrating-data-by-remote-read-protocol) mode.
See an example of the migration command below:
```console
./vmctl remote-read --remote-read-src-addr=http://<promscale>:9201/read \
--remote-read-step-interval=day \
--remote-read-use-stream=false \
--vm-addr=http://<victoriametrics>:8428 \
--remote-read-filter-time-start=2023-08-21T00:00:00Z \
--remote-read-disable-path-append=true
Selected time range "2023-08-21 00:00:00 +0000 UTC" - "2023-08-21 14:11:41.561979 +0000 UTC" will be split into 1 ranges according to "day" step. Continue? [Y/n] y
VM worker 0:↙ 82831 samples/s
VM worker 1:↙ 54378 samples/s
VM worker 2:↙ 121616 samples/s
VM worker 3:↙ 59164 samples/s
VM worker 4:↙ 59220 samples/s
VM worker 5:↙ 102072 samples/s
Processing ranges: 1 / 1 [██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
2023/08/21 16:11:55 Import finished!
2023/08/21 16:11:55 VictoriaMetrics importer stats:
idle duration: 0s;
time spent while importing: 14.047045459s;
total samples: 262111;
samples/s: 18659.51;
total bytes: 5.3 MB;
bytes/s: 376.4 kB;
import requests: 6;
import requests retries: 0;
2023/08/21 16:11:55 Total time: 14.063458792s
```
Here we specify the full path to Promscale's remote read API via `--remote-read-src-addr`, and disable automatic path
appending via the `--remote-read-disable-path-append` command-line flag. This is necessary, as Promscale exposes its
remote read API at an HTTP path different from the Prometheus one. Promscale doesn't support stream mode for the remote
read API, so we disable it via `--remote-read-use-stream=false`.
## Migrating data from Prometheus ## Migrating data from Prometheus
`vmctl` supports the `prometheus` mode for migrating data from Prometheus to VictoriaMetrics time-series database. `vmctl` supports the `prometheus` mode for migrating data from Prometheus to VictoriaMetrics time-series database.
@ -438,28 +479,34 @@ Found 2 blocks to import. Continue? [Y/n] y
## Migrating data by remote read protocol ## Migrating data by remote read protocol
`vmctl` supports the `remote-read` mode for migrating data from databases which support `vmctl` provides the `remote-read` mode for migrating data from remote databases supporting
[Prometheus remote read API](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/) [Prometheus remote read API](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/).
The remote read API has two implementations: the default one (`SAMPLES`) and the
[streamed](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/) one (`STREAMED_XOR_CHUNKS`).
The streamed version is more efficient, but has lower adoption (e.g. [Promscale](#migrating-data-from-promscale)
doesn't support it).
See `./vmctl remote-read --help` for details and full list of flags. See `./vmctl remote-read --help` for details and full list of flags.
To start the migration process configure the following flags: To start the migration process configure the following flags:
1. `--remote-read-src-addr` - data source address to read from; 1. `--remote-read-src-addr` - data source address to read from;
1. `--vm-addr` - VictoriaMetrics address to write to. For single-node VM is usually equal to `--httpListenAddr`, 1. `--vm-addr` - VictoriaMetrics address to write to. For single-node VM is usually equal to `--httpListenAddr`,
and for cluster version is equal to `--httpListenAddr` flag of vminsert component (for example `http://<vminsert>:8480/insert/<accountID>/prometheus`); and for cluster version is equal to `--httpListenAddr` flag of vminsert component (for example `http://<vminsert>:8480/insert/<accountID>/prometheus`);
1. `--remote-read-filter-time-start` - the time filter in RFC3339 format to select time series with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'; 1. `--remote-read-filter-time-start` - the time filter in RFC3339 format to select time series with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z';
1. `--remote-read-filter-time-end` - the time filter in RFC3339 format to select time series with timestamp equal or smaller than provided value. E.g. '2020-01-01T20:07:00Z'. Current time is used when omitted.; 1. `--remote-read-filter-time-end` - the time filter in RFC3339 format to select time series with timestamp equal or smaller than provided value. E.g. '2020-01-01T20:07:00Z'. Current time is used when omitted.;
1. `--remote-read-step-interval` - split export data into chunks. Valid values are `month, day, hour, minute`; 1. `--remote-read-step-interval` - split export data into chunks. Valid values are `month, day, hour, minute`;
1. `--remote-read-use-stream` - defines whether to use `SAMPLES` or `STREAMED_XOR_CHUNKS` mode. By default, it uses `SAMPLES` mode.
The importing process example for local installation of Prometheus The importing process example for local installation of Prometheus
and single-node VictoriaMetrics (`http://localhost:8428`): and single-node VictoriaMetrics (`http://localhost:8428`):
``` ```
./vmctl remote-read \ ./vmctl remote-read \
--remote-read-src-addr=http://127.0.0.1:9091 \ --remote-read-src-addr=http://<prometheus>:9091 \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \ --remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-step-interval=hour \ --remote-read-step-interval=hour \
--vm-addr=http://127.0.0.1:8428 \ --vm-addr=http://<victoria-metrics>:8428 \
--vm-concurrency=6 --vm-concurrency=6
Split defined times into 8798 ranges to import. Continue? [Y/n] Split defined times into 8798 ranges to import. Continue? [Y/n]
@ -536,6 +583,7 @@ then import it into VM using `vmctl` in `prometheus` mode.
1. Run the `minio/mc` Docker container. 1. Run the `minio/mc` Docker container.
1. `mc config host add minio http://minio:9000 accessKey secretKey`, substituting appropriate values for the last 3 items. 1. `mc config host add minio http://minio:9000 accessKey secretKey`, substituting appropriate values for the last 3 items.
1. `mc cp -r minio/prometheus thanos-data` 1. `mc cp -r minio/prometheus thanos-data`
1. Import using `vmctl`. 1. Import using `vmctl`.
1. Follow the [instructions](#how-to-build) to compile `vmctl` on your machine. 1. Follow the [instructions](#how-to-build) to compile `vmctl` on your machine.
1. Use [prometheus](#migrating-data-from-prometheus) mode to import data: 1. Use [prometheus](#migrating-data-from-prometheus) mode to import data:
@ -553,7 +601,7 @@ service (or anything that exposes gRPC StoreAPI e.g. Querier) via Prometheus rem
If you want to migrate data, you should run [thanos-remote-read](https://github.com/G-Research/thanos-remote-read) proxy If you want to migrate data, you should run [thanos-remote-read](https://github.com/G-Research/thanos-remote-read) proxy
and define the Thanos store address `./thanos-remote-read -store 127.0.0.1:19194`. and define the Thanos store address `./thanos-remote-read -store 127.0.0.1:19194`.
It is important to know that the `store` flag is the Thanos Store API gRPC endpoint. It is important to know that the `store` flag is the Thanos Store API gRPC endpoint.
Also, it is important to know that thanos-remote-read proxy doesn't support `STREAMED_XOR_CHUNKS` mode. Also, it is important to know that thanos-remote-read proxy doesn't support stream mode.
When you run the thanos-remote-read proxy, it exposes a port to serve HTTP (`10080` by default). When you run the thanos-remote-read proxy, it exposes a port to serve HTTP (`10080` by default).
The importing process example for local installation of Thanos The importing process example for local installation of Thanos
@ -616,7 +664,7 @@ api:
If you defined some prometheus prefix, you should use it when you define flag `--remote-read-src-addr=http://127.0.0.1:9009/{prometheus_http_prefix}`. If you defined some prometheus prefix, you should use it when you define flag `--remote-read-src-addr=http://127.0.0.1:9009/{prometheus_http_prefix}`.
By default, Cortex uses the `prometheus` path prefix, so you should define the flag `--remote-read-src-addr=http://127.0.0.1:9009/prometheus`. By default, Cortex uses the `prometheus` path prefix, so you should define the flag `--remote-read-src-addr=http://127.0.0.1:9009/prometheus`.
It is important to know that Cortex doesn't support the `STREAMED_XOR_CHUNKS` mode. It is important to know that Cortex doesn't support the stream mode.
When you run Cortex, it exposes a port to serve HTTP (`9009` by default). When you run Cortex, it exposes a port to serve HTTP (`9009` by default).
The importing process example for the local installation of Cortex The importing process example for the local installation of Cortex
@ -658,26 +706,24 @@ requires an Authentication header like `X-Scope-OrgID`. You can define it via th
## Migrating data from Mimir ## Migrating data from Mimir
Mimir has similar implementation as Cortex and also support of the Prometheus remote read protocol. That means Mimir has a similar implementation to Cortex and supports the Prometheus remote read API. That means historical data
`vmctl` in mode `remote-read` may also be used for Mimir historical data migration. from Mimir can be migrated via `vmctl` in `remote-read` mode.
These instructions may vary based on the details of your Mimir configuration. The instructions for data migration via vmctl vary based on the details of your Mimir configuration.
Please read carefully and verify as you go. Please read carefully and verify as you go.
### Remote read protocol ### Remote read protocol
If you want to migrate data, you should check your Mimir configuration in the section By default, Mimir uses the `prometheus` path prefix so specifying the source
```yaml should be as simple as `--remote-read-src-addr=http://<mimir>:9009/prometheus`.
api: But if the prefix was overridden via `prometheus_http_prefix`, then the source address should be updated
prometheus_http_prefix: to `--remote-read-src-addr=http://<mimir>:9009/{prometheus_http_prefix}`.
```
If you defined some prometheus prefix, you should use it when you define flag `--remote-read-src-addr=http://127.0.0.1:9009/{prometheus_http_prefix}`. Mimir supports [streamed remote read API](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/),
By default, Mimir uses the `prometheus` path prefix, so you should define the flag `--remote-read-src-addr=http://127.0.0.1:9009/prometheus`. so it is recommended to set the `--remote-read-use-stream=true` flag for better performance and resource usage.
Mimir supports both remote read mode, so you can use `STREAMED_XOR_CHUNKS` mode and `SAMPLES` mode.
When you run Mimir, it exposes a port to serve HTTP (`8080` by default). When you run Mimir, it exposes a port to serve HTTP (`8080` by default).
Next example of the local installation was in multi-tenant mode (3 instances of mimir) with nginx as load balancer. The next example uses a local installation in multi-tenant mode (3 instances of Mimir) with nginx as a load balancer.
The load balancer exposes a single port `:9090`. The load balancer exposes a single port `:9090`.
As you can see, in the example we call `:9009` instead of `:8080` because of the proxy. As you can see, in the example we call `:9009` instead of `:8080` because of the proxy.
@ -687,13 +733,12 @@ and single-node VictoriaMetrics(`http://localhost:8428`):
``` ```
./vmctl remote-read ./vmctl remote-read
--remote-read-src-addr=http://127.0.0.1:9009/prometheus \ --remote-read-src-addr=http://<mimir>:9009/prometheus \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \ --remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-step-interval=hour \ --remote-read-step-interval=hour \
--remote-read-headers=X-Scope-OrgID:demo \ --remote-read-headers=X-Scope-OrgID:demo \
--remote-read-use-stream=true \ --remote-read-use-stream=true \
--vm-addr=http://127.0.0.1:8428 \ --vm-addr=http://<victoria-metrics>:8428
--vm-concurrency=6
``` ```
And when the process finishes, you will see the following: And when the process finishes, you will see the following:
@ -826,7 +871,7 @@ It is possible split migration process into set of smaller batches based on time
migrating large volumes of data, as this adds an indication of progress and the ability to restore the process from a certain point migrating large volumes of data, as this adds an indication of progress and the ability to restore the process from a certain point
in case of failure. in case of failure.
To use this you need to specify `--vm-native-step-interval` flag. Supported values are: `month`, `day`, `hour`, `minute`. To use this you need to specify `--vm-native-step-interval` flag. Supported values are: `month`, `week`, `day`, `hour`, `minute`.
Note that in order to use this it is required `--vm-native-filter-time-start` to be set to calculate time ranges for Note that in order to use this it is required `--vm-native-filter-time-start` to be set to calculate time ranges for
export process. export process.
@ -836,7 +881,7 @@ Every range is being processed independently, which means that:
so it is possible to restart process starting from failed range. so it is possible to restart process starting from failed range.
It is recommended using the `month` step when migrating the data over multiple months, It is recommended using the `month` step when migrating the data over multiple months,
since the migration with `day` and `hour` steps may take longer time to complete because of additional overhead. since the migration with `week`, `day` and `hour` steps may take longer to complete because of additional overhead.
Usage example: Usage example:
```console ```console
@ -1023,7 +1068,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
### Development build ### Development build
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19. 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
1. Run `make vmctl` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). 1. Run `make vmctl` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `vmctl` binary and puts it into the `bin` folder. It builds `vmctl` binary and puts it into the `bin` folder.
@ -1052,7 +1097,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
#### Development ARM build #### Development ARM build
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19. 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
1. Run `make vmctl-linux-arm` or `make vmctl-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). 1. Run `make vmctl-linux-arm` or `make vmctl-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `vmctl-linux-arm` or `vmctl-linux-arm64` binary respectively and puts it into the `bin` folder. It builds `vmctl-linux-arm` or `vmctl-linux-arm64` binary respectively and puts it into the `bin` folder.

View file

@ -361,8 +361,9 @@ var (
Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#timestamp-formats", Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#timestamp-formats",
}, },
&cli.StringFlag{ &cli.StringFlag{
Name: vmNativeStepInterval, Name: vmNativeStepInterval,
Usage: fmt.Sprintf("Split export data into chunks. Requires setting --%s. Valid values are '%s','%s','%s','%s'.", vmNativeFilterTimeStart, stepper.StepMonth, stepper.StepDay, stepper.StepHour, stepper.StepMinute), Usage: fmt.Sprintf("Split export data into chunks. Requires setting --%s. Valid values are '%s','%s','%s','%s','%s'.", vmNativeFilterTimeStart,
stepper.StepMonth, stepper.StepWeek, stepper.StepDay, stepper.StepHour, stepper.StepMinute),
Value: stepper.StepMonth, Value: stepper.StepMonth,
}, },
&cli.BoolFlag{ &cli.BoolFlag{

View file

@ -170,8 +170,5 @@ func (op *otsdbProcessor) do(s queryObj) error {
Timestamps: data.Timestamps, Timestamps: data.Timestamps,
Values: data.Values, Values: data.Values,
} }
if err := op.im.Input(&ts); err != nil { return op.im.Input(&ts)
return err
}
return nil
} }

View file

@ -180,7 +180,7 @@ func modifyData(msg Metric, normalize bool) (Metric, error) {
/* /*
replace bad characters in metric name with _ per the data model replace bad characters in metric name with _ per the data model
*/ */
finalMsg.Metric = promrelabel.SanitizeName(name) finalMsg.Metric = promrelabel.SanitizeMetricName(name)
// replace bad characters in tag keys with _ per the data model // replace bad characters in tag keys with _ per the data model
for key, value := range msg.Tags { for key, value := range msg.Tags {
// if normalization requested, lowercase the key and value // if normalization requested, lowercase the key and value
@ -191,7 +191,7 @@ func modifyData(msg Metric, normalize bool) (Metric, error) {
/* /*
replace all explicitly bad characters with _ replace all explicitly bad characters with _
*/ */
key = promrelabel.SanitizeName(key) key = promrelabel.SanitizeLabelName(key)
// tags that start with __ are considered custom stats for internal prometheus stuff, we should drop them // tags that start with __ are considered custom stats for internal prometheus stuff, we should drop them
if !strings.HasPrefix(key, "__") { if !strings.HasPrefix(key, "__") {
finalMsg.Tags[key] = value finalMsg.Tags[key] = value
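For intuition on why two sanitizers replaced the single `SanitizeName`: the Prometheus data model allows `:` in metric names but not in label names. A hypothetical stand-in sketch follows (the real implementations live in `lib/promrelabel` and may differ in detail):

```go
package main

import (
	"fmt"
	"regexp"
)

// Hypothetical stand-ins for promrelabel.SanitizeMetricName and
// promrelabel.SanitizeLabelName: metric names may contain ':' per the
// Prometheus data model, while label names may not.
var (
	unsupportedMetricChars = regexp.MustCompile(`[^a-zA-Z0-9_:]`)
	unsupportedLabelChars  = regexp.MustCompile(`[^a-zA-Z0-9_]`)
)

func sanitizeMetricName(s string) string {
	return unsupportedMetricChars.ReplaceAllString(s, "_")
}

func sanitizeLabelName(s string) string {
	return unsupportedLabelChars.ReplaceAllString(s, "_")
}

func main() {
	fmt.Println(sanitizeMetricName("cpu.usage:total")) // cpu_usage:total
	fmt.Println(sanitizeLabelName("host.name:ext"))    // host_name_ext
}
```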

View file

@ -8,10 +8,10 @@ import (
const ( const (
// StepMonth represents a one month interval // StepMonth represents a one month interval
StepMonth string = "month" StepMonth string = "month"
// StepDay represents a one day interval
StepDay string = "day"
// StepWeek represents a one week interval // StepWeek represents a one week interval
StepWeek string = "week" StepWeek string = "week"
// StepDay represents a one day interval
StepDay string = "day"
// StepHour represents a one hour interval // StepHour represents a one hour interval
StepHour string = "hour" StepHour string = "hour"
// StepMinute represents a one minute interval // StepMinute represents a one minute interval

View file

@ -201,7 +201,12 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
} }
if len(metrics) == 0 { if len(metrics) == 0 {
return fmt.Errorf("no metrics found") errMsg := "no metrics found"
if tenantID != "" {
errMsg = fmt.Sprintf("%s for tenant id: %s", errMsg, tenantID)
}
log.Println(errMsg)
return nil
} }
foundSeriesMsg = fmt.Sprintf("Found %d metrics to import", len(metrics)) foundSeriesMsg = fmt.Sprintf("Found %d metrics to import", len(metrics))
} }

View file

@ -338,7 +338,7 @@ The shortlist of configuration flags include the following:
-envflag.prefix string -envflag.prefix string
Prefix for environment variables if -envflag.enable is set Prefix for environment variables if -envflag.enable is set
-eula -eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-flagsAuthKey string -flagsAuthKey string
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
-fs.disableMmap -fs.disableMmap
@ -369,6 +369,12 @@ The shortlist of configuration flags include the following:
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
-internStringMaxLen int -internStringMaxLen int
The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500) The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
-license string
See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-license.forceOffline
See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-licenseFile string
See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-loggerDisableTimestamps -loggerDisableTimestamps
Whether to disable writing timestamps in logs Whether to disable writing timestamps in logs
-loggerErrorsPerSecondLimit int -loggerErrorsPerSecondLimit int

View file

@ -134,9 +134,9 @@ func (ctx *Ctx) ApplyRelabeling(labels []prompb.Label) []prompb.Label {
for i := range tmpLabels { for i := range tmpLabels {
label := &tmpLabels[i] label := &tmpLabels[i]
if label.Name == "__name__" { if label.Name == "__name__" {
label.Value = promrelabel.SanitizeName(label.Value) label.Value = promrelabel.SanitizeMetricName(label.Value)
} else { } else {
label.Name = promrelabel.SanitizeName(label.Name) label.Name = promrelabel.SanitizeLabelName(label.Name)
} }
} }
} }

View file

@@ -100,7 +100,7 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
   -envflag.prefix string
      Prefix for environment variables if -envflag.enable is set
   -eula
-     By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+     Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
   -flagsAuthKey string
      Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
   -fs.disableMmap
@@ -129,6 +129,12 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
      Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
   -internStringMaxLen int
      The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
+  -license string
+     See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+  -license.forceOffline
+     See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+  -licenseFile string
+     See https://victoriametrics.com/products/enterprise/ for trial license. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
   -loggerDisableTimestamps
      Whether to disable writing timestamps in logs
   -loggerErrorsPerSecondLimit int
@@ -201,7 +207,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
 ### Development build
 
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
 1. Run `make vmrestore` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `vmrestore` binary and puts it into the `bin` folder.


@@ -114,7 +114,7 @@ func (as *aggrStateAvgZero) Update(values []float64) {
 	as.seriesTotal++
 }
 
-func (as *aggrStateAvgZero) Finalize(xFilesFactor float64) []float64 {
+func (as *aggrStateAvgZero) Finalize(_ float64) []float64 {
 	sums := as.sums
 	values := make([]float64, as.pointsLen)
 	count := float64(as.seriesTotal)
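Many hunks in this commit follow the same pattern: parameters that are required by an interface or a shared signature but unused in a particular implementation are renamed to the blank identifier `_`, which silences unused-parameter linters without changing behavior. A small self-contained illustration (the interface here is a hypothetical stand-in, not from the repository):

```go
package main

import "fmt"

// aggrState is a hypothetical stand-in for an interface that forces
// Finalize to accept xFilesFactor even when an implementation ignores it.
type aggrState interface {
	Finalize(xFilesFactor float64) []float64
}

type avgZeroState struct{ sums []float64 }

// The blank identifier documents that xFilesFactor is intentionally unused.
func (as *avgZeroState) Finalize(_ float64) []float64 {
	return as.sums
}

func main() {
	var s aggrState = &avgZeroState{sums: []float64{1, 2, 3}}
	fmt.Println(s.Finalize(0.5)) // [1 2 3]
}
```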


@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"time"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 )
@@ -14,7 +13,7 @@ import (
 // FunctionsHandler implements /functions handler.
 //
 // See https://graphite.readthedocs.io/en/latest/functions.html#function-api
-func FunctionsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
+func FunctionsHandler(w http.ResponseWriter, r *http.Request) error {
 	grouped := httputils.GetBool(r, "grouped")
 	group := r.FormValue("group")
 	result := make(map[string]interface{})
@@ -40,7 +39,7 @@ func FunctionsHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
 // FunctionDetailsHandler implements /functions/<func_name> handler.
 //
 // See https://graphite.readthedocs.io/en/latest/functions.html#function-api
-func FunctionDetailsHandler(startTime time.Time, funcName string, w http.ResponseWriter, r *http.Request) error {
+func FunctionDetailsHandler(funcName string, w http.ResponseWriter, r *http.Request) error {
 	result := funcs[funcName]
 	if result == nil {
 		return fmt.Errorf("cannot find function %q", funcName)


@@ -85,7 +85,7 @@ func MetricsFindHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
 	if leavesOnly {
 		paths = filterLeaves(paths, delimiter)
 	}
-	paths = deduplicatePaths(paths, delimiter)
+	paths = deduplicatePaths(paths)
 	sortPaths(paths, delimiter)
 	contentType := getContentType(jsonp)
 	w.Header().Set("Content-Type", contentType)
@@ -99,7 +99,7 @@ func MetricsFindHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
 	return nil
 }
 
-func deduplicatePaths(paths []string, delimiter string) []string {
+func deduplicatePaths(paths []string) []string {
 	if len(paths) == 0 {
 		return nil
 	}
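The unused `delimiter` argument is dropped from `deduplicatePaths` here; the body of the function is not shown in this hunk. One common shape for such a helper, sketched under the assumption that it sorts the input and then drops adjacent duplicates:

```go
package main

import (
	"fmt"
	"sort"
)

// deduplicatePaths returns paths with duplicates removed.
// Sketch only: sorts the input, then keeps the first entry of each run.
func deduplicatePaths(paths []string) []string {
	if len(paths) == 0 {
		return nil
	}
	sort.Strings(paths)
	result := paths[:1]
	for _, p := range paths[1:] {
		if p != result[len(result)-1] {
			result = append(result, p)
		}
	}
	return result
}

func main() {
	fmt.Println(deduplicatePaths([]string{"a.b", "a.c", "a.b"})) // [a.b a.c]
}
```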


@@ -189,7 +189,7 @@ func init() {
 	}
 }
 
-func transformTODO(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, error) {
+func transformTODO(_ *evalConfig, _ *graphiteql.FuncExpr) (nextSeriesFunc, error) {
 	return nil, fmt.Errorf("TODO: implement this function")
 }
@@ -1062,7 +1062,7 @@ func transformCumulative(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFun
 	if err != nil {
 		return nil, err
 	}
-	return consolidateBy(ec, fe, nextSeries, "sum")
+	return consolidateBy(fe, nextSeries, "sum")
 }
 
 // See https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.consolidateBy
@@ -1079,10 +1079,10 @@ func transformConsolidateBy(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeries
 	if err != nil {
 		return nil, err
 	}
-	return consolidateBy(ec, fe, nextSeries, funcName)
+	return consolidateBy(fe, nextSeries, funcName)
 }
 
-func consolidateBy(ec *evalConfig, expr graphiteql.Expr, nextSeries nextSeriesFunc, funcName string) (nextSeriesFunc, error) {
+func consolidateBy(expr graphiteql.Expr, nextSeries nextSeriesFunc, funcName string) (nextSeriesFunc, error) {
 	consolidateFunc, err := getAggrFunc(funcName)
 	if err != nil {
 		return nil, err
@@ -1843,10 +1843,10 @@ func transformHighest(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc,
 	if err != nil {
 		return nil, err
 	}
-	return highestGeneric(ec, fe, nextSeries, n, funcName)
+	return highestGeneric(fe, nextSeries, n, funcName)
 }
 
-func highestGeneric(ec *evalConfig, expr graphiteql.Expr, nextSeries nextSeriesFunc, n float64, funcName string) (nextSeriesFunc, error) {
+func highestGeneric(expr graphiteql.Expr, nextSeries nextSeriesFunc, n float64, funcName string) (nextSeriesFunc, error) {
 	aggrFunc, err := getAggrFunc(funcName)
 	if err != nil {
 		_, _ = drainAllSeries(nextSeries)
@@ -1928,7 +1928,7 @@ func transformHighestAverage(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSerie
 	if err != nil {
 		return nil, err
 	}
-	return highestGeneric(ec, fe, nextSeries, n, "average")
+	return highestGeneric(fe, nextSeries, n, "average")
 }
 
 // See https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.highestCurrent
@@ -1945,7 +1945,7 @@ func transformHighestCurrent(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSerie
 	if err != nil {
 		return nil, err
 	}
-	return highestGeneric(ec, fe, nextSeries, n, "current")
+	return highestGeneric(fe, nextSeries, n, "current")
 }
 
 // See https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.highestMax
@@ -1962,7 +1962,7 @@ func transformHighestMax(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFun
 	if err != nil {
 		return nil, err
 	}
-	return highestGeneric(ec, fe, nextSeries, n, "max")
+	return highestGeneric(fe, nextSeries, n, "max")
 }
 
 // See https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.hitcount
@@ -2379,10 +2379,10 @@ func transformLowest(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, e
 	if err != nil {
 		return nil, err
 	}
-	return lowestGeneric(ec, fe, nextSeries, n, funcName)
+	return lowestGeneric(fe, nextSeries, n, funcName)
 }
 
-func lowestGeneric(ec *evalConfig, expr graphiteql.Expr, nextSeries nextSeriesFunc, n float64, funcName string) (nextSeriesFunc, error) {
+func lowestGeneric(expr graphiteql.Expr, nextSeries nextSeriesFunc, n float64, funcName string) (nextSeriesFunc, error) {
 	aggrFunc, err := getAggrFunc(funcName)
 	if err != nil {
 		_, _ = drainAllSeries(nextSeries)
@@ -2459,7 +2459,7 @@ func transformLowestAverage(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeries
 	if err != nil {
 		return nil, err
 	}
-	return lowestGeneric(ec, fe, nextSeries, n, "average")
+	return lowestGeneric(fe, nextSeries, n, "average")
 }
 
 // See https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.lowestCurrent
@@ -2476,7 +2476,7 @@ func transformLowestCurrent(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeries
 	if err != nil {
 		return nil, err
 	}
-	return lowestGeneric(ec, fe, nextSeries, n, "current")
+	return lowestGeneric(fe, nextSeries, n, "current")
 }
 
 // See https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.maxSeries
@@ -2607,7 +2607,7 @@ func transformMostDeviant(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFu
 	if err != nil {
 		return nil, err
 	}
-	return highestGeneric(ec, fe, nextSeries, n, "stddev")
+	return highestGeneric(fe, nextSeries, n, "stddev")
 }
 
 // See https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.movingAverage
@@ -3862,7 +3862,11 @@ func nextSeriesConcurrentWrapper(nextSeries nextSeriesFunc, f func(s *series) (*
 		}
 		if r.err != nil {
 			// Drain the rest of series before returning the error.
-			for range resultCh {
+			for {
+				_, ok := <-resultCh
+				if !ok {
+					break
+				}
 			}
 			<-errCh
 			return nil, r.err
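The drain loop above is rewritten from `for range resultCh` to an explicit receive-until-closed loop; both forms consume the channel until it is closed, so the rewrite is behavior-preserving. A self-contained comparison of the two idioms:

```go
package main

import "fmt"

func main() {
	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	ch <- 3
	close(ch)

	// Form 1: range drains the channel until it is closed.
	for range ch {
	}

	ch2 := make(chan int, 2)
	ch2 <- 4
	ch2 <- 5
	close(ch2)

	// Form 2: explicit receive; ok becomes false once ch2 is closed and empty.
	for {
		_, ok := <-ch2
		if !ok {
			break
		}
	}
	fmt.Println("both channels drained")
}
```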
@@ -4733,7 +4737,7 @@ func transformSortByTotal(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFu
 	if err != nil {
 		return nil, err
 	}
-	return sortByGeneric(ec, fe, nextSeries, "sum", true)
+	return sortByGeneric(fe, nextSeries, "sum", true)
 }
 
 // https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.sortBy
@@ -4754,10 +4758,10 @@ func transformSortBy(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, e
 	if err != nil {
 		return nil, err
 	}
-	return sortByGeneric(ec, fe, nextSeries, funcName, reverse)
+	return sortByGeneric(fe, nextSeries, funcName, reverse)
 }
 
-func sortByGeneric(ec *evalConfig, fe *graphiteql.FuncExpr, nextSeries nextSeriesFunc, funcName string, reverse bool) (nextSeriesFunc, error) {
+func sortByGeneric(fe *graphiteql.FuncExpr, nextSeries nextSeriesFunc, funcName string, reverse bool) (nextSeriesFunc, error) {
 	aggrFunc, err := getAggrFunc(funcName)
 	if err != nil {
 		_, _ = drainAllSeries(nextSeries)
@@ -4868,7 +4872,7 @@ func transformSortByMinima(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesF
 		}
 		return s, nil
 	})
-	return sortByGeneric(ec, fe, f, "min", false)
+	return sortByGeneric(fe, f, "min", false)
 }
 
 // https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.sortByMaxima
@@ -4881,7 +4885,7 @@ func transformSortByMaxima(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesF
 	if err != nil {
 		return nil, err
 	}
-	return sortByGeneric(ec, fe, nextSeries, "max", true)
+	return sortByGeneric(fe, nextSeries, "max", true)
 }
 
 // https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.smartSummarize
@@ -5286,7 +5290,7 @@ func holtWinterConfidenceBands(ec *evalConfig, fe *graphiteql.FuncExpr, args []*
 	f := nextSeriesConcurrentWrapper(nextSeries, func(s *series) (*series, error) {
 		s.consolidate(&ecCopy, step)
 		timeStamps := s.Timestamps[trimWindowPoints:]
-		analysis := holtWintersAnalysis(&ecCopy, s, seasonalityMs)
+		analysis := holtWintersAnalysis(s, seasonalityMs)
 		forecastValues := analysis.predictions.Values[trimWindowPoints:]
 		deviationValues := analysis.deviations.Values[trimWindowPoints:]
 		valuesLen := len(forecastValues)
@@ -5450,7 +5454,7 @@ func transformHoltWintersForecast(ec *evalConfig, fe *graphiteql.FuncExpr) (next
 	trimWindowPoints := ecCopy.pointsLen(step) - ec.pointsLen(step)
 	f := nextSeriesConcurrentWrapper(nextSeries, func(s *series) (*series, error) {
 		s.consolidate(&ecCopy, step)
-		analysis := holtWintersAnalysis(&ecCopy, s, seasonalityMs)
+		analysis := holtWintersAnalysis(s, seasonalityMs)
 		predictions := analysis.predictions
 		s.Tags["holtWintersForecast"] = "1"
@@ -5468,7 +5472,7 @@ func transformHoltWintersForecast(ec *evalConfig, fe *graphiteql.FuncExpr) (next
 }
 
-func holtWintersAnalysis(ec *evalConfig, s *series, seasonality int64) holtWintersAnalysisResult {
+func holtWintersAnalysis(s *series, seasonality int64) holtWintersAnalysisResult {
 	alpha := 0.1
 	gamma := alpha
 	beta := 0.0035


@@ -236,7 +236,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		funcName = strings.TrimPrefix(funcName, "/")
 		if funcName == "" {
 			graphiteFunctionsRequests.Inc()
-			if err := graphite.FunctionsHandler(startTime, w, r); err != nil {
+			if err := graphite.FunctionsHandler(w, r); err != nil {
 				graphiteFunctionsErrors.Inc()
 				httpserver.Errorf(w, r, "%s", err)
 				return true
@@ -244,7 +244,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 			return true
 		}
 		graphiteFunctionDetailsRequests.Inc()
-		if err := graphite.FunctionDetailsHandler(startTime, funcName, w, r); err != nil {
+		if err := graphite.FunctionDetailsHandler(funcName, w, r); err != nil {
 			graphiteFunctionDetailsErrors.Inc()
 			httpserver.Errorf(w, r, "%s", err)
 			return true
@@ -480,6 +480,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		expandWithExprsRequests.Inc()
 		prometheus.ExpandWithExprs(w, r)
 		return true
+	case "/prettify-query":
+		prettifyQueryRequests.Inc()
+		prometheus.PrettifyQuery(w, r)
+		return true
 	case "/api/v1/rules", "/rules":
 		rulesRequests.Inc()
 		if len(*vmalertProxyURL) > 0 {
@@ -655,6 +659,7 @@ var (
 	graphiteFunctionDetailsErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/functions/<func_name>"}`)
 
 	expandWithExprsRequests = metrics.NewCounter(`vm_http_requests_total{path="/expand-with-exprs"}`)
+	prettifyQueryRequests   = metrics.NewCounter(`vm_http_requests_total{path="/prettify-query"}`)
 
 	vmalertRequests = metrics.NewCounter(`vm_http_requests_total{path="/vmalert"}`)
 	rulesRequests   = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/rules"}`)


@@ -1343,7 +1343,8 @@ func setupTfss(qt *querytracer.Tracer, tr storage.TimeRange, tagFilterss [][]sto
 			}
 			if len(paths) >= maxMetrics {
 				return nil, fmt.Errorf("more than %d time series match Graphite query %q; "+
-					"either narrow down the query or increase the corresponding -search.max* command-line flag value", maxMetrics, query)
+					"either narrow down the query or increase the corresponding -search.max* command-line flag value; "+
+					"see https://docs.victoriametrics.com/#resource-usage-limits", maxMetrics, query)
 			}
 			tfs.AddGraphiteQuery(query, paths, tf.IsNegative)
 			continue


@@ -3,6 +3,7 @@ package prometheus
 import (
 	"flag"
 	"fmt"
+	"github.com/VictoriaMetrics/metricsql"
 	"math"
 	"net/http"
 	"runtime"
@@ -75,6 +76,23 @@ func ExpandWithExprs(w http.ResponseWriter, r *http.Request) {
 	_ = bw.Flush()
 }
 
+// PrettifyQuery handles the request /prettify-query
+func PrettifyQuery(w http.ResponseWriter, r *http.Request) {
+	query := r.FormValue("query")
+	bw := bufferedwriter.Get(w)
+	defer bufferedwriter.Put(bw)
+	w.Header().Set("Content-Type", "application/json")
+	httpserver.EnableCORS(w, r)
+	prettyQuery, err := metricsql.Prettify(query)
+	if err != nil {
+		fmt.Fprintf(bw, `{"status": "error", "msg": %q}`, err)
+	} else {
+		fmt.Fprintf(bw, `{"status": "success", "query": %q}`, prettyQuery)
+	}
+	_ = bw.Flush()
+}
+
 // FederateHandler implements /federate . See https://prometheus.io/docs/prometheus/latest/federation/
 func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 	defer federateDuration.UpdateDuration(startTime)
@@ -649,10 +667,7 @@ func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseW
 		qt.Donef("start=%d, end=%d", cp.start, cp.end)
 	}
 	WriteSeriesResponse(bw, metricNames, qt, qtDone)
-	if err := bw.Flush(); err != nil {
-		return err
-	}
-	return nil
+	return bw.Flush()
 }
 
 var seriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/series"}`)
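Based on the handler above, /prettify-query takes the expression from the `query` form value and answers with a small JSON object whose `status` field is either `success` (with the prettified query) or `error` (with `msg`). A client-side sketch; the host/port is a placeholder for whatever instance serves the endpoint:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

type prettifyResponse struct {
	Status string `json:"status"`
	Query  string `json:"query"` // set on success
	Msg    string `json:"msg"`   // set on error
}

func main() {
	// Placeholder address; r.FormValue on the server side accepts both
	// query-string and POST form parameters.
	resp, err := http.PostForm("http://localhost:8428/prettify-query",
		url.Values{"query": {`sum(rate(http_requests_total[5m])) by (job)`}})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var pr prettifyResponse
	if err := json.NewDecoder(resp.Body).Decode(&pr); err != nil {
		panic(err)
	}
	fmt.Println(pr.Status, pr.Query, pr.Msg)
}
```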


@@ -12,7 +12,7 @@ import (
 // ActiveQueriesHandler returns response to /api/v1/status/active_queries
 //
 // It writes a JSON with active queries to w.
-func ActiveQueriesHandler(w http.ResponseWriter, r *http.Request) {
+func ActiveQueriesHandler(w http.ResponseWriter, _ *http.Request) {
 	aqes := activeQueriesV.GetAll()
 	w.Header().Set("Content-Type", "application/json")


@@ -73,7 +73,7 @@ func Exec(qt *querytracer.Tracer, ec *EvalConfig, q string, isFirstPointOnly boo
 		}
 		qt.Printf("leave only the first point in every series")
 	}
-	maySort := maySortResults(e, rv)
+	maySort := maySortResults(e)
 	result, err := timeseriesToResult(rv, maySort)
 	if err != nil {
 		return nil, err
@@ -95,7 +95,7 @@ func Exec(qt *querytracer.Tracer, ec *EvalConfig, q string, isFirstPointOnly boo
 	return result, nil
 }
 
-func maySortResults(e metricsql.Expr, tss []*timeseries) bool {
+func maySortResults(e metricsql.Expr) bool {
 	switch v := e.(type) {
 	case *metricsql.FuncExpr:
 		switch strings.ToLower(v.Name) {


@@ -86,6 +86,28 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run("int_with_underscores", func(t *testing.T) {
+		t.Parallel()
+		q := `123_456_789`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{123456789, 123456789, 123456789, 123456789, 123456789, 123456789},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run("float_with_underscores", func(t *testing.T) {
+		t.Parallel()
+		q := `1_2.3_456_789`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{12.3456789, 12.3456789, 12.3456789, 12.3456789, 12.3456789, 12.3456789},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run("duration-constant", func(t *testing.T) {
 		t.Parallel()
 		q := `1h23m5S`
@@ -141,6 +163,17 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run("num-with-suffix-5", func(t *testing.T) {
+		t.Parallel()
+		q := `1_234M`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{1234e6, 1234e6, 1234e6, 1234e6, 1234e6, 1234e6},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run("simple-arithmetic", func(t *testing.T) {
 		t.Parallel()
 		q := `-1+2 *3 ^ 4+5%6`
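These new test cases exercise MetricsQL's support for underscore separators in numeric literals (`123_456_789`, `1_2.3_456_789`, `1_234M`, where the expected value `1234e6` confirms that the `M` suffix means 1e6). Go source literals accept the same grouping convention, which is handy for cross-checking expected values:

```go
package main

import "fmt"

func main() {
	// Go numeric literals allow the same underscore grouping that the
	// MetricsQL tests above exercise.
	const n = 123_456_789
	const f = 1_2.3_456_789
	const m = 1_234e6 // "1_234M" in MetricsQL, per the test expectation above
	fmt.Println(n, f, m)
}
```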


@@ -2222,7 +2222,7 @@ func rollupIntegrate(rfa *rollupFuncArg) float64 {
 	return sum
 }
 
-func rollupFake(rfa *rollupFuncArg) float64 {
+func rollupFake(_ *rollupFuncArg) float64 {
 	logger.Panicf("BUG: rollupFake shouldn't be called")
 	return 0
 }


@@ -160,7 +160,7 @@ func TestDerivValues(t *testing.T) {
 	testRowsEqual(t, values, timestamps, valuesExpected, timestamps)
 }
 
-func testRollupFunc(t *testing.T, funcName string, args []interface{}, meExpected *metricsql.MetricExpr, vExpected float64) {
+func testRollupFunc(t *testing.T, funcName string, args []interface{}, vExpected float64) {
 	t.Helper()
 	nrf := getRollupFunc(funcName)
 	if nrf == nil {
@@ -203,7 +203,7 @@ func TestRollupDurationOverTime(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}, maxIntervals}
-		testRollupFunc(t, "duration_over_time", args, &me, dExpected)
+		testRollupFunc(t, "duration_over_time", args, dExpected)
 	}
 	f(-123, 0)
 	f(0, 0)
@@ -224,7 +224,7 @@ func TestRollupShareLEOverTime(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les}
-		testRollupFunc(t, "share_le_over_time", args, &me, vExpected)
+		testRollupFunc(t, "share_le_over_time", args, vExpected)
 	}
 
 	f(-123, 0)
@@ -247,7 +247,7 @@ func TestRollupShareGTOverTime(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}, gts}
-		testRollupFunc(t, "share_gt_over_time", args, &me, vExpected)
+		testRollupFunc(t, "share_gt_over_time", args, vExpected)
 	}
 
 	f(-123, 1)
@@ -270,7 +270,7 @@ func TestRollupShareEQOverTime(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}, eqs}
-		testRollupFunc(t, "share_eq_over_time", args, &me, vExpected)
+		testRollupFunc(t, "share_eq_over_time", args, vExpected)
 	}
 
 	f(-123, 0)
@@ -289,7 +289,7 @@ func TestRollupCountLEOverTime(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les}
-		testRollupFunc(t, "count_le_over_time", args, &me, vExpected)
+		testRollupFunc(t, "count_le_over_time", args, vExpected)
 	}
 
 	f(-123, 0)
@@ -312,7 +312,7 @@ func TestRollupCountGTOverTime(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}, gts}
-		testRollupFunc(t, "count_gt_over_time", args, &me, vExpected)
+		testRollupFunc(t, "count_gt_over_time", args, vExpected)
 	}
 
 	f(-123, 12)
@@ -335,7 +335,7 @@ func TestRollupCountEQOverTime(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}, eqs}
-		testRollupFunc(t, "count_eq_over_time", args, &me, vExpected)
+		testRollupFunc(t, "count_eq_over_time", args, vExpected)
 	}
 
 	f(-123, 0)
@@ -354,7 +354,7 @@ func TestRollupCountNEOverTime(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}, nes}
-		testRollupFunc(t, "count_ne_over_time", args, &me, vExpected)
+		testRollupFunc(t, "count_ne_over_time", args, vExpected)
 	}
 
 	f(-123, 12)
@@ -373,7 +373,7 @@ func TestRollupQuantileOverTime(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{phis, &metricsql.RollupExpr{Expr: &me}}
-		testRollupFunc(t, "quantile_over_time", args, &me, vExpected)
+		testRollupFunc(t, "quantile_over_time", args, vExpected)
 	}
 
 	f(-123, math.Inf(-1))
@@ -395,7 +395,7 @@ func TestRollupPredictLinear(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}, secs}
-		testRollupFunc(t, "predict_linear", args, &me, vExpected)
+		testRollupFunc(t, "predict_linear", args, vExpected)
 	}
 
 	f(0e-3, 65.07405077267295)
@@ -434,7 +434,7 @@ func TestRollupHoltWinters(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}, sfs, tfs}
-		testRollupFunc(t, "holt_winters", args, &me, vExpected)
+		testRollupFunc(t, "holt_winters", args, vExpected)
 	}
 
 	f(-1, 0.5, nan)
@@ -462,7 +462,7 @@ func TestRollupHoeffdingBoundLower(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{phis, &metricsql.RollupExpr{Expr: &me}}
-		testRollupFunc(t, "hoeffding_bound_lower", args, &me, vExpected)
+		testRollupFunc(t, "hoeffding_bound_lower", args, vExpected)
 	}
 
 	f(0.5, 28.21949401521037)
@@ -483,7 +483,7 @@ func TestRollupHoeffdingBoundUpper(t *testing.T) {
 		}}
 		var me metricsql.MetricExpr
 		args := []interface{}{phis, &metricsql.RollupExpr{Expr: &me}}
-		testRollupFunc(t, "hoeffding_bound_upper", args, &me, vExpected)
+		testRollupFunc(t, "hoeffding_bound_upper", args, vExpected)
 	}
 
 	f(0.5, 65.9471726514563)
@@ -500,7 +500,7 @@ func TestRollupNewRollupFuncSuccess(t *testing.T) {
 		t.Helper()
 		var me metricsql.MetricExpr
 		args := []interface{}{&metricsql.RollupExpr{Expr: &me}}
-		testRollupFunc(t, funcName, args, &me, vExpected)
+		testRollupFunc(t, funcName, args, vExpected)
 	}
 
 	f("default_rollup", 34)


@@ -1090,18 +1090,18 @@ func transformHour(t time.Time) int {
 	return t.Hour()
 }
 
-func runningSum(a, b float64, idx int) float64 {
+func runningSum(a, b float64, _ int) float64 {
 	return a + b
 }
 
-func runningMax(a, b float64, idx int) float64 {
+func runningMax(a, b float64, _ int) float64 {
 	if a > b {
 		return a
 	}
 	return b
 }
 
-func runningMin(a, b float64, idx int) float64 {
+func runningMin(a, b float64, _ int) float64 {
 	if a < b {
 		return a
 	}


@@ -1,14 +1,14 @@
 {
   "files": {
-    "main.css": "./static/css/main.643dbb4b.css",
-    "main.js": "./static/js/main.68a897a8.js",
+    "main.css": "./static/css/main.e95426eb.css",
+    "main.js": "./static/js/main.8d3e794d.js",
     "static/js/522.b5ae4365.chunk.js": "./static/js/522.b5ae4365.chunk.js",
     "static/media/Lato-Regular.ttf": "./static/media/Lato-Regular.d714fec1633b69a9c2e9.ttf",
     "static/media/Lato-Bold.ttf": "./static/media/Lato-Bold.32360ba4b57802daa4d6.ttf",
     "index.html": "./index.html"
   },
   "entrypoints": [
-    "static/css/main.643dbb4b.css",
-    "static/js/main.68a897a8.js"
+    "static/css/main.e95426eb.css",
+    "static/js/main.8d3e794d.js"
   ]
 }


@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.68a897a8.js"></script><link href="./static/css/main.643dbb4b.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.8d3e794d.js"></script><link href="./static/css/main.e95426eb.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

4 file diffs suppressed because one or more lines are too long


@@ -76,7 +76,7 @@ func CheckTimeRange(tr storage.TimeRange) error {
 	if !*denyQueriesOutsideRetention {
 		return nil
 	}
-	minAllowedTimestamp := int64(fasttime.UnixTimestamp()*1000) - retentionPeriod.Msecs
+	minAllowedTimestamp := int64(fasttime.UnixTimestamp()*1000) - retentionPeriod.Milliseconds()
 	if tr.MinTimestamp > minAllowedTimestamp {
 		return nil
 	}
@@ -103,13 +103,13 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
 	mergeset.SetIndexBlocksCacheSize(cacheSizeIndexDBIndexBlocks.IntN())
 	mergeset.SetDataBlocksCacheSize(cacheSizeIndexDBDataBlocks.IntN())
 
-	if retentionPeriod.Msecs < 24*3600*1000 {
+	if retentionPeriod.Duration() < 24*time.Hour {
 		logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod)
 	}
 	logger.Infof("opening storage at %q with -retentionPeriod=%s", *DataPath, retentionPeriod)
 	startTime := time.Now()
 	WG = syncwg.WaitGroup{}
-	strg := storage.MustOpenStorage(*DataPath, retentionPeriod.Msecs, *maxHourlySeries, *maxDailySeries)
+	strg := storage.MustOpenStorage(*DataPath, retentionPeriod.Duration(), *maxHourlySeries, *maxDailySeries)
 	Storage = strg
 
 	initStaleSnapshotsRemover(strg)
@@ -384,10 +384,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 func initStaleSnapshotsRemover(strg *storage.Storage) {
 	staleSnapshotsRemoverCh = make(chan struct{})
-	if snapshotsMaxAge.Msecs <= 0 {
+	if snapshotsMaxAge.Duration() <= 0 {
 		return
 	}
-	snapshotsMaxAgeDur := time.Duration(snapshotsMaxAge.Msecs) * time.Millisecond
+	snapshotsMaxAgeDur := snapshotsMaxAge.Duration()
 	staleSnapshotsRemoverWG.Add(1)
 	go func() {
 		defer staleSnapshotsRemoverWG.Done()
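These storage hunks replace the raw `Msecs` integer field with `Duration()`/`Milliseconds()` accessors, so retention arithmetic happens in `time.Duration` units that the compiler can check against constants like `24*time.Hour` instead of hand-written conversions like `24*3600*1000`. A sketch of the idea, with a simplified stand-in for the flag type (the real type also parses values such as "31d" from the command line):

```go
package main

import (
	"fmt"
	"time"
)

// retentionFlag is a hypothetical, simplified stand-in for the duration
// flag type used in the diff.
type retentionFlag struct{ d time.Duration }

func (f *retentionFlag) Duration() time.Duration { return f.d }
func (f *retentionFlag) Milliseconds() int64     { return f.d.Milliseconds() }

func main() {
	retentionPeriod := retentionFlag{d: 31 * 24 * time.Hour}

	// Comparing durations directly keeps units type-checked.
	if retentionPeriod.Duration() < 24*time.Hour {
		fmt.Println("-retentionPeriod cannot be smaller than a day")
		return
	}
	fmt.Println("retention in ms:", retentionPeriod.Milliseconds())
}
```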


@@ -1,4 +1,4 @@
-FROM golang:1.21.0 as build-web-stage
+FROM golang:1.21.1 as build-web-stage
 COPY build /build
 WORKDIR /build


@@ -3,7 +3,7 @@
 <head>
   <meta charset="utf-8" />
   <link rel="icon" href="%PUBLIC_URL%/favicon.ico" />
-  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no" />
+  <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=5" />
   <meta name="theme-color" content="#000000" />
   <meta
     name="description"


@@ -1,6 +1,6 @@
 import { seriesBarsPlugin } from "../../../utils/uplot/plugin";
-import { barDisp, getBarSeries } from "../../../utils/uplot/series";
-import { Fill, Stroke } from "../../../utils/uplot/types";
+import { barDisp, getBarSeries } from "../../../utils/uplot";
+import { Fill, Stroke } from "../../../types";
 import { PaddingSide, Series } from "uplot";


@@ -0,0 +1,180 @@
import React, { FC, useCallback, useEffect, useRef, useState } from "preact/compat";
import { MouseEvent as ReactMouseEvent } from "react";
import useEventListener from "../../../hooks/useEventListener";
import ReactDOM from "react-dom";
import classNames from "classnames";
import uPlot from "uplot";
import Button from "../../Main/Button/Button";
import { CloseIcon, DragIcon } from "../../Main/Icons";
import { SeriesItemStats } from "../../../types";
import { STATS_ORDER } from "../../../constants/graph";
export interface ChartTooltipProps {
u?: uPlot;
id: string;
title?: string;
dates: string[];
value: string | number | null;
point: { top: number, left: number };
unit?: string;
stats?: SeriesItemStats;
isSticky?: boolean;
info?: string;
marker?: string;
show?: boolean;
onClose?: (id: string) => void;
}
const ChartTooltip: FC<ChartTooltipProps> = ({
u,
id,
title,
dates,
value,
point,
unit = "",
info,
stats,
isSticky,
marker,
onClose
}) => {
const tooltipRef = useRef<HTMLDivElement>(null);
const [position, setPosition] = useState({ top: -999, left: -999 });
const [moving, setMoving] = useState(false);
const [moved, setMoved] = useState(false);
const handleClose = () => {
onClose && onClose(id);
};
const handleMouseDown = (e: ReactMouseEvent) => {
setMoved(true);
setMoving(true);
const { clientX, clientY } = e;
setPosition({ top: clientY, left: clientX });
};
const handleMouseMove = useCallback((e: MouseEvent) => {
if (!moving) return;
const { clientX, clientY } = e;
setPosition({ top: clientY, left: clientX });
}, [moving]);
const handleMouseUp = () => {
setMoving(false);
};
const calcPosition = () => {
if (!tooltipRef.current || !u) return;
const { top, left } = point;
const uPlotPosition = {
left: parseFloat(u.over.style.left),
top: parseFloat(u.over.style.top)
};
const {
width: uPlotWidth,
height: uPlotHeight
} = u.over.getBoundingClientRect();
const {
width: tooltipWidth,
height: tooltipHeight
} = tooltipRef.current.getBoundingClientRect();
const margin = 10;
const overflowX = left + tooltipWidth >= uPlotWidth ? tooltipWidth + (2 * margin) : 0;
const overflowY = top + tooltipHeight >= uPlotHeight ? tooltipHeight + (2 * margin) : 0;
const position = {
top: top + uPlotPosition.top + margin - overflowY,
left: left + uPlotPosition.left + margin - overflowX
};
if (position.left < 0) position.left = 20;
if (position.top < 0) position.top = 20;
setPosition(position);
};
useEffect(calcPosition, [u, value, point, tooltipRef]);
useEventListener("mousemove", handleMouseMove);
useEventListener("mouseup", handleMouseUp);
if (!u) return null;
return ReactDOM.createPortal((
<div
className={classNames({
"vm-chart-tooltip": true,
"vm-chart-tooltip_sticky": isSticky,
"vm-chart-tooltip_moved": moved
})}
ref={tooltipRef}
style={position}
>
<div className="vm-chart-tooltip-header">
{title && (
<div className="vm-chart-tooltip-header__title">
{title}
</div>
)}
<div className="vm-chart-tooltip-header__date">
{dates.map((date, i) => <span key={i}>{date}</span>)}
</div>
{isSticky && (
<>
<Button
className="vm-chart-tooltip-header__drag"
variant="text"
size="small"
startIcon={<DragIcon/>}
onMouseDown={handleMouseDown}
ariaLabel="drag the tooltip"
/>
<Button
className="vm-chart-tooltip-header__close"
variant="text"
size="small"
startIcon={<CloseIcon/>}
onClick={handleClose}
ariaLabel="close the tooltip"
/>
</>
)}
</div>
<div className="vm-chart-tooltip-data">
{marker && (
<span
className="vm-chart-tooltip-data__marker"
style={{ background: marker }}
/>
)}
<p className="vm-chart-tooltip-data__value">
<b>{value}</b>{unit}
</p>
</div>
{stats && (
<table className="vm-chart-tooltip-stats">
{STATS_ORDER.map((key, i) => (
<div
className="vm-chart-tooltip-stats-row"
key={i}
>
<span className="vm-chart-tooltip-stats-row__key">{key}:</span>
<span className="vm-chart-tooltip-stats-row__value">{stats[key]}</span>
</div>
))}
</table>
)}
{info && <p className="vm-chart-tooltip__info">{info}</p>}
</div>
), u.root);
};
export default ChartTooltip;


@@ -0,0 +1,26 @@
import React, { FC } from "preact/compat";
import ChartTooltip, { ChartTooltipProps } from "./ChartTooltip";
import "./style.scss";
interface LineTooltipHook {
showTooltip: boolean;
tooltipProps: ChartTooltipProps;
stickyTooltips: ChartTooltipProps[];
handleUnStick: (id: string) => void;
}
const ChartTooltipWrapper: FC<LineTooltipHook> = ({ showTooltip, tooltipProps, stickyTooltips, handleUnStick }) => (
<>
{showTooltip && tooltipProps && <ChartTooltip {...tooltipProps}/>}
{stickyTooltips.map(t => (
<ChartTooltip
{...t}
isSticky
key={t.id}
onClose={handleUnStick}
/>
))}
</>
);
export default ChartTooltipWrapper;


@@ -1,17 +1,18 @@
 @use "src/styles/variables" as *;
-$chart-tooltip-width: 325px;
+
+$chart-tooltip-width: 370px;
 $chart-tooltip-icon-width: 25px;
-$chart-tooltip-half-icon: calc($chart-tooltip-icon-width/2);
-$chart-tooltip-date-width: $chart-tooltip-width - (2*$chart-tooltip-icon-width) - (2*$padding-global) - $padding-small;
+$chart-tooltip-half-icon: calc($chart-tooltip-icon-width / 2);
+$chart-tooltip-date-width: $chart-tooltip-width - (2*$chart-tooltip-icon-width) - (3*$padding-global);
 $chart-tooltip-x: -1 * ($padding-small + $padding-global + $chart-tooltip-date-width + $chart-tooltip-half-icon);
-$chart-tooltip-y: -1 * ($padding-small + $chart-tooltip-half-icon);
+$chart-tooltip-y: -1 * ($padding-global + $chart-tooltip-half-icon);
 
 .vm-chart-tooltip {
   position: absolute;
   display: grid;
   gap: $padding-global;
   width: $chart-tooltip-width;
-  padding: $padding-small;
+  padding: $padding-global;
   border-radius: $border-radius-medium;
   background: $color-background-tooltip;
   color: $color-white;
@@ -43,40 +44,76 @@ $chart-tooltip-y: -1 * ($padding-small + $chart-tooltip-half-icon);
     justify-content: center;
     min-height: 25px;
 
+    &__title {
+      grid-row: 1;
+    }
+
     &__close {
+      grid-row: 1;
+      grid-column: 3;
       color: $color-white;
     }
 
     &__drag {
+      grid-row: 1;
+      grid-column: 2;
       color: $color-white;
       cursor: move;
     }
 
     &__date {
-      &_range {
+      grid-column: 1;
       display: grid;
       gap: 2px;
-      }
     }
   }
 
   &-data {
-    display: grid;
-    grid-template-columns: auto 1fr;
+    display: flex;
+    align-items: center;
+    justify-content: flex-start;
     gap: $padding-small;
-    align-items: flex-start;
-    word-break: break-all;
-    line-height: $font-size;
 
     &__marker {
-      width: 12px;
-      height: 12px;
+      width: $font-size;
+      height: $font-size;
+      border: 1px solid rgba($color-white, 0.5);
+    }
+
+    &__value {
+      line-height: 1;
+      font-size: $font-size;
     }
   }
 
-  &-info {
-    display: grid;
-    grid-gap: 4px;
+  &-stats {
+    display: flex;
+    flex-wrap: wrap;
+    align-items: center;
+    justify-content: flex-start;
+    gap: $padding-small $padding-global;
+
+    &-row {
+      display: grid;
+      align-items: center;
+      justify-content: flex-start;
+
+      &:not(:last-child) {
+        padding-right: $padding-small;
+      }
+
+      &__key {
+        line-height: 1;
+        margin-right: calc($padding-small/2);
+      }
+
+      &__value {
+        font-weight: bold;
+      }
+    }
+  }
+
+  &__info {
     word-break: break-all;
     white-space: pre-wrap;
   }


@@ -22,6 +22,7 @@ const GraphTips: FC = () => {
         color={"gray"}
         startIcon={<TipIcon/>}
         onClick={handleOpenTips}
+        ariaLabel="open the tips"
       />
     </Tooltip>
     {showTips && (


@@ -1,142 +0,0 @@
import React, { FC, useCallback, useEffect, useRef, useState } from "preact/compat";
import uPlot from "uplot";
import ReactDOM from "react-dom";
import Button from "../../../Main/Button/Button";
import { CloseIcon, DragIcon } from "../../../Main/Icons";
import classNames from "classnames";
import { MouseEvent as ReactMouseEvent } from "react";
import "../../Line/ChartTooltip/style.scss";
import useEventListener from "../../../../hooks/useEventListener";
export interface TooltipHeatmapProps {
cursor: {left: number, top: number}
startDate: string,
endDate: string,
bucket: string,
value: number,
valueFormat: string
}
export interface ChartTooltipHeatmapProps extends TooltipHeatmapProps {
id: string,
u: uPlot,
unit?: string,
isSticky?: boolean,
tooltipOffset: { left: number, top: number },
onClose?: (id: string) => void
}
const ChartTooltipHeatmap: FC<ChartTooltipHeatmapProps> = ({
u,
id,
unit = "",
cursor,
tooltipOffset,
isSticky,
onClose,
startDate,
endDate,
bucket,
valueFormat,
value
}) => {
const tooltipRef = useRef<HTMLDivElement>(null);
const [position, setPosition] = useState({ top: -999, left: -999 });
const [moving, setMoving] = useState(false);
const [moved, setMoved] = useState(false);
const handleClose = () => {
onClose && onClose(id);
};
const handleMouseDown = (e: ReactMouseEvent) => {
setMoved(true);
setMoving(true);
const { clientX, clientY } = e;
setPosition({ top: clientY, left: clientX });
};
const handleMouseMove = useCallback((e: MouseEvent) => {
if (!moving) return;
const { clientX, clientY } = e;
setPosition({ top: clientY, left: clientX });
}, [moving]);
const handleMouseUp = () => {
setMoving(false);
};
const calcPosition = () => {
if (!tooltipRef.current) return;
const topOnChart = cursor.top;
const leftOnChart = cursor.left;
const { width: tooltipWidth, height: tooltipHeight } = tooltipRef.current.getBoundingClientRect();
const { width, height } = u.over.getBoundingClientRect();
const margin = 10;
const overflowX = leftOnChart + tooltipWidth >= width ? tooltipWidth + (2 * margin) : 0;
const overflowY = topOnChart + tooltipHeight >= height ? tooltipHeight + (2 * margin) : 0;
setPosition({
top: topOnChart + tooltipOffset.top + margin - overflowY,
left: leftOnChart + tooltipOffset.left + margin - overflowX
});
};
useEffect(calcPosition, [u, cursor, tooltipOffset, tooltipRef]);
useEventListener("mousemove", handleMouseMove);
useEventListener("mouseup", handleMouseUp);
if (!cursor?.left || !cursor?.top || !value) return null;
return ReactDOM.createPortal((
<div
className={classNames({
"vm-chart-tooltip": true,
"vm-chart-tooltip_sticky": isSticky,
"vm-chart-tooltip_moved": moved
})}
ref={tooltipRef}
style={position}
>
<div className="vm-chart-tooltip-header">
<div className="vm-chart-tooltip-header__date vm-chart-tooltip-header__date_range">
<span>{startDate}</span>
<span>{endDate}</span>
</div>
{isSticky && (
<>
<Button
className="vm-chart-tooltip-header__drag"
variant="text"
size="small"
startIcon={<DragIcon/>}
onMouseDown={handleMouseDown}
/>
<Button
className="vm-chart-tooltip-header__close"
variant="text"
size="small"
startIcon={<CloseIcon/>}
onClick={handleClose}
/>
</>
)}
</div>
<div className="vm-chart-tooltip-data">
<p>
value: <b className="vm-chart-tooltip-data__value">{valueFormat}</b>{unit}
</p>
</div>
<div className="vm-chart-tooltip-info">
{bucket}
</div>
</div>
), u.root);
};
export default ChartTooltipHeatmap;


@@ -1,49 +1,44 @@
-import React, { FC, useCallback, useEffect, useRef, useState } from "preact/compat";
+import React, { FC, useEffect, useMemo, useRef, useState } from "preact/compat";
 import uPlot, {
   AlignedData as uPlotData,
   Options as uPlotOptions,
-  Range
 } from "uplot";
-import { defaultOptions, sizeAxis } from "../../../../utils/uplot/helpers";
-import { dragChart } from "../../../../utils/uplot/events";
-import { getAxes } from "../../../../utils/uplot/axes";
 import { MetricResult } from "../../../../api/types";
-import { dateFromSeconds, formatDateForNativeInput, limitsDurations } from "../../../../utils/time";
-import throttle from "lodash.throttle";
 import { TimeParams } from "../../../../types";
-import { YaxisState } from "../../../../state/graph/reducer";
 import "uplot/dist/uPlot.min.css";
 import classNames from "classnames";
-import dayjs from "dayjs";
 import { useAppState } from "../../../../state/common/StateContext";
-import { heatmapPaths } from "../../../../utils/uplot/heatmap";
-import { DATE_FULL_TIMEZONE_FORMAT } from "../../../../constants/date";
-import ChartTooltipHeatmap, {
-  ChartTooltipHeatmapProps,
-  TooltipHeatmapProps
-} from "../ChartTooltipHeatmap/ChartTooltipHeatmap";
+import {
+  heatmapPaths,
+  handleDestroy,
+  getDefaultOptions,
+  sizeAxis,
+  getAxes,
+  setSelect,
+} from "../../../../utils/uplot";
 import { ElementSize } from "../../../../hooks/useElementSize";
-import useEventListener from "../../../../hooks/useEventListener";
+import useReadyChart from "../../../../hooks/uplot/useReadyChart";
+import useZoomChart from "../../../../hooks/uplot/useZoomChart";
+import usePlotScale from "../../../../hooks/uplot/usePlotScale";
+import useHeatmapTooltip from "../../../../hooks/uplot/useHeatmapTooltip";
+import ChartTooltipWrapper from "../../ChartTooltip";
+import { ChartTooltipProps } from "../../ChartTooltip/ChartTooltip";
 
 export interface HeatmapChartProps {
   metrics: MetricResult[];
   data: uPlotData;
   period: TimeParams;
-  yaxis: YaxisState;
   unit?: string;
-  setPeriod: ({ from, to }: {from: Date, to: Date}) => void;
+  setPeriod: ({ from, to }: { from: Date, to: Date }) => void;
   layoutSize: ElementSize,
   height?: number;
-  onChangeLegend: (val: TooltipHeatmapProps) => void;
+  onChangeLegend: (val: ChartTooltipProps) => void;
 }
 
-enum typeChartUpdate {xRange = "xRange", yRange = "yRange"}
-
 const HeatmapChart: FC<HeatmapChartProps> = ({
   data,
   metrics = [],
   period,
-  yaxis,
   unit,
   setPeriod,
   layoutSize,
@@ -53,144 +48,39 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
   const { isDarkTheme } = useAppState();
 
   const uPlotRef = useRef<HTMLDivElement>(null);
-  const [isPanning, setPanning] = useState(false);
-  const [xRange, setXRange] = useState({ min: period.start, max: period.end });
   const [uPlotInst, setUPlotInst] = useState<uPlot>();
-  const [startTouchDistance, setStartTouchDistance] = useState(0);
-
-  const [tooltipProps, setTooltipProps] = useState<TooltipHeatmapProps | null>(null);
-  const [tooltipOffset, setTooltipOffset] = useState({ left: 0, top: 0 });
-  const [stickyTooltips, setStickyToolTips] = useState<ChartTooltipHeatmapProps[]>([]);
-  const tooltipId = useMemo(() => {
-    return `${tooltipProps?.bucket}_${tooltipProps?.startDate}`;
-  }, [tooltipProps]);
-
-  const setScale = ({ min, max }: { min: number, max: number }): void => {
-    if (isNaN(min) || isNaN(max)) return;
-    setPeriod({
-      from: dayjs(min * 1000).toDate(),
-      to: dayjs(max * 1000).toDate()
-    });
-  };
-  const throttledSetScale = useCallback(throttle(setScale, 500), []);
-  const setPlotScale = ({ min, max }: { min: number, max: number }) => {
-    const delta = (max - min) * 1000;
-    if ((delta < limitsDurations.min) || (delta > limitsDurations.max)) return;
-    setXRange({ min, max });
-    throttledSetScale({ min, max });
-  };
-
-  const onReadyChart = (u: uPlot) => {
-    const factor = 0.9;
-    setTooltipOffset({
-      left: parseFloat(u.over.style.left),
-      top: parseFloat(u.over.style.top)
-    });
-
-    u.over.addEventListener("mousedown", e => {
-      const { ctrlKey, metaKey, button } = e;
-      const leftClick = button === 0;
-      const leftClickWithMeta = leftClick && (ctrlKey || metaKey);
-      if (leftClickWithMeta) {
-        // drag pan
-        dragChart({ u, e, setPanning, setPlotScale, factor });
-      }
-    });
-
-    u.over.addEventListener("touchstart", e => {
-      dragChart({ u, e, setPanning, setPlotScale, factor });
-    });
-
-    u.over.addEventListener("wheel", e => {
-      if (!e.ctrlKey && !e.metaKey) return;
-      e.preventDefault();
-      const { width } = u.over.getBoundingClientRect();
-      const zoomPos = u.cursor.left && u.cursor.left > 0 ? u.cursor.left : 0;
-      const xVal = u.posToVal(zoomPos, "x");
-      const oxRange = (u.scales.x.max || 0) - (u.scales.x.min || 0);
-      const nxRange = e.deltaY < 0 ? oxRange * factor : oxRange / factor;
-      const min = xVal - (zoomPos / width) * nxRange;
-      const max = min + nxRange;
-      u.batch(() => setPlotScale({ min, max }));
-    });
-  };
-
-  const handleKeyDown = useCallback((e: KeyboardEvent) => {
-    const { target, ctrlKey, metaKey, key } = e;
-    const isInput = target instanceof HTMLInputElement || target instanceof HTMLTextAreaElement;
-    if (!uPlotInst || isInput) return;
-
-    const minus = key === "-";
-    const plus = key === "+" || key === "=";
-    if ((minus || plus) && !(ctrlKey || metaKey)) {
-      e.preventDefault();
-      const factor = (xRange.max - xRange.min) / 10 * (plus ? 1 : -1);
-      setPlotScale({
-        min: xRange.min + factor,
-        max: xRange.max - factor
-      });
-    }
-  }, [uPlotInst, xRange]);
-
-  const handleClick = useCallback(() => {
-    if (!tooltipProps?.value) return;
-    const id = `${tooltipProps?.bucket}_${tooltipProps?.startDate}`;
-    const props = {
-      id,
-      unit,
-      tooltipOffset,
-      ...tooltipProps
-    };
-    if (!stickyTooltips.find(t => t.id === id)) {
-      const res = JSON.parse(JSON.stringify(props));
-      setStickyToolTips(prev => [...prev, res]);
-    }
-  }, [stickyTooltips, tooltipProps, tooltipOffset, unit]);
-
-  const handleUnStick = (id: string) => {
-    setStickyToolTips(prev => prev.filter(t => t.id !== id));
-  };
-
-  const setCursor = (u: uPlot) => {
-    const left = u.cursor.left && u.cursor.left > 0 ? u.cursor.left : 0;
-    const top = u.cursor.top && u.cursor.top > 0 ? u.cursor.top : 0;
-    const xArr = (u.data[1][0] || []) as number[];
-    if (!Array.isArray(xArr)) return;
-    const xVal = u.posToVal(left, "x");
-    const yVal = u.posToVal(top, "y");
-    const xIdx = xArr.findIndex((t, i) => xVal >= t && xVal < xArr[i + 1]) || -1;
-    const second = xArr[xIdx + 1];
-    const result = metrics[Math.round(yVal)];
-    if (!result) {
-      setTooltipProps(null);
-      return;
-    }
-    const [endTime = 0, value = ""] = result.values.find(v => v[0] === second) || [];
-    const valueFormat = `${+value}%`;
-    const startTime = xArr[xIdx];
-    const startDate = dayjs(startTime * 1000).tz().format(DATE_FULL_TIMEZONE_FORMAT);
-    const endDate = dayjs(endTime * 1000).tz().format(DATE_FULL_TIMEZONE_FORMAT);
-    setTooltipProps({
-      cursor: { left, top },
-      startDate,
-      endDate,
-      bucket: result?.metric?.vmrange || "",
-      value: +value,
-      valueFormat: valueFormat,
-    });
-  };
-
-  const getRangeX = (): Range.MinMax => [xRange.min, xRange.max];
-
-  const axes = getAxes( [{}], unit);
+  const { xRange, setPlotScale } = usePlotScale({ period, setPeriod });
+  const { onReadyChart, isPanning } = useReadyChart(setPlotScale);
+  useZoomChart({ uPlotInst, xRange, setPlotScale });
+  const {
+    stickyTooltips,
+    handleUnStick,
+    getTooltipProps,
+    setCursor,
+    resetTooltips
+  } = useHeatmapTooltip({ u: uPlotInst, metrics, unit });
+
+  const tooltipProps = useMemo(() => getTooltipProps(), [getTooltipProps]);
+
+  const getHeatmapAxes = () => {
+    const baseAxes = getAxes([{}], unit);
+
+    return [
+      ...baseAxes,
+      {
+        scale: "y",
+        stroke: baseAxes[0].stroke,
+        font: baseAxes[0].font,
+        size: sizeAxis,
+        splits: metrics.map((m, i) => i),
+        values: metrics.map(m => m.metric.vmrange),
+      }
+    ];
+  };
+
   const options: uPlotOptions = {
-    ...defaultOptions,
+    ...getDefaultOptions({ width: layoutSize.width, height }),
     mode: 2,
-    tzDate: ts => dayjs(formatDateForNativeInput(dateFromSeconds(ts))).local().toDate(),
     series: [
       {},
       {
@@ -210,17 +100,7 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
         ],
       },
     ],
-    axes: [
-      ...axes,
-      {
-        scale: "y",
-        stroke: axes[0].stroke,
-        font: axes[0].font,
-        size: sizeAxis,
-        splits: metrics.map((m, i) => i),
-        values: metrics.map(m => m.metric.vmrange),
-      }
-    ],
+    axes: getHeatmapAxes(),
     scales: {
       x: {
         time: true,
@ -228,87 +108,35 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
y: { y: {
log: 2, log: 2,
time: false, time: false,
range: (self, initMin, initMax) => [initMin - 1, initMax + 1] range: (u, initMin, initMax) => [initMin - 1, initMax + 1]
} }
}, },
width: layoutSize.width || 400,
height: height || 500,
plugins: [{ hooks: { ready: onReadyChart, setCursor } }],
hooks: { hooks: {
setSelect: [ ready: [onReadyChart],
(u) => { setCursor: [setCursor],
const min = u.posToVal(u.select.left, "x"); setSelect: [setSelect(setPlotScale)],
const max = u.posToVal(u.select.left + u.select.width, "x"); destroy: [handleDestroy],
setPlotScale({ min, max });
}
]
}, },
}; };
const updateChart = (type: typeChartUpdate): void => {
if (!uPlotInst) return;
switch (type) {
case typeChartUpdate.xRange:
uPlotInst.scales.x.range = getRangeX;
break;
}
if (!isPanning) uPlotInst.redraw();
};
useEffect(() => setXRange({ min: period.start, max: period.end }), [period]);
useEffect(() => { useEffect(() => {
setStickyToolTips([]); resetTooltips();
setTooltipProps(null);
const isValidData = data[0] === null && Array.isArray(data[1]); const isValidData = data[0] === null && Array.isArray(data[1]);
if (!uPlotRef.current || !layoutSize.width || !layoutSize.height || !isValidData) return; if (!uPlotRef.current || !isValidData) return;
const u = new uPlot(options, data, uPlotRef.current); const u = new uPlot(options, data, uPlotRef.current);
setUPlotInst(u); setUPlotInst(u);
setXRange({ min: period.start, max: period.end });
return u.destroy; return u.destroy;
}, [uPlotRef.current, layoutSize, height, isDarkTheme, data]); }, [uPlotRef, data, isDarkTheme]);
const handleTouchStart = (e: TouchEvent) => {
if (e.touches.length !== 2) return;
e.preventDefault();
const dx = e.touches[0].clientX - e.touches[1].clientX;
const dy = e.touches[0].clientY - e.touches[1].clientY;
setStartTouchDistance(Math.sqrt(dx * dx + dy * dy));
};
const handleTouchMove = useCallback((e: TouchEvent) => {
if (e.touches.length !== 2 || !uPlotInst) return;
e.preventDefault();
const dx = e.touches[0].clientX - e.touches[1].clientX;
const dy = e.touches[0].clientY - e.touches[1].clientY;
const endTouchDistance = Math.sqrt(dx * dx + dy * dy);
const diffDistance = startTouchDistance - endTouchDistance;
const max = (uPlotInst.scales.x.max || xRange.max);
const min = (uPlotInst.scales.x.min || xRange.min);
const dur = max - min;
const dir = (diffDistance > 0 ? -1 : 1);
const zoomFactor = dur / 50 * dir;
uPlotInst.batch(() => setPlotScale({
min: min + zoomFactor,
max: max - zoomFactor
}));
}, [uPlotInst, startTouchDistance, xRange]);
useEffect(() => updateChart(typeChartUpdate.xRange), [xRange]);
useEffect(() => updateChart(typeChartUpdate.yRange), [yaxis]);
useEffect(() => { useEffect(() => {
if (tooltipProps) onChangeLegend(tooltipProps); if (!uPlotInst) return;
}, [tooltipProps]); uPlotInst.setSize({ width: layoutSize.width || 400, height: height || 500 });
uPlotInst.redraw();
}, [height, layoutSize]);
useEventListener("click", handleClick); useEffect(() => {
useEventListener("keydown", handleKeyDown); onChangeLegend(tooltipProps);
useEventListener("touchmove", handleTouchMove); }, [tooltipProps]);
useEventListener("touchstart", handleTouchStart);
return ( return (
<div <div
@ -325,25 +153,13 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
className="vm-line-chart__u-plot" className="vm-line-chart__u-plot"
ref={uPlotRef} ref={uPlotRef}
/> />
{uPlotInst && tooltipProps && (
<ChartTooltipHeatmap
{...tooltipProps}
unit={unit}
u={uPlotInst}
tooltipOffset={tooltipOffset}
id={tooltipId}
/>
)}
{uPlotInst && stickyTooltips.map(t => ( <ChartTooltipWrapper
<ChartTooltipHeatmap showTooltip={!!tooltipProps.show}
{...t} tooltipProps={tooltipProps}
isSticky stickyTooltips={stickyTooltips}
u={uPlotInst} handleUnStick={handleUnStick}
key={t.id} />
onClose={handleUnStick}
/>
))}
</div> </div>
); );
}; };
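Note: the new hooks list registers `setSelect(setPlotScale)` rather than an inline closure, so the shared helper must be curried. A minimal sketch of what that helper presumably looks like, reconstructed from the removed inline hook above (the name comes from the imports; the body is an assumption):

```ts
import uPlot from "uplot";

// Hypothetical reconstruction, inferred from the call site `setSelect(setPlotScale)`.
export const setSelect = (setPlotScale: ({ min, max }: { min: number, max: number }) => void) =>
  (u: uPlot): void => {
    // Translate the pixel-space selection box into x-axis (unix time) values.
    const min = u.posToVal(u.select.left, "x");
    const max = u.posToVal(u.select.left + u.select.width, "x");
    setPlotScale({ min, max });
  };
```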

View file

@@ -1,39 +1,39 @@
-import React, { FC, useEffect, useState } from "preact/compat";
-import { gradMetal16 } from "../../../../utils/uplot/heatmap";
+import React, { FC, useEffect, useMemo, useState } from "preact/compat";
+import { gradMetal16 } from "../../../../utils/uplot";
+import { SeriesItem, LegendItemType } from "../../../../types";
 import "./style.scss";
-import { TooltipHeatmapProps } from "../ChartTooltipHeatmap/ChartTooltipHeatmap";
-import { SeriesItem } from "../../../../utils/uplot/series";
 import LegendItem from "../../Line/Legend/LegendItem/LegendItem";
-import { LegendItemType } from "../../../../utils/uplot/types";
+import { ChartTooltipProps } from "../../ChartTooltip/ChartTooltip";
 
 interface LegendHeatmapProps {
   min: number
   max: number
-  legendValue: TooltipHeatmapProps | null,
+  legendValue: ChartTooltipProps | null,
   series: SeriesItem[]
 }
 
-const LegendHeatmap: FC<LegendHeatmapProps> = (
-  {
-    min,
-    max,
-    legendValue,
-    series,
-  }
-) => {
+const LegendHeatmap: FC<LegendHeatmapProps> = ({
+  min,
+  max,
+  legendValue,
+  series
+}) => {
 
   const [percent, setPercent] = useState(0);
   const [valueFormat, setValueFormat] = useState("");
   const [minFormat, setMinFormat] = useState("");
   const [maxFormat, setMaxFormat] = useState("");
 
+  const value = useMemo(() => {
+    return parseFloat(String(legendValue?.value || 0).replace("%", ""));
+  }, [legendValue]);
+
   useEffect(() => {
-    const value = legendValue?.value || 0;
     setPercent(value ? (value - min) / (max - min) * 100 : 0);
     setValueFormat(value ? `${value}%` : "");
     setMinFormat(`${min}%`);
     setMaxFormat(`${max}%`);
-  }, [legendValue, min, max]);
+  }, [value, min, max]);
 
   return (
     <div className="vm-legend-heatmap__wrapper">
@@ -42,7 +42,7 @@ const LegendHeatmap: FC<LegendHeatmapProps> = (
         className="vm-legend-heatmap-gradient"
         style={{ background: `linear-gradient(to right, ${gradMetal16.join(", ")})` }}
       >
-        {!!legendValue?.value && (
+        {!!value && (
           <div
             className="vm-legend-heatmap-gradient__value"
            style={{ left: `${percent}%` }}
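The new `value` memo exists because the tooltip may now hand the legend a pre-formatted string such as "12.5%" instead of a number. A standalone sketch (not part of the diff) of how each input shape is handled:

```ts
// Mirrors the memo body above for the three input shapes it can receive.
const parseLegendValue = (v: string | number | undefined): number =>
  parseFloat(String(v || 0).replace("%", ""));

parseLegendValue("12.5%");   // => 12.5 (formatted heatmap value)
parseLegendValue(7);         // => 7    (plain number)
parseLegendValue(undefined); // => 0    (no legend value yet)

// The gradient marker position then follows the usual normalization:
const percent = (value: number, min: number, max: number) =>
  (value - min) / (max - min) * 100;
```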

View file

@@ -1,180 +0,0 @@
import React, { FC, useCallback, useEffect, useMemo, useRef, useState } from "preact/compat";
import uPlot from "uplot";
import { MetricResult } from "../../../../api/types";
import { formatPrettyNumber } from "../../../../utils/uplot/helpers";
import dayjs from "dayjs";
import { DATE_FULL_TIMEZONE_FORMAT } from "../../../../constants/date";
import ReactDOM from "react-dom";
import get from "lodash.get";
import Button from "../../../Main/Button/Button";
import { CloseIcon, DragIcon } from "../../../Main/Icons";
import classNames from "classnames";
import { MouseEvent as ReactMouseEvent } from "react";
import "./style.scss";
import { SeriesItem } from "../../../../utils/uplot/series";
import useEventListener from "../../../../hooks/useEventListener";
export interface ChartTooltipProps {
id: string,
u: uPlot,
metricItem: MetricResult,
seriesItem: SeriesItem,
unit?: string,
isSticky?: boolean,
showQueryNum?: boolean,
tooltipOffset: { left: number, top: number },
tooltipIdx: { seriesIdx: number, dataIdx: number },
onClose?: (id: string) => void
}
const ChartTooltip: FC<ChartTooltipProps> = ({
u,
id,
unit = "",
metricItem,
seriesItem,
tooltipIdx,
tooltipOffset,
isSticky,
showQueryNum,
onClose
}) => {
const tooltipRef = useRef<HTMLDivElement>(null);
const [position, setPosition] = useState({ top: -999, left: -999 });
const [moving, setMoving] = useState(false);
const [moved, setMoved] = useState(false);
const [seriesIdx, setSeriesIdx] = useState(tooltipIdx.seriesIdx);
const [dataIdx, setDataIdx] = useState(tooltipIdx.dataIdx);
const value = get(u, ["data", seriesIdx, dataIdx], 0);
const valueFormat = formatPrettyNumber(value, get(u, ["scales", "1", "min"], 0), get(u, ["scales", "1", "max"], 1));
const dataTime = u.data[0][dataIdx];
const date = dayjs(dataTime * 1000).tz().format(DATE_FULL_TIMEZONE_FORMAT);
const color = `${seriesItem?.stroke}`;
const calculations = seriesItem?.calculations || {};
const group = metricItem?.group || 0;
const fullMetricName = useMemo(() => {
const metric = metricItem?.metric || {};
const labelNames = Object.keys(metric).filter(x => x != "__name__");
const labels = labelNames.map(key => `${key}=${JSON.stringify(metric[key])}`);
let metricName = metric["__name__"] || "";
if (labels.length > 0) {
metricName += "{" + labels.join(",") + "}";
}
return metricName;
}, [metricItem]);
const handleClose = () => {
onClose && onClose(id);
};
const handleMouseDown = (e: ReactMouseEvent<HTMLButtonElement, MouseEvent>) => {
setMoved(true);
setMoving(true);
const { clientX, clientY } = e;
setPosition({ top: clientY, left: clientX });
};
const handleMouseMove = useCallback((e: MouseEvent) => {
if (!moving) return;
const { clientX, clientY } = e;
setPosition({ top: clientY, left: clientX });
}, [moving]);
const handleMouseUp = () => {
setMoving(false);
};
const calcPosition = () => {
if (!tooltipRef.current) return;
const topOnChart = u.valToPos((value || 0), seriesItem?.scale || "1");
const leftOnChart = u.valToPos(dataTime, "x");
const { width: tooltipWidth, height: tooltipHeight } = tooltipRef.current.getBoundingClientRect();
const { width, height } = u.over.getBoundingClientRect();
const margin = 10;
const overflowX = leftOnChart + tooltipWidth >= width ? tooltipWidth + (2 * margin) : 0;
const overflowY = topOnChart + tooltipHeight >= height ? tooltipHeight + (2 * margin) : 0;
const position = {
top: topOnChart + tooltipOffset.top + margin - overflowY,
left: leftOnChart + tooltipOffset.left + margin - overflowX
};
if (position.left < 0) position.left = 20;
if (position.top < 0) position.top = 20;
setPosition(position);
};
useEffect(calcPosition, [u, value, dataTime, seriesIdx, tooltipOffset, tooltipRef]);
useEffect(() => {
setSeriesIdx(tooltipIdx.seriesIdx);
setDataIdx(tooltipIdx.dataIdx);
}, [tooltipIdx]);
useEventListener("mousemove", handleMouseMove);
useEventListener("mouseup", handleMouseUp);
if (tooltipIdx.seriesIdx < 0 || tooltipIdx.dataIdx < 0) return null;
return ReactDOM.createPortal((
<div
className={classNames({
"vm-chart-tooltip": true,
"vm-chart-tooltip_sticky": isSticky,
"vm-chart-tooltip_moved": moved
})}
ref={tooltipRef}
style={position}
>
<div className="vm-chart-tooltip-header">
<div className="vm-chart-tooltip-header__date">
{showQueryNum && (<div>Query {group}</div>)}
{date}
</div>
{isSticky && (
<>
<Button
className="vm-chart-tooltip-header__drag"
variant="text"
size="small"
startIcon={<DragIcon/>}
onMouseDown={handleMouseDown}
/>
<Button
className="vm-chart-tooltip-header__close"
variant="text"
size="small"
startIcon={<CloseIcon/>}
onClick={handleClose}
/>
</>
)}
</div>
<div className="vm-chart-tooltip-data">
<div
className="vm-chart-tooltip-data__marker"
style={{ background: color }}
/>
<div>
<b>{valueFormat}{unit}</b><br/>
median:<b>{calculations.median}</b>, min:<b>{calculations.min}</b>, max:<b>{calculations.max}</b>
</div>
</div>
<div className="vm-chart-tooltip-info">
{fullMetricName}
</div>
</div>
), u.root);
};
export default ChartTooltip;
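For reference, the `fullMetricName` memo in this removed component rendered metrics in the usual PromQL-like form. A self-contained example of its output, using the same logic with hypothetical sample labels:

```ts
const metric: Record<string, string> = {
  __name__: "vm_rows",          // hypothetical sample metric
  type: "indexdb",
  instance: "localhost:8428",
};

// Same logic as the removed memo body.
const labels = Object.keys(metric)
  .filter(x => x != "__name__")
  .map(key => `${key}=${JSON.stringify(metric[key])}`);
let metricName = metric["__name__"] || "";
if (labels.length > 0) {
  metricName += "{" + labels.join(",") + "}";
}
// metricName === 'vm_rows{type="indexdb",instance="localhost:8428"}'
```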

View file

@@ -1,5 +1,5 @@
 import React, { FC, useMemo } from "preact/compat";
-import { LegendItemType } from "../../../../utils/uplot/types";
+import { LegendItemType } from "../../../../types";
 import LegendItem from "./LegendItem/LegendItem";
 import Accordion from "../../../Main/Accordion/Accordion";
 import "./style.scss";

View file

@@ -1,10 +1,11 @@
 import React, { FC, useMemo } from "preact/compat";
 import { MouseEvent } from "react";
-import { LegendItemType } from "../../../../../utils/uplot/types";
+import { LegendItemType } from "../../../../../types";
 import "./style.scss";
 import classNames from "classnames";
 import { getFreeFields } from "./helpers";
 import useCopyToClipboard from "../../../../../hooks/useCopyToClipboard";
+import { STATS_ORDER } from "../../../../../constants/graph";
 
 interface LegendItemProps {
   legend: LegendItemType;
@@ -70,8 +71,16 @@ const LegendItem: FC<LegendItemProps> = ({ legend, onChange, isHeatmap }) => {
         </span>
       </div>
       {!isHeatmap && showCalculations && (
-        <div className="vm-legend-item-values">
-          median:{calculations.median}, min:{calculations.min}, max:{calculations.max}, last:{calculations.last}
+        <div className="vm-legend-item-stats">
+          {STATS_ORDER.map((key, i) => (
+            <div
+              className="vm-legend-item-stats-row"
+              key={i}
+            >
+              <span className="vm-legend-item-stats-row__key">{key}:</span>
+              <span className="vm-legend-item-stats-row__value">{calculations[key]}</span>
+            </div>
+          ))}
         </div>
       )}
     </div>
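`STATS_ORDER` is imported from `constants/graph` but its definition is not part of this diff. Judging by the stats the old inline markup printed, it is presumably along these lines (the exact order and typing are assumptions):

```ts
// Hypothetical reconstruction — only the four keys are confirmed by the old markup.
export const STATS_ORDER = ["median", "min", "max", "last"] as const;

type StatKey = (typeof STATS_ORDER)[number];
// With `calculations` typed as Record<StatKey, string | number>,
// the `calculations[key]` lookup in the new markup stays type-safe.
```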

View file

@@ -1,4 +1,4 @@
-import { LegendItemType } from "../../../../../utils/uplot/types";
+import { LegendItemType } from "../../../../../types";
 
 export const getFreeFields = (legend: LegendItemType) => {
   const keys = Object.keys(legend.freeFormFields).filter(f => f !== "__name__");

View file

@@ -34,6 +34,7 @@
   }
 
   &__marker {
+    position: relative;
     width: 14px;
     height: 14px;
     box-sizing: border-box;
@@ -59,10 +60,29 @@
     }
   }
 
-  &-values {
+  &-stats {
     grid-column: 2;
     display: flex;
     align-items: center;
     gap: $padding-small;
+
+    &-row {
+      display: flex;
+      align-items: center;
+      justify-content: flex-start;
+
+      &:not(:last-child) {
+        padding-right: $padding-global;
+      }
+
+      &__key {
+        line-height: 1;
+        color: $color-text-secondary;
+        margin-right: calc($padding-small / 2);
+      }
+
+      &__value {
+      }
+    }
   }
 }

View file

@@ -1,26 +1,33 @@
-import React, { FC, useCallback, useEffect, useRef, useState } from "preact/compat";
+import React, { FC, useEffect, useRef, useState } from "preact/compat";
 import uPlot, {
   AlignedData as uPlotData,
   Options as uPlotOptions,
   Series as uPlotSeries,
 } from "uplot";
-import { defaultOptions } from "../../../../utils/uplot/helpers";
-import { dragChart } from "../../../../utils/uplot/events";
-import { getAxes } from "../../../../utils/uplot/axes";
+import {
+  getDefaultOptions,
+  addSeries,
+  delSeries,
+  getRangeX,
+  getRangeY,
+  getScales,
+  handleDestroy,
+  getAxes,
+  setSelect
+} from "../../../../utils/uplot";
 import { MetricResult } from "../../../../api/types";
-import { dateFromSeconds, formatDateForNativeInput, limitsDurations } from "../../../../utils/time";
 import { TimeParams } from "../../../../types";
 import { YaxisState } from "../../../../state/graph/reducer";
 import "uplot/dist/uPlot.min.css";
 import "./style.scss";
 import classNames from "classnames";
-import ChartTooltip, { ChartTooltipProps } from "../ChartTooltip/ChartTooltip";
-import dayjs from "dayjs";
 import { useAppState } from "../../../../state/common/StateContext";
-import { SeriesItem } from "../../../../utils/uplot/series";
 import { ElementSize } from "../../../../hooks/useElementSize";
-import useEventListener from "../../../../hooks/useEventListener";
-import { getRangeX, getRangeY, getScales } from "../../../../utils/uplot/scales";
+import useReadyChart from "../../../../hooks/uplot/useReadyChart";
+import useZoomChart from "../../../../hooks/uplot/useZoomChart";
+import usePlotScale from "../../../../hooks/uplot/usePlotScale";
+import useLineTooltip from "../../../../hooks/uplot/useLineTooltip";
+import ChartTooltipWrapper from "../../ChartTooltip";
 
 export interface LineChartProps {
   metrics: MetricResult[];
@@ -29,7 +36,7 @@ export interface LineChartProps {
   yaxis: YaxisState;
   series: uPlotSeries[];
   unit?: string;
-  setPeriod: ({ from, to }: {from: Date, to: Date}) => void;
+  setPeriod: ({ from, to }: { from: Date, to: Date }) => void;
   layoutSize: ElementSize;
   height?: number;
 }
@@ -48,208 +55,41 @@ const LineChart: FC<LineChartProps> = ({
   const { isDarkTheme } = useAppState();
   const uPlotRef = useRef<HTMLDivElement>(null);
-  const [isPanning, setPanning] = useState(false);
-  const [xRange, setXRange] = useState({ min: period.start, max: period.end });
   const [uPlotInst, setUPlotInst] = useState<uPlot>();
-  const [startTouchDistance, setStartTouchDistance] = useState(0);
-  const [showTooltip, setShowTooltip] = useState(false);
-  const [tooltipIdx, setTooltipIdx] = useState({ seriesIdx: -1, dataIdx: -1 });
-  const [tooltipOffset, setTooltipOffset] = useState({ left: 0, top: 0 });
-  const [stickyTooltips, setStickyToolTips] = useState<ChartTooltipProps[]>([]);
-
-  const setPlotScale = ({ min, max }: { min: number, max: number }) => {
-    const delta = (max - min) * 1000;
-    if ((delta < limitsDurations.min) || (delta > limitsDurations.max)) return;
-    setXRange({ min, max });
-    setPeriod({
-      from: dayjs(min * 1000).toDate(),
-      to: dayjs(max * 1000).toDate()
-    });
-  };
-
-  const onReadyChart = (u: uPlot): void => {
-    const factor = 0.9;
-    setTooltipOffset({
-      left: parseFloat(u.over.style.left),
-      top: parseFloat(u.over.style.top)
-    });
-
-    u.over.addEventListener("mousedown", e => {
-      const { ctrlKey, metaKey, button } = e;
-      const leftClick = button === 0;
-      const leftClickWithMeta = leftClick && (ctrlKey || metaKey);
-      if (leftClickWithMeta) {
-        // drag pan
-        dragChart({ u, e, setPanning, setPlotScale, factor });
-      }
-    });
-
-    u.over.addEventListener("touchstart", e => {
-      dragChart({ u, e, setPanning, setPlotScale, factor });
-    });
-
-    u.over.addEventListener("wheel", e => {
-      if (!e.ctrlKey && !e.metaKey) return;
-      e.preventDefault();
-      const { width } = u.over.getBoundingClientRect();
-      const zoomPos = u.cursor.left && u.cursor.left > 0 ? u.cursor.left : 0;
-      const xVal = u.posToVal(zoomPos, "x");
-      const oxRange = (u.scales.x.max || 0) - (u.scales.x.min || 0);
-      const nxRange = e.deltaY < 0 ? oxRange * factor : oxRange / factor;
-      const min = xVal - (zoomPos / width) * nxRange;
-      const max = min + nxRange;
-      u.batch(() => setPlotScale({ min, max }));
-    });
-  };
-
-  const handleKeyDown = useCallback((e: KeyboardEvent) => {
-    const { target, ctrlKey, metaKey, key } = e;
-    const isInput = target instanceof HTMLInputElement || target instanceof HTMLTextAreaElement;
-    if (!uPlotInst || isInput) return;
-    const minus = key === "-";
-    const plus = key === "+" || key === "=";
-    if ((minus || plus) && !(ctrlKey || metaKey)) {
-      e.preventDefault();
-      const factor = (xRange.max - xRange.min) / 10 * (plus ? 1 : -1);
-      setPlotScale({
-        min: xRange.min + factor,
-        max: xRange.max - factor
-      });
-    }
-  }, [uPlotInst, xRange]);
-
-  const getChartProps = useCallback(() => {
-    const { seriesIdx, dataIdx } = tooltipIdx;
-    const id = `${seriesIdx}_${dataIdx}`;
-    const metricItem = metrics[seriesIdx-1];
-    const seriesItem = series[seriesIdx] as SeriesItem;
-    const groups = new Set(metrics.map(m => m.group));
-    const showQueryNum = groups.size > 1;
-    return {
-      id,
-      unit,
-      seriesItem,
-      metricItem,
-      tooltipIdx,
-      tooltipOffset,
-      showQueryNum,
-    };
-  }, [uPlotInst, metrics, series, tooltipIdx, tooltipOffset, unit]);
-
-  const handleClick = useCallback(() => {
-    if (!showTooltip) return;
-    const props = getChartProps();
-    if (!stickyTooltips.find(t => t.id === props.id)) {
-      setStickyToolTips(prev => [...prev, props as ChartTooltipProps]);
-    }
-  }, [getChartProps, stickyTooltips, showTooltip]);
-
-  const handleUnStick = (id: string) => {
-    setStickyToolTips(prev => prev.filter(t => t.id !== id));
-  };
-
-  const setCursor = (u: uPlot) => {
-    const dataIdx = u.cursor.idx ?? -1;
-    setTooltipIdx(prev => ({ ...prev, dataIdx }));
-  };
-
-  const seriesFocus = (u: uPlot, sidx: (number | null)) => {
-    const seriesIdx = sidx ?? -1;
-    setTooltipIdx(prev => ({ ...prev, seriesIdx }));
-  };
-
-  const addSeries = (u: uPlot, series: uPlotSeries[]) => {
-    series.forEach((s) => {
-      u.addSeries(s);
-    });
-  };
-
-  const delSeries = (u: uPlot) => {
-    for (let i = u.series.length - 1; i >= 0; i--) {
-      u.delSeries(i);
-    }
-  };
-
-  const delHooks = (u: uPlot) => {
-    Object.keys(u.hooks).forEach(hook => {
-      u.hooks[hook as keyof uPlot.Hooks.Arrays] = [];
-    });
-  };
-
-  const handleDestroy = (u: uPlot) => {
-    delSeries(u);
-    delHooks(u);
-    u.setData([]);
-  };
-
-  const setSelect = (u: uPlot) => {
-    const min = u.posToVal(u.select.left, "x");
-    const max = u.posToVal(u.select.left + u.select.width, "x");
-    setPlotScale({ min, max });
-  };
+  const { xRange, setPlotScale } = usePlotScale({ period, setPeriod });
+  const { onReadyChart, isPanning } = useReadyChart(setPlotScale);
+  useZoomChart({ uPlotInst, xRange, setPlotScale });
+  const {
+    showTooltip,
+    stickyTooltips,
+    handleUnStick,
+    getTooltipProps,
+    seriesFocus,
+    setCursor,
+    resetTooltips
+  } = useLineTooltip({ u: uPlotInst, metrics, series, unit });
 
   const options: uPlotOptions = {
-    ...defaultOptions,
-    tzDate: ts => dayjs(formatDateForNativeInput(dateFromSeconds(ts))).local().toDate(),
+    ...getDefaultOptions({ width: layoutSize.width, height }),
     series,
-    axes: getAxes( [{}, { scale: "1" }], unit),
+    axes: getAxes([{}, { scale: "1" }], unit),
     scales: getScales(yaxis, xRange),
-    width: layoutSize.width || 400,
-    height: height || 500,
     hooks: {
       ready: [onReadyChart],
       setSeries: [seriesFocus],
       setCursor: [setCursor],
-      setSelect: [setSelect],
+      setSelect: [setSelect(setPlotScale)],
       destroy: [handleDestroy],
     },
   };
 
-  const handleTouchStart = (e: TouchEvent) => {
-    if (e.touches.length !== 2) return;
-    e.preventDefault();
-    const dx = e.touches[0].clientX - e.touches[1].clientX;
-    const dy = e.touches[0].clientY - e.touches[1].clientY;
-    setStartTouchDistance(Math.sqrt(dx * dx + dy * dy));
-  };
-
-  const handleTouchMove = useCallback((e: TouchEvent) => {
-    if (e.touches.length !== 2 || !uPlotInst) return;
-    e.preventDefault();
-    const dx = e.touches[0].clientX - e.touches[1].clientX;
-    const dy = e.touches[0].clientY - e.touches[1].clientY;
-    const endTouchDistance = Math.sqrt(dx * dx + dy * dy);
-    const diffDistance = startTouchDistance - endTouchDistance;
-    const max = (uPlotInst.scales.x.max || xRange.max);
-    const min = (uPlotInst.scales.x.min || xRange.min);
-    const dur = max - min;
-    const dir = (diffDistance > 0 ? -1 : 1);
-    const zoomFactor = dur / 50 * dir;
-    uPlotInst.batch(() => setPlotScale({
-      min: min + zoomFactor,
-      max: max - zoomFactor
-    }));
-  }, [uPlotInst, startTouchDistance, xRange]);
-
   useEffect(() => {
-    setXRange({ min: period.start, max: period.end });
-  }, [period]);
-
-  useEffect(() => {
-    setStickyToolTips([]);
-    setTooltipIdx({ seriesIdx: -1, dataIdx: -1 });
+    resetTooltips();
     if (!uPlotRef.current) return;
     if (uPlotInst) uPlotInst.destroy();
     const u = new uPlot(options, data, uPlotRef.current);
     setUPlotInst(u);
-    setXRange({ min: period.start, max: period.end });
     return u.destroy;
   }, [uPlotRef, isDarkTheme]);
@@ -287,15 +127,6 @@ const LineChart: FC<LineChartProps> = ({
     uPlotInst.redraw();
   }, [height, layoutSize]);
 
-  useEffect(() => {
-    setShowTooltip(tooltipIdx.dataIdx !== -1 && tooltipIdx.seriesIdx !== -1);
-  }, [tooltipIdx]);
-
-  useEventListener("click", handleClick);
-  useEventListener("keydown", handleKeyDown);
-  useEventListener("touchmove", handleTouchMove);
-  useEventListener("touchstart", handleTouchStart);
-
   return (
     <div
       className={classNames({
@@ -311,22 +142,12 @@ const LineChart: FC<LineChartProps> = ({
         className="vm-line-chart__u-plot"
         ref={uPlotRef}
       />
-      {uPlotInst && showTooltip && (
-        <ChartTooltip
-          {...getChartProps()}
-          u={uPlotInst}
-        />
-      )}
-      {uPlotInst && stickyTooltips.map(t => (
-        <ChartTooltip
-          {...t}
-          isSticky
-          u={uPlotInst}
-          key={t.id}
-          onClose={handleUnStick}
-        />
-      ))}
+      <ChartTooltipWrapper
+        showTooltip={showTooltip}
+        tooltipProps={getTooltipProps()}
+        stickyTooltips={stickyTooltips}
+        handleUnStick={handleUnStick}
+      />
     </div>
   );
 };
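Most of the deleted logic above moved into the new `hooks/uplot/*` hooks. As one example, `usePlotScale` presumably packages the removed `xRange`/`setPlotScale` pair; a sketch reconstructed from that removed code (hook name and return shape taken from the import and call site, internals assumed):

```ts
import { useEffect, useState } from "preact/compat";
import dayjs from "dayjs";
// limitsDurations was imported from utils/time in the old component.
import { limitsDurations } from "../../utils/time";

interface UsePlotScaleArgs {
  period: { start: number, end: number };
  setPeriod: ({ from, to }: { from: Date, to: Date }) => void;
}

const usePlotScale = ({ period, setPeriod }: UsePlotScaleArgs) => {
  const [xRange, setXRange] = useState({ min: period.start, max: period.end });

  const setPlotScale = ({ min, max }: { min: number, max: number }) => {
    // Same guard as the removed inline version: ignore zooms outside the allowed window.
    const delta = (max - min) * 1000;
    if ((delta < limitsDurations.min) || (delta > limitsDurations.max)) return;
    setXRange({ min, max });
    setPeriod({
      from: dayjs(min * 1000).toDate(),
      to: dayjs(max * 1000).toDate()
    });
  };

  // Follow external period changes (replaces the removed `[period]` effect).
  useEffect(() => {
    setXRange({ min: period.start, max: period.end });
  }, [period]);

  return { xRange, setPlotScale };
};

export default usePlotScale;
```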

View file

@@ -76,6 +76,7 @@ const AdditionalSettings: FC = () => {
           variant="outlined"
           startIcon={<TuneIcon/>}
           onClick={handleToggleList}
+          ariaLabel="additional the query settings"
         />
       </div>
       <Popper

Some files were not shown because too many files have changed in this diff.