Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

commit 8561bb48fd

53 changed files with 1572 additions and 649 deletions
32	.github/ISSUE_TEMPLATE/question.yml (vendored, Normal file)

@ -0,0 +1,32 @@
+name: Question
+description: Ask a question regarding VictoriaMetrics or its components
+labels: [question]
+body:
+  - type: textarea
+    id: describe-the-component
+    attributes:
+      label: Is your question request related to a specific component?
+      placeholder: |
+        VictoriaMetrics, vmagent, vmalert, vmui, etc...
+    validations:
+      required: false
+  - type: textarea
+    id: describe-the-question
+    attributes:
+      label: Describe the question in detail
+      description: |
+        A clear and concise description of the issue and the question.
+    validations:
+      required: true
+  - type: checkboxes
+    id: troubleshooting
+    attributes:
+      label: Troubleshooting docs
+      description: I am familiar with the following troubleshooting docs
+      options:
+        - label: General - https://docs.victoriametrics.com/Troubleshooting.html
+          required: false
+        - label: vmagent - https://docs.victoriametrics.com/vmagent.html#troubleshooting
+          required: false
+        - label: vmalert - https://docs.victoriametrics.com/vmalert.html#troubleshooting
+          required: false
28	README.md

@ -126,11 +126,22 @@ See also [articles and slides about VictoriaMetrics from our users](https://docs

 ## Operation

-### How to start VictoriaMetrics
+### Install

-Just download [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or [Docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and start it with the desired command-line flags.
+To quickly try VictoriaMetrics, just download [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or [Docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and start it with the desired command-line flags.
 See also [QuickStart guide](https://docs.victoriametrics.com/Quick-Start.html) for additional information.

+VictoriaMetrics can also be installed via these installation methods:
+
+* [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts).
+* [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator).
+* [Ansible role for installing cluster VictoriaMetrics (by VictoriaMetrics)](https://github.com/VictoriaMetrics/ansible-playbooks).
+* [Ansible role for installing cluster VictoriaMetrics (by community)](https://github.com/Slapper/ansible-victoriametrics-cluster-role).
+* [Ansible role for installing single-node VictoriaMetrics (by community)](https://github.com/dreamteam-gg/ansible-victoriametrics-role).
+* [Snap package for VictoriaMetrics](https://snapcraft.io/victoriametrics).
+
+### How to start VictoriaMetrics
+
 The following command-line flags are used the most:

 * `-storageDataPath` - VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory.
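For a concrete picture of the "How to start VictoriaMetrics" step documented above, here is a minimal illustrative invocation; the binary name and the data directory are placeholders (they depend on the release archive and the host), and only `-storageDataPath` and `-httpListenAddr` come from the README text in this diff:

```console
# start single-node VictoriaMetrics with an explicit data directory and the default HTTP port
./victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics -httpListenAddr=:8428
```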
@ -2039,17 +2050,10 @@ It is safe sharing the collected profiles from security point of view, since the

 ## Integrations

-* [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts).
-* [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator).
-* [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`.
-  See [these docs](https://github.com/netdata/netdata#integrations).
 * [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend.
   See [this example](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml).
-* [Ansible role for installing cluster VictoriaMetrics (by VictoriaMetrics)](https://github.com/VictoriaMetrics/ansible-playbooks).
-* [Ansible role for installing cluster VictoriaMetrics (by community)](https://github.com/Slapper/ansible-victoriametrics-cluster-role).
-* [Ansible role for installing single-node VictoriaMetrics (by community)](https://github.com/dreamteam-gg/ansible-victoriametrics-role).
-* [Snap package for VictoriaMetrics](https://snapcraft.io/victoriametrics).
+* [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`.
+  See [these docs](https://github.com/netdata/netdata#integrations).
 * [vmalert-cli](https://github.com/aorfanos/vmalert-cli) - a CLI application for managing [vmalert](https://docs.victoriametrics.com/vmalert.html).

 ## Third-party contributions
@ -2367,7 +2371,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -promscrape.seriesLimitPerTarget int
     	Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info
   -promscrape.streamParse
-    	Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is posible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
+    	Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
   -promscrape.suppressDuplicateScrapeTargetErrors
     	Whether to suppress 'duplicate scrape target' errors; see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details
   -promscrape.suppressScrapeErrors
@ -26,7 +26,8 @@ import (
 var (
 	httpListenAddr   = flag.String("httpListenAddr", ":8428", "TCP address to listen for http connections. See also -httpListenAddr.useProxyProtocol")
 	useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
-		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
+		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
+		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
 	minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Leave only the last sample in every time series per each discrete interval "+
 		"equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling")
 	dryRun = flag.Bool("dryRun", false, "Whether to check only -promscrape.config and then exit. "+
@ -61,7 +62,7 @@ func main() {
 		if err := promscrape.CheckConfig(); err != nil {
 			logger.Fatalf("error when checking -promscrape.config: %s", err)
 		}
-		logger.Infof("-promscrape.config is ok; exitting with 0 status code")
+		logger.Infof("-promscrape.config is ok; exiting with 0 status code")
 		return
 	}
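The updated flag help above implies a deployment pattern worth sketching: once `-httpListenAddr.useProxyProtocol` is enabled, the regular `/metrics` endpoint can no longer be scraped over that listener, so self-metrics can be pushed instead. The invocation below is a hypothetical illustration only; the target URL and values are placeholders, not taken from the diff:

```console
# proxy protocol enabled; push own metrics to another VictoriaMetrics instance instead of exposing /metrics
./victoria-metrics-prod -httpListenAddr=:8428 -httpListenAddr.useProxyProtocol \
  -pushmetrics.url=http://monitoring-vm:8428/api/v1/import/prometheus
```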
@ -1398,7 +1398,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
   -promscrape.seriesLimitPerTarget int
     	Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info
   -promscrape.streamParse
-    	Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is posible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
+    	Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
   -promscrape.suppressDuplicateScrapeTargetErrors
     	Whether to suppress 'duplicate scrape target' errors; see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details
   -promscrape.suppressScrapeErrors
@ -46,7 +46,8 @@ var (
 		"Set this flag to empty value in order to disable listening on any port. This mode may be useful for running multiple vmagent instances on the same server. "+
 		"Note that /targets and /metrics pages aren't available if -httpListenAddr=''. See also -httpListenAddr.useProxyProtocol")
 	useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
-		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
+		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
+		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
 	influxListenAddr = flag.String("influxListenAddr", "", "TCP and UDP address to listen for InfluxDB line protocol data. Usually :8089 must be set. Doesn't work if empty. "+
 		"This flag isn't needed when ingesting data over HTTP - just send it to http://<vmagent>:8429/write . "+
 		"See also -influxListenAddr.useProxyProtocol")
@ -98,7 +99,7 @@ func main() {
 		if err := promscrape.CheckConfig(); err != nil {
 			logger.Fatalf("error when checking -promscrape.config: %s", err)
 		}
-		logger.Infof("-promscrape.config is ok; exitting with 0 status code")
+		logger.Infof("-promscrape.config is ok; exiting with 0 status code")
 		return
 	}
 	if *dryRun {
@ -108,7 +109,7 @@ func main() {
 		if err := promscrape.CheckConfig(); err != nil {
 			logger.Fatalf("error when checking -promscrape.config: %s", err)
 		}
-		logger.Infof("all the configs are ok; exitting with 0 status code")
+		logger.Infof("all the configs are ok; exiting with 0 status code")
 		return
 	}
@ -147,10 +147,6 @@ func newHTTPClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persiste
 	}
 	c.sendBlock = c.sendBlockHTTP

-	return c
-}
-
-func (c *client) init(argIdx, concurrency int, sanitizedURL string) {
 	useVMProto := forceVMProto.GetOptionalArg(argIdx)
 	usePromProto := forcePromProto.GetOptionalArg(argIdx)
 	if useVMProto && usePromProto {
@ -169,6 +165,10 @@ func (c *client) init(argIdx, concurrency int, sanitizedURL string) {
 	}
 	c.useVMProto = useVMProto

+	return c
+}
+
+func (c *client) init(argIdx, concurrency int, sanitizedURL string) {
 	if bytesPerSec := rateLimit.GetOptionalArgOrDefault(argIdx, 0); bytesPerSec > 0 {
 		logger.Infof("applying %d bytes per second rate limit for -remoteWrite.url=%q", bytesPerSec, sanitizedURL)
 		c.rl.perSecondLimit = int64(bytesPerSec)
@ -2,6 +2,7 @@ package main

 import (
 	"context"
+	"errors"
 	"fmt"
 	"hash/fnv"
 	"net/url"
@ -44,6 +45,9 @@ type Group struct {
 	// channel accepts new Group obj
 	// which supposed to update current group
 	updateCh chan *Group
+	// evalCancel stores the cancel fn for interrupting
+	// rules evaluation. Used on groups update() and close().
+	evalCancel context.CancelFunc

 	metrics *groupMetrics
 }
@ -233,11 +237,24 @@ func (g *Group) updateWith(newGroup *Group) error {
 	return nil
 }

+// interruptEval interrupts in-flight rules evaluations
+// within the group. It is expected that g.evalCancel
+// will be repopulated after the call.
+func (g *Group) interruptEval() {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	if g.evalCancel != nil {
+		g.evalCancel()
+	}
+}
+
 func (g *Group) close() {
 	if g.doneCh == nil {
 		return
 	}
 	close(g.doneCh)
+	g.interruptEval()
 	<-g.finishedCh

 	g.metrics.iterationDuration.Unregister()
@ -254,6 +271,26 @@ var skipRandSleepOnGroupStart bool
 func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *remotewrite.Client, rr datasource.QuerierBuilder) {
 	defer func() { close(g.finishedCh) }()

+	// Spread group rules evaluation over time in order to reduce load on VictoriaMetrics.
+	if !skipRandSleepOnGroupStart {
+		randSleep := uint64(float64(g.Interval) * (float64(g.ID()) / (1 << 64)))
+		sleepOffset := uint64(time.Now().UnixNano()) % uint64(g.Interval)
+		if randSleep < sleepOffset {
+			randSleep += uint64(g.Interval)
+		}
+		randSleep -= sleepOffset
+		sleepTimer := time.NewTimer(time.Duration(randSleep))
+		select {
+		case <-ctx.Done():
+			sleepTimer.Stop()
+			return
+		case <-g.doneCh:
+			sleepTimer.Stop()
+			return
+		case <-sleepTimer.C:
+		}
+	}
+
 	e := &executor{
 		rw:        rw,
 		notifiers: nts,
@ -263,7 +300,7 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
 	logger.Infof("group %q started; interval=%v; concurrency=%d", g.Name, g.Interval, g.Concurrency)

-	eval := func(ts time.Time) {
+	eval := func(ctx context.Context, ts time.Time) {
 		g.metrics.iterationTotal.Inc()

 		start := time.Now()
@ -285,7 +322,13 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
 		g.LastEvaluation = start
 	}

-	eval(evalTS)
+	evalCtx, cancel := context.WithCancel(ctx)
+	g.mu.Lock()
+	g.evalCancel = cancel
+	g.mu.Unlock()
+	defer g.evalCancel()
+
+	eval(evalCtx, evalTS)

 	t := time.NewTicker(g.Interval)
 	defer t.Stop()
@ -309,6 +352,14 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
 			return
 		case ng := <-g.updateCh:
 			g.mu.Lock()
+
+			// it is expected that g.evalCancel will be evoked
+			// somewhere else to unblock group from the rules evaluation.
+			// we recreate the evalCtx and g.evalCancel, so it can
+			// be called again.
+			evalCtx, cancel = context.WithCancel(ctx)
+			g.evalCancel = cancel
+
 			err := g.updateWith(ng)
 			if err != nil {
 				logger.Errorf("group %q: failed to update: %s", g.Name, err)
@ -333,7 +384,7 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
 			}
 			evalTS = evalTS.Add((missed + 1) * g.Interval)

-			eval(evalTS)
+			eval(evalCtx, evalTS)
 		}
 	}
 }
@ -407,6 +458,11 @@ func (e *executor) exec(ctx context.Context, rule Rule, ts time.Time, resolveDur
 	tss, err := rule.Exec(ctx, ts, limit)
 	if err != nil {
+		if errors.Is(err, context.Canceled) {
+			// the context can be cancelled on graceful shutdown
+			// or on group update. So no need to handle the error as usual.
+			return nil
+		}
 		execErrors.Inc()
 		return fmt.Errorf("rule %q: failed to execute: %w", rule, err)
 	}
@ -474,3 +474,31 @@ func TestFaultyRW(t *testing.T) {
 		t.Fatalf("expected to get an error from faulty RW client, got nil instead")
 	}
 }
+
+func TestCloseWithEvalInterruption(t *testing.T) {
+	groups, err := config.Parse([]string{"config/testdata/rules/rules1-good.rules"}, notifier.ValidateTemplates, true)
+	if err != nil {
+		t.Fatalf("failed to parse rules: %s", err)
+	}
+
+	const delay = time.Second * 2
+	fq := &fakeQuerierWithDelay{delay: delay}
+
+	const evalInterval = time.Millisecond
+	g := newGroup(groups[0], fq, evalInterval, nil)
+
+	go g.start(context.Background(), nil, nil, nil)
+
+	time.Sleep(evalInterval * 20)
+
+	go func() {
+		g.close()
+	}()
+
+	deadline := time.Tick(delay / 2)
+	select {
+	case <-deadline:
+		t.Fatalf("deadline for close exceeded")
+	case <-g.finishedCh:
+	}
+}
@ -104,6 +104,24 @@ func (fqr *fakeQuerierWithRegistry) Query(_ context.Context, expr string, _ time
 	return cp, req, nil
 }

+type fakeQuerierWithDelay struct {
+	fakeQuerier
+	delay time.Duration
+}
+
+func (fqd *fakeQuerierWithDelay) Query(ctx context.Context, expr string, ts time.Time) ([]datasource.Metric, *http.Request, error) {
+	timer := time.NewTimer(fqd.delay)
+	select {
+	case <-ctx.Done():
+	case <-timer.C:
+	}
+	return fqd.fakeQuerier.Query(ctx, expr, ts)
+}
+
+func (fqd *fakeQuerierWithDelay) BuildWithParams(_ datasource.QuerierParams) datasource.Querier {
+	return fqd
+}
+
 type fakeNotifier struct {
 	sync.Mutex
 	alerts []notifier.Alert
@ -57,7 +57,8 @@ absolute path to all .tpl files in root.`)

 	httpListenAddr   = flag.String("httpListenAddr", ":8880", "Address to listen for http connections. See also -httpListenAddr.useProxyProtocol")
 	useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
-		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
+		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
+		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
 	evaluationInterval = flag.Duration("evaluationInterval", time.Minute, "How often to evaluate the rules")

 	validateTemplates = flag.Bool("rule.validateTemplates", true, "Whether to validate annotation and label templates")
@ -6,7 +6,6 @@ import (
 	"net/url"
 	"sort"
 	"sync"
-	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
@ -87,32 +86,12 @@ func (m *manager) startGroup(ctx context.Context, g *Group, restore bool) error
 	m.wg.Add(1)
 	id := g.ID()
 	go func() {
-		// Spread group rules evaluation over time in order to reduce load on VictoriaMetrics.
-		if !skipRandSleepOnGroupStart {
-			randSleep := uint64(float64(g.Interval) * (float64(g.ID()) / (1 << 64)))
-			sleepOffset := uint64(time.Now().UnixNano()) % uint64(g.Interval)
-			if randSleep < sleepOffset {
-				randSleep += uint64(g.Interval)
-			}
-			randSleep -= sleepOffset
-			sleepTimer := time.NewTimer(time.Duration(randSleep))
-			select {
-			case <-ctx.Done():
-				sleepTimer.Stop()
-				return
-			case <-g.doneCh:
-				sleepTimer.Stop()
-				return
-			case <-sleepTimer.C:
-			}
-		}
+		defer m.wg.Done()
 		if restore {
 			g.start(ctx, m.notifiers, m.rw, m.rr)
 		} else {
 			g.start(ctx, m.notifiers, m.rw, nil)
 		}
-
-		m.wg.Done()
 	}()
 	m.groups[id] = g
 	return nil
@ -168,6 +147,7 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
 	}
 	for _, ng := range groupsRegistry {
 		if err := m.startGroup(ctx, ng, restore); err != nil {
+			m.groupsMu.Unlock()
 			return err
 		}
 	}
@ -181,6 +161,7 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
 			old.updateCh <- new
 			wg.Done()
 		}(item.old, item.new)
+		item.old.interruptEval()
 	}
 	wg.Wait()
 }
@ -28,7 +28,8 @@ import (
 var (
 	httpListenAddr   = flag.String("httpListenAddr", ":8427", "TCP address to listen for http connections. See also -httpListenAddr.useProxyProtocol")
 	useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
-		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
+		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
+		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
 	maxIdleConnsPerBackend = flag.Int("maxIdleConnsPerBackend", 100, "The maximum number of idle connections vmauth can open per each backend host. "+
 		"See also -maxConcurrentRequests")
 	responseTimeout = flag.Duration("responseTimeout", 5*time.Minute, "The timeout for receiving a response from backend")
@ -483,6 +483,10 @@ Processing ranges: 8798 / 8798 [████████████████
 2022/10/19 16:45:37 Total time: 1m19.406283424s
 ```

+Migrating big volumes of data may result in remote read client reaching the timeout.
+Consider increasing the value of `--remote-read-http-timeout` (default `5m`) command-line flag when seeing
+timeouts or `context canceled` errors.
+
 ### Filtering

 The filtering consists of two parts: by labels and time.
|
||||||
|
|
||||||
See `./vmctl vm-native --help` for details and full list of flags.
|
See `./vmctl vm-native --help` for details and full list of flags.
|
||||||
|
|
||||||
In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by "source" (`src`)
|
Migration in `vm-native` mode takes two steps:
|
||||||
and processing is done by "destination" (`dst`). Because of that, `vmctl` doesn't actually know how much data will be
|
1. Explore the list of the metrics to migrate via `/api/v1/series` API;
|
||||||
processed and can't show the progress bar. It will show the current processing speed and total number of processed bytes:
|
2. Migrate explored metrics one-by-one.
|
||||||
|
|
||||||
```
|
```
|
||||||
./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \
|
./vmctl vm-native \
|
||||||
|
--vm-native-src-addr=http://127.0.0.1:8481/select/0/prometheus \
|
||||||
--vm-native-dst-addr=http://localhost:8428 \
|
--vm-native-dst-addr=http://localhost:8428 \
|
||||||
--vm-native-filter-match='{job="vmagent"}' \
|
--vm-native-filter-time-start='2022-11-20T00:00:00Z' \
|
||||||
--vm-native-filter-time-start='2020-01-01T20:07:00Z'
|
--vm-native-filter-match='{__name__=~"vm_cache_.*"}'
|
||||||
VictoriaMetrics Native import mode
|
VictoriaMetrics Native import mode
|
||||||
Initing export pipe from "http://localhost:8528" with filters:
|
|
||||||
filter: match[]={job="vmagent"}
|
2023/03/02 09:22:02 Initing import process from "http://127.0.0.1:8481/select/0/prometheus/api/v1/export/native" to "http://localhost:8428/api/v1/import/native" with filter
|
||||||
Initing import process to "http://localhost:8428":
|
filter: match[]={__name__=~"vm_cache_.*"}
|
||||||
Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
|
start: 2022-11-20T00:00:00Z
|
||||||
2020/10/13 17:04:59 Total time: 952.143376ms
|
2023/03/02 09:22:02 Exploring metrics...
|
||||||
|
Found 9 metrics to import. Continue? [Y/n]
|
||||||
|
2023/03/02 09:22:04 Requests to make: 9
|
||||||
|
Requests to make: 9 / 9 [███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
|
||||||
|
2023/03/02 09:22:06 Import finished!
|
||||||
|
2023/03/02 09:22:06 VictoriaMetrics importer stats:
|
||||||
|
time spent while importing: 3.632638875s;
|
||||||
|
total bytes: 7.8 MB;
|
||||||
|
bytes/s: 2.1 MB;
|
||||||
|
requests: 9;
|
||||||
|
requests retries: 0;
|
||||||
|
2023/03/02 09:22:06 Total time: 3.633127625s
|
||||||
```
|
```
|
||||||
|
|
||||||
Importing tips:
|
Importing tips:
|
||||||
|
@ -755,6 +771,7 @@ Importing tips:
 1. Migrating big volumes of data may result in reaching the safety limits on `src` side.
    Please verify that `-search.maxExportDuration` and `-search.maxExportSeries` were set with
    proper values for `src`. If hitting the limits, follow the recommendations [here](https://docs.victoriametrics.com/#how-to-export-data-in-native-format).
+   If hitting `the number of matching timeseries exceeds...` error, adjust filters to match less time series or update `-search.maxSeries` command-line flag on vmselect/vmsingle;
 2. Migrating all the metrics from one VM to another may collide with existing application metrics
    (prefixed with `vm_`) at destination and lead to confusion when using
    [official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
@ -766,71 +783,59 @@ Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/
 5. When importing in or from cluster version remember to use correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
    and specify `accountID` param.
 6. When migrating large volumes of data it might be useful to use `--vm-native-step-interval` flag to split single process into smaller steps.
+7. `vmctl` supports `--vm-concurrency` which controls the number of concurrent workers that process the input from source query results.
+   Please note that each import request can load up to a single vCPU core on VictoriaMetrics. So try to set it according
+   to allocated CPU resources of your VictoriaMetrics installation.
+8. `vmctl` supports `--vm-native-src-headers` and `--vm-native-dst-headers` which defines headers to send with each request
+   to the corresponding source address.
+
+In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by "source" (`src`)
+and processing is done by "destination" (`dst`). So no extra memory or CPU resources required on `vmctl` side. Only
+`src` and `dst` resource matter.
+
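The two new tips above (`--vm-concurrency` and `--vm-native-src-headers`/`--vm-native-dst-headers`) can be combined in a single run. The command below is an illustrative sketch only; addresses, header values and the concurrency level are placeholders, while the flag names come from this diff:

```console
# 4 concurrent import workers, plus custom HTTP headers for both source and destination
./vmctl vm-native \
    --vm-native-src-addr=http://127.0.0.1:8481/select/0/prometheus \
    --vm-native-dst-addr=http://localhost:8428 \
    --vm-native-filter-time-start='2022-11-20T00:00:00Z' \
    --vm-native-src-headers='My-Auth:foobar' \
    --vm-native-dst-headers='header1:value1^^header2:value2' \
    --vm-concurrency=4
```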
 #### Using time-based chunking of migration

-It is possible split migration process into set of smaller batches based on time. This is especially useful when migrating large volumes of data as this adds indication of progress and ability to restore process from certain point in case of failure.
+It is possible split migration process into set of smaller batches based on time. This is especially useful when
+migrating large volumes of data as this adds indication of progress and ability to restore process from certain point
+in case of failure.

-To use this you need to specify `--vm-native-step-interval` flag. Supported values are: `month`, `day`, `hour`.
-Note that in order to use this it is required `--vm-native-filter-time-start` to be set to calculate time ranges for export process.
+To use this you need to specify `--vm-native-step-interval` flag. Supported values are: `month`, `day`, `hour`, `minute`.
+Note that in order to use this it is required `--vm-native-filter-time-start` to be set to calculate time ranges for
+export process.

 Every range is being processed independently, which means that:
 - after range processing is finished all data within range is migrated
-- if process fails on one of stages it is guaranteed that data of prior stages is already written, so it is possible to restart process starting from failed range
+- if process fails on one of stages it is guaranteed that data of prior stages is already written,
+  so it is possible to restart process starting from failed range.

-It is recommended using the `month` step when migrating the data over multiple months, since the migration with `day` and `hour` steps may take longer time to complete
-because of additional overhead.
+It is recommended using the `month` step when migrating the data over multiple months,
+since the migration with `day` and `hour` steps may take longer time to complete because of additional overhead.

 Usage example:
 ```console
-./vmctl vm-native
-    --vm-native-filter-time-start 2022-06-17T00:07:00Z \
-    --vm-native-filter-time-end 2022-10-03T00:07:00Z \
-    --vm-native-src-addr http://localhost:8428 \
-    --vm-native-dst-addr http://localhost:8528 \
-    --vm-native-step-interval=month
+./vmctl vm-native \
+    --vm-native-src-addr=http://127.0.0.1:8481/select/0/prometheus \
+    --vm-native-dst-addr=http://localhost:8428 \
+    --vm-native-filter-time-start='2022-11-20T00:00:00Z' \
+    --vm-native-step-interval=month \
+    --vm-native-filter-match='{__name__=~"vm_cache_.*"}'
 VictoriaMetrics Native import mode
-2022/08/30 19:48:24 Processing range 1/5: 2022-06-17T00:07:00Z - 2022-06-30T23:59:59Z
-2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
-	filter: match[]={__name__!=""}
-	start: 2022-06-17T00:07:00Z
-	end: 2022-06-30T23:59:59Z
-Initing import process to "http://localhost:8428":
-2022/08/30 19:48:24 Import finished!
-Total: 16 B ↗ Speed: 28.89 KiB p/s
-2022/08/30 19:48:24 Processing range 2/5: 2022-07-01T00:00:00Z - 2022-07-31T23:59:59Z
-2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
-	filter: match[]={__name__!=""}
-	start: 2022-07-01T00:00:00Z
-	end: 2022-07-31T23:59:59Z
-Initing import process to "http://localhost:8428":
-2022/08/30 19:48:24 Import finished!
-Total: 16 B ↗ Speed: 164.35 KiB p/s
-2022/08/30 19:48:24 Processing range 3/5: 2022-08-01T00:00:00Z - 2022-08-31T23:59:59Z
-2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
-	filter: match[]={__name__!=""}
-	start: 2022-08-01T00:00:00Z
-	end: 2022-08-31T23:59:59Z
-Initing import process to "http://localhost:8428":
-2022/08/30 19:48:24 Import finished!
-Total: 16 B ↗ Speed: 191.42 KiB p/s
-2022/08/30 19:48:24 Processing range 4/5: 2022-09-01T00:00:00Z - 2022-09-30T23:59:59Z
-2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
-	filter: match[]={__name__!=""}
-	start: 2022-09-01T00:00:00Z
-	end: 2022-09-30T23:59:59Z
-Initing import process to "http://localhost:8428":
-2022/08/30 19:48:24 Import finished!
-Total: 16 B ↗ Speed: 141.04 KiB p/s
-2022/08/30 19:48:24 Processing range 5/5: 2022-10-01T00:00:00Z - 2022-10-03T00:07:00Z
-2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
-	filter: match[]={__name__!=""}
-	start: 2022-10-01T00:00:00Z
-	end: 2022-10-03T00:07:00Z
-Initing import process to "http://localhost:8428":
-2022/08/30 19:48:24 Import finished!
-Total: 16 B ↗ Speed: 186.32 KiB p/s
-2022/08/30 19:48:24 Total time: 12.680582ms
+
+2023/03/02 09:18:05 Initing import process from "http://127.0.0.1:8481/select/0/prometheus/api/v1/export/native" to "http://localhost:8428/api/v1/import/native" with filter
+	filter: match[]={__name__=~"vm_cache_.*"}
+	start: 2022-11-20T00:00:00Z
+2023/03/02 09:18:05 Exploring metrics...
+Found 9 metrics to import. Continue? [Y/n]
+2023/03/02 09:18:07 Selected time range will be split into 5 ranges according to "month" step. Requests to make: 45.
+Requests to make: 45 / 45 [█████████████████████████████████████████████████████████████] 100.00%
+2023/03/02 09:18:12 Import finished!
+2023/03/02 09:18:12 VictoriaMetrics importer stats:
+  time spent while importing: 7.111870667s;
+  total bytes: 7.7 MB;
+  bytes/s: 1.1 MB;
+  requests: 45;
+  requests retries: 0;
+2023/03/02 09:18:12 Total time: 7.112405875s
 ```

 #### Cluster-to-cluster migration mode
@ -842,70 +847,41 @@ Cluster-to-cluster uses `/admin/tenants` endpoint (available starting from [v1.8
 To use this mode you need to set `--vm-intercluster` flag to `true`, `--vm-native-src-addr` flag to 'http://vmselect:8481/' and `--vm-native-dst-addr` value to http://vminsert:8480/:

 ```console
-./bin/vmctl vm-native --vm-intercluster=true --vm-native-src-addr=http://localhost:8481/ --vm-native-dst-addr=http://172.17.0.3:8480/
+./vmctl vm-native --vm-native-src-addr=http://127.0.0.1:8481/ \
+  --vm-native-dst-addr=http://127.0.0.1:8480/ \
+  --vm-native-filter-match='{__name__="vm_app_uptime_seconds"}' \
+  --vm-native-filter-time-start='2023-02-01T00:00:00Z' \
+  --vm-native-step-interval=day \
+  --vm-intercluster
 VictoriaMetrics Native import mode
-2022/12/05 21:20:06 Discovered tenants: [123:1 12812919:1 1289198:1 1289:1283 12:1 1:0 1:1 1:1231231 1:1271727 1:12819 1:281 812891298:1]
-2022/12/05 21:20:06 Initing export pipe from "http://localhost:8481/select/123:1/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/123:1/prometheus/api/v1/import/native":
-Total: 61.13 MiB ↖ Speed: 2.05 MiB p/s
-Total: 61.13 MiB ↗ Speed: 2.30 MiB p/s
-2022/12/05 21:20:33 Initing export pipe from "http://localhost:8481/select/12812919:1/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/12812919:1/prometheus/api/v1/import/native":
-Total: 43.14 MiB ↘ Speed: 1.86 MiB p/s
-Total: 43.14 MiB ↙ Speed: 2.36 MiB p/s
-2022/12/05 21:20:51 Initing export pipe from "http://localhost:8481/select/1289198:1/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1289198:1/prometheus/api/v1/import/native":
-Total: 16.64 MiB ↗ Speed: 2.66 MiB p/s
-Total: 16.64 MiB ↘ Speed: 2.19 MiB p/s
-2022/12/05 21:20:59 Initing export pipe from "http://localhost:8481/select/1289:1283/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1289:1283/prometheus/api/v1/import/native":
-Total: 43.33 MiB ↙ Speed: 1.94 MiB p/s
-Total: 43.33 MiB ↖ Speed: 2.35 MiB p/s
-2022/12/05 21:21:18 Initing export pipe from "http://localhost:8481/select/12:1/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/12:1/prometheus/api/v1/import/native":
-Total: 63.78 MiB ↙ Speed: 1.96 MiB p/s
-Total: 63.78 MiB ↖ Speed: 2.28 MiB p/s
-2022/12/05 21:21:46 Initing export pipe from "http://localhost:8481/select/1:0/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:0/prometheus/api/v1/import/native":
-2022/12/05 21:21:46 Import finished!
-Total: 330 B ↗ Speed: 3.53 MiB p/s
-2022/12/05 21:21:46 Initing export pipe from "http://localhost:8481/select/1:1/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:1/prometheus/api/v1/import/native":
-Total: 63.81 MiB ↙ Speed: 1.96 MiB p/s
-Total: 63.81 MiB ↖ Speed: 2.28 MiB p/s
-2022/12/05 21:22:14 Initing export pipe from "http://localhost:8481/select/1:1231231/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:1231231/prometheus/api/v1/import/native":
-Total: 63.84 MiB ↙ Speed: 1.93 MiB p/s
-Total: 63.84 MiB ↖ Speed: 2.29 MiB p/s
-2022/12/05 21:22:42 Initing export pipe from "http://localhost:8481/select/1:1271727/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:1271727/prometheus/api/v1/import/native":
-Total: 54.37 MiB ↘ Speed: 1.90 MiB p/s
-Total: 54.37 MiB ↙ Speed: 2.37 MiB p/s
-2022/12/05 21:23:05 Initing export pipe from "http://localhost:8481/select/1:12819/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:12819/prometheus/api/v1/import/native":
-Total: 17.01 MiB ↙ Speed: 1.75 MiB p/s
-Total: 17.01 MiB ↖ Speed: 2.15 MiB p/s
-2022/12/05 21:23:13 Initing export pipe from "http://localhost:8481/select/1:281/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:281/prometheus/api/v1/import/native":
-Total: 63.89 MiB ↘ Speed: 1.90 MiB p/s
-Total: 63.89 MiB ↙ Speed: 2.29 MiB p/s
-2022/12/05 21:23:42 Initing export pipe from "http://localhost:8481/select/812891298:1/prometheus/api/v1/export/native" with filters:
-	filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/812891298:1/prometheus/api/v1/import/native":
-Total: 63.84 MiB ↖ Speed: 1.99 MiB p/s
-Total: 63.84 MiB ↗ Speed: 2.26 MiB p/s
-2022/12/05 21:24:10 Total time: 4m4.1466565s
+2023/02/28 10:41:42 Discovering tenants...
+2023/02/28 10:41:42 The following tenants were discovered: [0:0 1:0 2:0 3:0 4:0]
+2023/02/28 10:41:42 Initing import process from "http://127.0.0.1:8481/select/0:0/prometheus/api/v1/export/native" to "http://127.0.0.1:8480/insert/0:0/prometheus/api/v1/import/native" with filter
+	filter: match[]={__name__="vm_app_uptime_seconds"}
+	start: 2023-02-01T00:00:00Z for tenant 0:0
+2023/02/28 10:41:42 Exploring metrics...
+2023/02/28 10:41:42 Found 1 metrics to import
+2023/02/28 10:41:42 Selected time range will be split into 28 ranges according to "day" step.
+Requests to make for tenant 0:0: 28 / 28 [█████████████████████████████████████████████████████████████] 100.00%
+
+2023/02/28 10:41:45 Initing import process from "http://127.0.0.1:8481/select/1:0/prometheus/api/v1/export/native" to "http://127.0.0.1:8480/insert/1:0/prometheus/api/v1/import/native" with filter
+	filter: match[]={__name__="vm_app_uptime_seconds"}
+	start: 2023-02-01T00:00:00Z for tenant 1:0
+2023/02/28 10:41:45 Exploring metrics...
+2023/02/28 10:41:45 Found 1 metrics to import
+2023/02/28 10:41:45 Selected time range will be split into 28 ranges according to "day" step. Requests to make: 28
+Requests to make for tenant 1:0: 28 / 28 [█████████████████████████████████████████████████████████████] 100.00%
+
+...
+
+2023/02/28 10:42:49 Import finished!
+2023/02/28 10:42:49 VictoriaMetrics importer stats:
+  time spent while importing: 1m6.714210417s;
+  total bytes: 39.7 MB;
+  bytes/s: 594.4 kB;
+  requests: 140;
+  requests retries: 0;
+2023/02/28 10:42:49 Total time: 1m7.147971417s
 ```

 ## Verifying exported blocks from VictoriaMetrics
@ -972,6 +948,7 @@ a sign of network issues or VM being overloaded. See the logs during import for
 By default `vmctl` waits confirmation from user before starting the import. If this is unwanted
 behavior and no user interaction required - pass `-s` flag to enable "silence" mode:

+See below the example of `vm-native` migration process:
 ```
 -s Whether to run in silent mode. If set to true no confirmation prompts will appear. (default: false)
 ```
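As a hypothetical usage sketch (not part of the diff), the `-s` flag described above is simply added to an existing `vm-native` invocation to skip the `Continue? [Y/n]` prompt; the addresses and filter below are placeholders reused from the earlier examples:

```console
# same migration as in the examples above, but without the confirmation prompt
./vmctl vm-native -s \
    --vm-native-src-addr=http://127.0.0.1:8481/select/0/prometheus \
    --vm-native-dst-addr=http://localhost:8428 \
    --vm-native-filter-time-start='2022-11-20T00:00:00Z' \
    --vm-native-filter-match='{__name__=~"vm_cache_.*"}'
```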
@ -47,7 +47,7 @@ func (b *Backoff) Retry(ctx context.Context, cb retryableFunc) (uint64, error) {
 		if err == nil {
 			return attempt, nil
 		}
-		if errors.Is(err, ErrBadRequest) {
+		if errors.Is(err, ErrBadRequest) || errors.Is(err, context.Canceled) {
 			logger.Errorf("unrecoverable error: %s", err)
 			return attempt, err // fail fast if not recoverable
 		}
@ -328,10 +328,12 @@ const (
 	vmNativeSrcAddr     = "vm-native-src-addr"
 	vmNativeSrcUser     = "vm-native-src-user"
 	vmNativeSrcPassword = "vm-native-src-password"
+	vmNativeSrcHeaders  = "vm-native-src-headers"

 	vmNativeDstAddr     = "vm-native-dst-addr"
 	vmNativeDstUser     = "vm-native-dst-user"
 	vmNativeDstPassword = "vm-native-dst-password"
+	vmNativeDstHeaders  = "vm-native-dst-headers"
 )

 var (
|
||||||
&cli.StringFlag{
|
&cli.StringFlag{
|
||||||
Name: vmNativeFilterTimeStart,
|
Name: vmNativeFilterTimeStart,
|
||||||
Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'",
|
Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'",
|
||||||
|
Required: true,
|
||||||
},
|
},
|
||||||
&cli.StringFlag{
|
&cli.StringFlag{
|
||||||
Name: vmNativeFilterTimeEnd,
|
Name: vmNativeFilterTimeEnd,
|
||||||
|
@ -372,6 +375,12 @@ var (
 			Usage:   "VictoriaMetrics password for basic auth",
 			EnvVars: []string{"VM_NATIVE_SRC_PASSWORD"},
 		},
+		&cli.StringFlag{
+			Name: vmNativeSrcHeaders,
+			Usage: "Optional HTTP headers to send with each request to the corresponding source address. \n" +
+				"For example, --vm-native-src-headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding source address. \n" +
+				"Multiple headers must be delimited by '^^': --vm-native-src-headers='header1:value1^^header2:value2'",
+		},
 		&cli.StringFlag{
 			Name: vmNativeDstAddr,
 			Usage: "VictoriaMetrics address to perform import to. \n" +
|
||||||
Usage: "VictoriaMetrics password for basic auth",
|
Usage: "VictoriaMetrics password for basic auth",
|
||||||
EnvVars: []string{"VM_NATIVE_DST_PASSWORD"},
|
EnvVars: []string{"VM_NATIVE_DST_PASSWORD"},
|
||||||
},
|
},
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: vmNativeDstHeaders,
|
||||||
|
Usage: "Optional HTTP headers to send with each request to the corresponding destination address. \n" +
|
||||||
|
"For example, --vm-native-dst-headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding destination address. \n" +
|
||||||
|
"Multiple headers must be delimited by '^^': --vm-native-dst-headers='header1:value1^^header2:value2'",
|
||||||
|
},
|
||||||
&cli.StringSliceFlag{
|
&cli.StringSliceFlag{
|
||||||
Name: vmExtraLabel,
|
Name: vmExtraLabel,
|
||||||
Value: nil,
|
Value: nil,
|
||||||
|
@ -406,6 +421,11 @@ var (
|
||||||
fmt.Sprintf(" In this mode --%s flag format is: 'http://vmselect:8481/'. --%s flag format is: http://vminsert:8480/. \n", vmNativeSrcAddr, vmNativeDstAddr) +
|
fmt.Sprintf(" In this mode --%s flag format is: 'http://vmselect:8481/'. --%s flag format is: http://vminsert:8480/. \n", vmNativeSrcAddr, vmNativeDstAddr) +
|
||||||
" TenantID will be appended automatically after discovering tenants from src.",
|
" TenantID will be appended automatically after discovering tenants from src.",
|
||||||
},
|
},
|
||||||
|
&cli.UintFlag{
|
||||||
|
Name: vmConcurrency,
|
||||||
|
Usage: "Number of workers concurrently performing import requests to VM",
|
||||||
|
Value: 2,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -485,7 +505,7 @@ var (
 		},
 		&cli.DurationFlag{
 			Name:  remoteReadHTTPTimeout,
-			Usage: "Timeout defines timeout for HTTP write request to remote storage",
+			Usage: "Timeout defines timeout for HTTP requests made by remote read client",
 		},
 		&cli.StringFlag{
 			Name: remoteReadHeaders,
@ -11,6 +11,8 @@ import (
 	"syscall"
 	"time"

+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
 	"github.com/urfave/cli/v2"
@@ -189,7 +191,7 @@ func main() {
 		{
 			Name:  "vm-native",
 			Usage: "Migrate time series between VictoriaMetrics installations via native binary format",
-			Flags: vmNativeFlags,
+			Flags: mergeFlags(globalFlags, vmNativeFlags),
 			Action: func(c *cli.Context) error {
 				fmt.Println("VictoriaMetrics Native import mode")
@@ -200,25 +202,29 @@ func main() {
 				p := vmNativeProcessor{
 					rateLimit:    c.Int64(vmRateLimit),
 					interCluster: c.Bool(vmInterCluster),
-					filter: filter{
-						match:     c.String(vmNativeFilterMatch),
-						timeStart: c.String(vmNativeFilterTimeStart),
-						timeEnd:   c.String(vmNativeFilterTimeEnd),
-						chunk:     c.String(vmNativeStepInterval),
+					filter: native.Filter{
+						Match:     c.String(vmNativeFilterMatch),
+						TimeStart: c.String(vmNativeFilterTimeStart),
+						TimeEnd:   c.String(vmNativeFilterTimeEnd),
+						Chunk:     c.String(vmNativeStepInterval),
 					},
-					src: &vmNativeClient{
-						addr:     strings.Trim(c.String(vmNativeSrcAddr), "/"),
-						user:     c.String(vmNativeSrcUser),
-						password: c.String(vmNativeSrcPassword),
+					src: &native.Client{
+						Addr:     strings.Trim(c.String(vmNativeSrcAddr), "/"),
+						User:     c.String(vmNativeSrcUser),
+						Password: c.String(vmNativeSrcPassword),
+						Headers:  c.String(vmNativeSrcHeaders),
 					},
-					dst: &vmNativeClient{
-						addr:        strings.Trim(c.String(vmNativeDstAddr), "/"),
-						user:        c.String(vmNativeDstUser),
-						password:    c.String(vmNativeDstPassword),
-						extraLabels: c.StringSlice(vmExtraLabel),
+					dst: &native.Client{
+						Addr:        strings.Trim(c.String(vmNativeDstAddr), "/"),
+						User:        c.String(vmNativeDstUser),
+						Password:    c.String(vmNativeDstPassword),
+						ExtraLabels: c.StringSlice(vmExtraLabel),
+						Headers:     c.String(vmNativeDstHeaders),
 					},
+					backoff: backoff.New(),
+					cc:      c.Int(vmConcurrency),
 				}
-				return p.run(ctx)
+				return p.run(ctx, c.Bool(globalSilent))
 			},
 		},
 		{
237
app/vmctl/native/client.go
Normal file
237
app/vmctl/native/client.go
Normal file
|
@ -0,0 +1,237 @@
|
||||||
|
package native
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
nativeTenantsAddr = "admin/tenants"
|
||||||
|
nativeSeriesAddr = "api/v1/series"
|
||||||
|
nameLabel = "__name__"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client is an HTTP client for exporting and importing
|
||||||
|
// time series via native protocol.
|
||||||
|
type Client struct {
|
||||||
|
Addr string
|
||||||
|
User string
|
||||||
|
Password string
|
||||||
|
ExtraLabels []string
|
||||||
|
Headers string
|
||||||
|
}
|
||||||
|
|
||||||
|
// LabelValues represents series from api/v1/series response
|
||||||
|
type LabelValues map[string]string
|
||||||
|
|
||||||
|
// Response represents response from api/v1/series
|
||||||
|
type Response struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Series []LabelValues `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Explore finds series by provided filter from api/v1/series
|
||||||
|
func (c *Client) Explore(ctx context.Context, f Filter, tenantID string) (map[string]struct{}, error) {
|
||||||
|
url := fmt.Sprintf("%s/%s", c.Addr, nativeSeriesAddr)
|
||||||
|
if tenantID != "" {
|
||||||
|
url = fmt.Sprintf("%s/select/%s/prometheus/%s", c.Addr, tenantID, nativeSeriesAddr)
|
||||||
|
}
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot create request to %q: %s", url, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
params := req.URL.Query()
|
||||||
|
if f.TimeStart != "" {
|
||||||
|
params.Set("start", f.TimeStart)
|
||||||
|
}
|
||||||
|
if f.TimeEnd != "" {
|
||||||
|
params.Set("end", f.TimeEnd)
|
||||||
|
}
|
||||||
|
params.Set("match[]", f.Match)
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
resp, err := c.do(req, http.StatusOK)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("series request failed: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var response Response
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot decode series response: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := resp.Body.Close(); err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot close series response body: %s", err)
|
||||||
|
}
|
||||||
|
names := make(map[string]struct{})
|
||||||
|
for _, series := range response.Series {
|
||||||
|
// TODO: consider tweaking /api/v1/series API to return metric names only
|
||||||
|
// this could make explore response much lighter.
|
||||||
|
for key, value := range series {
|
||||||
|
if key != nameLabel {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := names[value]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
names[value] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return names, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ImportPipe uses pipe reader in request to process data
|
||||||
|
func (c *Client) ImportPipe(ctx context.Context, dstURL string, pr *io.PipeReader) error {
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, dstURL, pr)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot create import request to %q: %s", c.Addr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
parsedHeaders, err := parseHeaders(c.Headers)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, header := range parsedHeaders {
|
||||||
|
req.Header.Set(header.key, header.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
importResp, err := c.do(req, http.StatusNoContent)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("import request failed: %s", err)
|
||||||
|
}
|
||||||
|
if err := importResp.Body.Close(); err != nil {
|
||||||
|
return fmt.Errorf("cannot close import response body: %s", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExportPipe makes request by provided filter and return io.ReadCloser which can be used to get data
|
||||||
|
func (c *Client) ExportPipe(ctx context.Context, url string, f Filter) (io.ReadCloser, error) {
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot create request to %q: %s", c.Addr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
params := req.URL.Query()
|
||||||
|
params.Set("match[]", f.Match)
|
||||||
|
if f.TimeStart != "" {
|
||||||
|
params.Set("start", f.TimeStart)
|
||||||
|
}
|
||||||
|
if f.TimeEnd != "" {
|
||||||
|
params.Set("end", f.TimeEnd)
|
||||||
|
}
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
// disable compression since it is meaningless for native format
|
||||||
|
req.Header.Set("Accept-Encoding", "identity")
|
||||||
|
|
||||||
|
parsedHeaders, err := parseHeaders(c.Headers)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, header := range parsedHeaders {
|
||||||
|
req.Header.Set(header.key, header.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.do(req, http.StatusOK)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("export request failed: %w", err)
|
||||||
|
}
|
||||||
|
return resp.Body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSourceTenants discovers tenants by provided filter
|
||||||
|
func (c *Client) GetSourceTenants(ctx context.Context, f Filter) ([]string, error) {
|
||||||
|
u := fmt.Sprintf("%s/%s", c.Addr, nativeTenantsAddr)
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot create request to %q: %s", u, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
params := req.URL.Query()
|
||||||
|
if f.TimeStart != "" {
|
||||||
|
params.Set("start", f.TimeStart)
|
||||||
|
}
|
||||||
|
if f.TimeEnd != "" {
|
||||||
|
params.Set("end", f.TimeEnd)
|
||||||
|
}
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
parsedHeaders, err := parseHeaders(c.Headers)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, header := range parsedHeaders {
|
||||||
|
req.Header.Set(header.key, header.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.do(req, http.StatusOK)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("tenants request failed: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var r struct {
|
||||||
|
Tenants []string `json:"data"`
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot decode tenants response: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := resp.Body.Close(); err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot close tenants response body: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.Tenants, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) do(req *http.Request, expSC int) (*http.Response, error) {
|
||||||
|
if c.User != "" {
|
||||||
|
req.SetBasicAuth(c.User, c.Password)
|
||||||
|
}
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unexpected error when performing request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != expSC {
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected response code %d: %s", resp.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type keyValue struct {
|
||||||
|
key string
|
||||||
|
value string
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseHeaders(headers string) ([]keyValue, error) {
|
||||||
|
if len(headers) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var headersSplitByDelimiter = strings.Split(headers, "^^")
|
||||||
|
|
||||||
|
kvs := make([]keyValue, len(headersSplitByDelimiter))
|
||||||
|
for i, h := range headersSplitByDelimiter {
|
||||||
|
n := strings.IndexByte(h, ':')
|
||||||
|
if n < 0 {
|
||||||
|
return nil, fmt.Errorf(`missing ':' in header %q; expecting "key: value" format`, h)
|
||||||
|
}
|
||||||
|
kv := &kvs[i]
|
||||||
|
kv.key = strings.TrimSpace(h[:n])
|
||||||
|
kv.value = strings.TrimSpace(h[n+1:])
|
||||||
|
}
|
||||||
|
return kvs, nil
|
||||||
|
}
|
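For orientation, here is a rough usage sketch of the new native.Client: export from the source and stream into the destination through an io.Pipe, the same way runSingle in vm_native.go wires it. The addresses, the series selector and the error handling are placeholders, not taken from this commit.

```go
package main

import (
	"context"
	"io"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
)

func main() {
	ctx := context.Background()
	src := &native.Client{Addr: "http://vm-src:8428"} // placeholder address
	dst := &native.Client{Addr: "http://vm-dst:8428"} // placeholder address

	f := native.Filter{
		Match:     `{__name__="vm_rows"}`, // placeholder series selector
		TimeStart: "2023-02-01T00:00:00Z",
		TimeEnd:   "2023-03-01T00:00:00Z",
	}

	// Export matching blocks from the source via /api/v1/export/native.
	exportReader, err := src.ExportPipe(ctx, src.Addr+"/api/v1/export/native", f)
	if err != nil {
		log.Fatalf("failed to init export pipe: %s", err)
	}

	// Stream the exported blocks into the destination via /api/v1/import/native.
	pr, pw := io.Pipe()
	done := make(chan struct{})
	go func() {
		defer close(done)
		if err := dst.ImportPipe(ctx, dst.Addr+"/api/v1/import/native", pr); err != nil {
			log.Printf("import failed: %s", err)
		}
	}()

	if _, err := io.Copy(pw, exportReader); err != nil {
		log.Fatalf("copy failed: %s", err)
	}
	_ = pw.Close()
	<-done
}
```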
22 app/vmctl/native/filter.go Normal file
@@ -0,0 +1,22 @@
+package native
+
+import "fmt"
+
+// Filter represents request filter
+type Filter struct {
+	Match     string
+	TimeStart string
+	TimeEnd   string
+	Chunk     string
+}
+
+func (f Filter) String() string {
+	s := fmt.Sprintf("\n\tfilter: match[]=%s", f.Match)
+	if f.TimeStart != "" {
+		s += fmt.Sprintf("\n\tstart: %s", f.TimeStart)
+	}
+	if f.TimeEnd != "" {
+		s += fmt.Sprintf("\n\tend: %s", f.TimeEnd)
+	}
+	return s
+}
|
@ -21,7 +21,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
defaultReadTimeout = 30 * time.Second
|
defaultReadTimeout = 5 * time.Minute
|
||||||
remoteReadPath = "/api/v1/read"
|
remoteReadPath = "/api/v1/read"
|
||||||
healthPath = "/-/healthy"
|
healthPath = "/-/healthy"
|
||||||
)
|
)
|
||||||
|
|
|
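The bump from 30 seconds to 5 minutes only changes a default; the value is still a plain Go duration. A tiny sketch of applying such a timeout to an HTTP client follows; whether the remote-read client wires it exactly this way is not shown in this hunk, so treat it as an assumption.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Matches the new defaultReadTimeout above: 5 minutes instead of 30 seconds.
	const defaultReadTimeout = 5 * time.Minute

	client := &http.Client{Timeout: defaultReadTimeout}
	fmt.Println(client.Timeout) // 5m0s
}
```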
@ -2,177 +2,125 @@ package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/cheggaaa/pb/v3"
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||||
|
"github.com/cheggaaa/pb/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
type vmNativeProcessor struct {
|
type vmNativeProcessor struct {
|
||||||
filter filter
|
filter native.Filter
|
||||||
|
|
||||||
|
dst *native.Client
|
||||||
|
src *native.Client
|
||||||
|
backoff *backoff.Backoff
|
||||||
|
|
||||||
|
s *stats
|
||||||
rateLimit int64
|
rateLimit int64
|
||||||
|
|
||||||
dst *vmNativeClient
|
|
||||||
src *vmNativeClient
|
|
||||||
interCluster bool
|
interCluster bool
|
||||||
}
|
cc int
|
||||||
|
|
||||||
type vmNativeClient struct {
|
|
||||||
addr string
|
|
||||||
user string
|
|
||||||
password string
|
|
||||||
extraLabels []string
|
|
||||||
}
|
|
||||||
|
|
||||||
type filter struct {
|
|
||||||
match string
|
|
||||||
timeStart string
|
|
||||||
timeEnd string
|
|
||||||
chunk string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f filter) String() string {
|
|
||||||
s := fmt.Sprintf("\n\tfilter: match[]=%s", f.match)
|
|
||||||
if f.timeStart != "" {
|
|
||||||
s += fmt.Sprintf("\n\tstart: %s", f.timeStart)
|
|
||||||
}
|
|
||||||
if f.timeEnd != "" {
|
|
||||||
s += fmt.Sprintf("\n\tend: %s", f.timeEnd)
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
nativeExportAddr = "api/v1/export/native"
|
nativeExportAddr = "api/v1/export/native"
|
||||||
nativeImportAddr = "api/v1/import/native"
|
nativeImportAddr = "api/v1/import/native"
|
||||||
nativeTenantsAddr = "admin/tenants"
|
nativeBarTpl = `{{ blue "%s:" }} {{ counters . }} {{ bar . "[" "█" (cycle . "█") "▒" "]" }} {{ percent . }}`
|
||||||
|
|
||||||
nativeBarTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . "suffix"}}`
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func (p *vmNativeProcessor) run(ctx context.Context) error {
|
func (p *vmNativeProcessor) run(ctx context.Context, silent bool) error {
|
||||||
if p.filter.chunk == "" {
|
if p.cc == 0 {
|
||||||
return p.runWithFilter(ctx, p.filter)
|
p.cc = 1
|
||||||
|
}
|
||||||
|
p.s = &stats{
|
||||||
|
startTime: time.Now(),
|
||||||
}
|
}
|
||||||
|
|
||||||
startOfRange, err := time.Parse(time.RFC3339, p.filter.timeStart)
|
start, err := time.Parse(time.RFC3339, p.filter.TimeStart)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to parse %s, provided: %s, expected format: %s, error: %v", vmNativeFilterTimeStart, p.filter.timeStart, time.RFC3339, err)
|
return fmt.Errorf("failed to parse %s, provided: %s, expected format: %s, error: %w",
|
||||||
|
vmNativeFilterTimeStart, p.filter.TimeStart, time.RFC3339, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var endOfRange time.Time
|
end := time.Now().In(start.Location())
|
||||||
if p.filter.timeEnd != "" {
|
if p.filter.TimeEnd != "" {
|
||||||
endOfRange, err = time.Parse(time.RFC3339, p.filter.timeEnd)
|
end, err = time.Parse(time.RFC3339, p.filter.TimeEnd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to parse %s, provided: %s, expected format: %s, error: %v", vmNativeFilterTimeEnd, p.filter.timeEnd, time.RFC3339, err)
|
return fmt.Errorf("failed to parse %s, provided: %s, expected format: %s, error: %w",
|
||||||
|
vmNativeFilterTimeEnd, p.filter.TimeEnd, time.RFC3339, err)
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
endOfRange = time.Now()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ranges, err := stepper.SplitDateRange(startOfRange, endOfRange, p.filter.chunk)
|
ranges := [][]time.Time{{start, end}}
|
||||||
|
if p.filter.Chunk != "" {
|
||||||
|
ranges, err = stepper.SplitDateRange(start, end, p.filter.Chunk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to create date ranges for the given time filters: %v", err)
|
return fmt.Errorf("failed to create date ranges for the given time filters: %w", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for rangeIdx, r := range ranges {
|
tenants := []string{""}
|
||||||
formattedStartTime := r[0].Format(time.RFC3339)
|
if p.interCluster {
|
||||||
formattedEndTime := r[1].Format(time.RFC3339)
|
log.Printf("Discovering tenants...")
|
||||||
log.Printf("Processing range %d/%d: %s - %s \n", rangeIdx+1, len(ranges), formattedStartTime, formattedEndTime)
|
tenants, err = p.src.GetSourceTenants(ctx, p.filter)
|
||||||
f := filter{
|
|
||||||
match: p.filter.match,
|
|
||||||
timeStart: formattedStartTime,
|
|
||||||
timeEnd: formattedEndTime,
|
|
||||||
}
|
|
||||||
err := p.runWithFilter(ctx, f)
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("processing failed for range %d/%d: %s - %s \n", rangeIdx+1, len(ranges), formattedStartTime, formattedEndTime)
|
return fmt.Errorf("failed to get tenants: %w", err)
|
||||||
return err
|
}
|
||||||
|
question := fmt.Sprintf("The following tenants were discovered: %s.\n Continue?", tenants)
|
||||||
|
if !silent && !prompt(question) {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, tenantID := range tenants {
|
||||||
|
err := p.runBackfilling(ctx, tenantID, ranges, silent)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("migration failed: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Println("Import finished!")
|
||||||
|
log.Print(p.s)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *vmNativeProcessor) runWithFilter(ctx context.Context, f filter) error {
|
func (p *vmNativeProcessor) do(ctx context.Context, f native.Filter, srcURL, dstURL string) error {
|
||||||
nativeImportAddr, err := vm.AddExtraLabelsToImportPath(nativeImportAddr, p.dst.extraLabels)
|
|
||||||
|
|
||||||
|
retryableFunc := func() error { return p.runSingle(ctx, f, srcURL, dstURL) }
|
||||||
|
attempts, err := p.backoff.Retry(ctx, retryableFunc)
|
||||||
|
p.s.Lock()
|
||||||
|
p.s.retries += attempts
|
||||||
|
p.s.Unlock()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to add labels to import path: %s", err)
|
return fmt.Errorf("failed to migrate from %s to %s (retry attempts: %d): %w\nwith fileter %s", srcURL, dstURL, attempts, err, f)
|
||||||
}
|
|
||||||
|
|
||||||
if !p.interCluster {
|
|
||||||
srcURL := fmt.Sprintf("%s/%s", p.src.addr, nativeExportAddr)
|
|
||||||
dstURL := fmt.Sprintf("%s/%s", p.dst.addr, nativeImportAddr)
|
|
||||||
|
|
||||||
return p.runSingle(ctx, f, srcURL, dstURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
tenants, err := p.getSourceTenants(ctx, f)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to get source tenants: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("Discovered tenants: %v", tenants)
|
|
||||||
for _, tenant := range tenants {
|
|
||||||
// src and dst expected formats: http://vminsert:8480/ and http://vmselect:8481/
|
|
||||||
srcURL := fmt.Sprintf("%s/select/%s/prometheus/%s", p.src.addr, tenant, nativeExportAddr)
|
|
||||||
dstURL := fmt.Sprintf("%s/insert/%s/prometheus/%s", p.dst.addr, tenant, nativeImportAddr)
|
|
||||||
|
|
||||||
if err := p.runSingle(ctx, f, srcURL, dstURL); err != nil {
|
|
||||||
return fmt.Errorf("failed to migrate data for tenant %q: %s", tenant, err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *vmNativeProcessor) runSingle(ctx context.Context, f filter, srcURL, dstURL string) error {
|
func (p *vmNativeProcessor) runSingle(ctx context.Context, f native.Filter, srcURL, dstURL string) error {
|
||||||
log.Printf("Initing export pipe from %q with filters: %s\n", srcURL, f)
|
|
||||||
|
|
||||||
exportReader, err := p.exportPipe(ctx, srcURL, f)
|
exportReader, err := p.src.ExportPipe(ctx, srcURL, f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to init export pipe: %s", err)
|
return fmt.Errorf("failed to init export pipe: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
pr, pw := io.Pipe()
|
pr, pw := io.Pipe()
|
||||||
sync := make(chan struct{})
|
done := make(chan struct{})
|
||||||
go func() {
|
go func() {
|
||||||
defer func() { close(sync) }()
|
defer func() { close(done) }()
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, dstURL, pr)
|
if err := p.dst.ImportPipe(ctx, dstURL, pr); err != nil {
|
||||||
if err != nil {
|
logger.Errorf("error initializing import pipe: %s", err)
|
||||||
log.Fatalf("cannot create import request to %q: %s", p.dst.addr, err)
|
return
|
||||||
}
|
|
||||||
importResp, err := p.dst.do(req, http.StatusNoContent)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("import request failed: %s", err)
|
|
||||||
}
|
|
||||||
if err := importResp.Body.Close(); err != nil {
|
|
||||||
log.Fatalf("cannot close import response body: %s", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
fmt.Printf("Initing import process to %q:\n", dstURL)
|
|
||||||
pool := pb.NewPool()
|
|
||||||
bar := pb.ProgressBarTemplate(nativeBarTpl).New(0)
|
|
||||||
pool.Add(bar)
|
|
||||||
barReader := bar.NewProxyReader(exportReader)
|
|
||||||
if err := pool.Start(); err != nil {
|
|
||||||
log.Printf("error start process bars pool: %s", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
bar.Finish()
|
|
||||||
if err := pool.Stop(); err != nil {
|
|
||||||
fmt.Printf("failed to stop barpool: %+v\n", err)
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
@ -182,95 +130,176 @@ func (p *vmNativeProcessor) runSingle(ctx context.Context, f filter, srcURL, dst
|
||||||
w = limiter.NewWriteLimiter(pw, rl)
|
w = limiter.NewWriteLimiter(pw, rl)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = io.Copy(w, barReader)
|
written, err := io.Copy(w, exportReader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to write into %q: %s", p.dst.addr, err)
|
return fmt.Errorf("failed to write into %q: %s", p.dst.Addr, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
p.s.Lock()
|
||||||
|
p.s.bytes += uint64(written)
|
||||||
|
p.s.requests++
|
||||||
|
p.s.Unlock()
|
||||||
|
|
||||||
if err := pw.Close(); err != nil {
|
if err := pw.Close(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
<-sync
|
<-done
|
||||||
|
|
||||||
log.Println("Import finished!")
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *vmNativeProcessor) getSourceTenants(ctx context.Context, f filter) ([]string, error) {
|
func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string, ranges [][]time.Time, silent bool) error {
|
||||||
u := fmt.Sprintf("%s/%s", p.src.addr, nativeTenantsAddr)
|
exportAddr := nativeExportAddr
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
|
srcURL := fmt.Sprintf("%s/%s", p.src.Addr, exportAddr)
|
||||||
|
|
||||||
|
importAddr, err := vm.AddExtraLabelsToImportPath(nativeImportAddr, p.dst.ExtraLabels)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create request to %q: %s", u, err)
|
return fmt.Errorf("failed to add labels to import path: %s", err)
|
||||||
|
}
|
||||||
|
dstURL := fmt.Sprintf("%s/%s", p.dst.Addr, importAddr)
|
||||||
|
|
||||||
|
if p.interCluster {
|
||||||
|
srcURL = fmt.Sprintf("%s/select/%s/prometheus/%s", p.src.Addr, tenantID, exportAddr)
|
||||||
|
dstURL = fmt.Sprintf("%s/insert/%s/prometheus/%s", p.dst.Addr, tenantID, importAddr)
|
||||||
}
|
}
|
||||||
|
|
||||||
params := req.URL.Query()
|
barPrefix := "Requests to make"
|
||||||
if f.timeStart != "" {
|
initMessage := "Initing import process from %q to %q with filter %s"
|
||||||
params.Set("start", f.timeStart)
|
initParams := []interface{}{srcURL, dstURL, p.filter.String()}
|
||||||
|
if p.interCluster {
|
||||||
|
barPrefix = fmt.Sprintf("Requests to make for tenant %s", tenantID)
|
||||||
|
initMessage = "Initing import process from %q to %q with filter %s for tenant %s"
|
||||||
|
initParams = []interface{}{srcURL, dstURL, p.filter.String(), tenantID}
|
||||||
}
|
}
|
||||||
if f.timeEnd != "" {
|
|
||||||
params.Set("end", f.timeEnd)
|
|
||||||
}
|
|
||||||
req.URL.RawQuery = params.Encode()
|
|
||||||
|
|
||||||
resp, err := p.src.do(req, http.StatusOK)
|
fmt.Println("") // extra line for better output formatting
|
||||||
|
log.Printf(initMessage, initParams...)
|
||||||
|
|
||||||
|
log.Printf("Exploring metrics...")
|
||||||
|
metrics, err := p.src.Explore(ctx, p.filter, tenantID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("tenants request failed: %s", err)
|
return fmt.Errorf("cannot get metrics from source %s: %w", p.src.Addr, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var r struct {
|
if len(metrics) == 0 {
|
||||||
Tenants []string `json:"data"`
|
return fmt.Errorf("no metrics found")
|
||||||
}
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
|
|
||||||
return nil, fmt.Errorf("cannot decode tenants response: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := resp.Body.Close(); err != nil {
|
foundSeriesMsg := fmt.Sprintf("Found %d metrics to import", len(metrics))
|
||||||
return nil, fmt.Errorf("cannot close tenants response body: %s", err)
|
if !p.interCluster {
|
||||||
|
// do not prompt for intercluster because there could be many tenants,
|
||||||
|
// and we don't want to interrupt the process when moving to the next tenant.
|
||||||
|
question := foundSeriesMsg + ". Continue?"
|
||||||
|
if !silent && !prompt(question) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Print(foundSeriesMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
return r.Tenants, nil
|
processingMsg := fmt.Sprintf("Requests to make: %d", len(metrics)*len(ranges))
|
||||||
|
if len(ranges) > 1 {
|
||||||
|
processingMsg = fmt.Sprintf("Selected time range will be split into %d ranges according to %q step. %s", len(ranges), p.filter.Chunk, processingMsg)
|
||||||
|
}
|
||||||
|
log.Print(processingMsg)
|
||||||
|
|
||||||
|
var bar *pb.ProgressBar
|
||||||
|
if !silent {
|
||||||
|
bar = pb.ProgressBarTemplate(fmt.Sprintf(nativeBarTpl, barPrefix)).New(len(metrics) * len(ranges))
|
||||||
|
bar.Start()
|
||||||
|
defer bar.Finish()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *vmNativeProcessor) exportPipe(ctx context.Context, url string, f filter) (io.ReadCloser, error) {
|
filterCh := make(chan native.Filter)
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
errCh := make(chan error, p.cc)
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("cannot create request to %q: %s", p.src.addr, err)
|
var wg sync.WaitGroup
|
||||||
|
for i := 0; i < p.cc; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
for f := range filterCh {
|
||||||
|
if err := p.do(ctx, f, srcURL, dstURL); err != nil {
|
||||||
|
errCh <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if bar != nil {
|
||||||
|
bar.Increment()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
params := req.URL.Query()
|
// any error breaks the import
|
||||||
params.Set("match[]", f.match)
|
for s := range metrics {
|
||||||
if f.timeStart != "" {
|
for _, times := range ranges {
|
||||||
params.Set("start", f.timeStart)
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return fmt.Errorf("context canceled")
|
||||||
|
case infErr := <-errCh:
|
||||||
|
return fmt.Errorf("native error: %s", infErr)
|
||||||
|
case filterCh <- native.Filter{
|
||||||
|
Match: fmt.Sprintf("{%s=%q}", nameLabel, s),
|
||||||
|
TimeStart: times[0].Format(time.RFC3339),
|
||||||
|
TimeEnd: times[1].Format(time.RFC3339),
|
||||||
|
}:
|
||||||
}
|
}
|
||||||
if f.timeEnd != "" {
|
|
||||||
params.Set("end", f.timeEnd)
|
|
||||||
}
|
}
|
||||||
req.URL.RawQuery = params.Encode()
|
|
||||||
|
|
||||||
// disable compression since it is meaningless for native format
|
|
||||||
req.Header.Set("Accept-Encoding", "identity")
|
|
||||||
resp, err := p.src.do(req, http.StatusOK)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("export request failed: %s", err)
|
|
||||||
}
|
|
||||||
return resp.Body, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *vmNativeClient) do(req *http.Request, expSC int) (*http.Response, error) {
|
close(filterCh)
|
||||||
if c.user != "" {
|
wg.Wait()
|
||||||
req.SetBasicAuth(c.user, c.password)
|
close(errCh)
|
||||||
}
|
|
||||||
resp, err := http.DefaultClient.Do(req)
|
for err := range errCh {
|
||||||
if err != nil {
|
return fmt.Errorf("import process failed: %s", err)
|
||||||
return nil, fmt.Errorf("unexpected error when performing request: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp.StatusCode != expSC {
|
return nil
|
||||||
body, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err)
|
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("unexpected response code %d: %s", resp.StatusCode, string(body))
|
|
||||||
|
// stats represents client statistic
|
||||||
|
// when processing data
|
||||||
|
type stats struct {
|
||||||
|
sync.Mutex
|
||||||
|
startTime time.Time
|
||||||
|
bytes uint64
|
||||||
|
requests uint64
|
||||||
|
retries uint64
|
||||||
}
|
}
|
||||||
return resp, err
|
|
||||||
|
func (s *stats) String() string {
|
||||||
|
s.Lock()
|
||||||
|
defer s.Unlock()
|
||||||
|
|
||||||
|
totalImportDuration := time.Since(s.startTime)
|
||||||
|
totalImportDurationS := totalImportDuration.Seconds()
|
||||||
|
bytesPerS := byteCountSI(0)
|
||||||
|
if s.bytes > 0 && totalImportDurationS > 0 {
|
||||||
|
bytesPerS = byteCountSI(int64(float64(s.bytes) / totalImportDurationS))
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("VictoriaMetrics importer stats:\n"+
|
||||||
|
" time spent while importing: %v;\n"+
|
||||||
|
" total bytes: %s;\n"+
|
||||||
|
" bytes/s: %s;\n"+
|
||||||
|
" requests: %d;\n"+
|
||||||
|
" requests retries: %d;",
|
||||||
|
totalImportDuration,
|
||||||
|
byteCountSI(int64(s.bytes)), bytesPerS,
|
||||||
|
s.requests, s.retries)
|
||||||
|
}
|
||||||
|
|
||||||
|
func byteCountSI(b int64) string {
|
||||||
|
const unit = 1000
|
||||||
|
if b < unit {
|
||||||
|
return fmt.Sprintf("%d B", b)
|
||||||
|
}
|
||||||
|
div, exp := int64(unit), 0
|
||||||
|
for n := b / unit; n >= unit; n /= unit {
|
||||||
|
div *= unit
|
||||||
|
exp++
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%.1f %cB",
|
||||||
|
float64(b)/float64(div), "kMGTPE"[exp])
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -27,10 +28,10 @@ const (
|
||||||
func Test_vmNativeProcessor_run(t *testing.T) {
|
func Test_vmNativeProcessor_run(t *testing.T) {
|
||||||
t.Skip()
|
t.Skip()
|
||||||
type fields struct {
|
type fields struct {
|
||||||
filter filter
|
filter native.Filter
|
||||||
rateLimit int64
|
rateLimit int64
|
||||||
dst *vmNativeClient
|
dst *native.Client
|
||||||
src *vmNativeClient
|
src *native.Client
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
|
@ -41,16 +42,16 @@ func Test_vmNativeProcessor_run(t *testing.T) {
|
||||||
{
|
{
|
||||||
name: "simulate syscall.SIGINT",
|
name: "simulate syscall.SIGINT",
|
||||||
fields: fields{
|
fields: fields{
|
||||||
filter: filter{
|
filter: native.Filter{
|
||||||
match: matchFilter,
|
Match: matchFilter,
|
||||||
timeStart: timeStartFilter,
|
TimeStart: timeStartFilter,
|
||||||
},
|
},
|
||||||
rateLimit: 0,
|
rateLimit: 0,
|
||||||
dst: &vmNativeClient{
|
dst: &native.Client{
|
||||||
addr: dstAddr,
|
Addr: dstAddr,
|
||||||
},
|
},
|
||||||
src: &vmNativeClient{
|
src: &native.Client{
|
||||||
addr: srcAddr,
|
Addr: srcAddr,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
closer: func(cancelFunc context.CancelFunc) {
|
closer: func(cancelFunc context.CancelFunc) {
|
||||||
|
@ -62,16 +63,16 @@ func Test_vmNativeProcessor_run(t *testing.T) {
|
||||||
{
|
{
|
||||||
name: "simulate correct work",
|
name: "simulate correct work",
|
||||||
fields: fields{
|
fields: fields{
|
||||||
filter: filter{
|
filter: native.Filter{
|
||||||
match: matchFilter,
|
Match: matchFilter,
|
||||||
timeStart: timeStartFilter,
|
TimeStart: timeStartFilter,
|
||||||
},
|
},
|
||||||
rateLimit: 0,
|
rateLimit: 0,
|
||||||
dst: &vmNativeClient{
|
dst: &native.Client{
|
||||||
addr: dstAddr,
|
Addr: dstAddr,
|
||||||
},
|
},
|
||||||
src: &vmNativeClient{
|
src: &native.Client{
|
||||||
addr: srcAddr,
|
Addr: srcAddr,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
closer: func(cancelFunc context.CancelFunc) {},
|
closer: func(cancelFunc context.CancelFunc) {},
|
||||||
|
@ -80,18 +81,18 @@ func Test_vmNativeProcessor_run(t *testing.T) {
|
||||||
{
|
{
|
||||||
name: "simulate correct work with chunking",
|
name: "simulate correct work with chunking",
|
||||||
fields: fields{
|
fields: fields{
|
||||||
filter: filter{
|
filter: native.Filter{
|
||||||
match: matchFilter,
|
Match: matchFilter,
|
||||||
timeStart: timeStartFilter,
|
TimeStart: timeStartFilter,
|
||||||
timeEnd: timeEndFilter,
|
TimeEnd: timeEndFilter,
|
||||||
chunk: stepper.StepMonth,
|
Chunk: stepper.StepMonth,
|
||||||
},
|
},
|
||||||
rateLimit: 0,
|
rateLimit: 0,
|
||||||
dst: &vmNativeClient{
|
dst: &native.Client{
|
||||||
addr: dstAddr,
|
Addr: dstAddr,
|
||||||
},
|
},
|
||||||
src: &vmNativeClient{
|
src: &native.Client{
|
||||||
addr: srcAddr,
|
Addr: srcAddr,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
closer: func(cancelFunc context.CancelFunc) {},
|
closer: func(cancelFunc context.CancelFunc) {},
|
||||||
|
@ -110,7 +111,7 @@ func Test_vmNativeProcessor_run(t *testing.T) {
|
||||||
|
|
||||||
tt.closer(cancelFn)
|
tt.closer(cancelFn)
|
||||||
|
|
||||||
if err := p.run(ctx); (err != nil) != tt.wantErr {
|
if err := p.run(ctx, true); (err != nil) != tt.wantErr {
|
||||||
t.Errorf("run() error = %v, wantErr %v", err, tt.wantErr)
|
t.Errorf("run() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
|
@@ -1,7 +1,7 @@
 {
   "files": {
     "main.css": "./static/css/main.5c28f4a7.css",
-    "main.js": "./static/js/main.0be86920.js",
+    "main.js": "./static/js/main.6eed9ce1.js",
     "static/js/27.c1ccfd29.chunk.js": "./static/js/27.c1ccfd29.chunk.js",
     "static/media/Lato-Regular.ttf": "./static/media/Lato-Regular.d714fec1633b69a9c2e9.ttf",
     "static/media/Lato-Bold.ttf": "./static/media/Lato-Bold.32360ba4b57802daa4d6.ttf",
@@ -9,6 +9,6 @@
   },
   "entrypoints": [
     "static/css/main.5c28f4a7.css",
-    "static/js/main.0be86920.js"
+    "static/js/main.6eed9ce1.js"
   ]
 }
@ -1 +1 @@
|
||||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.0be86920.js"></script><link href="./static/css/main.5c28f4a7.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.6eed9ce1.js"></script><link href="./static/css/main.5c28f4a7.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
File diff suppressed because one or more lines are too long
|
@ -1,6 +1,7 @@
|
||||||
import Header from "./Header/Header";
|
import Header from "./Header/Header";
|
||||||
import React, { FC, useEffect } from "preact/compat";
|
import React, { FC, useEffect } from "preact/compat";
|
||||||
import { Outlet, useLocation } from "react-router-dom";
|
import { Outlet, useLocation, useSearchParams } from "react-router-dom";
|
||||||
|
import qs from "qs";
|
||||||
import "./style.scss";
|
import "./style.scss";
|
||||||
import { getAppModeEnable } from "../../utils/app-mode";
|
import { getAppModeEnable } from "../../utils/app-mode";
|
||||||
import classNames from "classnames";
|
import classNames from "classnames";
|
||||||
|
@ -12,14 +13,33 @@ import useDeviceDetect from "../../hooks/useDeviceDetect";
|
||||||
const Layout: FC = () => {
|
const Layout: FC = () => {
|
||||||
const appModeEnable = getAppModeEnable();
|
const appModeEnable = getAppModeEnable();
|
||||||
const { isMobile } = useDeviceDetect();
|
const { isMobile } = useDeviceDetect();
|
||||||
|
const { pathname } = useLocation();
|
||||||
|
const [searchParams, setSearchParams] = useSearchParams();
|
||||||
|
|
||||||
useFetchDashboards();
|
useFetchDashboards();
|
||||||
|
|
||||||
const { pathname } = useLocation();
|
const setDocumentTitle = () => {
|
||||||
useEffect(() => {
|
|
||||||
const defaultTitle = "vmui";
|
const defaultTitle = "vmui";
|
||||||
const routeTitle = routerOptions[pathname]?.title;
|
const routeTitle = routerOptions[pathname]?.title;
|
||||||
document.title = routeTitle ? `${routeTitle} - ${defaultTitle}` : defaultTitle;
|
document.title = routeTitle ? `${routeTitle} - ${defaultTitle}` : defaultTitle;
|
||||||
}, [pathname]);
|
};
|
||||||
|
|
||||||
|
// for support old links with search params
|
||||||
|
const redirectSearchToHashParams = () => {
|
||||||
|
const { search } = window.location;
|
||||||
|
if (search) {
|
||||||
|
const query = qs.parse(search, { ignoreQueryPrefix: true });
|
||||||
|
Object.entries(query).forEach(([key, value]) => {
|
||||||
|
searchParams.set(key, value as string);
|
||||||
|
setSearchParams(searchParams);
|
||||||
|
});
|
||||||
|
window.location.search = "";
|
||||||
|
}
|
||||||
|
window.location.replace(window.location.href.replace(/\/\?#\//, "/#/"));
|
||||||
|
};
|
||||||
|
|
||||||
|
useEffect(setDocumentTitle, [pathname]);
|
||||||
|
useEffect(redirectSearchToHashParams, []);
|
||||||
|
|
||||||
return <section className="vm-container">
|
return <section className="vm-container">
|
||||||
<Header/>
|
<Header/>
|
||||||
|
|
|
@ -0,0 +1,42 @@
|
||||||
|
import React, { FC } from "preact/compat";
|
||||||
|
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
|
||||||
|
import { CloseIcon } from "../../Icons";
|
||||||
|
import { MouseEvent } from "react";
|
||||||
|
|
||||||
|
interface MultipleSelectedValueProps {
|
||||||
|
values: string[]
|
||||||
|
onRemoveItem: (val: string) => void
|
||||||
|
}
|
||||||
|
|
||||||
|
const MultipleSelectedValue: FC<MultipleSelectedValueProps> = ({ values, onRemoveItem }) => {
|
||||||
|
const { isMobile } = useDeviceDetect();
|
||||||
|
|
||||||
|
const createHandleClick = (value: string) => (e: MouseEvent) => {
|
||||||
|
onRemoveItem(value);
|
||||||
|
e.stopPropagation();
|
||||||
|
};
|
||||||
|
|
||||||
|
if (isMobile) {
|
||||||
|
return (
|
||||||
|
<span className="vm-select-input-content__counter">
|
||||||
|
selected {values.length}
|
||||||
|
</span>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return <>
|
||||||
|
{values.map(item => (
|
||||||
|
<div
|
||||||
|
className="vm-select-input-content__selected"
|
||||||
|
key={item}
|
||||||
|
>
|
||||||
|
<span>{item}</span>
|
||||||
|
<div onClick={createHandleClick(item)}>
|
||||||
|
<CloseIcon/>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
))}
|
||||||
|
</>;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default MultipleSelectedValue;
|
|
@ -6,6 +6,7 @@ import Autocomplete from "../Autocomplete/Autocomplete";
|
||||||
import { useAppState } from "../../../state/common/StateContext";
|
import { useAppState } from "../../../state/common/StateContext";
|
||||||
import "./style.scss";
|
import "./style.scss";
|
||||||
import useDeviceDetect from "../../../hooks/useDeviceDetect";
|
import useDeviceDetect from "../../../hooks/useDeviceDetect";
|
||||||
|
import MultipleSelectedValue from "./MultipleSelectedValue/MultipleSelectedValue";
|
||||||
|
|
||||||
interface SelectProps {
|
interface SelectProps {
|
||||||
value: string | string[]
|
value: string | string[]
|
||||||
|
@ -39,8 +40,9 @@ const Select: FC<SelectProps> = ({
|
||||||
|
|
||||||
const inputRef = useRef<HTMLInputElement>(null);
|
const inputRef = useRef<HTMLInputElement>(null);
|
||||||
|
|
||||||
const isMultiple = useMemo(() => Array.isArray(value), [value]);
|
const isMultiple = Array.isArray(value);
|
||||||
const selectedValues = useMemo(() => Array.isArray(value) ? value : undefined, [isMultiple, value]);
|
const selectedValues = Array.isArray(value) ? value : undefined;
|
||||||
|
const hideInput = isMobile && isMultiple && !!selectedValues?.length;
|
||||||
|
|
||||||
const textFieldValue = useMemo(() => {
|
const textFieldValue = useMemo(() => {
|
||||||
if (openList) return search;
|
if (openList) return search;
|
||||||
|
@ -124,23 +126,13 @@ const Select: FC<SelectProps> = ({
|
||||||
ref={autocompleteAnchorEl}
|
ref={autocompleteAnchorEl}
|
||||||
>
|
>
|
||||||
<div className="vm-select-input-content">
|
<div className="vm-select-input-content">
|
||||||
{!isMobile && selectedValues && selectedValues.map(item => (
|
{!!selectedValues?.length && (
|
||||||
<div
|
<MultipleSelectedValue
|
||||||
className="vm-select-input-content__selected"
|
values={selectedValues}
|
||||||
key={item}
|
onRemoveItem={handleSelected}
|
||||||
>
|
/>
|
||||||
<span>{item}</span>
|
|
||||||
<div onClick={createHandleClick(item)}>
|
|
||||||
<CloseIcon/>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
))}
|
|
||||||
{isMobile && !!selectedValues?.length && (
|
|
||||||
<span className="vm-select-input-content__counter">
|
|
||||||
selected {selectedValues.length}
|
|
||||||
</span>
|
|
||||||
)}
|
)}
|
||||||
{!isMobile || (isMobile && (!selectedValues || !selectedValues?.length)) && (
|
{!hideInput && (
|
||||||
<input
|
<input
|
||||||
value={textFieldValue}
|
value={textFieldValue}
|
||||||
type="text"
|
type="text"
|
||||||
|
|
|
@ -19,9 +19,15 @@ export interface QueryConfiguratorProps {
|
||||||
error?: ErrorTypes | string;
|
error?: ErrorTypes | string;
|
||||||
queryOptions: string[]
|
queryOptions: string[]
|
||||||
onHideQuery: (queries: number[]) => void
|
onHideQuery: (queries: number[]) => void
|
||||||
|
onRunQuery: () => void
|
||||||
}
|
}
|
||||||
|
|
||||||
const QueryConfigurator: FC<QueryConfiguratorProps> = ({ error, queryOptions, onHideQuery }) => {
|
const QueryConfigurator: FC<QueryConfiguratorProps> = ({
|
||||||
|
error,
|
||||||
|
queryOptions,
|
||||||
|
onHideQuery,
|
||||||
|
onRunQuery
|
||||||
|
}) => {
|
||||||
const { isMobile } = useDeviceDetect();
|
const { isMobile } = useDeviceDetect();
|
||||||
|
|
||||||
const { query, queryHistory, autocomplete } = useQueryState();
|
const { query, queryHistory, autocomplete } = useQueryState();
|
||||||
|
@ -45,21 +51,22 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({ error, queryOptions, on
|
||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
const onRunQuery = () => {
|
const handleRunQuery = () => {
|
||||||
updateHistory();
|
updateHistory();
|
||||||
queryDispatch({ type: "SET_QUERY", payload: stateQuery });
|
queryDispatch({ type: "SET_QUERY", payload: stateQuery });
|
||||||
timeDispatch({ type: "RUN_QUERY" });
|
timeDispatch({ type: "RUN_QUERY" });
|
||||||
|
onRunQuery();
|
||||||
};
|
};
|
||||||
|
|
||||||
const onAddQuery = () => {
|
const handleAddQuery = () => {
|
||||||
setStateQuery(prev => [...prev, ""]);
|
setStateQuery(prev => [...prev, ""]);
|
||||||
};
|
};
|
||||||
|
|
||||||
const onRemoveQuery = (index: number) => {
|
const handleRemoveQuery = (index: number) => {
|
||||||
setStateQuery(prev => prev.filter((q, i) => i !== index));
|
setStateQuery(prev => prev.filter((q, i) => i !== index));
|
||||||
};
|
};
|
||||||
|
|
||||||
const onToggleHideQuery = (e: ReactMouseEvent<HTMLButtonElement, MouseEvent>, index: number) => {
|
const handleToggleHideQuery = (e: ReactMouseEvent<HTMLButtonElement, MouseEvent>, index: number) => {
|
||||||
const { ctrlKey, metaKey } = e;
|
const { ctrlKey, metaKey } = e;
|
||||||
const ctrlMetaKey = ctrlKey || metaKey;
|
const ctrlMetaKey = ctrlKey || metaKey;
|
||||||
|
|
||||||
|
@ -95,17 +102,17 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({ error, queryOptions, on
|
||||||
};
|
};
|
||||||
|
|
||||||
const createHandlerRemoveQuery = (i: number) => () => {
|
const createHandlerRemoveQuery = (i: number) => () => {
|
||||||
onRemoveQuery(i);
|
handleRemoveQuery(i);
|
||||||
setHideQuery(prev => prev.includes(i) ? prev.filter(n => n !== i) : prev.map(n => n > i ? n - 1: n));
|
setHideQuery(prev => prev.includes(i) ? prev.filter(n => n !== i) : prev.map(n => n > i ? n - 1: n));
|
||||||
};
|
};
|
||||||
|
|
||||||
const createHandlerHideQuery = (i: number) => (e: ReactMouseEvent<HTMLButtonElement, MouseEvent>) => {
|
const createHandlerHideQuery = (i: number) => (e: ReactMouseEvent<HTMLButtonElement, MouseEvent>) => {
|
||||||
onToggleHideQuery(e, i);
|
handleToggleHideQuery(e, i);
|
||||||
};
|
};
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (prevStateQuery && (stateQuery.length < prevStateQuery.length)) {
|
if (prevStateQuery && (stateQuery.length < prevStateQuery.length)) {
|
||||||
onRunQuery();
|
handleRunQuery();
|
||||||
}
|
}
|
||||||
}, [stateQuery]);
|
}, [stateQuery]);
|
||||||
|
|
||||||
|
@ -137,7 +144,7 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({ error, queryOptions, on
|
||||||
error={error}
|
error={error}
|
||||||
onArrowUp={createHandlerArrow(-1, i)}
|
onArrowUp={createHandlerArrow(-1, i)}
|
||||||
onArrowDown={createHandlerArrow(1, i)}
|
onArrowDown={createHandlerArrow(1, i)}
|
||||||
onEnter={onRunQuery}
|
onEnter={handleRunQuery}
|
||||||
onChange={createHandlerChangeQuery(i)}
|
onChange={createHandlerChangeQuery(i)}
|
||||||
label={`Query ${i + 1}`}
|
label={`Query ${i + 1}`}
|
||||||
disabled={hideQuery.includes(i)}
|
disabled={hideQuery.includes(i)}
|
||||||
|
@ -173,7 +180,7 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({ error, queryOptions, on
|
||||||
{stateQuery.length < MAX_QUERY_FIELDS && (
|
{stateQuery.length < MAX_QUERY_FIELDS && (
|
||||||
<Button
|
<Button
|
||||||
variant="outlined"
|
variant="outlined"
|
||||||
onClick={onAddQuery}
|
onClick={handleAddQuery}
|
||||||
startIcon={<PlusIcon/>}
|
startIcon={<PlusIcon/>}
|
||||||
>
|
>
|
||||||
Add Query
|
Add Query
|
||||||
|
@ -181,7 +188,7 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({ error, queryOptions, on
|
||||||
)}
|
)}
|
||||||
<Button
|
<Button
|
||||||
variant="contained"
|
variant="contained"
|
||||||
onClick={onRunQuery}
|
onClick={handleRunQuery}
|
||||||
startIcon={<PlayIcon/>}
|
startIcon={<PlayIcon/>}
|
||||||
>
|
>
|
||||||
{isMobile ? "Execute" : "Execute Query"}
|
{isMobile ? "Execute" : "Execute Query"}
|
||||||
|
|
|
@ -35,6 +35,7 @@ const CustomPanel: FC = () => {
|
||||||
const [tracesState, setTracesState] = useState<Trace[]>([]);
|
const [tracesState, setTracesState] = useState<Trace[]>([]);
|
||||||
const [hideQuery, setHideQuery] = useState<number[]>([]);
|
const [hideQuery, setHideQuery] = useState<number[]>([]);
|
||||||
const [showAllSeries, setShowAllSeries] = useState(false);
|
const [showAllSeries, setShowAllSeries] = useState(false);
|
||||||
|
const [hideError, setHideError] = useState(!query[0]);
|
||||||
|
|
||||||
const { customStep, yaxis } = useGraphState();
|
const { customStep, yaxis } = useGraphState();
|
||||||
const graphDispatch = useGraphDispatch();
|
const graphDispatch = useGraphDispatch();
|
||||||
|
@ -72,6 +73,10 @@ const CustomPanel: FC = () => {
|
||||||
setHideQuery(queries);
|
setHideQuery(queries);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
const handleRunQuery = () => {
|
||||||
|
setHideError(false);
|
||||||
|
};
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (traces) {
|
if (traces) {
|
||||||
setTracesState([...tracesState, ...traces]);
|
setTracesState([...tracesState, ...traces]);
|
||||||
|
@ -94,9 +99,10 @@ const CustomPanel: FC = () => {
|
||||||
})}
|
})}
|
||||||
>
|
>
|
||||||
<QueryConfigurator
|
<QueryConfigurator
|
||||||
error={error}
|
error={!hideError ? error : ""}
|
||||||
queryOptions={queryOptions}
|
queryOptions={queryOptions}
|
||||||
onHideQuery={handleHideQuery}
|
onHideQuery={handleHideQuery}
|
||||||
|
onRunQuery={handleRunQuery}
|
||||||
/>
|
/>
|
||||||
{isTracingEnabled && (
|
{isTracingEnabled && (
|
||||||
<div className="vm-custom-panel__trace">
|
<div className="vm-custom-panel__trace">
|
||||||
|
@ -107,7 +113,7 @@ const CustomPanel: FC = () => {
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
{isLoading && <Spinner />}
|
{isLoading && <Spinner />}
|
||||||
{error && <Alert variant="error">{error}</Alert>}
|
{!hideError && error && <Alert variant="error">{error}</Alert>}
|
||||||
{warning && <Alert variant="warning">
|
{warning && <Alert variant="warning">
|
||||||
<div
|
<div
|
||||||
className={classNames({
|
className={classNames({
|
||||||
|
|
|
@@ -612,7 +612,7 @@
           "uid": "$ds"
         },
         "exemplar": true,
-        "expr": "sum(vm_data_size_bytes{job=~\"$job_storage\", type!~\"indexdb.*\"}) / sum(vm_rows{job=~\"$job_storage\", type!~\"indexdb.*\"})",
+        "expr": "sum(vm_data_size_bytes{job=~\"$job_storage\"}) / sum(vm_rows{job=~\"$job_storage\"})",
         "format": "time_series",
         "instant": true,
         "interval": "",
@@ -1,4 +1,4 @@
-# File contains default list of alerts for мьфдуке service.
+# File contains default list of alerts for vmalert service.
 # The alerts below are just recommendations and may require some updates
 # and threshold calibration according to every specific setup.
 groups:
@@ -15,6 +15,16 @@ The following tip changes can be tested by building VictoriaMetrics components f
 
 ## tip
 
+* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): increase the default value for `--remote-read-http-timeout` command-line option from 30s (30 seconds) to 5m (5 minutes). This reduces the probability of timeout errors when migrating a big number of time series. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3879).
+* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): migrate series one-by-one in [vm-native mode](https://docs.victoriametrics.com/vmctl.html#native-protocol). This allows better tracking of the migration progress and resuming the migration process from the last migrated time series. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3859) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3600).
+* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add `--vm-native-src-headers` and `--vm-native-dst-headers` command-line flags, which can be used for setting custom HTTP headers during [vm-native migration mode](https://docs.victoriametrics.com/vmctl.html#native-protocol). Thanks to @baconmania for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3906).
+
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): fix panic when [writing data to Kafka](https://docs.victoriametrics.com/vmagent.html#writing-metrics-to-kafka). The panic has been introduced in [v1.88.0](https://docs.victoriametrics.com/CHANGELOG.html#v1880).
+* BUGFIX: prevent possible `invalid memory address or nil pointer dereference` panic during [background merge](https://docs.victoriametrics.com/#storage). The issue has been introduced in [v1.85.0](https://docs.victoriametrics.com/CHANGELOG.html#v1850). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3897).
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): stop showing `Please enter a valid Query and execute it` error message on the first load of vmui.
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly process `Run in VMUI` button click in [VictoriaMetrics datasource plugin for Grafana](https://github.com/VictoriaMetrics/grafana-datasource).
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the display of the selected value for dropdowns on `Explore` page.
+* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): fix `cannot serve http` panic when a plain HTTP request is sent to `vmauth` configured to accept [proxy protocol](https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt)-encoded requests (e.g. when `vmauth` runs with `-httpListenAddr.useProxyProtocol` command-line flag). The issue has been introduced in [v1.87.0](https://docs.victoriametrics.com/CHANGELOG.html#v1870) when implementing [this feature](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3335).
+
 ## [v1.88.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.88.1)
 
@ -127,11 +127,22 @@ See also [articles and slides about VictoriaMetrics from our users](https://docs

## Operation

### Install

To quickly try VictoriaMetrics, just download [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or [Docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and start it with the desired command-line flags.
See also [QuickStart guide](https://docs.victoriametrics.com/Quick-Start.html) for additional information.

VictoriaMetrics can also be installed via these installation methods:

* [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts).
* [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator).
* [Ansible role for installing cluster VictoriaMetrics (by VictoriaMetrics)](https://github.com/VictoriaMetrics/ansible-playbooks).
* [Ansible role for installing cluster VictoriaMetrics (by community)](https://github.com/Slapper/ansible-victoriametrics-cluster-role).
* [Ansible role for installing single-node VictoriaMetrics (by community)](https://github.com/dreamteam-gg/ansible-victoriametrics-role).
* [Snap package for VictoriaMetrics](https://snapcraft.io/victoriametrics).

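For example, a minimal single-node installation via the Helm charts listed above might look like the following sketch. The repository URL, chart name and release name are assumptions taken from the linked helm-charts project, so follow its README for the authoritative steps.

```console
# Register the VictoriaMetrics Helm repository (URL assumed from the helm-charts project)
helm repo add vm https://victoriametrics.github.io/helm-charts/
helm repo update

# Install the single-node chart under the release name "vmsingle" (chart name assumed)
helm install vmsingle vm/victoria-metrics-single
```
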
### How to start VictoriaMetrics

The following command-line flags are used the most:

* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory.
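For illustration, starting the single-node binary with a custom data directory might look like the sketch below; the binary name matches the release archives, but treat it as an assumption and adjust it for your platform.

```console
# Keep all data under /var/lib/victoria-metrics instead of the default ./victoria-metrics-data
./victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics
```
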
@ -2040,17 +2051,10 @@ It is safe sharing the collected profiles from security point of view, since the

## Integrations

* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend.
  See [this example](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml).
* [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`.
  See [these docs](https://github.com/netdata/netdata#integrations).
* [vmalert-cli](https://github.com/aorfanos/vmalert-cli) - a CLI application for managing [vmalert](https://docs.victoriametrics.com/vmalert.html).

## Third-party contributions

@ -2368,7 +2372,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li

  -promscrape.seriesLimitPerTarget int
     Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info
  -promscrape.streamParse
     Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
  -promscrape.suppressDuplicateScrapeTargetErrors
     Whether to suppress 'duplicate scrape target' errors; see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details
  -promscrape.suppressScrapeErrors
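For reference, the per-target variant mentioned in the `-promscrape.streamParse` description is set inside the scrape configuration rather than on the command line. A minimal sketch of such a `scrape_config` entry (the job name and target below are made up for illustration):

```yaml
scrape_configs:
  - job_name: "big-exporter"        # hypothetical job exposing millions of series per scrape
    stream_parse: true              # per-job equivalent of the -promscrape.streamParse flag
    static_configs:
      - targets: ["big-exporter:8080"]
```
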
@ -130,11 +130,22 @@ See also [articles and slides about VictoriaMetrics from our users](https://docs

## Operation

### Install

To quickly try VictoriaMetrics, just download [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or [Docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and start it with the desired command-line flags.
See also [QuickStart guide](https://docs.victoriametrics.com/Quick-Start.html) for additional information.

VictoriaMetrics can also be installed via these installation methods:

* [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts).
* [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator).
* [Ansible role for installing cluster VictoriaMetrics (by VictoriaMetrics)](https://github.com/VictoriaMetrics/ansible-playbooks).
* [Ansible role for installing cluster VictoriaMetrics (by community)](https://github.com/Slapper/ansible-victoriametrics-cluster-role).
* [Ansible role for installing single-node VictoriaMetrics (by community)](https://github.com/dreamteam-gg/ansible-victoriametrics-role).
* [Snap package for VictoriaMetrics](https://snapcraft.io/victoriametrics).

### How to start VictoriaMetrics

The following command-line flags are used the most:

* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory.
@ -2043,17 +2054,10 @@ It is safe sharing the collected profiles from security point of view, since the

## Integrations

* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend.
  See [this example](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml).
* [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`.
  See [these docs](https://github.com/netdata/netdata#integrations).
* [vmalert-cli](https://github.com/aorfanos/vmalert-cli) - a CLI application for managing [vmalert](https://docs.victoriametrics.com/vmalert.html).

## Third-party contributions

@ -2371,7 +2375,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li

  -promscrape.seriesLimitPerTarget int
     Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info
  -promscrape.streamParse
     Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
  -promscrape.suppressDuplicateScrapeTargetErrors
     Whether to suppress 'duplicate scrape target' errors; see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details
  -promscrape.suppressScrapeErrors
Binary file not shown.
After Width: | Height: | Size: 248 KiB |
Binary file not shown.
After Width: | Height: | Size: 345 KiB |
Binary file not shown.
After Width: | Height: | Size: 387 KiB |
Binary file not shown.
After Width: | Height: | Size: 268 KiB |
Binary file not shown.
After Width: | Height: | Size: 248 KiB |
Binary file not shown.
After Width: | Height: | Size: 340 KiB |
@ -0,0 +1,295 @@
---
sort: 4
weight: 4
title: Alerting with vmalert and Managed VictoriaMetrics
menu:
  docs:
    parent: "managed"
    weight: 4
---

# Alerting with vmalert and Managed VictoriaMetrics

This guide explains the different ways in which you can use vmalert in conjunction with Managed VictoriaMetrics.

<img src="alerting-vmalert-managed-victoria-metrics-setup.png">

## Preconditions

* [vmalert](https://docs.victoriametrics.com/vmalert.html) is installed. You can obtain it by building it from [source](https://docs.victoriametrics.com/vmalert.html#quickstart), downloading it from the [GitHub releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), or using the [docker image](https://hub.docker.com/r/victoriametrics/vmalert) for the container ecosystem (such as docker, k8s, etc.).
* [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) is installed.
* You have a [single or cluster](https://docs.victoriametrics.com/managed-victoriametrics/quickstart.html#creating-instance) deployment in [Managed VictoriaMetrics](https://docs.victoriametrics.com/managed-victoriametrics/overview.html).
* If you are using helm, add the [VictoriaMetrics helm chart](https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-alert#how-to-install) repository to your helm repositories. This step is optional; a sketch of this command follows the list.
* If you are using [vmoperator](https://docs.victoriametrics.com/operator/quick-start.html#quick-start), make sure that it and its CRDs are installed. This step is also optional.

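If you plan to follow the Helm route below, registering the VictoriaMetrics chart repository could look like this sketch. The repository URL is an assumption taken from the helm-charts project linked above; the `vm` alias matches the `vm/victoria-metrics-alert` references used later in this guide.

```console
# Register the VictoriaMetrics Helm repository under the alias "vm" (URL assumed)
helm repo add vm https://victoriametrics.github.io/helm-charts/
helm repo update

# Confirm the victoria-metrics-alert chart is now discoverable
helm search repo vm/victoria-metrics-alert
```
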
## Setup

### Alerting and recording rules file(s)

You need to prepare file(s) with alerting or recording rules.

An example file with one alerting rule.

alerts.yml

```yaml
groups:
  - name: common
    rules:
      - alert: instanceIsDown
        for: 1m
        expr: up == 0
        labels:
          severity: critical
        annotations:
          summary: "{{ $labels.job }} instance: {{$labels.instance }} is not up"
          description: "Job {{ $labels.job }} instance: {{$labels.instance }} is not up for the last 1 minute"
```

### Managed VictoriaMetrics access token and deployment endpoint

To use vmalert with Managed VictoriaMetrics, you must create a read/write token, or use an existing one. The token must have write access to ingest recording rules, ALERTS and ALERTS_FOR_STATE metrics, and read access for rules evaluation.

For instructions on how to create tokens, please refer to this section of the [documentation](https://docs.victoriametrics.com/managed-victoriametrics/quickstart.html#access).

#### Single-Node

<img src="alerting-vmalert-managed-victoria-metrics-single-1.png">
<img src="alerting-vmalert-managed-victoria-metrics-single-2.png">

#### Cluster

<img src="alerting-vmalert-managed-victoria-metrics-cluster-1.png">
<img src="alerting-vmalert-managed-victoria-metrics-cluster-2.png">
<img src="alerting-vmalert-managed-victoria-metrics-cluster-3.png">

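Before wiring the token into vmalert, it can be useful to confirm that it is valid for the chosen endpoint. A minimal sanity check against a single-node endpoint might look like the sketch below; `/api/v1/labels` is a standard Prometheus-compatible read path, but treat the exact URL layout of your deployment as an assumption and copy it from the console screenshots above.

```console
export TOKEN=81e8226e-4e25-480d-9397-31ba4007f0bf
export MANAGED_VM_URL=https://gw-c15-1c.cloud.victoriametrics.com/
# A valid read token should return a JSON list of label names
curl -H "Authorization: Bearer $TOKEN" "${MANAGED_VM_URL}api/v1/labels"
```
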
### vmalert configuration

#### Single-Node

##### Binary

```console
export TOKEN=81e8226e-4e25-480d-9397-31ba4007f0bf
export MANAGED_VM_URL=https://gw-c15-1c.cloud.victoriametrics.com
export ALERTMANAGER_URL=http://localhost:9093
./vmalert -rule=alerts.yml -datasource.url=$MANAGED_VM_URL -datasource.bearerToken=$TOKEN -notifier.url=$ALERTMANAGER_URL -remoteWrite.url=$MANAGED_VM_URL -remoteWrite.bearerToken=$TOKEN -remoteRead.url=$MANAGED_VM_URL -remoteRead.bearerToken=$TOKEN
```

##### Docker

```console
export TOKEN=81e8226e-4e25-480d-9397-31ba4007f0bf
export MANAGED_VM_URL=https://gw-c15-1c.cloud.victoriametrics.com
export ALERTMANAGER_URL=http://alertmanager:9093
docker run -it -p 8080:8080 -v $(pwd)/alerts.yml:/etc/alerts/alerts.yml victoriametrics/vmalert:v1.87.1 -datasource.url=$MANAGED_VM_URL -datasource.bearerToken=$TOKEN -remoteRead.url=$MANAGED_VM_URL -remoteRead.bearerToken=$TOKEN -remoteWrite.url=$MANAGED_VM_URL -remoteWrite.bearerToken=$TOKEN -notifier.url=$ALERTMANAGER_URL -rule="/etc/alerts/*.yml"
```

##### Helm Chart

```console
export TOKEN=81e8226e-4e25-480d-9397-31ba4007f0bf
export MANAGED_VM_URL=https://gw-c15-1c.cloud.victoriametrics.com
export ALERTMANAGER=http://alertmanager:9093
cat <<EOF | helm install vmalert vm/victoria-metrics-alert -f -
server:
  datasource:
    url: $MANAGED_VM_URL
    bearer:
      token: $TOKEN
  remote:
    write:
      url: $MANAGED_VM_URL
      bearer:
        token: $TOKEN
    read:
      url: $MANAGED_VM_URL
      bearer:
        token: $TOKEN
  notifier:
    alertmanager:
      url: $ALERTMANAGER
  config:
    alerts:
      groups:
        - name: common
          rules:
            - alert: instanceIsDown
              for: 1m
              expr: up == 0
              labels:
                severity: critical
              annotations:
                summary: "{{ $labels.job }} instance: {{$labels.instance }} is not up"
                description: "Job {{ $labels.job }} instance: {{$labels.instance }} is not up for the last 1 minute"
EOF
```

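After the release is installed, a quick way to confirm that the vmalert pod started could be a sketch like the following; the label selector and deployment name are assumptions about the chart's naming conventions, so adjust them to whatever `helm status vmalert` reports.

```console
# List pods created by the "vmalert" release (label name assumed)
kubectl get pods -l app.kubernetes.io/instance=vmalert
# Deployment name assumed from the chart's usual <release>-<chart>-server pattern
kubectl logs deploy/vmalert-victoria-metrics-alert-server --tail=20
```
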
##### VMalert CRD for vmoperator

```console
export TOKEN=81e8226e-4e25-480d-9397-31ba4007f0bf
export MANAGED_VM_URL=https://gw-c15-1c.cloud.victoriametrics.com
export ALERTMANAGER=http://alertmanager:9093
cat << EOF | kubectl apply -f -
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMAlert
metadata:
  name: vmalert-managed-vm
spec:
  replicaCount: 1
  datasource:
    url: $MANAGED_VM_URL
    bearerTokenSecret:
      name: managed-token
      key: token
  remoteWrite:
    url: $MANAGED_VM_URL
    bearerTokenSecret:
      name: managed-token
      key: token
  remoteRead:
    url: $MANAGED_VM_URL
    bearerTokenSecret:
      name: managed-token
      key: token
  notifier:
    url: $ALERTMANAGER
  ruleSelector:
    matchLabels:
      type: managed
---
apiVersion: v1
kind: Secret
metadata:
  name: managed-token
stringData:
  token: $TOKEN
EOF
```

##### Testing

You can ingest a metric that will raise an alert:

```console
export TOKEN=81e8226e-4e25-480d-9397-31ba4007f0bf
export MANAGED_VM_URL=https://gw-c15-1c.cloud.victoriametrics.com/
curl -H "Authorization: Bearer $TOKEN" -X POST "${MANAGED_VM_URL}api/v1/import/prometheus" -d 'up{job="vmalert-test", instance="localhost"} 0'
```

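Once the `up` sample above has been ingested and the `for: 1m` window has passed, the alert should show up in the `ALERTS` series that vmalert writes back to the deployment. A sketch of how this could be checked, assuming the query endpoint follows the standard Prometheus API layout:

```console
# Should list the firing alert series once the rule has been active for 1m
curl -H "Authorization: Bearer $TOKEN" "${MANAGED_VM_URL}api/v1/query?query=ALERTS"
```
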
#### Cluster

##### Binary

```console
export TOKEN=76bc5470-d340-4e5e-9574-49ed30911cc4
export MANAGED_VM_READ_URL=https://gw-c15-1a.cloud.victoriametrics.com/select/0/prometheus/
export MANAGED_VM_WRITE_URL=https://gw-c15-1a.cloud.victoriametrics.com/insert/0/prometheus/
export ALERTMANAGER_URL=http://localhost:9093
./vmalert -rule=alerts.yml -datasource.url=$MANAGED_VM_READ_URL -datasource.bearerToken=$TOKEN -notifier.url=$ALERTMANAGER_URL -remoteWrite.url=$MANAGED_VM_WRITE_URL -remoteWrite.bearerToken=$TOKEN -remoteRead.url=$MANAGED_VM_READ_URL -remoteRead.bearerToken=$TOKEN
```

##### Docker

```console
export TOKEN=76bc5470-d340-4e5e-9574-49ed30911cc4
export MANAGED_VM_READ_URL=https://gw-c15-1a.cloud.victoriametrics.com/select/0/prometheus/
export MANAGED_VM_WRITE_URL=https://gw-c15-1a.cloud.victoriametrics.com/insert/0/prometheus/
export ALERTMANAGER_URL=http://alertmanager:9093
docker run -it -p 8080:8080 -v $(pwd)/alerts.yml:/etc/alerts/alerts.yml victoriametrics/vmalert:v1.87.1 -datasource.url=$MANAGED_VM_READ_URL -datasource.bearerToken=$TOKEN -remoteRead.url=$MANAGED_VM_READ_URL -remoteRead.bearerToken=$TOKEN -remoteWrite.url=$MANAGED_VM_WRITE_URL -remoteWrite.bearerToken=$TOKEN -notifier.url=$ALERTMANAGER_URL -rule="/etc/alerts/*.yml"
```

##### Helm Chart

```console
export TOKEN=76bc5470-d340-4e5e-9574-49ed30911cc4
export MANAGED_VM_READ_URL=https://gw-c15-1a.cloud.victoriametrics.com/select/0/prometheus/
export MANAGED_VM_WRITE_URL=https://gw-c15-1a.cloud.victoriametrics.com/insert/0/prometheus/
export ALERTMANAGER=http://alertmanager:9093
cat <<EOF | helm install vmalert vm/victoria-metrics-alert -f -
server:
  datasource:
    url: $MANAGED_VM_READ_URL
    bearer:
      token: $TOKEN
  remote:
    write:
      url: $MANAGED_VM_WRITE_URL
      bearer:
        token: $TOKEN
    read:
      url: $MANAGED_VM_READ_URL
      bearer:
        token: $TOKEN
  notifier:
    alertmanager:
      url: $ALERTMANAGER
  config:
    alerts:
      groups:
        - name: common
          rules:
            - alert: instanceIsDown
              for: 1m
              expr: up == 0
              labels:
                severity: critical
              annotations:
                summary: "{{ $labels.job }} instance: {{$labels.instance }} is not up"
                description: "Job {{ $labels.job }} instance: {{$labels.instance }} is not up for the last 1 minute"
EOF
```

##### VMalert CRD for vmoperator

```console
export TOKEN=76bc5470-d340-4e5e-9574-49ed30911cc4
export MANAGED_VM_READ_URL=https://gw-c15-1a.cloud.victoriametrics.com/select/0/prometheus/
export MANAGED_VM_WRITE_URL=https://gw-c15-1a.cloud.victoriametrics.com/insert/0/prometheus/
export ALERTMANAGER=http://alertmanager:9093
cat << EOF | kubectl apply -f -
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMAlert
metadata:
  name: vmalert-managed-vm
spec:
  replicaCount: 1
  datasource:
    url: $MANAGED_VM_READ_URL
    bearerTokenSecret:
      name: managed-token
      key: token
  remoteWrite:
    url: $MANAGED_VM_WRITE_URL
    bearerTokenSecret:
      name: managed-token
      key: token
  remoteRead:
    url: $MANAGED_VM_READ_URL
    bearerTokenSecret:
      name: managed-token
      key: token
  notifier:
    url: $ALERTMANAGER
  ruleSelector:
    matchLabels:
      type: managed
---
apiVersion: v1
kind: Secret
metadata:
  name: managed-token
stringData:
  token: $TOKEN
EOF
```

##### Testing

You can ingest a metric that will raise an alert:

```console
export TOKEN=76bc5470-d340-4e5e-9574-49ed30911cc4
export MANAGED_VM_WRITE_URL=https://gw-c15-1a.cloud.victoriametrics.com/insert/0/prometheus/
curl -H "Authorization: Bearer $TOKEN" -X POST "${MANAGED_VM_WRITE_URL}api/v1/import/prometheus" -d 'up{job="vmalert-test", instance="localhost"} 0'
```

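vmalert itself also exposes the list of active alerts over its HTTP API, which is handy for checking the cluster setup end to end. Assuming vmalert is reachable on port 8080 as in the Docker examples above (adjust the port to your `-httpListenAddr` setting), a sketch:

```console
# Ask the local vmalert instance which alerts are currently active
curl http://localhost:8080/api/v1/alerts
```
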
@ -1,11 +1,11 @@
---
sort: 5
weight: 5
title: User Management in Managed VictoriaMetrics
menu:
  docs:
    parent: "managed"
    weight: 5
---
# User Management in Managed VictoriaMetrics

@ -55,6 +55,7 @@ This Document documents the types introduced by the VictoriaMetrics to be consum
* [VMAgentStatus](#vmagentstatus)
* [BasicAuth](#basicauth)
* [BearerAuth](#bearerauth)
* [ConfigMapKeyReference](#configmapkeyreference)
* [DiscoverySelector](#discoveryselector)
* [EmbeddedHPA](#embeddedhpa)
* [EmbeddedObjectMetadata](#embeddedobjectmetadata)

@ -171,6 +172,7 @@ VMAlertmanagerSpec is a specification of the desired behavior of the VMAlertmana
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| secrets | Secrets is a list of Secrets in the same namespace as the VMAlertmanager object, which shall be mounted into the VMAlertmanager Pods. The Secrets are mounted into /etc/vm/secrets/<secret-name> | []string | false |
| configMaps | ConfigMaps is a list of ConfigMaps in the same namespace as the VMAlertmanager object, which shall be mounted into the VMAlertmanager Pods. The ConfigMaps are mounted into /etc/vm/configs/<configmap-name>. | []string | false |
| templates | Templates is a list of ConfigMap key references for ConfigMaps in the same namespace as the VMAlertmanager object, which shall be mounted into the VMAlertmanager Pods. The Templates are mounted into /etc/vm/templates/<configmap-name>/<configmap-key>. | [][ConfigMapKeyReference](#configmapkeyreference) | false |
| configRawYaml | ConfigRawYaml - raw configuration for alertmanager, it helps it to start without secret. priority -> hardcoded ConfigRaw -> ConfigRaw, provided by user -> ConfigSecret. | string | false |
| configSecret | ConfigSecret is the name of a Kubernetes Secret in the same namespace as the VMAlertmanager object, which contains configuration for this VMAlertmanager, configuration must be inside secret key: alertmanager.yaml. It must be created by user. instance. Defaults to 'vmalertmanager-<alertmanager-name>' The secret is mounted into /etc/alertmanager/config. | string | false |
| logLevel | Log level for VMAlertmanager to be configured with. | string | false |

@ -206,6 +208,9 @@ VMAlertmanagerSpec is a specification of the desired behavior of the VMAlertmana
| serviceSpec | ServiceSpec that will be added to vmalertmanager service spec | *[ServiceSpec](#servicespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
| livenessProbe | LivenessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
| readinessProbe | ReadinessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
| startupProbe | StartupProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
| selectAllByDefault | SelectAllByDefault changes default behavior for empty CRD selectors, such ConfigSelector. with selectAllScrapes: true and undefined ConfigSelector and ConfigNamespaceSelector Operator selects all exist alertManagerConfigs with selectAllScrapes: false - selects nothing | bool | false |
| configSelector | ConfigSelector defines selector for VMAlertmanagerConfig, result config will be merged with Raw or Secret config. Works in combination with NamespaceSelector. NamespaceSelector nil - only objects at VMAlertmanager namespace. Selector nil - only objects at NamespaceSelector namespaces. If both nil - behaviour controlled by selectAllByDefault | *[metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta) | false |
| configNamespaceSelector | \n ConfigNamespaceSelector defines namespace selector for VMAlertmanagerConfig.\nWorks in combination with Selector. NamespaceSelector nil - only objects at VMAlertmanager namespace. Selector nil - only objects at NamespaceSelector namespaces. If both nil - behaviour controlled by selectAllByDefault | *[metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta) | false |

@ -430,6 +435,7 @@ Route defines a node in the routing tree.
| continue | Continue indicating whether an alert should continue matching subsequent sibling nodes. It will always be true for the first-level route. | bool | false |
| routes | RawRoutes alertmanager nested routes https://prometheus.io/docs/alerting/latest/configuration/#route | []apiextensionsv1.JSON | false |
| mute_time_intervals | MuteTimeIntervals for alerts | []string | false |
| active_time_intervals | ActiveTimeIntervals Times when the route should be active These must match the name at time_intervals | []string | false |

[Back to TOC](#table-of-contents)

@ -533,6 +539,7 @@ TimeInterval defines intervals of time
| days_of_month | DayOfMonth defines list of numerical days in the month. Days begin at 1. Negative values are also accepted. for example, ['1:5', '-3:-1'] | []string | false |
| months | Months defines list of calendar months identified by a case-insensitive name (e.g. ‘January’) or numeric 1. For example, ['1:3', 'may:august', 'december'] | []string | false |
| years | Years defines numerical list of years, ranges are accepted. For example, ['2020:2022', '2030'] | []string | false |
| location | Location in golang time location form, e.g. UTC | string | false |

[Back to TOC](#table-of-contents)

@ -580,6 +587,7 @@ VMAlertmanagerConfigSpec defines configuration for VMAlertmanagerConfig
| receivers | Receivers defines alert receivers. without defined Route, receivers will be skipped. | [][Receiver](#receiver) | true |
| inhibit_rules | InhibitRules will only apply for alerts matching the resource's namespace. | [][InhibitRule](#inhibitrule) | false |
| mute_time_intervals | MuteTimeInterval - global mute time See https://prometheus.io/docs/alerting/latest/configuration/#mute_time_interval | [][MuteTimeInterval](#mutetimeinterval) | false |
| time_intervals | ParsingError contents error with context if operator was failed to parse json object from kubernetes api server TimeIntervals modern config option, use it instead of mute_time_intervals | [][MuteTimeInterval](#mutetimeinterval) | false |

[Back to TOC](#table-of-contents)

@ -696,6 +704,7 @@ VMAgentRemoteWriteSpec defines the remote storage configuration for VmAgent
| basicAuth | BasicAuth allow an endpoint to authenticate over basic authentication | *[BasicAuth](#basicauth) | false |
| bearerTokenSecret | Optional bearer auth token to use for -remoteWrite.url | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#secretkeyselector-v1-core) | false |
| urlRelabelConfig | ConfigMap with relabeling config which is applied to metrics before sending them to the corresponding -remoteWrite.url | *v1.ConfigMapKeySelector | false |
| inlineUrlRelabelConfig | InlineUrlRelabelConfig defines relabeling config for remoteWriteURL, it can be defined at crd spec. | [][RelabelConfig](#relabelconfig) | false |
| oauth2 | OAuth2 defines auth configuration | *[OAuth2](#oauth2) | false |
| tlsConfig | TLSConfig describes tls configuration for remote write target | *[TLSConfig](#tlsconfig) | false |
| sendTimeout | Timeout for sending a single block of data to -remoteWrite.url (default 1m0s) | *string | false |

@ -745,6 +754,7 @@ VMAgentSpec defines the desired state of VMAgent
| remoteWrite | RemoteWrite list of victoria metrics /some other remote write system for vm it must looks like: http://victoria-metrics-single:8429/api/v1/write or for cluster different url https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmagent#splitting-data-streams-among-multiple-systems | [][VMAgentRemoteWriteSpec](#vmagentremotewritespec) | true |
| remoteWriteSettings | RemoteWriteSettings defines global settings for all remoteWrite urls. | *[VMAgentRemoteWriteSettings](#vmagentremotewritesettings) | false |
| relabelConfig | RelabelConfig ConfigMap with global relabel config -remoteWrite.relabelConfig This relabeling is applied to all the collected metrics before sending them to remote storage. | *v1.ConfigMapKeySelector | false |
| inlineRelabelConfig | InlineRelabelConfig - defines GlobalRelabelConfig for vmagent, can be defined directly at CRD. | [][RelabelConfig](#relabelconfig) | false |
| selectAllByDefault | SelectAllByDefault changes default behavior for empty CRD selectors, such ServiceScrapeSelector. with selectAllScrapes: true and empty serviceScrapeSelector and ServiceScrapeNamespaceSelector Operator selects all exist serviceScrapes with selectAllScrapes: false - selects nothing | bool | false |
| serviceScrapeSelector | ServiceScrapeSelector defines ServiceScrapes to be selected for target discovery. Works in combination with NamespaceSelector. NamespaceSelector nil - only objects at VMAgent namespace. Selector nil - only objects at NamespaceSelector namespaces. If both nil - behaviour controlled by selectAllByDefault | *[metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta) | false |
| serviceScrapeNamespaceSelector | ServiceScrapeNamespaceSelector Namespaces to be selected for VMServiceScrape discovery. Works in combination with Selector. NamespaceSelector nil - only objects at VMAgent namespace. Selector nil - only objects at NamespaceSelector namespaces. If both nil - behaviour controlled by selectAllByDefault | *[metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta) | false |

@ -756,6 +766,7 @@ VMAgentSpec defines the desired state of VMAgent
| nodeScrapeNamespaceSelector | NodeScrapeNamespaceSelector defines Namespaces to be selected for VMNodeScrape discovery. Works in combination with Selector. NamespaceSelector nil - only objects at VMAgent namespace. Selector nil - only objects at NamespaceSelector namespaces. If both nil - behaviour controlled by selectAllByDefault | *[metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta) | false |
| staticScrapeSelector | StaticScrapeSelector defines PodScrapes to be selected for target discovery. Works in combination with NamespaceSelector. If both nil - match everything. NamespaceSelector nil - only objects at VMAgent namespace. Selector nil - only objects at NamespaceSelector namespaces. | *[metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta) | false |
| staticScrapeNamespaceSelector | StaticScrapeNamespaceSelector defines Namespaces to be selected for VMStaticScrape discovery. Works in combination with NamespaceSelector. NamespaceSelector nil - only objects at VMAgent namespace. Selector nil - only objects at NamespaceSelector namespaces. If both nil - behaviour controlled by selectAllByDefault | *[metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta) | false |
| inlineScrapeConfig | InlineScrapeConfig As scrape configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of VMAgent. It is advised to review VMAgent release notes to ensure that no incompatible scrape configs are going to break VMAgent after the upgrade. it should be defined as single yaml file. inlineScrapeConfig: \|\n - job_name: \"prometheus\"\n static_configs:\n - targets: [\"localhost:9090\"] | string | false |
| additionalScrapeConfigs | AdditionalScrapeConfigs As scrape configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of VMAgent. It is advised to review VMAgent release notes to ensure that no incompatible scrape configs are going to break VMAgent after the upgrade. | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#secretkeyselector-v1-core) | false |
| arbitraryFSAccessThroughSMs | ArbitraryFSAccessThroughSMs configures whether configuration based on a service scrape can access arbitrary files on the file system of the VMAgent container e.g. bearer token files. | [ArbitraryFSAccessThroughSMsConfig](#arbitraryfsaccessthroughsmsconfig) | false |
| insertPorts | InsertPorts - additional listen ports for data ingestion. | *[InsertPorts](#insertports) | false |

@ -768,6 +779,9 @@ VMAgentSpec defines the desired state of VMAgent
| updateStrategy | UpdateStrategy - overrides default update strategy. works only for deployments, statefulset always use OnDelete. | *[appsv1.DeploymentStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#deploymentstrategy-v1-apps) | false |
| rollingUpdate | RollingUpdate - overrides deployment update params. | *[appsv1.RollingUpdateDeployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#rollingupdatedeployment-v1-apps) | false |
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
| livenessProbe | LivenessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
| readinessProbe | ReadinessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
| startupProbe | StartupProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
| serviceScrapeRelabelTemplate | ServiceScrapeRelabelTemplate defines relabel config, that will be added to each VMServiceScrape. it's useful for adding specific labels to all targets | []*[RelabelConfig](#relabelconfig) | false |
| podScrapeRelabelTemplate | PodScrapeRelabelTemplate defines relabel config, that will be added to each VMPodScrape. it's useful for adding specific labels to all targets | []*[RelabelConfig](#relabelconfig) | false |

@ -792,10 +806,12 @@ VmAgentStatus defines the observed state of VmAgent

| Field | Description | Scheme | Required |
| ----- | ----------- | ------ | -------- |
| shards | Shards represents total number of vmagent deployments with uniq scrape targets | int32 | true |
| selector | Selector string form of label value set for autoscaling | string | true |
| replicas | ReplicaCount Total number of pods targeted by this VMAgent | int32 | true |
| updatedReplicas | UpdatedReplicas Total number of non-terminated pods targeted by this VMAgent cluster that have the desired version spec. | int32 | true |
| availableReplicas | AvailableReplicas Total number of available pods (ready for at least minReadySeconds) targeted by this VMAgent cluster. | int32 | true |
| unavailableReplicas | UnavailableReplicas Total number of unavailable pods targeted by this VMAgent cluster. | int32 | true |

[Back to TOC](#table-of-contents)

|
||||||
|
|
||||||
[Back to TOC](#table-of-contents)
|
[Back to TOC](#table-of-contents)
|
||||||
|
|
||||||
|
## ConfigMapKeyReference
|
||||||
|
|
||||||
|
ConfigMapKeyReference refers to a key in a ConfigMap.
|
||||||
|
|
||||||
|
| Field | Description | Scheme | Required |
|
||||||
|
| ----- | ----------- | ------ | -------- |
|
||||||
|
| key | The ConfigMap key to refer to. | string | true |
|
||||||
|
|
||||||
|
[Back to TOC](#table-of-contents)
|
||||||
|
|
||||||
## DiscoverySelector
|
## DiscoverySelector
|
||||||
|
|
||||||
DiscoverySelector can be used at CRD components discovery
|
DiscoverySelector can be used at CRD components discovery
|
||||||
|
@ -888,9 +914,9 @@ EmbeddedProbes - it allows to override some probe params. its not necessary to s
|
||||||
|
|
||||||
| Field | Description | Scheme | Required |
|
| Field | Description | Scheme | Required |
|
||||||
| ----- | ----------- | ------ | -------- |
|
| ----- | ----------- | ------ | -------- |
|
||||||
| livenessProbe | LivenessProbe that will be added CRD pod | *v1.Probe | false |
|
| livenessProbe | LivenessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
| readinessProbe | ReadinessProbe that will be added CRD pod | *v1.Probe | false |
|
| readinessProbe | ReadinessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
| startupProbe | StartupProbe that will be added to CRD pod | *v1.Probe | false |
|
| startupProbe | StartupProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
|
||||||
[Back to TOC](#table-of-contents)
|
[Back to TOC](#table-of-contents)
|
||||||
|
|
||||||
|
@ -903,6 +929,8 @@ HTTPAuth generic auth used with http protocols
|
||||||
| basicAuth | | *[BasicAuth](#basicauth) | false |
|
| basicAuth | | *[BasicAuth](#basicauth) | false |
|
||||||
| OAuth2 | | *[OAuth2](#oauth2) | false |
|
| OAuth2 | | *[OAuth2](#oauth2) | false |
|
||||||
| tlsConfig | | *[TLSConfig](#tlsconfig) | false |
|
| tlsConfig | | *[TLSConfig](#tlsconfig) | false |
|
||||||
|
| bearerTokenFilePath | | string | false |
|
||||||
|
| bearerTokenSecret | Optional bearer auth token to use for -remoteWrite.url | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#secretkeyselector-v1-core) | false |
|
||||||
| headers | Headers allow configuring custom http headers. Must be in form of semicolon separated header with value, e.g. headerName:headerValue. vmalert supports it since 1.79.0 version | []string | false |
|
| headers | Headers allow configuring custom http headers. Must be in form of semicolon separated header with value, e.g. headerName:headerValue. vmalert supports it since 1.79.0 version | []string | false |
|
||||||
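As an illustration, the `headers` field above is a plain list of strings in YAML; a hedged sketch with invented header names and values:

```yaml
headers:
  - "X-Scope-OrgID: 42"    # example custom header in headerName: headerValue form
  - "CustomHeader: foo"
```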
|
|
||||||
[Back to TOC](#table-of-contents)
|
[Back to TOC](#table-of-contents)
|
||||||
|
@ -949,6 +977,12 @@ VMAgentRemoteReadSpec defines the remote storage configuration for VmAlert to re
|
||||||
| Field | Description | Scheme | Required |
|
| Field | Description | Scheme | Required |
|
||||||
| ----- | ----------- | ------ | -------- |
|
| ----- | ----------- | ------ | -------- |
|
||||||
| url | Victoria Metrics or VMSelect url. Required parameter. E.g. http://127.0.0.1:8428 | string | true |
|
| url | Victoria Metrics or VMSelect url. Required parameter. E.g. http://127.0.0.1:8428 | string | true |
|
||||||
|
| basicAuth | | *[BasicAuth](#basicauth) | false |
|
||||||
|
| OAuth2 | | *[OAuth2](#oauth2) | false |
|
||||||
|
| tlsConfig | | *[TLSConfig](#tlsconfig) | false |
|
||||||
|
| bearerTokenFilePath | | string | false |
|
||||||
|
| bearerTokenSecret | Optional bearer auth token to use for -remoteWrite.url | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#secretkeyselector-v1-core) | false |
|
||||||
|
| headers | Headers allow configuring custom http headers. Must be in form of semicolon separated header with value, e.g. headerName:headerValue. vmalert supports it since 1.79.0 version | []string | false |
|
||||||
|
|
||||||
[Back to TOC](#table-of-contents)
|
[Back to TOC](#table-of-contents)
|
||||||
|
|
||||||
|
@ -971,6 +1005,12 @@ VMAlertNotifierSpec defines the notifier url for sending information about alert
|
||||||
| ----- | ----------- | ------ | -------- |
|
| ----- | ----------- | ------ | -------- |
|
||||||
| url | AlertManager url. E.g. http://127.0.0.1:9093 | string | false |
|
| url | AlertManager url. E.g. http://127.0.0.1:9093 | string | false |
|
||||||
| selector | Selector allows service discovery for alertmanager in this case all matched vmalertmanager replicas will be added into vmalert notifier.url as statefulset pod.fqdn | *[DiscoverySelector](#discoveryselector) | false |
|
| selector | Selector allows service discovery for alertmanager in this case all matched vmalertmanager replicas will be added into vmalert notifier.url as statefulset pod.fqdn | *[DiscoverySelector](#discoveryselector) | false |
|
||||||
|
| basicAuth | | *[BasicAuth](#basicauth) | false |
|
||||||
|
| OAuth2 | | *[OAuth2](#oauth2) | false |
|
||||||
|
| tlsConfig | | *[TLSConfig](#tlsconfig) | false |
|
||||||
|
| bearerTokenFilePath | | string | false |
|
||||||
|
| bearerTokenSecret | Optional bearer auth token to use for -remoteWrite.url | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#secretkeyselector-v1-core) | false |
|
||||||
|
| headers | Headers allow configuring custom http headers. Must be in form of semicolon separated header with value, e.g. headerName:headerValue. vmalert supports it since 1.79.0 version | []string | false |
|
||||||
|
|
||||||
[Back to TOC](#table-of-contents)
|
[Back to TOC](#table-of-contents)
|
||||||
|
|
||||||
|
@ -982,6 +1022,12 @@ VMAgentRemoteReadSpec defines the remote storage configuration for VmAlert to re
|
||||||
| ----- | ----------- | ------ | -------- |
|
| ----- | ----------- | ------ | -------- |
|
||||||
| url | URL of the endpoint to send samples to. | string | true |
|
| url | URL of the endpoint to send samples to. | string | true |
|
||||||
| lookback | Lookback defines how far to look into past for alerts timeseries. For example, if lookback=1h then range from now() to now()-1h will be scanned. (default 1h0m0s) Applied only to RemoteReadSpec | *string | false |
|
| lookback | Lookback defines how far to look into past for alerts timeseries. For example, if lookback=1h then range from now() to now()-1h will be scanned. (default 1h0m0s) Applied only to RemoteReadSpec | *string | false |
|
||||||
|
| basicAuth | | *[BasicAuth](#basicauth) | false |
|
||||||
|
| OAuth2 | | *[OAuth2](#oauth2) | false |
|
||||||
|
| tlsConfig | | *[TLSConfig](#tlsconfig) | false |
|
||||||
|
| bearerTokenFilePath | | string | false |
|
||||||
|
| bearerTokenSecret | Optional bearer auth token to use for -remoteWrite.url | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#secretkeyselector-v1-core) | false |
|
||||||
|
| headers | Headers allow configuring custom http headers. Must be in form of semicolon separated header with value, e.g. headerName:headerValue. vmalert supports it since 1.79.0 version | []string | false |
|
||||||
|
|
||||||
[Back to TOC](#table-of-contents)
|
[Back to TOC](#table-of-contents)
|
||||||
|
|
||||||
|
@ -996,6 +1042,12 @@ VMAgentRemoteWriteSpec defines the remote storage configuration for VmAlert
|
||||||
| flushInterval | Defines interval of flushes to remote write endpoint (default 5s) | *string | false |
|
| flushInterval | Defines interval of flushes to remote write endpoint (default 5s) | *string | false |
|
||||||
| maxBatchSize | Defines max number of timeseries to be flushed at once (default 1000) | *int32 | false |
|
| maxBatchSize | Defines max number of timeseries to be flushed at once (default 1000) | *int32 | false |
|
||||||
| maxQueueSize | Defines the max number of pending datapoints to remote write endpoint (default 100000) | *int32 | false |
|
| maxQueueSize | Defines the max number of pending datapoints to remote write endpoint (default 100000) | *int32 | false |
|
||||||
|
| basicAuth | | *[BasicAuth](#basicauth) | false |
|
||||||
|
| OAuth2 | | *[OAuth2](#oauth2) | false |
|
||||||
|
| tlsConfig | | *[TLSConfig](#tlsconfig) | false |
|
||||||
|
| bearerTokenFilePath | | string | false |
|
||||||
|
| bearerTokenSecret | Optional bearer auth token to use for -remoteWrite.url | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#secretkeyselector-v1-core) | false |
|
||||||
|
| headers | Headers allow configuring custom http headers. Must be in form of semicolon separated header with value, e.g. headerName:headerValue. vmalert supports it since 1.79.0 version | []string | false |
|
||||||
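To show how the remote read/write and notifier specs above fit together, here is a hedged `VMAlert` sketch; the `datasource`, `remoteWrite`, `remoteRead` and `notifier` field names and the service URLs are assumptions based on the VMAlert CRD, not values taken from the tables above:

```yaml
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMAlert
metadata:
  name: example-vmalert
spec:
  datasource:
    url: http://vmsingle-example.default.svc:8429
  remoteWrite:
    url: http://vmsingle-example.default.svc:8429
    flushInterval: 5s
  remoteRead:
    url: http://vmsingle-example.default.svc:8429
    lookback: 1h
  notifier:
    url: http://vmalertmanager-example.default.svc:9093
```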
|
|
||||||
[Back to TOC](#table-of-contents)
|
[Back to TOC](#table-of-contents)
|
||||||
|
|
||||||
|
@ -1050,6 +1102,9 @@ VMAlertSpec defines the desired state of VMAlert
|
||||||
| updateStrategy | UpdateStrategy - overrides default update strategy. | *[appsv1.DeploymentStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#deploymentstrategy-v1-apps) | false |
|
| updateStrategy | UpdateStrategy - overrides default update strategy. | *[appsv1.DeploymentStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#deploymentstrategy-v1-apps) | false |
|
||||||
| rollingUpdate | RollingUpdate - overrides deployment update params. | *[appsv1.RollingUpdateDeployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#rollingupdatedeployment-v1-apps) | false |
|
| rollingUpdate | RollingUpdate - overrides deployment update params. | *[appsv1.RollingUpdateDeployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#rollingupdatedeployment-v1-apps) | false |
|
||||||
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
|
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
|
||||||
|
| livenessProbe | LivenessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| readinessProbe | ReadinessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| startupProbe | StartupProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
||||||
| terminationGracePeriodSeconds | TerminationGracePeriodSeconds period for container graceful termination | *int64 | false |
|
| terminationGracePeriodSeconds | TerminationGracePeriodSeconds period for container graceful termination | *int64 | false |
|
||||||
| dnsConfig | Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. | *v1.PodDNSConfig | false |
|
| dnsConfig | Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. | *v1.PodDNSConfig | false |
|
||||||
|
@ -1137,6 +1192,9 @@ VMSingleSpec defines the desired state of VMSingle
|
||||||
| extraEnvs | ExtraEnvs that will be added to VMSingle pod | [][v1.EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core) | false |
|
| extraEnvs | ExtraEnvs that will be added to VMSingle pod | [][v1.EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core) | false |
|
||||||
| serviceSpec | ServiceSpec that will be added to vmsingle service spec | *[ServiceSpec](#servicespec) | false |
|
| serviceSpec | ServiceSpec that will be added to vmsingle service spec | *[ServiceSpec](#servicespec) | false |
|
||||||
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
|
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
|
||||||
|
| livenessProbe | LivenessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| readinessProbe | ReadinessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| startupProbe | StartupProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
||||||
| terminationGracePeriodSeconds | TerminationGracePeriodSeconds period for container graceful termination | *int64 | false |
|
| terminationGracePeriodSeconds | TerminationGracePeriodSeconds period for container graceful termination | *int64 | false |
|
||||||
| readinessGates | ReadinessGates defines pod readiness gates | []v1.PodReadinessGate | false |
|
| readinessGates | ReadinessGates defines pod readiness gates | []v1.PodReadinessGate | false |
|
||||||
|
@ -1165,6 +1223,7 @@ Rule describes an alerting or recording rule.
|
||||||
| record | Record represents a query, that will be recorded to dataSource | string | false |
|
| record | Record represents a query, that will be recorded to dataSource | string | false |
|
||||||
| alert | Alert is a name for alert | string | false |
|
| alert | Alert is a name for alert | string | false |
|
||||||
| expr | Expr is query, that will be evaluated at dataSource | string | true |
|
| expr | Expr is query, that will be evaluated at dataSource | string | true |
|
||||||
|
| debug | Debug enables logging for the rule; it is useful for tracking | *bool | false |
|
||||||
| for | For evaluation interval in time.Duration format 30s, 1m, 1h or nanoseconds | string | false |
|
| for | For evaluation interval in time.Duration format 30s, 1m, 1h or nanoseconds | string | false |
|
||||||
| labels | Labels will be added to rule configuration | map[string]string | false |
|
| labels | Labels will be added to rule configuration | map[string]string | false |
|
||||||
| annotations | Annotations will be added to rule configuration | map[string]string | false |
|
| annotations | Annotations will be added to rule configuration | map[string]string | false |
|
||||||
|
@ -1180,12 +1239,14 @@ RuleGroup is a list of sequentially evaluated recording and alerting rules.
|
||||||
| name | Name of group | string | true |
|
| name | Name of group | string | true |
|
||||||
| interval | evaluation interval for group | string | false |
|
| interval | evaluation interval for group | string | false |
|
||||||
| rules | Rules list of alert rules | [][Rule](#rule) | true |
|
| rules | Rules list of alert rules | [][Rule](#rule) | true |
|
||||||
|
| limit | Limit the number of alerts an alerting rule and series a recording rule can produce | int | false |
|
||||||
| concurrency | Concurrency defines how many rules execute at once. | int | false |
|
| concurrency | Concurrency defines how many rules execute at once. | int | false |
|
||||||
| labels | Labels optional list of labels added to every rule within a group. It has priority over the external labels. Labels are commonly used for adding environment or tenant-specific tags. | map[string]string | false |
|
| labels | Labels optional list of labels added to every rule within a group. It has priority over the external labels. Labels are commonly used for adding environment or tenant-specific tags. | map[string]string | false |
|
||||||
| extra_filter_labels | ExtraFilterLabels optional list of label filters applied to every rule's request within a group. Is compatible only with VM datasource. See more details at https://docs.victoriametrics.com#prometheus-querying-api-enhancements Deprecated, use params instead | map[string]string | false |
|
| extra_filter_labels | ExtraFilterLabels optional list of label filters applied to every rule's request within a group. Is compatible only with VM datasource. See more details at https://docs.victoriametrics.com#prometheus-querying-api-enhancements Deprecated, use params instead | map[string]string | false |
|
||||||
| tenant | Tenant id for group, can be used only with enterprise version of vmalert See more details at https://docs.victoriametrics.com/vmalert.html#multitenancy | string | false |
|
| tenant | Tenant id for group, can be used only with enterprise version of vmalert See more details at https://docs.victoriametrics.com/vmalert.html#multitenancy | string | false |
|
||||||
| params | Params optional HTTP URL parameters added to each rule request | url.Values | false |
|
| params | Params optional HTTP URL parameters added to each rule request | url.Values | false |
|
||||||
| type | Type defines datasource type for enterprise version of vmalert possible values - prometheus,graphite | string | false |
|
| type | Type defines datasource type for enterprise version of vmalert possible values - prometheus,graphite | string | false |
|
||||||
|
| headers | Headers contains optional HTTP headers added to each rule request Must be in form `header-name: value` For example:\n headers:\n - \"CustomHeader: foo\"\n - \"CustomHeader2: bar\" | []string | false |
|
||||||
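A hedged, minimal `VMRule` sketch tying the `Rule` and `RuleGroup` fields above together; metric names, thresholds and labels are invented for illustration:

```yaml
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMRule
metadata:
  name: example-rules
spec:
  groups:
    - name: example-group
      interval: 30s
      concurrency: 2
      rules:
        - alert: HighErrorRate
          expr: rate(http_errors_total[5m]) > 0.1
          for: 5m
          labels:
            severity: warning
          annotations:
            summary: "Error rate is above 10% for 5 minutes"
```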
|
|
||||||
[Back to TOC](#table-of-contents)
|
[Back to TOC](#table-of-contents)
|
||||||
|
|
||||||
|
@ -1345,6 +1406,9 @@ RelabelConfig allows dynamic rewriting of the label set, being applied to sample
|
||||||
| modulus | Modulus to take of the hash of the source label values. | uint64 | false |
|
| modulus | Modulus to take of the hash of the source label values. | uint64 | false |
|
||||||
| replacement | Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' | string | false |
|
| replacement | Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' | string | false |
|
||||||
| action | Action to perform based on regex matching. Default is 'replace' | string | false |
|
| action | Action to perform based on regex matching. Default is 'replace' | string | false |
|
||||||
|
| if | If represents metricsQL match expression: '{__name__=~\"foo_.*\"}' | string | false |
|
||||||
|
| match | Match is used together with Labels for `action: graphite` | string | false |
|
||||||
|
| labels | Labels is used together with Match for `action: graphite` | map[string]string | false |
|
||||||
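For illustration, a hedged sketch of relabeling rules using the `if` matcher and the VictoriaMetrics-specific `graphite` action described above; the `inlineRelabelConfig` placement on VMAgent is an assumption, and the match pattern and labels are invented:

```yaml
inlineRelabelConfig:
  - if: '{__name__=~"foo_.*"}'   # apply the rule only to series matching this expression
    action: drop
  - action: graphite             # parse Graphite-style metric names into labels
    match: 'app.*.cpu_usage'
    labels:
      job: '$1'                  # capture from the wildcard in the match pattern
```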
|
|
||||||
[Back to TOC](#table-of-contents)
|
[Back to TOC](#table-of-contents)
|
||||||
|
|
||||||
|
@ -1382,6 +1446,8 @@ VMScrapeParams defines scrape target configuration that compatible only with Vic
|
||||||
|
|
||||||
| Field | Description | Scheme | Required |
|
| Field | Description | Scheme | Required |
|
||||||
| ----- | ----------- | ------ | -------- |
|
| ----- | ----------- | ------ | -------- |
|
||||||
|
| relabel_debug | | *bool | false |
|
||||||
|
| metric_relabel_debug | | *bool | false |
|
||||||
| disable_compression | | *bool | false |
|
| disable_compression | | *bool | false |
|
||||||
| disable_keep_alive | | *bool | false |
|
| disable_keep_alive | | *bool | false |
|
||||||
| no_stale_markers | | *bool | false |
|
| no_stale_markers | | *bool | false |
|
||||||
|
@ -1660,6 +1726,9 @@ VMClusterStatus defines the observed state of VMCluster
|
||||||
| updateStrategy | UpdateStrategy - overrides default update strategy. | *[appsv1.DeploymentStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#deploymentstrategy-v1-apps) | false |
|
| updateStrategy | UpdateStrategy - overrides default update strategy. | *[appsv1.DeploymentStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#deploymentstrategy-v1-apps) | false |
|
||||||
| rollingUpdate | RollingUpdate - overrides deployment update params. | *[appsv1.RollingUpdateDeployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#rollingupdatedeployment-v1-apps) | false |
|
| rollingUpdate | RollingUpdate - overrides deployment update params. | *[appsv1.RollingUpdateDeployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#rollingupdatedeployment-v1-apps) | false |
|
||||||
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
|
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
|
||||||
|
| livenessProbe | LivenessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| readinessProbe | ReadinessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| startupProbe | StartupProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
| hpa | HPA defines kubernetes PodAutoScaling configuration version 2. | *[EmbeddedHPA](#embeddedhpa) | false |
|
| hpa | HPA defines kubernetes PodAutoScaling configuration version 2. | *[EmbeddedHPA](#embeddedhpa) | false |
|
||||||
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
||||||
| terminationGracePeriodSeconds | TerminationGracePeriodSeconds period for container graceful termination | *int64 | false |
|
| terminationGracePeriodSeconds | TerminationGracePeriodSeconds period for container graceful termination | *int64 | false |
|
||||||
|
@ -1725,6 +1794,9 @@ VMClusterStatus defines the observed state of VMCluster
|
||||||
| serviceSpec | ServiceSpec that will be added to vmselect service spec | *[ServiceSpec](#servicespec) | false |
|
| serviceSpec | ServiceSpec that will be added to vmselect service spec | *[ServiceSpec](#servicespec) | false |
|
||||||
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
|
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
|
||||||
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
|
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
|
||||||
|
| livenessProbe | LivenessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| readinessProbe | ReadinessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| startupProbe | StartupProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
| hpa | Configures horizontal pod autoscaling. Note, enabling this option disables vmselect to vmselect communication. In most cases it's not an issue. | *[EmbeddedHPA](#embeddedhpa) | false |
|
| hpa | Configures horizontal pod autoscaling. Note, enabling this option disables vmselect to vmselect communication. In most cases it's not an issue. | *[EmbeddedHPA](#embeddedhpa) | false |
|
||||||
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
||||||
| rollingUpdateStrategy | RollingUpdateStrategy defines strategy for application updates Default is OnDelete, in this case operator handles update process Can be changed for RollingUpdate | [appsv1.StatefulSetUpdateStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#statefulsetupdatestrategy-v1-apps) | false |
|
| rollingUpdateStrategy | RollingUpdateStrategy defines strategy for application updates Default is OnDelete, in this case operator handles update process Can be changed for RollingUpdate | [appsv1.StatefulSetUpdateStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#statefulsetupdatestrategy-v1-apps) | false |
|
||||||
|
@ -1775,6 +1847,9 @@ VMClusterStatus defines the observed state of VMCluster
|
||||||
| serviceSpec | ServiceSpec that will be create additional service for vmstorage | *[ServiceSpec](#servicespec) | false |
|
| serviceSpec | ServiceSpec that will be create additional service for vmstorage | *[ServiceSpec](#servicespec) | false |
|
||||||
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
|
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
|
||||||
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
|
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
|
||||||
|
| livenessProbe | LivenessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| readinessProbe | ReadinessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| startupProbe | StartupProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
| maintenanceInsertNodeIDs | MaintenanceInsertNodeIDs - excludes given node ids from insert requests routing, must contain pod suffixes - for pod-0, id will be 0, etc. Let's say you have pod-0, pod-1, pod-2, pod-3; to exclude pod-0 and pod-3 from insert routing, define nodeIDs: [0,3]. Useful at storage expanding, when you want to rebalance some data in the cluster. | []int32 | false |
|
| maintenanceInsertNodeIDs | MaintenanceInsertNodeIDs - excludes given node ids from insert requests routing, must contain pod suffixes - for pod-0, id will be 0, etc. Let's say you have pod-0, pod-1, pod-2, pod-3; to exclude pod-0 and pod-3 from insert routing, define nodeIDs: [0,3]. Useful at storage expanding, when you want to rebalance some data in the cluster. | []int32 | false |
|
||||||
| maintenanceSelectNodeIDs | MaintenanceSelectNodeIDs - excludes given node ids from select requests routing, must contain pod suffixes - for pod-0, id will be 0, etc. | []int32 | false |
|
| maintenanceSelectNodeIDs | MaintenanceSelectNodeIDs - excludes given node ids from select requests routing, must contain pod suffixes - for pod-0, id will be 0, etc. | []int32 | false |
|
||||||
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
||||||
|
@ -1923,6 +1998,9 @@ EmbeddedIngress describes ingress configuration options.
|
||||||
| Field | Description | Scheme | Required |
|
| Field | Description | Scheme | Required |
|
||||||
| ----- | ----------- | ------ | -------- |
|
| ----- | ----------- | ------ | -------- |
|
||||||
| class_name | ClassName defines ingress class name for VMAuth | *string | false |
|
| class_name | ClassName defines ingress class name for VMAuth | *string | false |
|
||||||
|
| name | Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names | string | false |
|
||||||
|
| labels | Labels Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels | map[string]string | false |
|
||||||
|
| annotations | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations | map[string]string | false |
|
||||||
| tlsHosts | TlsHosts configures TLS access for ingress, tlsSecretName must be defined for it. | []string | false |
|
| tlsHosts | TlsHosts configures TLS access for ingress, tlsSecretName must be defined for it. | []string | false |
|
||||||
| tlsSecretName | TlsSecretName defines secretname at the VMAuth namespace with cert and key https://kubernetes.io/docs/concepts/services-networking/ingress/#tls | string | false |
|
| tlsSecretName | TlsSecretName defines secretname at the VMAuth namespace with cert and key https://kubernetes.io/docs/concepts/services-networking/ingress/#tls | string | false |
|
||||||
| extraRules | ExtraRules - additional rules for ingress, must be checked for correctness by user. | [][v12.IngressRule](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#ingressrule-v1-networking-k8s-io) | false |
|
| extraRules | ExtraRules - additional rules for ingress, must be checked for correctness by user. | [][v12.IngressRule](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#ingressrule-v1-networking-k8s-io) | false |
|
||||||
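A hedged sketch of how these ingress options are typically set under the `ingress` field of a `VMAuth` spec; host and secret names are invented:

```yaml
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMAuth
metadata:
  name: example-vmauth
spec:
  ingress:
    class_name: nginx                # ingress class to use
    tlsHosts:
      - vmauth.example.com           # requires tlsSecretName to be set
    tlsSecretName: vmauth-example-tls
```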
|
@ -1996,6 +2074,9 @@ VMAuthSpec defines the desired state of VMAuth
|
||||||
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
|
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
|
||||||
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
|
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
|
||||||
| ingress | Ingress enables ingress configuration for VMAuth. | *[EmbeddedIngress](#embeddedingress) | false |
|
| ingress | Ingress enables ingress configuration for VMAuth. | *[EmbeddedIngress](#embeddedingress) | false |
|
||||||
|
| livenessProbe | LivenessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| readinessProbe | ReadinessProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
|
| startupProbe | StartupProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
|
||||||
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
|
||||||
| terminationGracePeriodSeconds | TerminationGracePeriodSeconds period for container graceful termination | *int64 | false |
|
| terminationGracePeriodSeconds | TerminationGracePeriodSeconds period for container graceful termination | *int64 | false |
|
||||||
| readinessGates | ReadinessGates defines pod readiness gates | []v1.PodReadinessGate | false |
|
| readinessGates | ReadinessGates defines pod readiness gates | []v1.PodReadinessGate | false |
|
||||||
|
|
|
@ -20,7 +20,8 @@ To run VMAgent in a highly available manner you have to configure deduplication
|
||||||
|
|
||||||
Then increase replicas for VMAgent.
|
Then increase replicas for VMAgent.
|
||||||
|
|
||||||
create `VMSingle` with dedup flag
|
create `VMSingle` with dedup flag:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
cat <<EOF | kubectl apply -f -
|
cat <<EOF | kubectl apply -f -
|
||||||
apiVersion: operator.victoriametrics.com/v1beta1
|
apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
@ -33,7 +34,9 @@ spec:
|
||||||
dedup.minScrapeInterval: 60s
|
dedup.minScrapeInterval: 60s
|
||||||
EOF
|
EOF
|
||||||
```
|
```
|
||||||
create `VMAgent` with 2 replicas
|
|
||||||
|
create `VMAgent` with 2 replicas:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
cat <<EOF | kubectl apply -f -
|
cat <<EOF | kubectl apply -f -
|
||||||
apiVersion: operator.victoriametrics.com/v1beta1
|
apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
@ -55,10 +58,12 @@ EOF
|
||||||
```
|
```
|
||||||
|
|
||||||
### Sharding
|
### Sharding
|
||||||
|
|
||||||
Sharding for `VMAgent` distributes scraping between multiple deployments of `VMAgent`.
|
Sharding for `VMAgent` distributes scraping between multiple deployments of `VMAgent`.
|
||||||
See more info at https://victoriametrics.github.io/vmagent.html#scraping-big-number-of-targets
|
See more info at https://victoriametrics.github.io/vmagent.html#scraping-big-number-of-targets
|
||||||
|
|
||||||
Example usage:
|
Example usage:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
|
||||||
cat <<EOF | kubectl apply -f -
|
cat <<EOF | kubectl apply -f -
|
||||||
|
@ -77,12 +82,27 @@ spec:
|
||||||
replicaCount: 2
|
replicaCount: 2
|
||||||
remoteWrite:
|
remoteWrite:
|
||||||
- url: "http://vmsingle-example-vmsingle-persisted.default.svc:8429/api/v1/write"
|
- url: "http://vmsingle-example-vmsingle-persisted.default.svc:8429/api/v1/write"
|
||||||
|
affinity:
|
||||||
|
podAntiAffinity:
|
||||||
|
preferredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
- podAffinityTerm:
|
||||||
|
labelSelector:
|
||||||
|
matchLabels:
|
||||||
|
shard-num: '%SHARD_NUM%'
|
||||||
|
topologyKey: kubernetes.io/hostname
|
||||||
EOF
|
EOF
|
||||||
```
|
```
|
||||||
|
|
||||||
This configuration produces 5 deployments with 2 replicas each. Each deployment has its own shard num
|
This configuration produces 5 deployments with 2 replicas each. Each deployment has its own shard num
|
||||||
and scrapes only 1/5 of all targets.
|
and scrapes only 1/5 of all targets.
|
||||||
|
|
||||||
|
You can use the special placeholder `%SHARD_NUM%` in any field of the `VMAgent` specification,
|
||||||
|
and the operator will replace it with the current shard num of vmagent when creating a deployment or statefulset for vmagent.
|
||||||
|
|
||||||
|
In the example above, the `%SHARD_NUM%` placeholder is used in the `podAntiAffinity` section,
|
||||||
|
which recommends to the scheduler that pods with the same shard num (label `shard-num` in the pod template)
|
||||||
|
are not deployed on the same node. You can use another `topologyKey` for availability zone or region instead of nodes.
|
||||||
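Another hedged illustration of the placeholder: adding a per-shard label to all written samples via extra vmagent flags. The `shardCount`/`extraArgs` field names and the `remoteWrite.label` flag are assumptions not shown in the excerpt above:

```yaml
spec:
  shardCount: 5
  extraArgs:
    remoteWrite.label: "shard=%SHARD_NUM%"   # expands to shard=0, shard=1, ...
```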
|
|
||||||
### StatefulMode
|
### StatefulMode
|
||||||
|
|
||||||
In `StatefulMode` `VMAgent` doesn't lose state of the PersistentQueue (file-based buffer for unsent data) on pod restarts.
|
In `StatefulMode` `VMAgent` doesn't lose state of the PersistentQueue (file-based buffer for unsent data) on pod restarts.
|
||||||
|
@ -90,6 +110,7 @@ In `StatefulMode` `VMAgent` doesn't lose state of the PersistentQueue (file-base
|
||||||
Operator creates `StatefulSet` and, with provided `PersistentVolumeClaimTemplate` at `StatefulStorage` configuration param, metrics queue is stored on disk.
|
Operator creates `StatefulSet` and, with provided `PersistentVolumeClaimTemplate` at `StatefulStorage` configuration param, metrics queue is stored on disk.
|
||||||
|
|
||||||
Example of configuration:
|
Example of configuration:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: operator.victoriametrics.com/v1beta1
|
apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
kind: VMAgent
|
kind: VMAgent
|
||||||
|
@ -117,6 +138,7 @@ Note, if you want to use `VMAlert` with high-available `VMAlertmanager`, which h
|
||||||
at `VMAlert.spec.notifiers.[url]`. Or you can use service discovery for notifier, examples:
|
at `VMAlert.spec.notifiers.[url]`. Or you can use service discovery for notifier, examples:
|
||||||
|
|
||||||
alertmanager:
|
alertmanager:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Secret
|
kind: Secret
|
||||||
|
@ -155,6 +177,7 @@ spec:
|
||||||
configNamespaceSelector: {}
|
configNamespaceSelector: {}
|
||||||
```
|
```
|
||||||
vmalert with fqdns:
|
vmalert with fqdns:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: operator.victoriametrics.com/v1beta1
|
apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
kind: VMAlert
|
kind: VMAlert
|
||||||
|
@ -170,6 +193,7 @@ spec:
|
||||||
```
|
```
|
||||||
|
|
||||||
vmalert with service discovery:
|
vmalert with service discovery:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: operator.victoriametrics.com/v1beta1
|
apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
kind: VMAlert
|
kind: VMAlert
|
||||||
|
|
|
@ -54,6 +54,7 @@ spec:
|
||||||
|
|
||||||
Also, if there is no secret data at configuration, or you just want to redefine some global variables for `alertmanager`,
|
Also, if there is no secret data at configuration, or you just want to redefine some global variables for `alertmanager`,
|
||||||
you can define the configuration at the `spec.configRawYaml` section of the `VMAlertmanager` resource:
|
you can define the configuration at the `spec.configRawYaml` section of the `VMAlertmanager` resource:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: operator.victoriametrics.com/v1beta1
|
apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
kind: VMAlertmanager
|
kind: VMAlertmanager
|
||||||
|
@ -75,6 +76,7 @@ spec:
|
||||||
If both `configSecret` and `configRawYaml` are defined, only configuration from `configRawYaml` will be used. Values from `configSecret` will be ignored.
|
If both `configSecret` and `configRawYaml` are defined, only configuration from `configRawYaml` will be used. Values from `configSecret` will be ignored.
|
||||||
|
|
||||||
## Using VMAlertmanagerConfig
|
## Using VMAlertmanagerConfig
|
||||||
|
|
||||||
`VMAlertmanagerConfig` allows delegating notification configuration to the kubernetes cluster users.
|
`VMAlertmanagerConfig` allows delegating notification configuration to the kubernetes cluster users.
|
||||||
The application owner may configure notifications by defining it at `VMAlertmanagerConfig`.
|
The application owner may configure notifications by defining it at `VMAlertmanagerConfig`.
|
||||||
With the combination of `VMRule` and `VMServiceScrape` it allows delegating configuration observability to application owners, and uses popular `GitOps` practice.
|
With the combination of `VMRule` and `VMServiceScrape` it allows delegating configuration observability to application owners, and uses popular `GitOps` practice.
|
||||||
|
@ -104,11 +106,47 @@ spec:
|
||||||
text: ALARM
|
text: ALARM
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Special Case
|
### Special Case
|
||||||
|
|
||||||
VMAlertmanagerConfig has enforced namespace matcher.
|
VMAlertmanagerConfig has enforced namespace matcher.
|
||||||
Alerts must have a proper namespace label, with the same value as name of namespace for VMAlertmanagerConfig.
|
Alerts must have a proper namespace label, with the same value as name of namespace for VMAlertmanagerConfig.
|
||||||
It can be disabled by setting `spec.disableNamespaceMatcher: true` at the `VMAlertmanager` resource.
|
It can be disabled by setting `spec.disableNamespaceMatcher: true` at the `VMAlertmanager` resource.
|
||||||
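A minimal sketch of disabling the enforced namespace matcher on the `VMAlertmanager` resource:

```yaml
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMAlertmanager
metadata:
  name: example-alertmanager
spec:
  disableNamespaceMatcher: true   # alerts no longer need a matching namespace label
```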
|
|
||||||
## behavior without provided config
|
## Extra configuration files
|
||||||
|
|
||||||
|
`VMAlertmanager` specification has the following fields, which can be used to adjust the configuration without editing the raw configuration file:
|
||||||
|
|
||||||
|
- `spec.templates` - list of keys in `ConfigMaps` that contain template files for `alertmanager`, e.g.:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: operator.victoriametrics.com/v1beta1
|
||||||
|
kind: VMAlertmanager
|
||||||
|
metadata:
|
||||||
|
name: example-alertmanager
|
||||||
|
spec:
|
||||||
|
replicaCount: 2
|
||||||
|
templates:
|
||||||
|
- Name: alertmanager-templates
|
||||||
|
Key: my-template-1.tmpl
|
||||||
|
- Name: alertmanager-templates
|
||||||
|
Key: my-template-2.tmpl
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: alertmanager-templates
|
||||||
|
data:
|
||||||
|
my-template-1.tmpl: |
|
||||||
|
{{ define "hello" -}}
|
||||||
|
hello, Victoria!
|
||||||
|
{{- end }}
|
||||||
|
my-template-2.tmpl: """
|
||||||
|
```
|
||||||
|
|
||||||
|
These templates will be automatically added to `VMAlertmanager` configuration and will be automatically reloaded on changes in source `ConfigMap`.
|
||||||
|
- `spec.configMaps` - list of `ConfigMap` names (in the same namespace) that will be mounted at `VMAlertmanager`
|
||||||
|
workload and will be automatically reloaded on changes in source `ConfigMap`. Mount path is `/etc/vm/configs/<configmap-name>`.
|
||||||
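A hedged sketch of the `spec.configMaps` option; the ConfigMap name is invented, and the mount path follows the rule above:

```yaml
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMAlertmanager
metadata:
  name: example-alertmanager
spec:
  configMaps:
    - extra-alertmanager-files   # mounted at /etc/vm/configs/extra-alertmanager-files
```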
|
|
||||||
|
## Behavior without provided config
|
||||||
|
|
||||||
If no configuration is provided, the operator configures a stub configuration with a blackhole route.
|
If no configuration is provided, the operator configures a stub configuration with a blackhole route.
|
|
@ -1402,7 +1402,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
||||||
-promscrape.seriesLimitPerTarget int
|
-promscrape.seriesLimitPerTarget int
|
||||||
Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info
|
Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info
|
||||||
-promscrape.streamParse
|
-promscrape.streamParse
|
||||||
Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is posible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
|
Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
|
||||||
-promscrape.suppressDuplicateScrapeTargetErrors
|
-promscrape.suppressDuplicateScrapeTargetErrors
|
||||||
Whether to suppress 'duplicate scrape target' errors; see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details
|
Whether to suppress 'duplicate scrape target' errors; see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details
|
||||||
-promscrape.suppressScrapeErrors
|
-promscrape.suppressScrapeErrors
|
||||||
|
|
231
docs/vmctl.md
231
docs/vmctl.md
|
@ -487,6 +487,10 @@ Processing ranges: 8798 / 8798 [████████████████
|
||||||
2022/10/19 16:45:37 Total time: 1m19.406283424s
|
2022/10/19 16:45:37 Total time: 1m19.406283424s
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Migrating big volumes of data may result in remote read client reaching the timeout.
|
||||||
|
Consider increasing the value of `--remote-read-http-timeout` (default `5m`) command-line flag when seeing
|
||||||
|
timeouts or `context canceled` errors.
|
||||||
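For example, a hedged invocation raising the client timeout; the other `remote-read` flags are omitted here:

```console
./vmctl remote-read --remote-read-http-timeout=30m ...
```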
|
|
||||||
### Filtering
|
### Filtering
|
||||||
|
|
||||||
The filtering consists of two parts: by labels and time.
|
The filtering consists of two parts: by labels and time.
|
||||||
|
@ -737,21 +741,33 @@ or higher.
|
||||||
|
|
||||||
See `./vmctl vm-native --help` for details and full list of flags.
|
See `./vmctl vm-native --help` for details and full list of flags.
|
||||||
|
|
||||||
In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by "source" (`src`)
|
Migration in `vm-native` mode takes two steps:
|
||||||
and processing is done by "destination" (`dst`). Because of that, `vmctl` doesn't actually know how much data will be
|
1. Explore the list of the metrics to migrate via `/api/v1/series` API;
|
||||||
processed and can't show the progress bar. It will show the current processing speed and total number of processed bytes:
|
2. Migrate explored metrics one-by-one.
|
||||||
|
|
||||||
```
|
```
|
||||||
./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \
|
./vmctl vm-native \
|
||||||
|
--vm-native-src-addr=http://127.0.0.1:8481/select/0/prometheus \
|
||||||
--vm-native-dst-addr=http://localhost:8428 \
|
--vm-native-dst-addr=http://localhost:8428 \
|
||||||
--vm-native-filter-match='{job="vmagent"}' \
|
--vm-native-filter-time-start='2022-11-20T00:00:00Z' \
|
||||||
--vm-native-filter-time-start='2020-01-01T20:07:00Z'
|
--vm-native-filter-match='{__name__=~"vm_cache_.*"}'
|
||||||
VictoriaMetrics Native import mode
|
VictoriaMetrics Native import mode
|
||||||
Initing export pipe from "http://localhost:8528" with filters:
|
|
||||||
filter: match[]={job="vmagent"}
|
2023/03/02 09:22:02 Initing import process from "http://127.0.0.1:8481/select/0/prometheus/api/v1/export/native" to "http://localhost:8428/api/v1/import/native" with filter
|
||||||
Initing import process to "http://localhost:8428":
|
filter: match[]={__name__=~"vm_cache_.*"}
|
||||||
Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
|
start: 2022-11-20T00:00:00Z
|
||||||
2020/10/13 17:04:59 Total time: 952.143376ms
|
2023/03/02 09:22:02 Exploring metrics...
|
||||||
|
Found 9 metrics to import. Continue? [Y/n]
|
||||||
|
2023/03/02 09:22:04 Requests to make: 9
|
||||||
|
Requests to make: 9 / 9 [███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
|
||||||
|
2023/03/02 09:22:06 Import finished!
|
||||||
|
2023/03/02 09:22:06 VictoriaMetrics importer stats:
|
||||||
|
time spent while importing: 3.632638875s;
|
||||||
|
total bytes: 7.8 MB;
|
||||||
|
bytes/s: 2.1 MB;
|
||||||
|
requests: 9;
|
||||||
|
requests retries: 0;
|
||||||
|
2023/03/02 09:22:06 Total time: 3.633127625s
|
||||||
```
|
```
|
||||||
|
|
||||||
Importing tips:
|
Importing tips:
|
||||||
|
@ -759,6 +775,7 @@ Importing tips:
|
||||||
1. Migrating big volumes of data may result in reaching the safety limits on `src` side.
|
1. Migrating big volumes of data may result in reaching the safety limits on `src` side.
|
||||||
Please verify that `-search.maxExportDuration` and `-search.maxExportSeries` were set with
|
Please verify that `-search.maxExportDuration` and `-search.maxExportSeries` were set with
|
||||||
proper values for `src`. If hitting the limits, follow the recommendations [here](https://docs.victoriametrics.com/#how-to-export-data-in-native-format).
|
proper values for `src`. If hitting the limits, follow the recommendations [here](https://docs.victoriametrics.com/#how-to-export-data-in-native-format).
|
||||||
|
If hitting `the number of matching timeseries exceeds...` error, adjust filters to match fewer time series or update the `-search.maxSeries` command-line flag on vmselect/vmsingle;
|
||||||
2. Migrating all the metrics from one VM to another may collide with existing application metrics
|
2. Migrating all the metrics from one VM to another may collide with existing application metrics
|
||||||
(prefixed with `vm_`) at destination and lead to confusion when using
|
(prefixed with `vm_`) at destination and lead to confusion when using
|
||||||
[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
|
[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
|
||||||
|
@ -770,71 +787,59 @@ Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/
|
||||||
5. When importing in or from cluster version remember to use correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
|
5. When importing in or from cluster version remember to use correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
|
||||||
and specify `accountID` param.
|
and specify `accountID` param.
|
||||||
6. When migrating large volumes of data it might be useful to use `--vm-native-step-interval` flag to split single process into smaller steps.
|
6. When migrating large volumes of data it might be useful to use `--vm-native-step-interval` flag to split single process into smaller steps.
|
||||||
|
7. `vmctl` supports `--vm-concurrency` which controls the number of concurrent workers that process the input from source query results.
|
||||||
|
Please note that each import request can load up to a single vCPU core on VictoriaMetrics. So try to set it according
|
||||||
|
to allocated CPU resources of your VictoriaMetrics installation.
|
||||||
|
8. `vmctl` supports `--vm-native-src-headers` and `--vm-native-dst-headers` which define headers to send with each request
|
||||||
|
to the corresponding source address.
|
||||||
|
|
||||||
|
In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by "source" (`src`)
|
||||||
|
and processing is done by "destination" (`dst`). So no extra memory or CPU resources are required on the `vmctl` side. Only
|
||||||
|
`src` and `dst` resources matter.
|
||||||
|
|
||||||
#### Using time-based chunking of migration
|
#### Using time-based chunking of migration
|
||||||
|
|
||||||
It is possible split migration process into set of smaller batches based on time. This is especially useful when migrating large volumes of data as this adds indication of progress and ability to restore process from certain point in case of failure.
|
It is possible to split the migration process into a set of smaller batches based on time. This is especially useful when
|
||||||
|
migrating large volumes of data as this adds indication of progress and ability to restore process from certain point
|
||||||
|
in case of failure.
|
||||||
|
|
||||||
To use this you need to specify `--vm-native-step-interval` flag. Supported values are: `month`, `day`, `hour`.
|
To use this you need to specify `--vm-native-step-interval` flag. Supported values are: `month`, `day`, `hour`, `minute`.
|
||||||
Note that in order to use this it is required `--vm-native-filter-time-start` to be set to calculate time ranges for export process.
|
Note that in order to use this, `--vm-native-filter-time-start` must be set to calculate time ranges for
|
||||||
|
export process.
|
||||||
|
|
||||||
Every range is being processed independently, which means that:
|
Every range is being processed independently, which means that:
|
||||||
- after range processing is finished all data within range is migrated
|
- after range processing is finished all data within range is migrated
|
||||||
- if process fails on one of stages it is guaranteed that data of prior stages is already written, so it is possible to restart process starting from failed range
|
- if the process fails at one of the stages, it is guaranteed that data of prior stages is already written,
|
||||||
|
so it is possible to restart the process starting from the failed range.
|
||||||
|
|
||||||
It is recommended using the `month` step when migrating the data over multiple months, since the migration with `day` and `hour` steps may take longer time to complete
|
It is recommended to use the `month` step when migrating data over multiple months,
|
||||||
because of additional overhead.
|
since the migration with `day` and `hour` steps may take longer time to complete because of additional overhead.
|
||||||
|
|
||||||
Usage example:
|
Usage example:
|
||||||
```console
|
```console
|
||||||
./vmctl vm-native
|
./vmctl vm-native \
|
||||||
--vm-native-filter-time-start 2022-06-17T00:07:00Z \
|
--vm-native-src-addr=http://127.0.0.1:8481/select/0/prometheus \
|
||||||
--vm-native-filter-time-end 2022-10-03T00:07:00Z \
|
--vm-native-dst-addr=http://localhost:8428 \
|
||||||
--vm-native-src-addr http://localhost:8428 \
|
--vm-native-filter-time-start='2022-11-20T00:00:00Z' \
|
||||||
--vm-native-dst-addr http://localhost:8528 \
|
--vm-native-step-interval=month \
|
||||||
--vm-native-step-interval=month
|
--vm-native-filter-match='{__name__=~"vm_cache_.*"}'
|
||||||
VictoriaMetrics Native import mode
|
VictoriaMetrics Native import mode
|
||||||
2022/08/30 19:48:24 Processing range 1/5: 2022-06-17T00:07:00Z - 2022-06-30T23:59:59Z
|
|
||||||
2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
|
2023/03/02 09:18:05 Initing import process from "http://127.0.0.1:8481/select/0/prometheus/api/v1/export/native" to "http://localhost:8428/api/v1/import/native" with filter
|
||||||
filter: match[]={__name__!=""}
|
filter: match[]={__name__=~"vm_cache_.*"}
|
||||||
start: 2022-06-17T00:07:00Z
|
start: 2022-11-20T00:00:00Z
|
||||||
end: 2022-06-30T23:59:59Z
|
2023/03/02 09:18:05 Exploring metrics...
|
||||||
Initing import process to "http://localhost:8428":
|
Found 9 metrics to import. Continue? [Y/n]
|
||||||
2022/08/30 19:48:24 Import finished!
|
2023/03/02 09:18:07 Selected time range will be split into 5 ranges according to "month" step. Requests to make: 45.
|
||||||
Total: 16 B ↗ Speed: 28.89 KiB p/s
|
Requests to make: 45 / 45 [█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
|
||||||
2022/08/30 19:48:24 Processing range 2/5: 2022-07-01T00:00:00Z - 2022-07-31T23:59:59Z
|
2023/03/02 09:18:12 Import finished!
|
||||||
2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
|
2023/03/02 09:18:12 VictoriaMetrics importer stats:
|
||||||
filter: match[]={__name__!=""}
|
time spent while importing: 7.111870667s;
|
||||||
start: 2022-07-01T00:00:00Z
|
total bytes: 7.7 MB;
|
||||||
end: 2022-07-31T23:59:59Z
|
bytes/s: 1.1 MB;
|
||||||
Initing import process to "http://localhost:8428":
|
requests: 45;
|
||||||
2022/08/30 19:48:24 Import finished!
|
requests retries: 0;
|
||||||
Total: 16 B ↗ Speed: 164.35 KiB p/s
|
2023/03/02 09:18:12 Total time: 7.112405875s
|
||||||
2022/08/30 19:48:24 Processing range 3/5: 2022-08-01T00:00:00Z - 2022-08-31T23:59:59Z
|
|
||||||
2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
|
|
||||||
filter: match[]={__name__!=""}
|
|
||||||
start: 2022-08-01T00:00:00Z
|
|
||||||
end: 2022-08-31T23:59:59Z
|
|
||||||
Initing import process to "http://localhost:8428":
|
|
||||||
2022/08/30 19:48:24 Import finished!
|
|
||||||
Total: 16 B ↗ Speed: 191.42 KiB p/s
|
|
||||||
2022/08/30 19:48:24 Processing range 4/5: 2022-09-01T00:00:00Z - 2022-09-30T23:59:59Z
|
|
||||||
2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
|
|
||||||
filter: match[]={__name__!=""}
|
|
||||||
start: 2022-09-01T00:00:00Z
|
|
||||||
end: 2022-09-30T23:59:59Z
|
|
||||||
Initing import process to "http://localhost:8428":
|
|
||||||
2022/08/30 19:48:24 Import finished!
|
|
||||||
Total: 16 B ↗ Speed: 141.04 KiB p/s
|
|
||||||
2022/08/30 19:48:24 Processing range 5/5: 2022-10-01T00:00:00Z - 2022-10-03T00:07:00Z
|
|
||||||
2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
|
|
||||||
filter: match[]={__name__!=""}
|
|
||||||
start: 2022-10-01T00:00:00Z
|
|
||||||
end: 2022-10-03T00:07:00Z
|
|
||||||
Initing import process to "http://localhost:8428":
|
|
||||||
2022/08/30 19:48:24 Import finished!
|
|
||||||
Total: 16 B ↗ Speed: 186.32 KiB p/s
|
|
||||||
2022/08/30 19:48:24 Total time: 12.680582ms
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Cluster-to-cluster migration mode
|
#### Cluster-to-cluster migration mode
|
||||||
|
@ -846,70 +851,41 @@ Cluster-to-cluster uses `/admin/tenants` endpoint (available starting from [v1.8
|
||||||
To use this mode you need to set `--vm-intercluster` flag to `true`, `--vm-native-src-addr` flag to 'http://vmselect:8481/' and `--vm-native-dst-addr` value to http://vminsert:8480/:
|
To use this mode you need to set `--vm-intercluster` flag to `true`, `--vm-native-src-addr` flag to 'http://vmselect:8481/' and `--vm-native-dst-addr` value to http://vminsert:8480/:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
./bin/vmctl vm-native --vm-intercluster=true --vm-native-src-addr=http://localhost:8481/ --vm-native-dst-addr=http://172.17.0.3:8480/
|
./vmctl vm-native --vm-native-src-addr=http://127.0.0.1:8481/ \
|
||||||
|
--vm-native-dst-addr=http://127.0.0.1:8480/ \
|
||||||
|
--vm-native-filter-match='{__name__="vm_app_uptime_seconds"}' \
|
||||||
|
--vm-native-filter-time-start='2023-02-01T00:00:00Z' \
|
||||||
|
--vm-native-step-interval=day \
|
||||||
|
--vm-intercluster
|
||||||
VictoriaMetrics Native import mode
-2022/12/05 21:20:06 Discovered tenants: [123:1 12812919:1 1289198:1 1289:1283 12:1 1:0 1:1 1:1231231 1:1271727 1:12819 1:281 812891298:1]
-2022/12/05 21:20:06 Initing export pipe from "http://localhost:8481/select/123:1/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/123:1/prometheus/api/v1/import/native":
-Total: 61.13 MiB ↖ Speed: 2.05 MiB p/s
-Total: 61.13 MiB ↗ Speed: 2.30 MiB p/s
-2022/12/05 21:20:33 Initing export pipe from "http://localhost:8481/select/12812919:1/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/12812919:1/prometheus/api/v1/import/native":
-Total: 43.14 MiB ↘ Speed: 1.86 MiB p/s
-Total: 43.14 MiB ↙ Speed: 2.36 MiB p/s
-2022/12/05 21:20:51 Initing export pipe from "http://localhost:8481/select/1289198:1/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1289198:1/prometheus/api/v1/import/native":
-Total: 16.64 MiB ↗ Speed: 2.66 MiB p/s
-Total: 16.64 MiB ↘ Speed: 2.19 MiB p/s
-2022/12/05 21:20:59 Initing export pipe from "http://localhost:8481/select/1289:1283/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1289:1283/prometheus/api/v1/import/native":
-Total: 43.33 MiB ↙ Speed: 1.94 MiB p/s
-Total: 43.33 MiB ↖ Speed: 2.35 MiB p/s
-2022/12/05 21:21:18 Initing export pipe from "http://localhost:8481/select/12:1/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/12:1/prometheus/api/v1/import/native":
-Total: 63.78 MiB ↙ Speed: 1.96 MiB p/s
-Total: 63.78 MiB ↖ Speed: 2.28 MiB p/s
-2022/12/05 21:21:46 Initing export pipe from "http://localhost:8481/select/1:0/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:0/prometheus/api/v1/import/native":
-2022/12/05 21:21:46 Import finished!
-Total: 330 B ↗ Speed: 3.53 MiB p/s
-2022/12/05 21:21:46 Initing export pipe from "http://localhost:8481/select/1:1/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:1/prometheus/api/v1/import/native":
-Total: 63.81 MiB ↙ Speed: 1.96 MiB p/s
-Total: 63.81 MiB ↖ Speed: 2.28 MiB p/s
-2022/12/05 21:22:14 Initing export pipe from "http://localhost:8481/select/1:1231231/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:1231231/prometheus/api/v1/import/native":
-Total: 63.84 MiB ↙ Speed: 1.93 MiB p/s
-Total: 63.84 MiB ↖ Speed: 2.29 MiB p/s
-2022/12/05 21:22:42 Initing export pipe from "http://localhost:8481/select/1:1271727/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:1271727/prometheus/api/v1/import/native":
-Total: 54.37 MiB ↘ Speed: 1.90 MiB p/s
-Total: 54.37 MiB ↙ Speed: 2.37 MiB p/s
-2022/12/05 21:23:05 Initing export pipe from "http://localhost:8481/select/1:12819/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:12819/prometheus/api/v1/import/native":
-Total: 17.01 MiB ↙ Speed: 1.75 MiB p/s
-Total: 17.01 MiB ↖ Speed: 2.15 MiB p/s
-2022/12/05 21:23:13 Initing export pipe from "http://localhost:8481/select/1:281/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/1:281/prometheus/api/v1/import/native":
-Total: 63.89 MiB ↘ Speed: 1.90 MiB p/s
-Total: 63.89 MiB ↙ Speed: 2.29 MiB p/s
-2022/12/05 21:23:42 Initing export pipe from "http://localhost:8481/select/812891298:1/prometheus/api/v1/export/native" with filters:
-        filter: match[]={__name__!=""}
-Initing import process to "http://172.17.0.3:8480/insert/812891298:1/prometheus/api/v1/import/native":
-Total: 63.84 MiB ↖ Speed: 1.99 MiB p/s
-Total: 63.84 MiB ↗ Speed: 2.26 MiB p/s
-2022/12/05 21:24:10 Total time: 4m4.1466565s
+2023/02/28 10:41:42 Discovering tenants...
+2023/02/28 10:41:42 The following tenants were discovered: [0:0 1:0 2:0 3:0 4:0]
+2023/02/28 10:41:42 Initing import process from "http://127.0.0.1:8481/select/0:0/prometheus/api/v1/export/native" to "http://127.0.0.1:8480/insert/0:0/prometheus/api/v1/import/native" with filter
+        filter: match[]={__name__="vm_app_uptime_seconds"}
+        start: 2023-02-01T00:00:00Z for tenant 0:0
+2023/02/28 10:41:42 Exploring metrics...
+2023/02/28 10:41:42 Found 1 metrics to import
+2023/02/28 10:41:42 Selected time range will be split into 28 ranges according to "day" step.
+Requests to make for tenant 0:0: 28 / 28 [████████████████████████████████████████████████████████████] 100.00%
+2023/02/28 10:41:45 Initing import process from "http://127.0.0.1:8481/select/1:0/prometheus/api/v1/export/native" to "http://127.0.0.1:8480/insert/1:0/prometheus/api/v1/import/native" with filter
+        filter: match[]={__name__="vm_app_uptime_seconds"}
+        start: 2023-02-01T00:00:00Z for tenant 1:0
+2023/02/28 10:41:45 Exploring metrics...
+2023/02/28 10:41:45 Found 1 metrics to import
+2023/02/28 10:41:45 Selected time range will be split into 28 ranges according to "day" step. Requests to make: 28
+Requests to make for tenant 1:0: 28 / 28 [████████████████████████████████████████████████████████████] 100.00%
+...
+2023/02/28 10:42:49 Import finished!
+2023/02/28 10:42:49 VictoriaMetrics importer stats:
+    time spent while importing: 1m6.714210417s;
+    total bytes: 39.7 MB;
+    bytes/s: 594.4 kB;
+    requests: 140;
+    requests retries: 0;
+2023/02/28 10:42:49 Total time: 1m7.147971417s
```

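
As a quick sanity check before running the migration above, the same `/admin/tenants` endpoint that `vmctl` queries can be called directly on vmselect. A minimal sketch, reusing the source address from the example; the JSON response shown is illustrative:

```console
curl http://127.0.0.1:8481/admin/tenants
# {"status":"success","data":["0:0","1:0","2:0","3:0","4:0"]}
```

If the tenant list looks wrong here, it will be wrong for `vmctl` as well, so this is a cheap way to catch addressing mistakes before starting a long migration.
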
## Verifying exported blocks from VictoriaMetrics

@@ -976,6 +952,7 @@ a sign of network issues or VM being overloaded. See the logs during import for
By default `vmctl` waits for confirmation from the user before starting the import. If this is unwanted
behavior and no user interaction is required, pass the `-s` flag to enable "silence" mode:

+See below the example of `vm-native` migration process:
```
  -s Whether to run in silent mode. If set to true no confirmation prompts will appear. (default: false)
```
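
As a hedged sketch of combining `-s` with the cluster-to-cluster example above (the addresses and the filter are illustrative, and `-s` is passed as a global `vmctl` flag before the `vm-native` command):

```console
./vmctl -s vm-native \
  --vm-native-src-addr=http://127.0.0.1:8481/ \
  --vm-native-dst-addr=http://127.0.0.1:8480/ \
  --vm-native-filter-match='{__name__="vm_app_uptime_seconds"}' \
  --vm-native-filter-time-start='2023-02-01T00:00:00Z' \
  --vm-native-step-interval=day \
  --vm-intercluster
```

With `-s` set, the run proceeds straight to the export/import phase without the confirmation prompt, which is what unattended jobs such as cron tasks typically need.
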
@@ -1164,14 +1164,6 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isFinal
	if err != nil {
		return fmt.Errorf("cannot atomically register the created part: %w", err)
	}
-	tb.swapSrcWithDstParts(pws, pwNew, dstPartType)
-
-	d := time.Since(startTime)
-	if d <= 30*time.Second {
-		return nil
-	}
-
-	// Log stats for long merges.
	dstItemsCount := uint64(0)
	dstBlocksCount := uint64(0)
	dstSize := uint64(0)
@@ -1183,6 +1175,15 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isFinal
		dstSize = pDst.size
		dstPartPath = pDst.path
	}
+
+	tb.swapSrcWithDstParts(pws, pwNew, dstPartType)
+
+	d := time.Since(startTime)
+	if d <= 30*time.Second {
+		return nil
+	}
+
+	// Log stats for long merges.
	durationSecs := d.Seconds()
	itemsPerSec := int(float64(srcItemsCount) / durationSecs)
	logger.Infof("merged (%d parts, %d items, %d blocks, %d bytes) into (1 part, %d items, %d blocks, %d bytes) in %.3f seconds at %d items/sec to %q",
@@ -78,6 +78,8 @@ type TCPListener struct {
	connMetrics
}

+var proxyProtocolReadErrorLogger = logger.WithThrottler("proxyProtocolReadError", 5*time.Second)
+
// Accept accepts connections from the addr passed to NewTCPListener.
func (ln *TCPListener) Accept() (net.Conn, error) {
	for {
@@ -94,10 +96,13 @@ func (ln *TCPListener) Accept() (net.Conn, error) {
			return nil, err
		}
		if ln.useProxyProtocol {
-			conn, err = newProxyProtocolConn(conn)
+			pConn, err := newProxyProtocolConn(conn)
			if err != nil {
-				return nil, err
+				proxyProtocolReadErrorLogger.Errorf("cannot read proxy proto conn for TCP addr %q: %s", ln.Addr(), err)
+				_ = conn.Close()
+				continue
			}
+			conn = pConn
		}
		ln.conns.Inc()
		sc := &statConn{
@@ -33,7 +33,7 @@ var (
		"Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets")
	streamParse = flag.Bool("promscrape.streamParse", false, "Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful "+
		"for reducing memory usage when millions of metrics are exposed per each scrape target. "+
-		"It is posible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control")
+		"It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control")
)

type client struct {
@@ -260,7 +260,7 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
	swapResponseBodies := len(dst) == 0
	if swapResponseBodies {
		// An optimization: write response directly to dst.
-		// This should reduce memory uage when scraping big targets.
+		// This should reduce memory usage when scraping big targets.
		dst = resp.SwapBody(dst)
	}
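
For context on the `promscrape.streamParse` flag touched above: stream parsing can be enabled globally via the command-line flag, or per scrape job with `stream_parse: true` as the help text notes. A minimal, illustrative vmagent invocation; the config path and remote-write URL are assumptions, not taken from this commit:

```console
./vmagent -promscrape.config=/etc/vmagent/scrape.yml \
  -remoteWrite.url=http://localhost:8428/api/v1/write \
  -promscrape.streamParse
```
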
@@ -23,7 +23,7 @@ type UnmarshalWork interface {
// StartUnmarshalWorkers starts unmarshal workers.
func StartUnmarshalWorkers() {
	if unmarshalWorkCh != nil {
-		logger.Panicf("BUG: it looks like startUnmarshalWorkers() has been alread called without stopUnmarshalWorkers()")
+		logger.Panicf("BUG: it looks like startUnmarshalWorkers() has been already called without stopUnmarshalWorkers()")
	}
	gomaxprocs := cgroup.AvailableCPUs()
	unmarshalWorkCh = make(chan UnmarshalWork, gomaxprocs)
@@ -1376,14 +1376,7 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isFi
	if err != nil {
		return fmt.Errorf("cannot atomically register the created part: %w", err)
	}
-	pt.swapSrcWithDstParts(pws, pwNew, dstPartType)
-
-	d := time.Since(startTime)
-	if d <= 30*time.Second {
-		return nil
-	}
-	// Log stats for long merges.

	dstRowsCount := uint64(0)
	dstBlocksCount := uint64(0)
	dstSize := uint64(0)
@@ -1395,6 +1388,15 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isFi
		dstSize = pDst.size
		dstPartPath = pDst.String()
	}
+
+	pt.swapSrcWithDstParts(pws, pwNew, dstPartType)
+
+	d := time.Since(startTime)
+	if d <= 30*time.Second {
+		return nil
+	}
+
+	// Log stats for long merges.
	durationSecs := d.Seconds()
	rowsPerSec := int(float64(srcRowsCount) / durationSecs)
	logger.Infof("merged (%d parts, %d rows, %d blocks, %d bytes) into (1 part, %d rows, %d blocks, %d bytes) in %.3f seconds at %d rows/sec to %q",