Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
.github/dependabot.yml (2 changes, vendored)

@@ -21,6 +21,6 @@ updates:
     schedule:
       interval: "daily"
   - package-ecosystem: "npm"
-    directory: "/app/vmui"
+    directory: "/app/vmui/packages/vmui"
    schedule:
      interval: "weekly"
.github/workflows/main.yml (10 changes, vendored)

@@ -18,13 +18,13 @@ jobs:
         with:
           go-version: 1.16
         id: go
-      - name: Dependencies
-        run: |
-          go get -u golang.org/x/lint/golint
-          go get -u github.com/kisielk/errcheck
-          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.29.0
       - name: Code checkout
         uses: actions/checkout@master
+      - name: Dependencies
+        run: |
+          make install-golint
+          make install-errcheck
+          make install-golangci-lint
       - name: Build
         env:
           GO111MODULE: on
Makefile (8 changes)

@@ -88,6 +88,12 @@ release-snap:
 	snapcraft
 	snapcraft upload "victoriametrics_$(PKG_TAG)_multi.snap" --release beta,edge,candidate
 
+publish-release:
+	git checkout $(TAG) && $(MAKE) release publish && \
+	git checkout $(TAG)-cluster && $(MAKE) release publish && \
+	git checkout $(TAG)-enterprise && $(MAKE) release publish && \
+	git checkout $(TAG)-enterprise-cluster && $(MAKE) release publish
+
 release: \
 	release-victoria-metrics \
 	release-vmutils

@@ -261,7 +267,7 @@ golangci-lint: install-golangci-lint
 	golangci-lint run --exclude '(SA4003|SA1019|SA5011):' -D errcheck -D structcheck --timeout 2m
 
 install-golangci-lint:
-	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.42.1
+	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.43.0
 
 install-wwhrd:
 	which wwhrd || GO111MODULE=off go get github.com/frapposelli/wwhrd
README.md (18 changes)

@@ -418,7 +418,7 @@ The `/api/v1/export` endpoint should return the following response:
 Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
 
 * [Graphite API](#graphite-api-usage)
-* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics.
+* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
 * [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
 
 ## How to send data from OpenTSDB-compatible agents
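The `label_graphite_group` function referenced by the new bullet can be exercised through the HTTP query API; the following is a sketch only (metric names, host and port are illustrative assumptions, not taken from this diff):

```go
// Sketch: selecting series via the __graphite__ pseudo-label and extracting
// a Graphite name group with label_graphite_group over /api/v1/query.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// label_graphite_group(q, 1) replaces the metric name with group 1 of
	// foo.<group1>.bar, so results can be grouped by that token.
	expr := `label_graphite_group({__graphite__="foo.*.bar"}, 1)`
	resp, err := http.Get("http://localhost:8428/api/v1/query?query=" + url.QueryEscape(expr))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```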
@@ -561,8 +561,7 @@ visible to the given tenant. It is expected that the `extra_label` query arg is
 [Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
 
 VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
-For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster
-and it is easier to use when migrating from Graphite to VictoriaMetrics.
+For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function.
 
 
 ### Graphite Render API usage
@@ -1213,6 +1212,7 @@ Consider setting the following command-line flags:
 * `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
 * `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
 * `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
+* `-configAuthKey` for protecting `/config` endpoint, since it may contain sensitive information such as passwords.
 
 Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
 For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
@@ -1368,6 +1368,7 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
 
 * If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then use `{__graphite__="foo.*.baz"}` syntax for selecting such metrics.
   This expression is equivalent to `{__name__=~"foo[.][^.]*[.]baz"}`, but it works faster and it is easier to use when migrating from Graphite.
+  See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function, which allows extracting the given groups from Graphite metric names.
 
 * VictoriaMetrics ignores `NaN` values during data ingestion.
 
@@ -1514,7 +1515,7 @@ Report bugs and propose new features [here](https://github.com/VictoriaMetrics/V
 
 ## VictoriaMetrics Logo
 
-[Zip](VM_logo.zip) contains three folders with different image orientations (main color and inverted version).
+[Zip](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/VM_logo.zip) contains three folders with different image orientations (main color and inverted version).
 
 Files included in each folder:
 
@@ -1549,6 +1550,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
 ```
   -bigMergeConcurrency int
     	The maximum number of CPU cores to use for big merges. Default value is used if set to 0
+  -configAuthKey string
+    	Authorization key for accessing /config page. It must be passed via authKey query arg
   -csvTrimTimestamp duration
     	Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
   -datadog.maxInsertRequestSize size
@@ -1648,7 +1651,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -memory.allowedPercent float
     	Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
   -metricsAuthKey string
-    	Auth key for /metrics. It overrides httpAuth settings
+    	Auth key for /metrics. It must be passed via authKey query arg. It overrides httpAuth.* settings
   -opentsdbHTTPListenAddr string
     	TCP address to listen for OpentTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty
   -opentsdbListenAddr string
@@ -1661,7 +1664,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -opentsdbhttpTrimTimestamp duration
     	Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
   -pprofAuthKey string
-    	Auth key for /debug/pprof. It overrides httpAuth settings
+    	Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
   -precisionBits int
     	The number of precision bits to store per each value. Lower precision bits improves data compression at the cost of precision loss (default 64)
   -promscrape.cluster.memberNum int
@@ -1716,6 +1719,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     	Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s)
   -promscrape.maxDroppedTargets int
     	The maximum number of droppedTargets to show at /api/v1/targets page. Increase this value if your setup drops more scrape targets during relabeling and you need investigating labels for all the dropped targets. Note that the increased number of tracked dropped targets may result in increased memory usage (default 1000)
+  -promscrape.maxResponseHeadersSize size
+    	The maximum size of http response headers from Prometheus scrape targets
+    	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 4096)
   -promscrape.maxScrapeSize size
     	The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
     	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
@@ -452,10 +452,11 @@ It may be useful to perform `vmagent` rolling update without any scrape loss.
 
 * If `vmagent` uses too big amounts of memory, then the following options can help:
   * Disabling staleness tracking with `-promscrape.noStaleMarkers` option. See [these docs](#prometheus-staleness-markers).
-  * Enabling stream parsing mode. See [these docs](#stream-parsing-mode).
+  * Enabling stream parsing mode if `vmagent` scrapes targets with millions of metrics per target. See [these docs](#stream-parsing-mode).
   * Reducing the number of output queues with `-remoteWrite.queues` command-line option.
   * Reducing the amounts of RAM vmagent can use for in-memory buffering with `-memory.allowedPercent` or `-memory.allowedBytes` command-line option. Another option is to reduce memory limits in Docker and/or Kuberntes if `vmagent` runs under these systems.
   * Reducing the number of CPU cores vmagent can use by passing `GOMAXPROCS=N` environment variable to `vmagent`, where `N` is the desired limit on CPU cores. Another option is to reduce CPU limits in Docker or Kubernetes if `vmagent` runs under these systems.
+  * Passing `-promscrape.dropOriginalLabels` command-line option to `vmagent`, so it drops `"discoveredLabels"` and `"droppedTargets"` lists at `/api/v1/targets` page. This reduces memory usage when scraping big number of targets at the cost of reduced debuggability for improperly configured per-target relabeling.
 
 * When `vmagent` scrapes many unreliable targets, it can flood the error log with scrape errors. These errors can be suppressed
   by passing `-promscrape.suppressScrapeErrors` command-line flag to `vmagent`. The most recent scrape error per each target can be observed at `http://vmagent-host:8429/targets`
@@ -464,17 +465,9 @@ It may be useful to perform `vmagent` rolling update without any scrape loss.
 * The `/api/v1/targets` page could be useful for debugging relabeling process for scrape targets.
   This page contains original labels for targets dropped during relabeling (see "droppedTargets" section in the page output). By default the `-promscrape.maxDroppedTargets` targets are shown here. If your setup drops more targets during relabeling, then increase `-promscrape.maxDroppedTargets` command-line flag value to see all the dropped targets. Note that tracking each dropped target requires up to 10Kb of RAM. Therefore big values for `-promscrape.maxDroppedTargets` may result in increased memory usage if a big number of scrape targets are dropped during relabeling.
 
-* If `vmagent` scrapes a big number of targets then the `-promscrape.dropOriginalLabels` command-line option may be passed to `vmagent` in order to reduce memory usage.
-  This option drops `"discoveredLabels"` and `"droppedTargets"` lists at `/api/v1/targets` page, which may result in reduced debuggability for improperly configured per-target relabeling.
+* We recommend you increase `-remoteWrite.queues` if `vmagent_remotewrite_pending_data_bytes` metric exported at `http://vmagent-host:8429/metrics` page grows constantly. It is also recommended increasing `-remoteWrite.maxBlockSize` and `-remoteWrite.maxRowsPerBlock` command-line options in this case. This can improve data ingestion performance to the configured remote storage systems at the cost of higher memory usage.
 
-* If `vmagent` scrapes targets with millions of metrics per target (for example, when scraping [federation endpoints](https://prometheus.io/docs/prometheus/latest/federation/)),
-  we recommend enabling [stream parsing mode](#stream-parsing-mode) in order to reduce memory usage during scraping.
-
-* We recommend you increase `-remoteWrite.queues` if `vmagent_remotewrite_pending_data_bytes` metric exported at `http://vmagent-host:8429/metrics` page grows constantly.
-
-* If you see gaps in the data pushed by `vmagent` to remote storage when `-remoteWrite.maxDiskUsagePerURL` is set, try increasing `-remoteWrite.queues`.
-  Such gaps may appear because `vmagent` cannot keep up with sending the collected data to remote storage. Therefore it starts dropping the buffered data
-  if the on-disk buffer size exceeds `-remoteWrite.maxDiskUsagePerURL`.
+* If you see gaps in the data pushed by `vmagent` to remote storage when `-remoteWrite.maxDiskUsagePerURL` is set, try increasing `-remoteWrite.queues`. Such gaps may appear because `vmagent` cannot keep up with sending the collected data to remote storage. Therefore it starts dropping the buffered data if the on-disk buffer size exceeds `-remoteWrite.maxDiskUsagePerURL`.
 
 * `vmagent` drops data blocks if remote storage replies with `400 Bad Request` and `409 Conflict` HTTP responses. The number of dropped blocks can be monitored via `vmagent_remotewrite_packets_dropped_total` metric exported at [/metrics page](#monitoring).
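The pending-data advice above can be checked mechanically; a sketch (host, port and plain-text parsing are illustrative assumptions):

```go
// Sketch: scanning vmagent's /metrics page for the pending-data gauge mentioned above.
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	resp, err := http.Get("http://vmagent-host:8429/metrics") // host is illustrative
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		// A value that grows between scrapes suggests increasing -remoteWrite.queues
		// (and possibly -remoteWrite.maxBlockSize / -remoteWrite.maxRowsPerBlock).
		if strings.HasPrefix(sc.Text(), "vmagent_remotewrite_pending_data_bytes") {
			fmt.Println(sc.Text())
		}
	}
}
```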
@@ -707,6 +700,8 @@ vmagent collects metrics data via popular data ingestion protocols and routes th
 
 See the docs at https://docs.victoriametrics.com/vmagent.html .
 
+  -configAuthKey string
+    	Authorization key for accessing /config page. It must be passed via authKey query arg
   -csvTrimTimestamp duration
     	Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
   -datadog.maxInsertRequestSize size
@@ -790,7 +785,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
   -memory.allowedPercent float
     	Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
   -metricsAuthKey string
-    	Auth key for /metrics. It overrides httpAuth settings
+    	Auth key for /metrics. It must be passed via authKey query arg. It overrides httpAuth.* settings
   -opentsdbHTTPListenAddr string
     	TCP address to listen for OpentTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty
   -opentsdbListenAddr string
@@ -803,7 +798,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
   -opentsdbhttpTrimTimestamp duration
     	Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
   -pprofAuthKey string
-    	Auth key for /debug/pprof. It overrides httpAuth settings
+    	Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
   -promscrape.cluster.memberNum int
     	The number of number in the cluster of scrapers. It must be an unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster
   -promscrape.cluster.membersCount int
@@ -856,6 +851,9 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
     	Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s)
   -promscrape.maxDroppedTargets int
     	The maximum number of droppedTargets to show at /api/v1/targets page. Increase this value if your setup drops more scrape targets during relabeling and you need investigating labels for all the dropped targets. Note that the increased number of tracked dropped targets may result in increased memory usage (default 1000)
+  -promscrape.maxResponseHeadersSize size
+    	The maximum size of http response headers from Prometheus scrape targets
+    	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 4096)
   -promscrape.maxScrapeSize size
     	The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
     	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
@@ -895,7 +893,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
     	Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage
     	Supports an array of values separated by comma or specified via multiple flags.
   -remoteWrite.maxBlockSize size
-    	The maximum size in bytes of unpacked request to send to remote storage. It shouldn't exceed -maxInsertRequestSize from VictoriaMetrics
+    	The maximum block size to send to remote storage. Bigger blocks may improve performance at the cost of the increased memory usage. See also -remoteWrite.maxRowsPerBlock
     	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 8388608)
   -remoteWrite.maxDailySeries int
     	The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
@@ -904,6 +902,8 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
     	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
   -remoteWrite.maxHourlySeries int
     	The maximum number of unique series vmagent can send to remote storage systems during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
+  -remoteWrite.maxRowsPerBlock int
+    	The maximum number of samples to send in each block to remote storage. Higher number may improve performance at the cost of the increased memory usage. See also -remoteWrite.maxBlockSize (default 10000)
   -remoteWrite.multitenantURL array
     	Base path for multitenant remote storage URL to write data to. See https://docs.victoriametrics.com/vmagent.html#multitenancy for details. Example url: http://<vminsert>:8480 . Pass multiple -remoteWrite.multitenantURL flags in order to replicate data to multiple remote storage systems. See also -remoteWrite.url
     	Supports an array of values separated by comma or specified via multiple flags.
@@ -50,6 +50,7 @@ var (
 		"Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. "+
 		"Usually :4242 must be set. Doesn't work if empty")
 	opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpentTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty")
+	configAuthKey          = flag.String("configAuthKey", "", "Authorization key for accessing /config page. It must be passed via authKey query arg")
 	dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmagent. The following files are checked: "+
 		"-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . "+
 		"Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse")
@@ -262,6 +263,14 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 		promscrape.WriteHumanReadableTargetsStatus(w, r)
 		return true
 	case "/config":
+		if *configAuthKey != "" && r.FormValue("authKey") != *configAuthKey {
+			err := &httpserver.ErrorWithStatusCode{
+				Err:        fmt.Errorf("The provided authKey doesn't match -configAuthKey"),
+				StatusCode: http.StatusUnauthorized,
+			}
+			httpserver.Errorf(w, r, "%s", err)
+			return true
+		}
 		promscrapeConfigRequests.Inc()
 		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
 		promscrape.WriteConfigData(w)
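With the check above in place, clients must supply the key via the `authKey` query arg, as the flag description states; a minimal client-side sketch (key value and port are illustrative):

```go
// Sketch: fetching vmagent's /config page when -configAuthKey is set.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	authKey := "my-config-key" // the value passed to -configAuthKey (illustrative)
	u := "http://localhost:8429/config?authKey=" + url.QueryEscape(authKey)
	resp, err := http.Get(u)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusUnauthorized {
		fmt.Println("authKey doesn't match -configAuthKey")
		return
	}
	cfg, _ := io.ReadAll(resp.Body)
	fmt.Printf("got %d bytes of scrape config\n", len(cfg))
}
```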
@@ -105,11 +105,11 @@ func newHTTPClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persiste
 		if !strings.Contains(pURL, "://") {
 			logger.Fatalf("cannot parse -remoteWrite.proxyURL=%q: it must start with `http://`, `https://` or `socks5://`", pURL)
 		}
-		urlProxy, err := url.Parse(pURL)
+		pu, err := url.Parse(pURL)
 		if err != nil {
 			logger.Fatalf("cannot parse -remoteWrite.proxyURL=%q: %s", pURL, err)
 		}
-		tr.Proxy = http.ProxyURL(urlProxy)
+		tr.Proxy = http.ProxyURL(pu)
 	}
 	c := &client{
 		sanitizedURL: sanitizedURL,

@@ -165,7 +165,7 @@ func getAuthConfig(argIdx int) (*promauth.Config, error) {
 	if username != "" || password != "" || passwordFile != "" {
 		basicAuthCfg = &promauth.BasicAuthConfig{
 			Username:     username,
-			Password:     password,
+			Password:     promauth.NewSecret(password),
 			PasswordFile: passwordFile,
 		}
 	}

@@ -179,7 +179,7 @@ func getAuthConfig(argIdx int) (*promauth.Config, error) {
 	if clientSecretFile != "" || clientSecret != "" {
 		oauth2Cfg = &promauth.OAuth2Config{
 			ClientID:         oauth2ClientID.GetOptionalArg(argIdx),
-			ClientSecret:     clientSecret,
+			ClientSecret:     promauth.NewSecret(clientSecret),
 			ClientSecretFile: clientSecretFile,
 			TokenURL:         oauth2TokenURL.GetOptionalArg(argIdx),
 			Scopes:           strings.Split(oauth2Scopes.GetOptionalArg(argIdx), ";"),
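The switch to `promauth.NewSecret` wraps credentials in a type that keeps them out of formatted output. A self-contained mirror of the idea (an assumption about the wrapper's intent, not its actual implementation):

```go
// Sketch mirroring the idea behind promauth.NewSecret: the raw value is only
// reachable via an explicit getter, so %v/%s formatting cannot leak it into logs.
package main

import "fmt"

type secret struct{ value string }

func newSecret(s string) *secret { return &secret{value: s} }

func (s *secret) String() string { return "<secret>" } // what fmt prints
func (s *secret) Get() string    { return s.value }    // explicit access only

func main() {
	p := newSecret("hunter2")
	fmt.Println(p)       // <secret>
	fmt.Println(p.Get()) // hunter2
}
```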
@@ -20,16 +20,10 @@ import (
 var (
 	flushInterval = flag.Duration("remoteWrite.flushInterval", time.Second, "Interval for flushing the data to remote storage. "+
 		"This option takes effect only when less than 10K data points per second are pushed to -remoteWrite.url")
-	maxUnpackedBlockSize = flagutil.NewBytes("remoteWrite.maxBlockSize", 8*1024*1024, "The maximum size in bytes of unpacked request to send to remote storage. "+
-		"It shouldn't exceed -maxInsertRequestSize from VictoriaMetrics")
+	maxUnpackedBlockSize = flagutil.NewBytes("remoteWrite.maxBlockSize", 8*1024*1024, "The maximum block size to send to remote storage. Bigger blocks may improve performance at the cost of the increased memory usage. See also -remoteWrite.maxRowsPerBlock")
+	maxRowsPerBlock      = flag.Int("remoteWrite.maxRowsPerBlock", 10000, "The maximum number of samples to send in each block to remote storage. Higher number may improve performance at the cost of the increased memory usage. See also -remoteWrite.maxBlockSize")
 )
 
-// the maximum number of rows to send per each block.
-const maxRowsPerBlock = 10000
-
-// the maximum number of labels to send per each block.
-const maxLabelsPerBlock = 10 * maxRowsPerBlock
-
 type pendingSeries struct {
 	mu sync.Mutex
 	wr writeRequest

@@ -153,10 +147,13 @@ func (wr *writeRequest) adjustSampleValues() {
 
 func (wr *writeRequest) push(src []prompbmarshal.TimeSeries) {
 	tssDst := wr.tss
+	maxSamplesPerBlock := *maxRowsPerBlock
+	// Allow up to 10x of labels per each block on average.
+	maxLabelsPerBlock := 10 * maxSamplesPerBlock
 	for i := range src {
 		tssDst = append(tssDst, prompbmarshal.TimeSeries{})
 		wr.copyTimeSeries(&tssDst[len(tssDst)-1], &src[i])
-		if len(wr.samples) >= maxRowsPerBlock || len(wr.labels) >= maxLabelsPerBlock {
+		if len(wr.samples) >= maxSamplesPerBlock || len(wr.labels) >= maxLabelsPerBlock {
			wr.tss = tssDst
			wr.flush()
			tssDst = wr.tss
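With the constant turned into a flag, the per-block budgets in `push` derive from it; a worked sketch with the default value (illustrative arithmetic, not vmagent code):

```go
// Sketch: block budgets under the default -remoteWrite.maxRowsPerBlock=10000.
package main

import "fmt"

func main() {
	maxRowsPerBlock := 10000                     // flag default
	maxSamplesPerBlock := maxRowsPerBlock        // samples budget per block
	maxLabelsPerBlock := 10 * maxSamplesPerBlock // up to 10x labels on average
	fmt.Println(maxSamplesPerBlock, maxLabelsPerBlock) // 10000 100000
}
```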
@@ -171,12 +171,12 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
 		logger.Panicf("BUG: urls must be non-empty")
 	}
 
-	maxInmemoryBlocks := memory.Allowed() / len(urls) / maxRowsPerBlock / 100
-	if maxInmemoryBlocks > 400 {
+	maxInmemoryBlocks := memory.Allowed() / len(urls) / *maxRowsPerBlock / 100
+	if maxInmemoryBlocks / *queues > 100 {
 		// There is no much sense in keeping higher number of blocks in memory,
 		// since this means that the producer outperforms consumer and the queue
 		// will continue growing. It is better storing the queue to file.
-		maxInmemoryBlocks = 400
+		maxInmemoryBlocks = 100 * *queues
 	}
 	if maxInmemoryBlocks < 2 {
 		maxInmemoryBlocks = 2
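A worked sketch of how the new clamp behaves (the numbers are illustrative assumptions; `memory.Allowed()` depends on the host):

```go
// Sketch: the in-memory block limit with 2 GiB of allowed memory, one
// -remoteWrite.url, and defaults -remoteWrite.maxRowsPerBlock=10000, -remoteWrite.queues=2.
package main

import "fmt"

func main() {
	allowed := 2 << 30 // hypothetical memory.Allowed()
	urls, maxRowsPerBlock, queues := 1, 10000, 2

	maxInmemoryBlocks := allowed / urls / maxRowsPerBlock / 100 // 2147
	if maxInmemoryBlocks/queues > 100 {
		maxInmemoryBlocks = 100 * queues // the cap now scales with -remoteWrite.queues
	}
	if maxInmemoryBlocks < 2 {
		maxInmemoryBlocks = 2
	}
	fmt.Println(maxInmemoryBlocks) // 200
}
```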
@@ -274,6 +274,9 @@ func PushWithAuthToken(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 		rctx = getRelabelCtx()
 	}
 	tss := wr.Timeseries
+	maxSamplesPerBlock := *maxRowsPerBlock
+	// Allow up to 10x of labels per each block on average.
+	maxLabelsPerBlock := 10 * maxSamplesPerBlock
 	for len(tss) > 0 {
 		// Process big tss in smaller blocks in order to reduce the maximum memory usage
 		samplesCount := 0

@@ -283,7 +286,7 @@ func PushWithAuthToken(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 			samplesCount += len(tss[i].Samples)
 			labelsCount += len(tss[i].Labels)
 			i++
-			if samplesCount >= maxRowsPerBlock || labelsCount >= maxLabelsPerBlock {
+			if samplesCount >= maxSamplesPerBlock || labelsCount >= maxLabelsPerBlock {
 				break
 			}
 		}

@@ -301,11 +304,7 @@ func PushWithAuthToken(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 		}
 		sortLabelsIfNeeded(tssBlock)
 		tssBlock = limitSeriesCardinality(tssBlock)
-		if len(tssBlock) > 0 {
-			for _, rwctx := range rwctxs {
-				rwctx.Push(tssBlock)
-			}
-		}
+		pushBlockToRemoteStorages(rwctxs, tssBlock)
 		if rctx != nil {
 			rctx.reset()
 		}

@@ -315,6 +314,23 @@ func PushWithAuthToken(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 	}
 }
 
+func pushBlockToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmarshal.TimeSeries) {
+	if len(tssBlock) == 0 {
+		// Nothing to push
+		return
+	}
+	// Push block to remote storages in parallel in order to reduce the time needed for sending the data to multiple remote storage systems.
+	var wg sync.WaitGroup
+	for _, rwctx := range rwctxs {
+		wg.Add(1)
+		go func(rwctx *remoteWriteCtx) {
+			defer wg.Done()
+			rwctx.Push(tssBlock)
+		}(rwctx)
+	}
+	wg.Wait()
+}
+
 // sortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set.
 func sortLabelsIfNeeded(tss []prompbmarshal.TimeSeries) {
 	if !*sortLabels {
@@ -21,8 +21,8 @@ implementation and aims to be compatible with its syntax.
 * `vmalert` execute queries against remote datasource which has reliability risks because of network.
   It is recommended to configure alerts thresholds and rules expressions with understanding that network request
   may fail;
-* by default, rules execution is sequential within one group, but persisting of execution results to remote
-  storage is asynchronous. Hence, user shouldn't rely on recording rules chaining when result of previous
+* by default, rules execution is sequential within one group, but persistence of execution results to remote
+  storage is asynchronous. Hence, user shouldn't rely on chaining of recording rules when result of previous
   recording rule is reused in next one;
 
 ## QuickStart

@@ -55,7 +55,7 @@ Then configure `vmalert` accordingly:
     -external.label=replica=a # Multiple external labels may be set
 ```
 
-See the fill list of configuration flags in [configuration](#configuration) section.
+See the full list of configuration flags in [configuration](#configuration) section.
 
 If you run multiple `vmalert` services for the same datastore or AlertManager - do not forget
 to specify different `external.label` flags in order to define which `vmalert` generated rules or alerts.

@@ -85,7 +85,7 @@ name: <string>
 [ concurrency: <integer> | default = 1 ]
 
 # Optional type for expressions inside the rules. Supported values: "graphite" and "prometheus".
-# By default "prometheus" rule type is used.
+# By default "prometheus" type is used.
 [ type: <string> ]
 
 # Optional list of label filters applied to every rule's

@@ -113,14 +113,14 @@ expression and then act according to the Rule type.
 
 There are two types of Rules:
 * [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) -
-  Alerting rules allows to define alert conditions via `expr` field and to send notifications
+  Alerting rules allow to define alert conditions via `expr` field and to send notifications to
   [Alertmanager](https://github.com/prometheus/alertmanager) if execution result is not empty.
 * [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) -
-  Recording rules allows to define `expr` which result will be than backfilled to configured
+  Recording rules allow to define `expr` which result will be then backfilled to configured
   `-remoteWrite.url`. Recording rules are used to precompute frequently needed or computationally
   expensive expressions and save their result as a new set of time series.
 
-`vmalert` forbids to define duplicates - rules with the same combination of name, expression and labels
+`vmalert` forbids defining duplicates - rules with the same combination of name, expression and labels
 within one group.
 
 #### Alerting rules

@@ -130,12 +130,8 @@ The syntax for alerting rule is the following:
 # The name of the alert. Must be a valid metric name.
 alert: <string>
 
-# Optional type for the rule. Supported values: "graphite", "prometheus".
-# By default "prometheus" rule type is used.
-[ type: <string> ]
-
 # The expression to evaluate. The expression language depends on the type value.
-# By default PromQL/MetricsQL expression is used. If type="graphite", then the expression
+# By default PromQL/MetricsQL expression is used. If group.type="graphite", then the expression
 # must contain valid Graphite expression.
 expr: <string>

@@ -166,12 +162,8 @@ The syntax for recording rules is following:
 # The name of the time series to output to. Must be a valid metric name.
 record: <string>
 
-# Optional type for the rule. Supported values: "graphite", "prometheus".
-# By default "prometheus" rule type is used.
-[ type: <string> ]
-
 # The expression to evaluate. The expression language depends on the type value.
-# By default MetricsQL expression is used. If type="graphite", then the expression
+# By default MetricsQL expression is used. If group.type="graphite", then the expression
 # must contain valid Graphite expression.
 expr: <string>

@@ -190,18 +182,18 @@ the process alerts state will be lost. To avoid this situation, `vmalert` should
 * `-remoteWrite.url` - URL to VictoriaMetrics (Single) or vminsert (Cluster). `vmalert` will persist alerts state
   into the configured address in the form of time series named `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol.
   These are regular time series and may be queried from VM just as any other time series.
-  The state stored to the configured address on every rule evaluation.
+  The state is stored to the configured address on every rule evaluation.
 * `-remoteRead.url` - URL to VictoriaMetrics (Single) or vmselect (Cluster). `vmalert` will try to restore alerts state
   from configured address by querying time series with name `ALERTS_FOR_STATE`.
 
-Both flags are required for the proper state restoring. Restore process may fail if time series are missing
+Both flags are required for proper state restoring. Restore process may fail if time series are missing
 in configured `-remoteRead.url`, weren't updated in the last `1h` (controlled by `-remoteRead.lookback`)
 or received state doesn't match current `vmalert` rules configuration.
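The restore path can also be reproduced by hand: `vmalert` looks up the `ALERTS_FOR_STATE` series via `-remoteRead.url`. A sketch of a roughly equivalent manual query (single-node URL assumed; `last_over_time(...[1h])` approximates the default `-remoteRead.lookback`):

```go
// Sketch: querying the state series vmalert restores alerts from.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	expr := `last_over_time(ALERTS_FOR_STATE[1h])`
	resp, err := http.Get("http://localhost:8428/api/v1/query?query=" + url.QueryEscape(expr))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```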
 
 ### Multitenancy
 
-There are the following approaches for alerting and recording rules across
+The following are the approaches for alerting and recording rules across
 [multiple tenants](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy):
 
 * To run a separate `vmalert` instance per each tenant.

@@ -234,12 +226,15 @@ If `-clusterMode` is enabled, then `-datasource.url`, `-remoteRead.url` and `-re
 contain only the hostname without tenant id. For example: `-datasource.url=http://vmselect:8481`.
 `vmalert` automatically adds the specified tenant to urls per each recording rule in this case.
 
+If `-clusterMode` is enabled and the `tenant` in a particular group is missing, then the tenant value
+is obtained from `-defaultTenant.prometheus` or `-defaultTenant.graphite` depending on the `type` of the group.
+
 The enterprise version of vmalert is available in `vmutils-*-enterprise.tar.gz` files
 at [release page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) and in `*-enterprise`
 tags at [Docker Hub](https://hub.docker.com/r/victoriametrics/vmalert/tags).
 
-### WEB
+### Web
 
 `vmalert` runs a web-server (`-httpListenAddr`) for serving metrics and alerts endpoints:
 * `http://<vmalert-addr>` - UI;

@@ -262,7 +257,7 @@ to set `-datasource.appendTypePrefix` flag to `true`, so vmalert can adjust URL
 ## Rules backfilling
 
 vmalert supports alerting and recording rules backfilling (aka `replay`). In replay mode vmalert
-can read the same rules configuration as normally, evaluate them on the given time range and backfill
+can read the same rules configuration as normal, evaluate them on the given time range and backfill
 results via remote write to the configured storage. vmalert supports any PromQL/MetricsQL compatible
 data source for backfilling.

@@ -308,8 +303,8 @@ max range per request: 8h20m0s
 In `replay` mode all groups are executed sequentially one-by-one. Rules within the group are
 executed sequentially as well (`concurrency` setting is ignored). Vmalert sends rule's expression
 to [/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries) endpoint
-of the configured `-datasource.url`. Returned data then processed according to the rule type and
-backfilled to `-remoteWrite.url` via [Remote Write protocol](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations).
+of the configured `-datasource.url`. Returned data is then processed according to the rule type and
+backfilled to `-remoteWrite.url` via [remote Write protocol](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations).
 Vmalert respects `evaluationInterval` value set by flag or per-group during the replay.
 Vmalert automatically disables caching on VictoriaMetrics side by sending `nocache=1` param. It allows
 to prevent cache pollution and unwanted time range boundaries adjustment during backfilling.
@@ -58,7 +58,7 @@ type alertingRuleMetrics struct {
 
 func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule) *AlertingRule {
 	ar := &AlertingRule{
-		Type:   cfg.Type,
+		Type:   group.Type,
 		RuleID: cfg.ID,
 		Name:   cfg.Alert,
 		Expr:   cfg.Expr,

@@ -69,7 +69,7 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
 		GroupName:    group.Name,
 		EvalInterval: group.Interval,
 		q: qb.BuildWithParams(datasource.QuerierParams{
-			DataSourceType:     &cfg.Type,
+			DataSourceType:     &group.Type,
 			EvaluationInterval: group.Interval,
 			ExtraLabels:        group.ExtraFilterLabels,
 		}),
@@ -56,14 +56,6 @@ func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if g.Type.Get() == "" {
 		g.Type.Set(datasource.NewPrometheusType())
 	}
-	// update rules with empty type.
-	for i, r := range g.Rules {
-		if r.Type.Get() == "" {
-			r.Type.Set(g.Type)
-			r.ID = HashRule(r)
-			g.Rules[i] = r
-		}
-	}
 
 	h := md5.New()
 	h.Write(b)

@@ -76,9 +68,6 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
 	if g.Name == "" {
 		return fmt.Errorf("group name must be set")
 	}
-	if len(g.Rules) == 0 {
-		return fmt.Errorf("group %q can't contain no rules", g.Name)
-	}
 
 	uniqueRules := map[uint64]struct{}{}
 	for _, r := range g.Rules {

@@ -97,9 +86,6 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
 		// its needed only for tests.
 		// because correct types must be inherited after unmarshalling.
 		exprValidator := g.Type.ValidateExpr
-		if r.Type.Get() != "" {
-			exprValidator = r.Type.ValidateExpr
-		}
 		if err := exprValidator(r.Expr); err != nil {
 			return fmt.Errorf("invalid expression for rule %q.%q: %w", g.Name, ruleName, err)
 		}

@@ -120,7 +106,6 @@
 // recording rule or alerting rule.
 type Rule struct {
 	ID     uint64
-	Type   datasource.Type `yaml:"type,omitempty"`
 	Record string          `yaml:"record,omitempty"`
 	Alert  string          `yaml:"alert,omitempty"`
 	Expr   string          `yaml:"expr"`

@@ -162,7 +147,6 @@ func HashRule(r Rule) uint64 {
 		h.Write([]byte("alerting"))
 		h.Write([]byte(r.Alert))
 	}
-	h.Write([]byte(r.Type.Get()))
 	kv := sortMap(r.Labels)
 	for _, i := range kv {
 		h.Write([]byte(i.key))
@@ -95,10 +95,6 @@ func TestGroup_Validate(t *testing.T) {
 			group:  &Group{},
 			expErr: "group name must be set",
 		},
-		{
-			group:  &Group{Name: "test"},
-			expErr: "contain no rules",
-		},
 		{
 			group: &Group{Name: "test",
 				Rules: []Rule{

@@ -267,7 +263,6 @@ func TestGroup_Validate(t *testing.T) {
 				},
 				{
 					Expr: "sum(up == 0 ) by (host)",
-					Type: datasource.NewPrometheusType(),
 				},
 			},
 		},

@@ -283,7 +278,6 @@ func TestGroup_Validate(t *testing.T) {
 				},
 				{
 					Expr: "sumSeries(time('foo.bar',10))",
-					Type: datasource.NewPrometheusType(),
 				},
 			},
 		},
@@ -1,15 +0,0 @@
-groups:
-  - name: TestUpdateGroup
-    interval: 2s
-    concurrency: 2
-    type: prometheus
-    labels:
-      cluster: main
-    rules:
-      - alert: up
-        expr: up == 0
-        for: 30s
-      - alert: up graphite
-        expr: filterSeries(time('host.1',20),'>','0')
-        for: 30s
-        type: graphite
@@ -1,12 +0,0 @@
-groups:
-  - name: TestUpdateGroup
-    interval: 30s
-    type: graphite
-    rules:
-      - alert: up
-        expr: filterSeries(time('host.2',20),'>','0')
-        for: 30s
-      - alert: up graphite
-        expr: filterSeries(time('host.1',20),'>','0')
-        for: 30s
-        type: graphite
@@ -21,10 +21,3 @@ groups:
       annotations:
         summary: Too high connection number for {{$labels.instance}}
         description: "It is {{ $value }} connections for {{$labels.instance}}"
-  - alert: HostDown
-    type: graphite
-    expr: filterSeries(sumSeries(host.receiver.interface.up),'last','=', 0)
-    for: 3m
-    annotations:
-      summary: Too high connection number for {{$labels.instance}}
-      description: "It is {{ $value }} connections for {{$labels.instance}}"
app/vmalert/config/testdata/rules4-good.rules (new file, 8 additions, vendored)

@@ -0,0 +1,8 @@
+groups:
+  - name: TestEmptyRules
+    interval: 2s
+    concurrency: 2
+    rules:
+
+  - name: TestNoRules
+    type: prometheus
@@ -7,9 +7,6 @@ import (
 	"github.com/VictoriaMetrics/metricsql"
 )
 
-const graphiteType = "graphite"
-const prometheusType = "prometheus"
-
 // Type represents data source type
 type Type struct {
 	name string

@@ -17,12 +14,16 @@ type Type struct {
 
 // NewPrometheusType returns prometheus datasource type
 func NewPrometheusType() Type {
-	return Type{name: prometheusType}
+	return Type{
+		name: "prometheus",
+	}
 }
 
 // NewGraphiteType returns graphite datasource type
 func NewGraphiteType() Type {
-	return Type{name: graphiteType}
+	return Type{
+		name: "graphite",
+	}
 }
 
 // NewRawType returns datasource type from raw string

@@ -44,19 +45,19 @@ func (t *Type) Set(d Type) {
 // String implements String interface with default value.
 func (t Type) String() string {
 	if t.name == "" {
-		return prometheusType
+		return "prometheus"
 	}
 	return t.name
 }
 
 // ValidateExpr validates query expression with datasource ql.
 func (t *Type) ValidateExpr(expr string) error {
-	switch t.name {
-	case graphiteType:
+	switch t.String() {
+	case "graphite":
 		if _, err := graphiteql.Parse(expr); err != nil {
 			return fmt.Errorf("bad graphite expr: %q, err: %w", expr, err)
 		}
-	case "", prometheusType:
+	case "prometheus":
 		if _, err := metricsql.Parse(expr); err != nil {
 			return fmt.Errorf("bad prometheus expr: %q, err: %w", expr, err)
 		}

@@ -72,12 +73,13 @@ func (t *Type) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err := unmarshal(&s); err != nil {
 		return err
 	}
+	if s == "" {
+		s = "prometheus"
+	}
 	switch s {
-	case "":
-		s = prometheusType
-	case graphiteType, prometheusType:
+	case "graphite", "prometheus":
 	default:
-		return fmt.Errorf("unknown datasource type=%q, want %q or %q", s, prometheusType, graphiteType)
+		return fmt.Errorf("unknown datasource type=%q, want %q or %q", s, "prometheus", "graphite")
 	}
 	t.name = s
 	return nil
@@ -80,10 +80,10 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
 	}
 
 	ts := time.Now()
-	switch s.dataSourceType.name {
-	case "", prometheusType:
+	switch s.dataSourceType.String() {
+	case "prometheus":
 		s.setPrometheusInstantReqParams(req, query, ts)
-	case graphiteType:
+	case "graphite":
 		s.setGraphiteReqParams(req, query, ts)
 	default:
 		return nil, fmt.Errorf("engine not found: %q", s.dataSourceType.name)

@@ -98,7 +98,7 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
 	}()
 
 	parseFn := parsePrometheusResponse
-	if s.dataSourceType.name != prometheusType {
+	if s.dataSourceType.name != "prometheus" {
 		parseFn = parseGraphiteResponse
 	}
 	return parseFn(req, resp)

@@ -108,7 +108,7 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
 // For Prometheus type see https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
 // Graphite type isn't supported.
 func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end time.Time) ([]Metric, error) {
-	if s.dataSourceType.name != prometheusType {
+	if s.dataSourceType.name != "prometheus" {
 		return nil, fmt.Errorf("%q is not supported for QueryRange", s.dataSourceType.name)
 	}
 	req, err := s.newRequestPOST()
@@ -20,7 +20,7 @@ var (
 	basicAuthPass = "bar"
 	baCfg         = &promauth.BasicAuthConfig{
 		Username: basicAuthName,
-		Password: basicAuthPass,
+		Password: promauth.NewSecret(basicAuthPass),
 	}
 	query       = "vm_rows"
 	queryRender = "constantLine(10)"

@@ -501,14 +501,14 @@ func TestRequestParams(t *testing.T) {
 			if err != nil {
 				t.Fatalf("unexpected error: %s", err)
 			}
-			switch tc.vm.dataSourceType.name {
-			case "", prometheusType:
+			switch tc.vm.dataSourceType.String() {
+			case "prometheus":
 				if tc.queryRange {
 					tc.vm.setPrometheusRangeReqParams(req, query, timestamp, timestamp)
 				} else {
 					tc.vm.setPrometheusInstantReqParams(req, query, timestamp)
 				}
-			case graphiteType:
+			case "graphite":
 				tc.vm.setGraphiteReqParams(req, query, timestamp)
 			}
 			tc.checkFn(t, req)
@@ -226,7 +226,7 @@ func (g *Group) start(ctx context.Context, nts []notifier.Notifier, rw *remotewr
 
 	// Spread group rules evaluation over time in order to reduce load on VictoriaMetrics.
 	if !skipRandSleepOnGroupStart {
-		randSleep := uint64(float64(g.Interval) * (float64(uint32(g.ID())) / (1 << 32)))
+		randSleep := uint64(float64(g.Interval) * (float64(g.ID()) / (1 << 64)))
 		sleepOffset := uint64(time.Now().UnixNano()) % uint64(g.Interval)
 		if randSleep < sleepOffset {
 			randSleep += uint64(g.Interval)
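The fix divides the full 64-bit group ID by 2^64 instead of truncating it to 32 bits first, so the start offset stays an even fraction of the interval. A standalone sketch (the FNV hash below merely stands in for `g.ID()`):

```go
// Sketch: start-time jitter after the fix — float64(id)/(1<<64) lies in [0, 1).
package main

import (
	"fmt"
	"hash/fnv"
	"time"
)

func startJitter(id uint64, interval time.Duration) time.Duration {
	frac := float64(id) / (1 << 64) // full 64-bit hash, no uint32 truncation
	return time.Duration(float64(interval) * frac)
}

func main() {
	h := fnv.New64a()
	h.Write([]byte("TestUpdateGroup")) // stand-in for the group's real ID hash
	fmt.Println(startJitter(h.Sum64(), 30*time.Second))
}
```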
@ -283,14 +283,15 @@ func (g *Group) start(ctx context.Context, nts []notifier.Notifier, rw *remotewr
|
|||
case <-t.C:
|
||||
g.metrics.iterationTotal.Inc()
|
||||
iterationStart := time.Now()
|
||||
resolveDuration := getResolveDuration(g.Interval)
|
||||
errs := e.execConcurrently(ctx, g.Rules, g.Concurrency, resolveDuration)
|
||||
for err := range errs {
|
||||
if err != nil {
|
||||
logger.Errorf("group %q: %s", g.Name, err)
|
||||
if len(g.Rules) > 0 {
|
||||
resolveDuration := getResolveDuration(g.Interval)
|
||||
errs := e.execConcurrently(ctx, g.Rules, g.Concurrency, resolveDuration)
|
||||
for err := range errs {
|
||||
if err != nil {
|
||||
logger.Errorf("group %q: %s", g.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
g.metrics.iterationDuration.UpdateDuration(iterationStart)
|
||||
}
|
||||
}
|
||||
|
|
@@ -8,7 +8,6 @@ import (
 	"time"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
 )

@@ -108,17 +107,6 @@ func TestUpdateWith(t *testing.T) {
 				{Record: "foo5"},
 			},
 		},
-		{
-			"update datasource type",
-			[]config.Rule{
-				{Alert: "foo1", Type: datasource.NewPrometheusType()},
-				{Alert: "foo3", Type: datasource.NewGraphiteType()},
-			},
-			[]config.Rule{
-				{Alert: "foo1", Type: datasource.NewGraphiteType()},
-				{Alert: "foo10", Type: datasource.NewPrometheusType()},
-			},
-		},
 	}
 
 	for _, tc := range testCases {
@@ -113,18 +113,6 @@ func TestManagerUpdate(t *testing.T) {
 			Name: "ExampleAlertAlwaysFiring",
 			Expr: "sum by(job) (up == 1)",
 		}
-		ExampleAlertGraphite = &AlertingRule{
-			Name: "up graphite",
-			Expr: "filterSeries(time('host.1',20),'>','0')",
-			Type: datasource.NewGraphiteType(),
-			For:  defaultEvalInterval,
-		}
-		ExampleAlertGraphite2 = &AlertingRule{
-			Name: "up",
-			Expr: "filterSeries(time('host.2',20),'>','0')",
-			Type: datasource.NewGraphiteType(),
-			For:  defaultEvalInterval,
-		}
 	)
 
 	testCases := []struct {

@@ -226,23 +214,6 @@ func TestManagerUpdate(t *testing.T) {
 				},
 			},
 		},
-		{
-			name:       "update prometheus to graphite type",
-			initPath:   "config/testdata/dir/rules-update0-good.rules",
-			updatePath: "config/testdata/dir/rules-update1-good.rules",
-			want: []*Group{
-				{
-					File:     "config/testdata/dir/rules-update1-good.rules",
-					Interval: defaultEvalInterval,
-					Type:     datasource.NewGraphiteType(),
-					Name:     "TestUpdateGroup",
-					Rules: []Rule{
-						ExampleAlertGraphite2,
-						ExampleAlertGraphite,
-					},
-				},
-			},
-		},
 	}
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
@@ -60,7 +60,7 @@ func (rr *RecordingRule) ID() uint64 {
 
 func newRecordingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule) *RecordingRule {
 	rr := &RecordingRule{
-		Type:   cfg.Type,
+		Type:   group.Type,
 		RuleID: cfg.ID,
 		Name:   cfg.Record,
 		Expr:   cfg.Expr,

@@ -68,7 +68,7 @@ func newRecordingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rul
 		GroupID: group.ID(),
 		metrics: &recordingRuleMetrics{},
 		q: qb.BuildWithParams(datasource.QuerierParams{
-			DataSourceType:     &cfg.Type,
+			DataSourceType:     &group.Type,
 			EvaluationInterval: group.Interval,
 			ExtraLabels:        group.ExtraFilterLabels,
 		}),
@@ -10,7 +10,7 @@ func AuthConfig(baUser, baPass, baFile, bearerToken, bearerTokenFile string) (*p
 	if baUser != "" || baPass != "" || baFile != "" {
 		baCfg = &promauth.BasicAuthConfig{
 			Username:     baUser,
-			Password:     baPass,
+			Password:     promauth.NewSecret(baPass),
 			PasswordFile: baFile,
 		}
 	}
@@ -986,7 +986,7 @@ func badgeState(state string) string {
 func streambadgeRestored(qw422016 *qt422016.Writer) {
 //line app/vmalert/web.qtpl:303
 	qw422016.N().S(`
-<span class="badge bg-warning text-dark" title="Alert state was restored after reload from remote storage">restored</span>
+<span class="badge bg-warning text-dark" title="Alert state was restored after the service restart from remote storage">restored</span>
 `)
 //line app/vmalert/web.qtpl:305
 }
@@ -117,7 +117,8 @@ users:
   - src_paths: ["/api/v1/write"]
     url_prefix: "http://vminsert:8480/insert/42/prometheus"
     headers:
-    - "X-Scope-OrgID: abc"```
+    - "X-Scope-OrgID: abc"
+```
 
 The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
 This may be useful for passing secrets to the config.
|
@ -9,8 +9,8 @@ Supported storage systems for backups:
|
|||
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/docs/mimic/radosgw/s3/) or [Swift](https://www.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
|
||||
* Local filesystem. Example: `fs://</absolute/path/to/backup>`
|
||||
|
||||
`vmbackup` supports incremental and full backups. Incremental backups created automatically if the destination path already contains data from the previous backup.
|
||||
Full backups can be sped up with `-origin` pointing to already existing backup on the same remote storage. In this case `vmbackup` makes server-side copy for the shared
|
||||
`vmbackup` supports incremental and full backups. Incremental backups are created automatically if the destination path already contains data from the previous backup.
|
||||
Full backups can be sped up with `-origin` pointing to an already existing backup on the same remote storage. In this case `vmbackup` makes server-side copy for the shared
|
||||
data between the existing backup and new backup. It saves time and costs on data transfer.
|
||||
|
||||
Backup process can be interrupted at any time. It is automatically resumed from the interruption point when restarting `vmbackup` with the same args.
|
||||
|
@ -34,7 +34,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-
|
|||
```
|
||||
|
||||
* `</path/to/victoria-metrics-data>` - path to VictoriaMetrics data pointed by `-storageDataPath` command-line flag in single-node VictoriaMetrics or in cluster `vmstorage`.
|
||||
There is no need to stop VictoriaMetrics for creating backups, since they are performed from immutable [instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots).
|
||||
There is no need to stop VictoriaMetrics for creating backups since they are performed from immutable [instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots).
|
||||
* `<local-snapshot>` is the snapshot to back up. See [how to create instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots). `vmbackup` can create the snapshot on itself if `-snapshot.createURL` command-line flag is set to an url for creating snapshots. In this case `-snapshotName` flag isn't needed.
|
||||
* `<bucket>` is an already existing name for [GCS bucket](https://cloud.google.com/storage/docs/creating-buckets).
|
||||
* `<path/to/new/backup>` is the destination path where new backup will be placed.
|
||||
|
@ -54,7 +54,7 @@ It saves time and network bandwidth costs by performing server-side copy for the
|
|||
|
||||
### Incremental backups
|
||||
|
||||
Incremental backups performed if `-dst` points to an already existing backup. In this case only new data uploaded to remote storage.
|
||||
Incremental backups are performed if `-dst` points to an already existing backup. In this case only new data is uploaded to remote storage.
|
||||
It saves time and network bandwidth costs when working with big backups:
|
||||
|
||||
```
|
||||
|
@ -87,7 +87,7 @@ Where `<daily-snapshot>` is the snapshot for the last day `<YYYYMMDD>`.
|
|||
This apporach saves network bandwidth costs on hourly backups (since they are incremental) and allows recovering data from either the last hour (`latest` backup)
|
||||
or from any day (`YYYYMMDD` backups). Note that hourly backup shouldn't run when creating daily backup.
|
||||
|
||||
Do not forget removing old snapshots and backups when they are no longer needed for saving storage costs.
|
||||
Do not forget to remove old snapshots and backups when they are no longer needed in order to save storage costs.
|
||||
|
||||
See also [vmbackupmanager tool](https://docs.victoriametrics.com/vmbackupmanager.html) for automating smart backups.
|
||||
|
||||
|
@ -97,19 +97,19 @@ See also [vmbackupmanager tool](https://docs.victoriametrics.com/vmbackupmanager
|
|||
The backup algorithm is the following:
|
||||
|
||||
1. Collect information about files in the `-snapshotName`, in the `-dst` and in the `-origin`.
|
||||
2. Determine files in `-dst`, which are missing in `-snapshotName`, and delete them. These are usually small files, which are already merged into bigger files in the snapshot.
|
||||
3. Determine files from `-snapshotName`, which are missing in `-dst`. These are usually small new files and bigger merged files.
|
||||
4. Determine files from step 3, which exist in the `-origin`, and perform server-side copy of these files from `-origin` to `-dst`.
|
||||
2. Determine which files in `-dst` are missing in `-snapshotName`, and delete them. These are usually small files, which are already merged into bigger files in the snapshot.
|
||||
3. Determine which files in `-snapshotName` are missing in `-dst`. These are usually small new files and bigger merged files.
|
||||
4. Determine which files from step 3 exist in the `-origin`, and perform server-side copy of these files from `-origin` to `-dst`.
|
||||
These are usually the biggest and the oldest files, which are shared between backups.
|
||||
5. Upload the remaining files from step 3 from `-snapshotName` to `-dst`.
|
||||
|
||||
The algorithm splits source files into 100 MB chunks in the backup. Each chunk stored as a separate file in the backup.
|
||||
The algorithm splits source files into 1 GiB chunks in the backup. Each chunk is stored as a separate file in the backup.
|
||||
Such splitting minimizes the amount of data to re-transfer after temporary errors.
|
||||
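For illustration, the five steps above can be condensed into a minimal Go sketch. The `fileSet` type and the `deleteFile`/`serverSideCopy`/`upload` callbacks are hypothetical stand-ins for the remote-storage operations, not the actual `vmbackup` API:

```go
// fileSet holds the file names collected in step 1 for one location.
type fileSet map[string]bool

// syncBackup sketches steps 2-5 of the backup algorithm. The three listings
// from step 1 are passed in; storage operations are supplied as callbacks.
func syncBackup(snapFiles, dstFiles, originFiles fileSet,
	deleteFile, serverSideCopy, upload func(name string) error) error {
	// Step 2: delete files from -dst which are missing in the snapshot.
	for name := range dstFiles {
		if !snapFiles[name] {
			if err := deleteFile(name); err != nil {
				return err
			}
		}
	}
	// Steps 3-5: handle snapshot files which are missing in -dst.
	for name := range snapFiles {
		if dstFiles[name] {
			continue // already present in the backup
		}
		if originFiles[name] {
			// Step 4: server-side copy for files shared with -origin.
			if err := serverSideCopy(name); err != nil {
				return err
			}
			continue
		}
		// Step 5: upload the remaining new files from the snapshot.
		if err := upload(name); err != nil {
			return err
		}
	}
	return nil
}
```

The server-side copies in step 4 are what make full backups with `-origin` cheap: the shared data never leaves the remote storage.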
|
||||
`vmbackup` relies on [instant snapshot](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) properties:
|
||||
|
||||
- All the files in the snapshot are immutable.
|
||||
- Old files periodically merged into new files.
|
||||
- Old files are periodically merged into new files.
|
||||
- Smaller files have a higher probability of being merged.
|
||||
- Consecutive snapshots share many identical files.
|
||||
|
||||
|
|
|
@ -98,11 +98,11 @@ The result on the GCS bucket
|
|||
|
||||
- The root folder
|
||||
|
||||

|
||||

|
||||
|
||||
- The latest folder
|
||||
|
||||

|
||||

|
||||
|
||||
## Backup Retention Policy
|
||||
|
||||
|
@ -117,7 +117,7 @@ Backup retention policy is controlled by:
|
|||
|
||||
Let’s assume we have a backup manager collecting daily backups for the past 10 days.
|
||||
|
||||

|
||||

|
||||
|
||||
|
||||
We enable the backup retention policy for the backup manager by using the following configuration:
|
||||
|
@ -143,4 +143,4 @@ info app/vmbackupmanager/retention.go:106 daily backups to delete [daily/2
|
|||
|
||||
The result on the GCS bucket shows only 3 daily backups:
|
||||
|
||||

|
||||

|
||||
|
|
Image files moved/renamed; sizes unchanged: 29 KiB, 25 KiB, 99 KiB, 64 KiB.
|
@ -42,6 +42,7 @@ var (
|
|||
"Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. "+
|
||||
"Usually :4242 must be set. Doesn't work if empty")
|
||||
opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpenTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty")
|
||||
configAuthKey = flag.String("configAuthKey", "", "Authorization key for accessing /config page. It must be passed via authKey query arg")
|
||||
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped")
|
||||
)
|
||||
|
||||
|
@ -197,6 +198,14 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
promscrape.WriteAPIV1Targets(w, state)
|
||||
return true
|
||||
case "/prometheus/config", "/config":
|
||||
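// Deny access to the scrape config unless the request carries the authKey
// matching the -configAuthKey command-line flag (when the flag is set).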
if *configAuthKey != "" && r.FormValue("authKey") != *configAuthKey {
|
||||
err := &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf("The provided authKey doesn't match -configAuthKey"),
|
||||
StatusCode: http.StatusUnauthorized,
|
||||
}
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
promscrapeConfigRequests.Inc()
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
promscrape.WriteConfigData(w)
|
||||
|
|
|
@ -32,6 +32,7 @@ var aggrFuncs = map[string]aggrFunc{
|
|||
// PromQL extension funcs
|
||||
"median": aggrFuncMedian,
|
||||
"limitk": aggrFuncLimitK,
|
||||
"limit_offset": aggrFuncLimitOffset,
|
||||
"distinct": newAggrFunc(aggrFuncDistinct),
|
||||
"sum2": newAggrFunc(aggrFuncSum2),
|
||||
"geomean": newAggrFunc(aggrFuncGeomean),
|
||||
|
@ -999,20 +1000,45 @@ func aggrFuncLimitK(afa *aggrFuncArg) ([]*timeseries, error) {
|
|||
if err := expectTransformArgsNum(args, 2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ks, err := getScalar(args[0], 0)
|
||||
limits, err := getScalar(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain limit arg: %w", err)
|
||||
}
|
||||
limit := 0
|
||||
if len(limits) > 0 {
|
||||
limit = int(limits[0])
|
||||
}
|
||||
afe := newLimitOffsetAggrFunc(limit, 0)
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, afa.ae.Limit, true)
|
||||
}
|
||||
|
||||
func aggrFuncLimitOffset(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 3); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
maxK := 0
|
||||
for _, kf := range ks {
|
||||
k := int(kf)
|
||||
if k > maxK {
|
||||
maxK = k
|
||||
}
|
||||
limit, err := getIntNumber(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain limit arg: %w", err)
|
||||
}
|
||||
afe := func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
|
||||
// Sort series by metricName in order to get consistent set of output series
|
||||
// across multiple calls to limitk() function.
|
||||
offset, err := getIntNumber(args[1], 1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain offset arg: %w", err)
|
||||
}
|
||||
afe := newLimitOffsetAggrFunc(limit, offset)
|
||||
return aggrFuncExt(afe, args[2], &afa.ae.Modifier, afa.ae.Limit, true)
|
||||
}
|
||||
|
||||
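// newLimitOffsetAggrFunc returns an aggregate function, which skips the first `offset`
// series and keeps at most `limit` of the remaining series, picking them in a stable
// hash-based order so that repeated queries select the same subset.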
func newLimitOffsetAggrFunc(limit, offset int) func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
|
||||
if offset < 0 {
|
||||
offset = 0
|
||||
}
|
||||
if limit < 0 {
|
||||
limit = 0
|
||||
}
|
||||
return func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
|
||||
// Sort series by metricName hash in order to get consistent set of output series
|
||||
// across multiple calls to limitk() and limit_offset() functions.
|
||||
// Sort series by hash in order to guarantee uniform selection across series.
|
||||
type hashSeries struct {
|
||||
h uint64
|
||||
|
@ -1033,21 +1059,15 @@ func aggrFuncLimitK(afa *aggrFuncArg) ([]*timeseries, error) {
|
|||
for i, hs := range hss {
|
||||
tss[i] = hs.ts
|
||||
}
|
||||
if len(tss) > maxK {
|
||||
tss = tss[:maxK]
|
||||
if offset > len(tss) {
|
||||
return nil
|
||||
}
|
||||
for i, kf := range ks {
|
||||
k := int(kf)
|
||||
if k < 0 {
|
||||
k = 0
|
||||
}
|
||||
for j := k; j < len(tss); j++ {
|
||||
tss[j].Values[i] = nan
|
||||
}
|
||||
tss = tss[offset:]
|
||||
if limit < len(tss) {
|
||||
tss = tss[:limit]
|
||||
}
|
||||
return tss
|
||||
}
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, afa.ae.Limit, true)
|
||||
}
|
||||
|
||||
func getHash(d *xxhash.Digest, mn *storage.MetricName) uint64 {
|
||||
|
|
|
@ -2012,6 +2012,62 @@ func TestExecSuccess(t *testing.T) {
|
|||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`label_graphite_group()`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort(label_graphite_group((
|
||||
alias(1, "foo.bar.baz"),
|
||||
alias(2, "abc"),
|
||||
label_set(alias(3, "a.xx.zz.asd"), "qwe", "rty"),
|
||||
), 1, 3))`
|
||||
r1 := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{1, 1, 1, 1, 1, 1},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r1.MetricName.MetricGroup = []byte("bar.")
|
||||
r2 := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{2, 2, 2, 2, 2, 2},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r2.MetricName.MetricGroup = []byte(".")
|
||||
r3 := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{3, 3, 3, 3, 3, 3},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r3.MetricName.MetricGroup = []byte("xx.asd")
|
||||
r3.MetricName.Tags = []storage.Tag{{
|
||||
Key: []byte("qwe"),
|
||||
Value: []byte("rty"),
|
||||
}}
|
||||
resultExpected := []netstorage.Result{r1, r2, r3}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`sum(label_graphite_group)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort(sum by (__name__) (
|
||||
label_graphite_group((
|
||||
alias(1, "foo.bar.baz"),
|
||||
alias(2, "x.y.z"),
|
||||
alias(3, "qe.bar.qqq"),
|
||||
), 1)
|
||||
))`
|
||||
r1 := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{2, 2, 2, 2, 2, 2},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r1.MetricName.MetricGroup = []byte("y")
|
||||
r2 := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{4, 4, 4, 4, 4, 4},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r2.MetricName.MetricGroup = []byte("bar")
|
||||
resultExpected := []netstorage.Result{r1, r2}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`two_timeseries`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort_desc(time() or label_set(2, "xx", "foo"))`
|
||||
|
@ -4974,6 +5030,17 @@ func TestExecSuccess(t *testing.T) {
|
|||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`duration_over_time`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `duration_over_time((time()<1200)[600s:10s], 20s)`
|
||||
r := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{590, 580, 380, 180, nan, nan},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`share_gt_over_time`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `share_gt_over_time(rand(0)[200s:10s], 0.7)`
|
||||
|
@ -5083,6 +5150,21 @@ func TestExecSuccess(t *testing.T) {
|
|||
resultExpected := []netstorage.Result{r1}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`limit_offset()`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `limit_offset(1, 0, (label_set(10, "foo", "bar"), label_set(time()/150, "xbaz", "sss")))`
|
||||
r1 := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{10, 10, 10, 10, 10, 10},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r1.MetricName.Tags = []storage.Tag{{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
}}
|
||||
resultExpected := []netstorage.Result{r1}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`limitk(10)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort(limitk(10, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss")))`
|
||||
|
@ -7276,6 +7358,7 @@ func TestExecError(t *testing.T) {
|
|||
f(`label_keep()`)
|
||||
f(`label_match()`)
|
||||
f(`label_mismatch()`)
|
||||
f(`label_graphite_group()`)
|
||||
f(`round()`)
|
||||
f(`round(1,2,3)`)
|
||||
f(`sgn()`)
|
||||
|
@ -7368,6 +7451,7 @@ func TestExecError(t *testing.T) {
|
|||
f(`prometheus_buckets()`)
|
||||
f(`buckets_limit()`)
|
||||
f(`buckets_limit(1)`)
|
||||
f(`duration_over_time()`)
|
||||
f(`share_le_over_time()`)
|
||||
f(`share_gt_over_time()`)
|
||||
f(`count_le_over_time()`)
|
||||
|
@ -7379,6 +7463,7 @@ func TestExecError(t *testing.T) {
|
|||
f(`bitmap_or()`)
|
||||
f(`bitmap_xor()`)
|
||||
f(`quantiles()`)
|
||||
f(`limit_offset()`)
|
||||
|
||||
// Invalid argument type
|
||||
f(`median_over_time({}, 2)`)
|
||||
|
@ -7391,6 +7476,8 @@ func TestExecError(t *testing.T) {
|
|||
f(`topk(label_set(2, "xx", "foo") or 1, 12)`)
|
||||
f(`topk_avg(label_set(2, "xx", "foo") or 1, 12)`)
|
||||
f(`limitk(label_set(2, "xx", "foo") or 1, 12)`)
|
||||
f(`limit_offset((alias(1,"foo"),alias(2,"bar")), 2, 10)`)
|
||||
f(`limit_offset(1, (alias(1,"foo"),alias(2,"bar")), 10)`)
|
||||
f(`round(1, 1 or label_set(2, "xx", "foo"))`)
|
||||
f(`histogram_quantile(1 or label_set(2, "xx", "foo"), 1)`)
|
||||
f(`label_set(1, 2, 3)`)
|
||||
|
|
|
@ -63,6 +63,7 @@ var rollupFuncs = map[string]newRollupFunc{
|
|||
"tmax_over_time": newRollupFuncOneArg(rollupTmax),
|
||||
"tfirst_over_time": newRollupFuncOneArg(rollupTfirst),
|
||||
"tlast_over_time": newRollupFuncOneArg(rollupTlast),
|
||||
"duration_over_time": newRollupDurationOverTime,
|
||||
"share_le_over_time": newRollupShareLE,
|
||||
"share_gt_over_time": newRollupShareGT,
|
||||
"count_le_over_time": newRollupCountLE,
|
||||
|
@ -879,6 +880,37 @@ func linearRegression(rfa *rollupFuncArg) (float64, float64) {
|
|||
return v, k
|
||||
}
|
||||
|
||||
func newRollupDurationOverTime(args []interface{}) (rollupFunc, error) {
|
||||
if err := expectRollupArgsNum(args, 2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dMaxs, err := getScalar(args[1], 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rf := func(rfa *rollupFuncArg) float64 {
|
||||
// There is no need to handle NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
timestamps := rfa.timestamps
|
||||
tPrev := timestamps[0]
|
||||
dSum := int64(0)
|
||||
dMax := int64(dMaxs[rfa.idx] * 1000)
|
||||
for _, t := range timestamps {
|
||||
d := t - tPrev
|
||||
if d <= dMax {
|
||||
dSum += d
|
||||
}
|
||||
tPrev = t
|
||||
}
|
||||
return float64(dSum / 1000)
|
||||
}
|
||||
return rf, nil
|
||||
}
|
||||
|
||||
func newRollupShareLE(args []interface{}) (rollupFunc, error) {
|
||||
return newRollupShareFilter(args, countFilterLE)
|
||||
}
|
||||
|
@ -1950,6 +1982,18 @@ func getScalar(arg interface{}, argNum int) ([]float64, error) {
|
|||
return ts[0].Values, nil
|
||||
}
|
||||
|
||||
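// getIntNumber extracts the scalar arg at argNum and truncates it to int.
// An empty scalar yields 0.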
func getIntNumber(arg interface{}, argNum int) (int, error) {
|
||||
v, err := getScalar(arg, argNum)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n := 0
|
||||
if len(v) > 0 {
|
||||
n = int(v[0])
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func getString(tss []*timeseries, argNum int) (string, error) {
|
||||
if len(tss) != 1 {
|
||||
return "", fmt.Errorf(`arg #%d must contain a single timeseries; got %d timeseries`, argNum+1, len(tss))
|
||||
|
|
|
@ -1167,8 +1167,11 @@ func testRowsEqual(t *testing.T, values []float64, timestamps []int64, valuesExp
|
|||
}
|
||||
continue
|
||||
}
|
||||
if math.Abs(v-vExpected) > 1e-15 {
|
||||
t.Fatalf("unexpected value at values[%d]; got %f; want %f\nvalues=\n%v\nvaluesExpected=\n%v",
|
||||
// Compare values with the reduced precision because of different precision errors
|
||||
// on different OS/architectures. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1738
|
||||
// and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1653
|
||||
if math.Abs(v-vExpected)/math.Abs(vExpected) > 1e-13 {
|
||||
t.Fatalf("unexpected value at values[%d]; got %v; want %v\nvalues=\n%v\nvaluesExpected=\n%v",
|
||||
i, v, vExpected, values, valuesExpected)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
|
@ -88,18 +89,20 @@ var transformFuncs = map[string]transformFunc{
|
|||
"year": newTransformFuncDateTime(transformYear),
|
||||
|
||||
// New funcs
|
||||
"label_set": transformLabelSet,
|
||||
"label_map": transformLabelMap,
|
||||
"label_uppercase": transformLabelUppercase,
|
||||
"label_lowercase": transformLabelLowercase,
|
||||
"label_del": transformLabelDel,
|
||||
"label_keep": transformLabelKeep,
|
||||
"label_copy": transformLabelCopy,
|
||||
"label_move": transformLabelMove,
|
||||
"label_transform": transformLabelTransform,
|
||||
"label_value": transformLabelValue,
|
||||
"label_match": transformLabelMatch,
|
||||
"label_mismatch": transformLabelMismatch,
|
||||
"label_set": transformLabelSet,
|
||||
"label_map": transformLabelMap,
|
||||
"label_uppercase": transformLabelUppercase,
|
||||
"label_lowercase": transformLabelLowercase,
|
||||
"label_del": transformLabelDel,
|
||||
"label_keep": transformLabelKeep,
|
||||
"label_copy": transformLabelCopy,
|
||||
"label_move": transformLabelMove,
|
||||
"label_transform": transformLabelTransform,
|
||||
"label_value": transformLabelValue,
|
||||
"label_match": transformLabelMatch,
|
||||
"label_mismatch": transformLabelMismatch,
|
||||
"label_graphite_group": transformLabelGraphiteGroup,
|
||||
|
||||
"union": transformUnion,
|
||||
"": transformUnion, // empty func is a synonym to union
|
||||
"keep_last_value": transformKeepLastValue,
|
||||
|
@ -356,7 +359,10 @@ func transformBucketsLimit(tfa *transformFuncArg) ([]*timeseries, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
limit := int(limits[0])
|
||||
limit := 0
|
||||
if len(limits) > 0 {
|
||||
limit = int(limits[0])
|
||||
}
|
||||
if limit <= 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -1182,10 +1188,10 @@ func transformRangeQuantile(tfa *transformFuncArg) ([]*timeseries, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(phis) == 0 {
|
||||
return nil, nil
|
||||
phi := float64(0)
|
||||
if len(phis) > 0 {
|
||||
phi = phis[0]
|
||||
}
|
||||
phi := phis[0]
|
||||
rvs := args[1]
|
||||
a := getFloat64s()
|
||||
values := a.A[:0]
|
||||
|
@ -1733,6 +1739,39 @@ func transformLabelMismatch(tfa *transformFuncArg) ([]*timeseries, error) {
|
|||
return rvs, nil
|
||||
}
|
||||
|
||||
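// transformLabelGraphiteGroup implements label_graphite_group(q, groupNum1, ..., groupNumN):
// it splits each metric name on dots and rebuilds the name from the selected groups.
// For example, it turns the name "foo.bar.baz" with group numbers (0, 2) into "foo.baz";
// an out-of-range group number contributes an empty group.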
func transformLabelGraphiteGroup(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
args := tfa.args
|
||||
if len(args) < 2 {
|
||||
return nil, fmt.Errorf("unexpected number of args: %d; want at least 2 args", len(args))
|
||||
}
|
||||
tss := args[0]
|
||||
groupArgs := args[1:]
|
||||
groupIDs := make([]int, len(groupArgs))
|
||||
for i, arg := range groupArgs {
|
||||
groupID, err := getIntNumber(arg, i+1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get group name from arg #%d: %w", i+1, err)
|
||||
}
|
||||
groupIDs[i] = groupID
|
||||
}
|
||||
for _, ts := range tss {
|
||||
groups := bytes.Split(ts.MetricName.MetricGroup, dotSeparator)
|
||||
groupName := ts.MetricName.MetricGroup[:0]
|
||||
for j, groupID := range groupIDs {
|
||||
if groupID >= 0 && groupID < len(groups) {
|
||||
groupName = append(groupName, groups[groupID]...)
|
||||
}
|
||||
if j < len(groupIDs)-1 {
|
||||
groupName = append(groupName, '.')
|
||||
}
|
||||
}
|
||||
ts.MetricName.MetricGroup = groupName
|
||||
}
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
var dotSeparator = []byte(".")
|
||||
|
||||
func transformLn(v float64) float64 {
|
||||
return math.Log(v)
|
||||
}
|
||||
|
@ -1972,7 +2011,9 @@ func newTransformRand(newRandFunc func(r *rand.Rand) func() float64) transformFu
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
seed = int64(tmp[0])
|
||||
if len(tmp) > 0 {
|
||||
seed = int64(tmp[0])
|
||||
}
|
||||
} else {
|
||||
seed = time.Now().UnixNano()
|
||||
}
|
||||
|
|
|
@ -1,19 +1,19 @@
|
|||
{
|
||||
"files": {
|
||||
"main.css": "./static/css/main.acc63211.chunk.css",
|
||||
"main.js": "./static/js/main.fe86f8ba.chunk.js",
|
||||
"runtime-main.js": "./static/js/runtime-main.b765a534.js",
|
||||
"static/css/2.a684aa27.chunk.css": "./static/css/2.a684aa27.chunk.css",
|
||||
"static/js/2.632b68e4.chunk.js": "./static/js/2.632b68e4.chunk.js",
|
||||
"static/js/3.daeccd9c.chunk.js": "./static/js/3.daeccd9c.chunk.js",
|
||||
"main.css": "./static/css/main.23671ff2.chunk.css",
|
||||
"main.js": "./static/js/main.3039190d.chunk.js",
|
||||
"runtime-main.js": "./static/js/runtime-main.3e786f5f.js",
|
||||
"static/css/2.81b2a0ac.chunk.css": "./static/css/2.81b2a0ac.chunk.css",
|
||||
"static/js/2.a835ef7b.chunk.js": "./static/js/2.a835ef7b.chunk.js",
|
||||
"static/js/3.b7e923a6.chunk.js": "./static/js/3.b7e923a6.chunk.js",
|
||||
"index.html": "./index.html",
|
||||
"static/js/2.632b68e4.chunk.js.LICENSE.txt": "./static/js/2.632b68e4.chunk.js.LICENSE.txt"
|
||||
"static/js/2.a835ef7b.chunk.js.LICENSE.txt": "./static/js/2.a835ef7b.chunk.js.LICENSE.txt"
|
||||
},
|
||||
"entrypoints": [
|
||||
"static/js/runtime-main.b765a534.js",
|
||||
"static/css/2.a684aa27.chunk.css",
|
||||
"static/js/2.632b68e4.chunk.js",
|
||||
"static/css/main.acc63211.chunk.css",
|
||||
"static/js/main.fe86f8ba.chunk.js"
|
||||
"static/js/runtime-main.3e786f5f.js",
|
||||
"static/css/2.81b2a0ac.chunk.css",
|
||||
"static/js/2.a835ef7b.chunk.js",
|
||||
"static/css/main.23671ff2.chunk.css",
|
||||
"static/js/main.3039190d.chunk.js"
|
||||
]
|
||||
}
|
|
@ -1 +1 @@
|
|||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/2.a684aa27.chunk.css" rel="stylesheet"><link href="./static/css/main.acc63211.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"daeccd9c"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([])</script><script src="./static/js/2.632b68e4.chunk.js"></script><script src="./static/js/main.fe86f8ba.chunk.js"></script></body></html>
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/2.81b2a0ac.chunk.css" rel="stylesheet"><link href="./static/css/main.23671ff2.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"b7e923a6"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([])</script><script src="./static/js/2.a835ef7b.chunk.js"></script><script src="./static/js/main.3039190d.chunk.js"></script></body></html>
|
1
app/vmselect/vmui/static/css/2.81b2a0ac.chunk.css
Normal file
|
@ -0,0 +1 @@
|
|||
.uplot,.uplot *,.uplot :after,.uplot :before{box-sizing:border-box}.uplot{font-family:system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";line-height:1.5;width:-webkit-min-content;width:min-content}.u-title{text-align:center;font-size:18px;font-weight:700}.u-wrap{position:relative;-webkit-user-select:none;-ms-user-select:none;user-select:none}.u-over,.u-under{position:absolute}.u-under{overflow:hidden}.uplot canvas{display:block;position:relative;width:100%;height:100%}.u-legend{font-size:14px;margin:auto;text-align:center}.u-inline{display:block}.u-inline *{display:inline-block}.u-inline tr{margin-right:16px}.u-legend th{font-weight:600}.u-legend th>*{vertical-align:middle;display:inline-block}.u-legend .u-marker{width:1em;height:1em;margin-right:4px;background-clip:padding-box!important}.u-inline.u-live th:after{content:":";vertical-align:middle}.u-inline:not(.u-live) .u-value{display:none}.u-series>*{padding:4px}.u-series th{cursor:pointer}.u-legend .u-off>*{opacity:.3}.u-select{background:rgba(0,0,0,.07)}.u-cursor-x,.u-cursor-y,.u-select{position:absolute;pointer-events:none}.u-cursor-x,.u-cursor-y{left:0;top:0;will-change:transform;z-index:100}.u-hz .u-cursor-x,.u-vt .u-cursor-y{height:100%;border-right:1px dashed #607d8b}.u-hz .u-cursor-y,.u-vt .u-cursor-x{width:100%;border-bottom:1px dashed #607d8b}.u-cursor-pt{position:absolute;top:0;left:0;border-radius:50%;border:0 solid;pointer-events:none;will-change:transform;z-index:100;background-clip:padding-box!important}.u-cursor-pt.u-off,.u-cursor-x.u-off,.u-cursor-y.u-off,.u-select.u-off{display:none}
|
|
@ -1 +0,0 @@
|
|||
.uplot,.uplot *,.uplot :after,.uplot :before{box-sizing:border-box}.uplot{font-family:system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";line-height:1.5;width:-webkit-min-content;width:-moz-min-content;width:min-content}.u-title{text-align:center;font-size:18px;font-weight:700}.u-wrap{position:relative;-webkit-user-select:none;-ms-user-select:none;user-select:none}.u-over,.u-under{position:absolute}.u-under{overflow:hidden}.uplot canvas{display:block;position:relative;width:100%;height:100%}.u-legend{font-size:14px;margin:auto;text-align:center}.u-inline{display:block}.u-inline *{display:inline-block}.u-inline tr{margin-right:16px}.u-legend th{font-weight:600}.u-legend th>*{vertical-align:middle;display:inline-block}.u-legend .u-marker{width:1em;height:1em;margin-right:4px;background-clip:padding-box!important}.u-inline.u-live th:after{content:":";vertical-align:middle}.u-inline:not(.u-live) .u-value{display:none}.u-series>*{padding:4px}.u-series th{cursor:pointer}.u-legend .u-off>*{opacity:.3}.u-select{background:rgba(0,0,0,.07)}.u-cursor-x,.u-cursor-y,.u-select{position:absolute;pointer-events:none}.u-cursor-x,.u-cursor-y{left:0;top:0;will-change:transform;z-index:100}.u-hz .u-cursor-x,.u-vt .u-cursor-y{height:100%;border-right:1px dashed #607d8b}.u-hz .u-cursor-y,.u-vt .u-cursor-x{width:100%;border-bottom:1px dashed #607d8b}.u-cursor-pt{position:absolute;top:0;left:0;border-radius:50%;border:0 solid;pointer-events:none;will-change:transform;z-index:100;background-clip:padding-box!important}.u-cursor-pt.u-off,.u-cursor-x.u-off,.u-cursor-y.u-off,.u-select.u-off{display:none}
|
1
app/vmselect/vmui/static/css/main.23671ff2.chunk.css
Normal file
|
@ -0,0 +1 @@
|
|||
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,"Courier New",monospace}.MuiAccordionSummary-content{margin:10px 0!important}.cm-activeLine{background-color:inherit!important}.cm-editor{border-radius:4px;border:1px solid #b9b9b9;font-size:10px}.one-line-scroll .cm-editor{height:24px}.cm-gutters{border-radius:4px 0 0 4px;height:100%}.multi-line-scroll .cm-content,.multi-line-scroll .cm-gutters{min-height:51px!important}.one-line-scroll .cm-content,.one-line-scroll .cm-gutters{min-height:auto}.u-tooltip{position:absolute;display:none;grid-gap:12px;max-width:300px;padding:8px;border-radius:4px;background:rgba(57,57,57,.9);color:#fff;font-size:10px;line-height:1.4em;font-weight:500;word-wrap:break-word;font-family:monospace;pointer-events:none;z-index:100}.u-tooltip-data{display:flex;flex-wrap:wrap;align-items:center;font-size:11px;line-height:150%}.u-tooltip-data__value{padding:4px;font-weight:700}.u-tooltip__info{display:grid;grid-gap:4px}.u-tooltip__marker{width:12px;height:12px;margin-right:4px}.legendWrapper{margin-top:20px}.legendItem{display:inline-grid;grid-template-columns:auto auto;grid-gap:4px;align-items:center;justify-content:start;padding:5px 10px;background-color:#fff;cursor:pointer;transition:.2s ease}.legendItemHide{text-decoration:line-through;opacity:.5}.legendItem:hover{background-color:rgba(0,0,0,.1)}.legendMarker{width:12px;height:12px;border-width:2px;border-style:solid;box-sizing:border-box;transition:.2s ease}.legendLabel{font-size:12px;font-weight:600}
|
|
@ -1 +0,0 @@
|
|||
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,"Courier New",monospace}.MuiAccordionSummary-content{margin:10px 0!important}.cm-activeLine{background-color:inherit!important}.cm-wrap{border-radius:4px;border:1px solid #b9b9b9;font-size:10px}.one-line-scroll .cm-wrap{height:24px}.cm-content,.cm-gutter{min-height:51px}.one-line-scroll .cm-content,.one-line-scroll .cm-gutter{min-height:auto}.uplot .u-legend{display:grid;align-items:center;justify-content:start;text-align:left;margin-top:25px}.uplot .u-legend .u-series{font-size:12px}.u-tooltip{position:absolute;display:none;grid-gap:12px;max-width:300px;padding:8px;border-radius:4px;background:rgba(57,57,57,.9);color:#fff;font-size:10px;line-height:1.4em;font-weight:500;word-wrap:break-word;font-family:monospace;pointer-events:none;z-index:100}.u-tooltip-data{display:flex;flex-wrap:wrap;align-items:center;font-size:11px}.u-tooltip__info{display:grid;grid-gap:4px}.u-tooltip__marker{width:12px;height:12px;margin-right:4px}
|
2
app/vmselect/vmui/static/js/2.a835ef7b.chunk.js
Normal file
|
@ -4,21 +4,6 @@ object-assign
|
|||
@license MIT
|
||||
*/
|
||||
|
||||
/*! *****************************************************************************
|
||||
Copyright (c) Microsoft Corporation.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THIS SOFTWARE.
|
||||
***************************************************************************** */
|
||||
|
||||
/*! @preserve
|
||||
* numeral.js
|
||||
* version : 2.0.6
|
||||
|
@ -63,7 +48,7 @@ PERFORMANCE OF THIS SOFTWARE.
|
|||
* @license MIT
|
||||
*/
|
||||
|
||||
/** @license React v0.20.1
|
||||
/** @license React v0.20.2
|
||||
* scheduler.production.min.js
|
||||
*
|
||||
* Copyright (c) Facebook, Inc. and its affiliates.
|
||||
|
@ -81,7 +66,7 @@ PERFORMANCE OF THIS SOFTWARE.
|
|||
* LICENSE file in the root directory of this source tree.
|
||||
*/
|
||||
|
||||
/** @license React v17.0.1
|
||||
/** @license React v17.0.2
|
||||
* react-dom.production.min.js
|
||||
*
|
||||
* Copyright (c) Facebook, Inc. and its affiliates.
|
||||
|
@ -90,7 +75,16 @@ PERFORMANCE OF THIS SOFTWARE.
|
|||
* LICENSE file in the root directory of this source tree.
|
||||
*/
|
||||
|
||||
/** @license React v17.0.1
|
||||
/** @license React v17.0.2
|
||||
* react-is.production.min.js
|
||||
*
|
||||
* Copyright (c) Facebook, Inc. and its affiliates.
|
||||
*
|
||||
* This source code is licensed under the MIT license found in the
|
||||
* LICENSE file in the root directory of this source tree.
|
||||
*/
|
||||
|
||||
/** @license React v17.0.2
|
||||
* react-jsx-runtime.production.min.js
|
||||
*
|
||||
* Copyright (c) Facebook, Inc. and its affiliates.
|
||||
|
@ -99,7 +93,7 @@ PERFORMANCE OF THIS SOFTWARE.
|
|||
* LICENSE file in the root directory of this source tree.
|
||||
*/
|
||||
|
||||
/** @license React v17.0.1
|
||||
/** @license React v17.0.2
|
||||
* react.production.min.js
|
||||
*
|
||||
* Copyright (c) Facebook, Inc. and its affiliates.
|
1
app/vmselect/vmui/static/js/3.b7e923a6.chunk.js
Normal file
|
@ -0,0 +1 @@
|
|||
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{288:function(e,t,n){"use strict";n.r(t),n.d(t,"getCLS",(function(){return y})),n.d(t,"getFCP",(function(){return g})),n.d(t,"getFID",(function(){return C})),n.d(t,"getLCP",(function(){return k})),n.d(t,"getTTFB",(function(){return D}));var i,r,a,o,u=function(e,t){return{name:e,value:void 0===t?-1:t,delta:0,entries:[],id:"v2-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)}},c=function(e,t){try{if(PerformanceObserver.supportedEntryTypes.includes(e)){if("first-input"===e&&!("PerformanceEventTiming"in self))return;var n=new PerformanceObserver((function(e){return e.getEntries().map(t)}));return n.observe({type:e,buffered:!0}),n}}catch(e){}},f=function(e,t){var n=function n(i){"pagehide"!==i.type&&"hidden"!==document.visibilityState||(e(i),t&&(removeEventListener("visibilitychange",n,!0),removeEventListener("pagehide",n,!0)))};addEventListener("visibilitychange",n,!0),addEventListener("pagehide",n,!0)},s=function(e){addEventListener("pageshow",(function(t){t.persisted&&e(t)}),!0)},m=function(e,t,n){var i;return function(r){t.value>=0&&(r||n)&&(t.delta=t.value-(i||0),(t.delta||void 0===i)&&(i=t.value,e(t)))}},v=-1,p=function(){return"hidden"===document.visibilityState?0:1/0},d=function(){f((function(e){var t=e.timeStamp;v=t}),!0)},l=function(){return v<0&&(v=p(),d(),s((function(){setTimeout((function(){v=p(),d()}),0)}))),{get firstHiddenTime(){return v}}},g=function(e,t){var n,i=l(),r=u("FCP"),a=function(e){"first-contentful-paint"===e.name&&(f&&f.disconnect(),e.startTime<i.firstHiddenTime&&(r.value=e.startTime,r.entries.push(e),n(!0)))},o=window.performance&&performance.getEntriesByName&&performance.getEntriesByName("first-contentful-paint")[0],f=o?null:c("paint",a);(o||f)&&(n=m(e,r,t),o&&a(o),s((function(i){r=u("FCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,n(!0)}))}))})))},h=!1,T=-1,y=function(e,t){h||(g((function(e){T=e.value})),h=!0);var n,i=function(t){T>-1&&e(t)},r=u("CLS",0),a=0,o=[],v=function(e){if(!e.hadRecentInput){var t=o[0],i=o[o.length-1];a&&e.startTime-i.startTime<1e3&&e.startTime-t.startTime<5e3?(a+=e.value,o.push(e)):(a=e.value,o=[e]),a>r.value&&(r.value=a,r.entries=o,n())}},p=c("layout-shift",v);p&&(n=m(i,r,t),f((function(){p.takeRecords().map(v),n(!0)})),s((function(){a=0,T=-1,r=u("CLS",0),n=m(i,r,t)})))},E={passive:!0,capture:!0},w=new Date,L=function(e,t){i||(i=t,r=e,a=new Date,F(removeEventListener),S())},S=function(){if(r>=0&&r<a-w){var e={entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+r};o.forEach((function(t){t(e)})),o=[]}},b=function(e){if(e.cancelable){var t=(e.timeStamp>1e12?new Date:performance.now())-e.timeStamp;"pointerdown"==e.type?function(e,t){var n=function(){L(e,t),r()},i=function(){r()},r=function(){removeEventListener("pointerup",n,E),removeEventListener("pointercancel",i,E)};addEventListener("pointerup",n,E),addEventListener("pointercancel",i,E)}(t,e):L(t,e)}},F=function(e){["mousedown","keydown","touchstart","pointerdown"].forEach((function(t){return e(t,b,E)}))},C=function(e,t){var n,a=l(),v=u("FID"),p=function(e){e.startTime<a.firstHiddenTime&&(v.value=e.processingStart-e.startTime,v.entries.push(e),n(!0))},d=c("first-input",p);n=m(e,v,t),d&&f((function(){d.takeRecords().map(p),d.disconnect()}),!0),d&&s((function(){var 
a;v=u("FID"),n=m(e,v,t),o=[],r=-1,i=null,F(addEventListener),a=p,o.push(a),S()}))},P={},k=function(e,t){var n,i=l(),r=u("LCP"),a=function(e){var t=e.startTime;t<i.firstHiddenTime&&(r.value=t,r.entries.push(e)),n()},o=c("largest-contentful-paint",a);if(o){n=m(e,r,t);var v=function(){P[r.id]||(o.takeRecords().map(a),o.disconnect(),P[r.id]=!0,n(!0))};["keydown","click"].forEach((function(e){addEventListener(e,v,{once:!0,capture:!0})})),f(v,!0),s((function(i){r=u("LCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,P[r.id]=!0,n(!0)}))}))}))}},D=function(e){var t,n=u("TTFB");t=function(){try{var t=performance.getEntriesByType("navigation")[0]||function(){var e=performance.timing,t={entryType:"navigation",startTime:0};for(var n in e)"navigationStart"!==n&&"toJSON"!==n&&(t[n]=Math.max(e[n]-e.navigationStart,0));return t}();if(n.value=n.delta=t.responseStart,n.value<0||n.value>performance.now())return;n.entries=[t],e(n)}catch(e){}},"complete"===document.readyState?setTimeout(t,0):addEventListener("pageshow",t)}}}]);
|
|
@ -1 +0,0 @@
|
|||
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{266:function(t,n,e){"use strict";e.r(n),e.d(n,"getCLS",(function(){return l})),e.d(n,"getFCP",(function(){return g})),e.d(n,"getFID",(function(){return h})),e.d(n,"getLCP",(function(){return y})),e.d(n,"getTTFB",(function(){return F}));var i,a,r=function(){return"".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)},o=function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:-1;return{name:t,value:n,delta:0,entries:[],id:r(),isFinal:!1}},u=function(t,n){try{if(PerformanceObserver.supportedEntryTypes.includes(t)){var e=new PerformanceObserver((function(t){return t.getEntries().map(n)}));return e.observe({type:t,buffered:!0}),e}}catch(t){}},s=!1,c=!1,d=function(t){s=!t.persisted},f=function(){addEventListener("pagehide",d),addEventListener("beforeunload",(function(){}))},p=function(t){var n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];c||(f(),c=!0),addEventListener("visibilitychange",(function(n){var e=n.timeStamp;"hidden"===document.visibilityState&&t({timeStamp:e,isUnloading:s})}),{capture:!0,once:n})},v=function(t,n,e,i){var a;return function(){e&&n.isFinal&&e.disconnect(),n.value>=0&&(i||n.isFinal||"hidden"===document.visibilityState)&&(n.delta=n.value-(a||0),(n.delta||n.isFinal||void 0===a)&&(t(n),a=n.value))}},l=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("CLS",0),a=function(t){t.hadRecentInput||(i.value+=t.value,i.entries.push(t),n())},r=u("layout-shift",a);r&&(n=v(t,i,r,e),p((function(t){var e=t.isUnloading;r.takeRecords().map(a),e&&(i.isFinal=!0),n()})))},m=function(){return void 0===i&&(i="hidden"===document.visibilityState?0:1/0,p((function(t){var n=t.timeStamp;return i=n}),!0)),{get timeStamp(){return i}}},g=function(t){var n,e=o("FCP"),i=m(),a=u("paint",(function(t){"first-contentful-paint"===t.name&&t.startTime<i.timeStamp&&(e.value=t.startTime,e.isFinal=!0,e.entries.push(t),n())}));a&&(n=v(t,e,a))},h=function(t){var n=o("FID"),e=m(),i=function(t){t.startTime<e.timeStamp&&(n.value=t.processingStart-t.startTime,n.entries.push(t),n.isFinal=!0,r())},a=u("first-input",i),r=v(t,n,a);a?p((function(){a.takeRecords().map(i),a.disconnect()}),!0):window.perfMetrics&&window.perfMetrics.onFirstInputDelay&&window.perfMetrics.onFirstInputDelay((function(t,i){i.timeStamp<e.timeStamp&&(n.value=t,n.isFinal=!0,n.entries=[{entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+t}],r())}))},S=function(){return a||(a=new Promise((function(t){return["scroll","keydown","pointerdown"].map((function(n){addEventListener(n,t,{once:!0,passive:!0,capture:!0})}))}))),a},y=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("LCP"),a=m(),r=function(t){var e=t.startTime;e<a.timeStamp?(i.value=e,i.entries.push(t)):i.isFinal=!0,n()},s=u("largest-contentful-paint",r);if(s){n=v(t,i,s,e);var c=function(){i.isFinal||(s.takeRecords().map(r),i.isFinal=!0,n())};S().then(c),p(c,!0)}},F=function(t){var n,e=o("TTFB");n=function(){try{var n=performance.getEntriesByType("navigation")[0]||function(){var t=performance.timing,n={entryType:"navigation",startTime:0};for(var e in t)"navigationStart"!==e&&"toJSON"!==e&&(n[e]=Math.max(t[e]-t.navigationStart,0));return n}();e.value=e.delta=n.responseStart,e.entries=[n],e.isFinal=!0,t(e)}catch(t){}},"complete"===document.readyState?setTimeout(n,0):addEventListener("pageshow",n)}}}]);
|
1
app/vmselect/vmui/static/js/main.3039190d.chunk.js
Normal file
|
@ -1 +1 @@
|
|||
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"daeccd9c"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([]);
|
||||
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"b7e923a6"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([]);
|
16259
app/vmui/packages/vmui/package-lock.json
generated
|
@ -4,39 +4,43 @@
|
|||
"private": true,
|
||||
"homepage": "./",
|
||||
"dependencies": {
|
||||
"@codemirror/next": "~0.13.1",
|
||||
"@codemirror/autocomplete": "^0.19.4",
|
||||
"@codemirror/basic-setup": "^0.19.0",
|
||||
"@codemirror/commands": "^0.19.5",
|
||||
"@codemirror/highlight": "^0.19.6",
|
||||
"@codemirror/state": "^0.19.2",
|
||||
"@codemirror/view": "^0.19.9",
|
||||
"@date-io/dayjs": "^1.3.13",
|
||||
"@material-ui/core": "^4.11.0",
|
||||
"@material-ui/core": "^4.12.3",
|
||||
"@material-ui/icons": "^4.11.2",
|
||||
"@material-ui/lab": "^4.0.0-alpha.56",
|
||||
"@material-ui/lab": "^4.0.0-alpha.60",
|
||||
"@material-ui/pickers": "^3.3.10",
|
||||
"@testing-library/jest-dom": "^5.11.6",
|
||||
"@testing-library/react": "^11.1.2",
|
||||
"@testing-library/user-event": "^12.2.2",
|
||||
"@types/jest": "^26.0.15",
|
||||
"@testing-library/jest-dom": "^5.14.1",
|
||||
"@testing-library/react": "^12.1.2",
|
||||
"@testing-library/user-event": "^13.5.0",
|
||||
"@types/jest": "^27.0.2",
|
||||
"@types/lodash.debounce": "^4.0.6",
|
||||
"@types/lodash.get": "^4.4.6",
|
||||
"@types/node": "^12.19.4",
|
||||
"@types/node": "^16.11.6",
|
||||
"@types/numeral": "^2.0.2",
|
||||
"@types/qs": "^6.9.6",
|
||||
"@types/react": "^16.9.56",
|
||||
"@types/react-dom": "^16.9.9",
|
||||
"@types/react-measure": "^2.0.6",
|
||||
"codemirror-promql": "^0.10.2",
|
||||
"date-fns": "^2.23.0",
|
||||
"dayjs": "^1.10.4",
|
||||
"@types/qs": "^6.9.7",
|
||||
"@types/react": "^17.0.33",
|
||||
"@types/react-dom": "^17.0.10",
|
||||
"@types/react-measure": "^2.0.8",
|
||||
"codemirror-promql": "^0.18.0",
|
||||
"dayjs": "^1.10.7",
|
||||
"lodash.debounce": "^4.0.8",
|
||||
"lodash.get": "^4.4.2",
|
||||
"numeral": "^2.0.6",
|
||||
"qs": "^6.5.2",
|
||||
"react": "^17.0.1",
|
||||
"react-dom": "^17.0.1",
|
||||
"qs": "^6.10.1",
|
||||
"react": "^17.0.2",
|
||||
"react-dom": "^17.0.2",
|
||||
"react-measure": "^2.5.2",
|
||||
"react-scripts": "4.0.0",
|
||||
"typescript": "~4.0.5",
|
||||
"react-scripts": "4.0.3",
|
||||
"typescript": "~4.4.4",
|
||||
"uplot": "^1.6.16",
|
||||
"uplot-react": "^1.1.1",
|
||||
"web-vitals": "^0.2.4"
|
||||
"web-vitals": "^2.1.2"
|
||||
},
|
||||
"scripts": {
|
||||
"start": "react-app-rewired start",
|
||||
|
@ -65,12 +69,11 @@
|
|||
]
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.14.5",
|
||||
"@typescript-eslint/eslint-plugin": "^4.14.2",
|
||||
"@typescript-eslint/parser": "^4.14.2",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.3.0",
|
||||
"@typescript-eslint/parser": "^5.2.0",
|
||||
"customize-cra": "^1.0.0",
|
||||
"eslint": "^7.14.0",
|
||||
"eslint-plugin-react": "^7.22.0",
|
||||
"eslint-plugin-react": "^7.26.1",
|
||||
"react-app-rewired": "^2.1.8"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,7 +4,8 @@ import HomeLayout from "./components/Home/HomeLayout";
|
|||
import {StateProvider} from "./state/common/StateContext";
|
||||
import {AuthStateProvider} from "./state/auth/AuthStateContext";
|
||||
import {GraphStateProvider} from "./state/graph/GraphStateContext";
|
||||
import {createMuiTheme, MuiThemeProvider} from "@material-ui/core";
|
||||
import {MuiThemeProvider} from "@material-ui/core";
|
||||
import {createTheme} from "@material-ui/core/styles";
|
||||
|
||||
import CssBaseline from "@material-ui/core/CssBaseline";
|
||||
|
||||
|
@ -14,7 +15,7 @@ import DayJsUtils from "@date-io/dayjs";
|
|||
|
||||
const App: FC = () => {
|
||||
|
||||
const THEME = createMuiTheme({
|
||||
const THEME = createTheme({
|
||||
typography: {
|
||||
"fontSize": 10
|
||||
}
|
||||
|
|
|
@ -182,7 +182,7 @@ export const AuthDialog: React.FC<DialogProps> = (props) => {
|
|||
InputProps={{
|
||||
onPaste: onBearerPaste
|
||||
}}
|
||||
rowsMax={6}
|
||||
maxRows={6}
|
||||
/>
|
||||
</TabPanel>
|
||||
</Box>
|
||||
|
|
|
@ -47,8 +47,8 @@ const QueryConfigurator: FC = () => {
|
|||
|
||||
const onRunQuery = () => {
|
||||
const { values } = queryHistory;
|
||||
if (query === values[values.length - 1]) return;
|
||||
dispatch({type: "RUN_QUERY"});
|
||||
if (query === values[values.length - 1]) return;
|
||||
dispatch({type: "SET_QUERY_HISTORY_INDEX", payload: values.length});
|
||||
dispatch({type: "SET_QUERY_HISTORY_VALUES", payload: [...values, query]});
|
||||
};
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
import {EditorState} from "@codemirror/next/state";
|
||||
import {EditorView, keymap} from "@codemirror/next/view";
|
||||
import {defaultKeymap} from "@codemirror/next/commands";
|
||||
import {EditorState} from "@codemirror/state";
|
||||
import {EditorView, keymap} from "@codemirror/view";
|
||||
import {defaultKeymap} from "@codemirror/commands";
|
||||
import React, {FC, useEffect, useRef, useState} from "react";
|
||||
import { PromQLExtension } from "codemirror-promql";
|
||||
import { basicSetup } from "@codemirror/next/basic-setup";
|
||||
import { basicSetup } from "@codemirror/basic-setup";
|
||||
import {QueryHistory} from "../../../state/common/reducer";
|
||||
|
||||
export interface QueryEditorProps {
|
||||
|
@ -41,7 +41,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
|||
useEffect(() => {
|
||||
const promQL = new PromQLExtension();
|
||||
promQL.activateCompletion(autocomplete);
|
||||
promQL.setComplete({url: server});
|
||||
promQL.setComplete({ remote: { url: server } });
|
||||
|
||||
const listenerExtension = EditorView.updateListener.of(editorUpdate => {
|
||||
if (editorUpdate.docChanged) {
|
||||
|
@ -53,7 +53,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
|||
doc: query,
|
||||
extensions: [
|
||||
basicSetup,
|
||||
keymap(defaultKeymap),
|
||||
keymap.of(defaultKeymap),
|
||||
listenerExtension,
|
||||
promQL.asExtension(),
|
||||
]
|
||||
|
@ -75,7 +75,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
|||
return (
|
||||
<>
|
||||
{/*Class one-line-scroll and other codemirror styles are declared in index.css*/}
|
||||
<div ref={ref} className={oneLiner ? "one-line-scroll" : undefined} onKeyUp={onKeyUp}/>
|
||||
<div ref={ref} className={oneLiner ? "one-line-scroll" : "multi-line-scroll"} onKeyUp={onKeyUp}/>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
|
|
@@ -5,6 +5,7 @@ import {InstantMetricResult, MetricResult} from "../../../api/types";
import {saveToStorage} from "../../../utils/storage";
import {isValidHttpUrl} from "../../../utils/url";
import {useAuthState} from "../../../state/auth/AuthStateContext";
+import {TimeParams} from "../../../types";

export const useFetchQuery = (): {
  fetchUrl?: string,

@@ -21,6 +22,7 @@ export const useFetchQuery = (): {
  const [graphData, setGraphData] = useState<MetricResult[]>();
  const [liveData, setLiveData] = useState<InstantMetricResult[]>();
  const [error, setError] = useState<string>();
+ const [prevPeriod, setPrevPeriod] = useState<TimeParams>();

  useEffect(() => {
    if (error) {

@@ -29,62 +31,78 @@ export const useFetchQuery = (): {
    }
  }, [error]);

- const fetchUrl = useMemo(() => {
-   if (period) {
-     if (!serverUrl) {
-       setError("Please enter Server URL");
-       return;
-     }
-     if (!query.trim()) {
-       setError("Please enter a valid Query and execute it");
-       return;
-     }
-     if (isValidHttpUrl(serverUrl)) {
-       const duration = (period.end - period.start)/2;
-       const doublePeriod = {...period, start: period.start - duration, end: period.end + duration};
-       return displayType === "chart"
-         ? getQueryRangeUrl(serverUrl, query, doublePeriod, nocache)
-         : getQueryUrl(serverUrl, query, period);
+ const needUpdateData = useMemo(() => {
+   if (!prevPeriod) return true;
+   const duration = (prevPeriod.end - prevPeriod.start) / 3;
+   const factorLimit = duration / (period.end - period.start) >= 0.7;
+   const maxLimit = period.end > (prevPeriod.end + duration);
+   const minLimit = period.start < (prevPeriod.start - duration);
+   return factorLimit || maxLimit || minLimit;
+ }, [period]);

+ const fetchData = async () => {
+   if (!fetchUrl) return;
+   setIsLoading(true);
+   setPrevPeriod(period);

+   const headers = new Headers();
+   if (authMethod === "BASIC_AUTH") {
+     headers.set("Authorization", "Basic " + btoa(`${basicData?.login || ""}:${basicData?.password || ""}`));
+   }
+   if (authMethod === "BEARER_AUTH") {
+     headers.set("Authorization", bearerData?.token || "");
+   }

+   try {
+     const response = await fetch(fetchUrl, { headers });
+     if (response.ok) {
+       saveToStorage("LAST_QUERY", query);
+       const resp = await response.json();
+       setError(undefined);
+       displayType === "chart" ? setGraphData(resp.data.result) : setLiveData(resp.data.result);
+     } else {
-       setError("Please provide a valid URL");
+       setError((await response.json())?.error);
+     }
+   } catch (e) {
+     if (e instanceof Error) setError(e.message);
+   }

+   setIsLoading(false);
+ };

+ const fetchUrl = useMemo(() => {
+   if (!period) return;
+   if (!serverUrl) {
+     setError("Please enter Server URL");
+   } else if (!query.trim()) {
+     setError("Please enter a valid Query and execute it");
+   } else if (isValidHttpUrl(serverUrl)) {
+     const duration = (period.end - period.start) / 2;
+     const bufferPeriod = {...period, start: period.start - duration, end: period.end + duration};
+     return displayType === "chart"
+       ? getQueryRangeUrl(serverUrl, query, bufferPeriod, nocache)
+       : getQueryUrl(serverUrl, query, period);
+   } else {
+     setError("Please provide a valid URL");
+   }
  },
  [serverUrl, period, displayType]);

+ useEffect(() => {
+   setPrevPeriod(undefined);
+ }, [query]);

  // TODO: this should depend on query as well, but need to decide when to do the request.
  // Doing it on each query change - looks to be a bad idea. Probably can be done on blur
  useEffect(() => {
-   (async () => {
-     if (fetchUrl) {
-       const headers = new Headers();
-       if (authMethod === "BASIC_AUTH") {
-         headers.set("Authorization", "Basic " + btoa(`${basicData?.login || ""}:${basicData?.password || ""}`));
-       }
-       if (authMethod === "BEARER_AUTH") {
-         headers.set("Authorization", bearerData?.token || "");
-       }
-       setIsLoading(true);
-       try {
-         const response = await fetch(fetchUrl, {
-           headers
-         });
-         if (response.ok) {
-           saveToStorage("LAST_QUERY", query);
-           const resp = await response.json();
-           setError(undefined);
-           displayType === "chart" ? setGraphData(resp.data.result) : setLiveData(resp.data.result);
-         } else {
-           setError((await response.json())?.error);
-         }
-       }
-       catch (e) {
-         setError(e.message);
-       }
-       setIsLoading(false);
-     }
-   })();
- }, [fetchUrl, serverUrl, displayType]);
+   fetchData();
+ }, [serverUrl, displayType]);

+ useEffect(() => {
+   if (needUpdateData) {
+     fetchData();
+   }
+ }, [period]);

  return {
    fetchUrl,
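The refetch heuristic introduced above is worth spelling out. A self-contained sketch (thresholds copied from the hunk; the standalone function form and the sample numbers are illustrative):

```
interface TimeParams { start: number; end: number }

// Refetch when the new period drifts past a third of the previously fetched
// window, or when it shrinks enough that the 0.7 factor limit is reached.
const needUpdateData = (period: TimeParams, prevPeriod?: TimeParams): boolean => {
  if (!prevPeriod) return true;
  const duration = (prevPeriod.end - prevPeriod.start) / 3;
  const factorLimit = duration / (period.end - period.start) >= 0.7;
  const maxLimit = period.end > prevPeriod.end + duration;
  const minLimit = period.start < prevPeriod.start - duration;
  return factorLimit || maxLimit || minLimit;
};

// Previously fetched window: [0, 300] seconds, so the slack is 100 seconds.
console.log(needUpdateData({start: 80, end: 380}, {start: 0, end: 300}));  // false - still inside the buffered range
console.log(needUpdateData({start: 150, end: 450}, {start: 0, end: 300})); // true - panned past the slack, refetch
```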
@@ -1,15 +1,114 @@
-import React, {FC} from "react";
+import React, {FC, useEffect, useState} from "react";
import {MetricResult} from "../../../api/types";
import LineChart from "../../LineChart/LineChart";
+import {AlignedData as uPlotData, Series as uPlotSeries} from "uplot";
+import {Legend, LegendItem} from "../../Legend/Legend";
+import {useAppState} from "../../../state/common/StateContext";
+import {getNameForMetric} from "../../../utils/metric";
+import {getColorFromString} from "../../../utils/color";
+import {useGraphDispatch, useGraphState} from "../../../state/graph/GraphStateContext";
+import {getHideSeries} from "../../../utils/uPlot";

export interface GraphViewProps {
  data?: MetricResult[];
}

const GraphView: FC<GraphViewProps> = ({data = []}) => {
+ const {time: {period}} = useAppState();

+ const { yaxis } = useGraphState();
+ const graphDispatch = useGraphDispatch();

+ const [timeArray, setTimeArray] = useState<number[]>([]);
+ const [dataChart, setDataChart] = useState<uPlotData>([[]]);
+ const [series, setSeries] = useState<uPlotSeries[]>([]);
+ const [legend, setLegend] = useState<LegendItem[]>([]);
+ const [hideSeries, setHideSeries] = useState<string[]>([]);

+ const setTimes = (times: number[]) => {
+   const allTimes = times.sort((a,b) => a-b);
+   const startTime = allTimes[0] || 0;
+   const endTime = allTimes[allTimes.length - 1] || 0;
+   const step = period.step || 1;
+   const length = Math.round((endTime - startTime) / step);
+   setTimeArray(new Array(length).fill(0).map((d, i) => startTime + (step * i)));
+ };

+ const setLimitsYaxis = (values: number[]) => {
+   if (!yaxis.limits.enable || (yaxis.limits.range.every(item => !item))) {
+     const allValues = values.flat().sort((a,b) => a-b);
+     graphDispatch({type: "SET_YAXIS_LIMITS", payload: [allValues[0], allValues[allValues.length - 1]]});
+   }
+ };

+ const getSeriesItem = (d: MetricResult) => {
+   const label = getNameForMetric(d);
+   return {
+     label,
+     width: 1.5,
+     stroke: getColorFromString(label),
+     show: !hideSeries.includes(label)
+   };
+ };

+ const getLegendItem = (s: uPlotSeries): LegendItem => ({
+   label: s.label || "",
+   color: s.stroke as string,
+   checked: s.show || false
+ });

+ const onChangeLegend = (label: string, metaKey: boolean) => {
+   setHideSeries(getHideSeries({hideSeries, label, metaKey, series}));
+ };

+ useEffect(() => {
+   const tempTimes: number[] = [];
+   const tempValues: number[] = [];
+   const tempLegend: LegendItem[] = [];
+   const tempSeries: uPlotSeries[] = [];

+   data?.forEach(d => {
+     const seriesItem = getSeriesItem(d);
+     tempSeries.push(seriesItem);
+     tempLegend.push(getLegendItem(seriesItem));

+     d.values.forEach(v => {
+       if (tempTimes.indexOf(v[0]) === -1) tempTimes.push(v[0]);
+       tempValues.push(+v[1]);
+     });
+   });

+   setTimes(tempTimes);
+   setLimitsYaxis(tempValues);
+   setSeries([{}, ...tempSeries]);
+   setLegend(tempLegend);
+ }, [data]);

+ useEffect(() => {
+   const tempLegend: LegendItem[] = [];
+   const tempSeries: uPlotSeries[] = [];
+   data?.forEach(d => {
+     const seriesItem = getSeriesItem(d);
+     tempSeries.push(seriesItem);
+     tempLegend.push(getLegendItem(seriesItem));
+   });
+   setSeries([{}, ...tempSeries]);
+   setLegend(tempLegend);
+ }, [hideSeries]);

+ useEffect(() => {
+   setDataChart([timeArray, ...data.map(d => timeArray.map(t => {
+     const v = d.values.find(v => v[0] === t);
+     return v ? +v[1] : null;
+   }))]);
+ }, [timeArray]);

  return <>
    {(data.length > 0)
-     ? <LineChart data={data} />
+     ? <div>
+         <LineChart data={dataChart} series={series} metrics={data}/>
+         <Legend labels={legend} onChange={onChangeLegend}/>
+       </div>
      : <div style={{textAlign: "center"}}>No data to show</div>}
  </>;
};
@@ -1,67 +1,31 @@
-import React, {FC, useMemo} from "react";
-import {Checkbox, FormControlLabel, Typography} from "@material-ui/core";
-import {MetricCategory} from "../../hooks/useSortedCategories";
-import {makeStyles} from "@material-ui/core/styles";
+import React, {FC} from "react";
+import {hexToRGB} from "../../utils/color";
+import "./legend.css";

export interface LegendItem {
- seriesName: string;
- labelData: {[key: string]: string};
+ label: string;
  color: string;
  checked: boolean;
}

export interface LegendProps {
  labels: LegendItem[];
- categories: MetricCategory[];
- onChange: (index: number) => void;
+ onChange: (legend: string, metaKey: boolean) => void;
}

-const useStyles = makeStyles({
- legendWrapper: {
-   display: "grid",
-   width: "100%",
-   gridTemplateColumns: "repeat(auto-fit)", // experiments like repeat(auto-fit, minmax(200px , auto)) may reduce size but readability as well
-   gridColumnGap: ".5em",
-   paddingLeft: "8px"
- }
-});

-export const Legend: FC<LegendProps> = ({labels, onChange, categories}) => {
- const classes = useStyles();

- const commonLabels = useMemo(() => labels.length > 0
-   ? categories
-     .filter(c => c.variations === 1)
-     .map(c => `${c.key}: ${labels[0].labelData[c.key]}`)
-   : [], [categories, labels]);

- const uncommonLabels = useMemo(() => categories.filter(c => c.variations !== 1).map(c => c.key), [categories]);

- return <div>
-   <div style={{textAlign: "center"}}>{`Legend for ${commonLabels.join(", ")}`}</div>
-   <div className={classes.legendWrapper}>
-     {labels.map((legendItem: LegendItem, index) =>
-       <div key={legendItem.seriesName}>
-         <FormControlLabel
-           control={
-             <Checkbox
-               size="small"
-               checked={legendItem.checked}
-               onChange={() => {
-                 onChange(index);
-               }}
-               style={{
-                 color: legendItem.color,
-                 padding: "4px"
-               }}
-             />
-           }
-           label={<Typography variant="body2">{uncommonLabels.map(l => `${l}: ${legendItem.labelData[l]}`).join(", ")}</Typography>}
-         />
-       </div>
-     )}
-   </div>
+export const Legend: FC<LegendProps> = ({labels, onChange}) => {
+ return <div className="legendWrapper">
+   {labels.map((legendItem: LegendItem) =>
+     <div className={legendItem.checked ? "legendItem" : "legendItem legendItemHide"}
+       key={legendItem.label}
+       onClick={(e) => onChange(legendItem.label, e.ctrlKey || e.metaKey)}>
+       <div className="legendMarker"
+         style={{
+           borderColor: legendItem.color,
+           backgroundColor: `rgba(${hexToRGB(legendItem.color)}, 0.1)`
+         }}/>
+       <div className="legendLabel">{legendItem.checked} {legendItem.label}</div>
+     </div>
+   )}
+ </div>;
};
app/vmui/packages/vmui/src/components/Legend/legend.css (new file, 38 lines)
@@ -0,0 +1,38 @@
.legendWrapper {
  margin-top: 20px;
}

.legendItem {
  display: inline-grid;
  grid-template-columns: auto auto;
  grid-gap: 4px;
  align-items: center;
  justify-content: start;
  padding: 5px 10px;
  background-color: #FFF;
  cursor: pointer;
  transition: 0.2s ease;
}

.legendItemHide {
  text-decoration: line-through;
  opacity: 0.5;
}

.legendItem:hover {
  background-color: rgba(0, 0, 0, 0.1);
}

.legendMarker {
  width: 12px;
  height: 12px;
  border-width: 2px;
  border-style: solid;
  box-sizing: border-box;
  transition: 0.2s ease;
}

.legendLabel {
  font-size: 12px;
  font-weight: 600;
}
@@ -1,44 +1,29 @@
-import React, {FC, useEffect, useMemo, useRef, useState} from "react";
+import React, {FC, useRef, useState} from "react";
import {useAppDispatch, useAppState} from "../../state/common/StateContext";
-import {GraphViewProps} from "../Home/Views/GraphView";
import uPlot, {AlignedData as uPlotData, Options as uPlotOptions, Series as uPlotSeries} from "uplot";
import UplotReact from "uplot-react";
import "uplot/dist/uPlot.min.css";
import numeral from "numeral";
-import "./legend.css";
import "./tooltip.css";
-import {useGraphDispatch, useGraphState} from "../../state/graph/GraphStateContext";
-import {getDataChart, getLimitsTimes, getLimitsYaxis, getSeries, setTooltip} from "../../utils/uPlot";
+import {useGraphState} from "../../state/graph/GraphStateContext";
+import {setTooltip } from "../../utils/uPlot";
+import {MetricResult} from "../../api/types";

-const LineChart: FC<GraphViewProps> = ({data = []}) => {
+export interface LineChartProps {
+ metrics: MetricResult[]
+ data: uPlotData;
+ series: uPlotSeries[]
+}

+const LineChart: FC<LineChartProps> = ({data, series, metrics = []}) => {
  const dispatch = useAppDispatch();
  const {time: {period}} = useAppState();
- const [scale, setScale] = useState({min: period.start, max: period.end});
+ const { yaxis } = useGraphState();
  const refContainer = useRef<HTMLDivElement>(null);
  const [isPanning, setIsPanning] = useState(false);
  const [zoomPos, setZoomPos] = useState(0);
- const tooltipIdx = {seriesIdx: 1, dataIdx: 0};
- const tooltipOffset = {left: 0, top: 0};

- const {yaxis} = useGraphState();
- const graphDispatch = useGraphDispatch();
- const setStateLimits = (range: [number, number]) => {
-   if (!yaxis.limits.enable || (yaxis.limits.range.every(item => !item))) {
-     graphDispatch({type: "SET_YAXIS_LIMITS", payload: range});
-   }
- };

- const times = useMemo(() => {
-   const [start, end] = getLimitsTimes(data);
-   const output = [];
-   for (let i = start; i < end; i += period.step || 1) { output.push(i); }
-   return output;
- }, [data]);

- const series = useMemo((): uPlotSeries[] => getSeries(data), [data]);

- const dataChart = useMemo((): uPlotData => getDataChart(data, times), [data]);
+ const tooltipIdx = { seriesIdx: 1, dataIdx: 0 };
+ const tooltipOffset = { left: 0, top: 0 };

  const tooltip = document.createElement("div");
  tooltip.className = "u-tooltip";

@@ -48,14 +33,12 @@ const LineChart: FC<GraphViewProps> = ({data = []}) => {
  tooltipOffset.left = parseFloat(u.over.style.left);
  tooltipOffset.top = parseFloat(u.over.style.top);
  u.root.querySelector(".u-wrap")?.appendChild(tooltip);

  // wheel drag pan
  u.over.addEventListener("mousedown", e => {
    if (e.button !== 0) return;
    setIsPanning(true);
    e.preventDefault();
    const left0 = e.clientX;

    const onmove = (e: MouseEvent) => {
      e.preventDefault();
      const dx = (u.posToVal(1, "x") - u.posToVal(0, "x")) * (e.clientX - left0);

@@ -64,17 +47,14 @@ const LineChart: FC<GraphViewProps> = ({data = []}) => {
      u.setScale("x", {min, max});
      setScale({min, max});
    };

    const onup = () => {
      setIsPanning(false);
      document.removeEventListener("mousemove", onmove);
      document.removeEventListener("mouseup", onup);
    };

    document.addEventListener("mousemove", onmove);
    document.addEventListener("mouseup", onup);
  });

  // wheel scroll zoom
  u.over.addEventListener("wheel", e => {
    if (!e.ctrlKey && !e.metaKey) return;

@@ -97,7 +77,7 @@ const LineChart: FC<GraphViewProps> = ({data = []}) => {
    if (tooltipIdx.dataIdx === u.cursor.idx) return;
    tooltipIdx.dataIdx = u.cursor.idx || 0;
    if (tooltipIdx.seriesIdx && tooltipIdx.dataIdx) {
-     setTooltip({u, tooltipIdx, data, series, tooltip, tooltipOffset});
+     setTooltip({u, tooltipIdx, metrics, series, tooltip, tooltipOffset});
    }
  };

@@ -105,44 +85,47 @@ const LineChart: FC<GraphViewProps> = ({data = []}) => {
    if (tooltipIdx.seriesIdx === sidx) return;
    tooltipIdx.seriesIdx = sidx || 0;
    sidx && tooltipIdx.dataIdx
-     ? setTooltip({u, tooltipIdx, data, series, tooltip, tooltipOffset})
+     ? setTooltip({u, tooltipIdx, metrics, series, tooltip, tooltipOffset})
      : tooltip.style.display = "none";
  };

- useEffect(() => { setStateLimits(getLimitsYaxis(data)); }, [data]);

- useEffect(() => { setScale({min: period.start, max: period.end}); }, [period]);

- useEffect(() => {
-   const duration = (period.end - period.start)/3;
-   const factor = duration / (scale.max - scale.min);
-   if (scale.max > period.end + duration || scale.min < period.start - duration || factor >= 0.7) {
-     dispatch({type: "SET_PERIOD", payload: {from: new Date(scale.min * 1000), to: new Date(scale.max * 1000)}});
-   }
- }, [scale]);
+ const setScale = ({min, max}: {min: number, max: number}): void => {
+   dispatch({type: "SET_PERIOD", payload: {from: new Date(min * 1000), to: new Date(max * 1000)}});
+ };

  const options: uPlotOptions = {
-   width: refContainer.current ? refContainer.current.offsetWidth : 400,
-   height: 500,
-   series: series,
-   plugins: [{hooks: {ready: onReadyChart, setCursor, setSeries: seriesFocus}}],
-   cursor: {drag: {x: false, y: false}, focus: {prox: 30}},
-   axes: [
-     {space: 80},
-     {
-       show: true,
-       font: "10px Arial",
-       values: (self, ticks) => ticks.map(n => n > 1000 ? numeral(n).format("0.0a") : n)
+   width: refContainer.current ? refContainer.current.offsetWidth : 400, height: 500, series: series,
+   plugins: [{ hooks: { ready: onReadyChart, setCursor, setSeries: seriesFocus }}],
+   cursor: {
+     drag: { x: false, y: false },
+     focus: { prox: 30 },
+     bind: {
+       mouseup: () => null,
+       mousedown: () => null,
+       click: () => null,
+       dblclick: () => null,
+       mouseenter: () => null,
+     }
+   },
+   legend: { show: false },
+   axes: [
+     { space: 80 },
+     { show: true, font: "10px Arial",
+       values: (self, ticks) => ticks.map(n => n > 1000 ? numeral(n).format("0.0a") : n) }
    ],
    scales: {
-     x: {range: () => [scale.min, scale.max]},
-     y: {range: (self, min, max) => yaxis.limits.enable ? yaxis.limits.range : [min, max]}
+     x: { range: () => [period.start, period.end] },
+     y: {
+       range: (self, min, max) => {
+         const offsetFactor = 0.05; // 5%
+         return yaxis.limits.enable ? yaxis.limits.range : [min - (min * offsetFactor), max + (max * offsetFactor)];
+       }
+     }
    }
  };

- return <div ref={refContainer} style={{pointerEvents: isPanning ? "none" : "auto"}}>
-   {dataChart && <UplotReact options={options} data={dataChart}/>}
+ return <div ref={refContainer} style={{pointerEvents: isPanning ? "none" : "auto", height: "500px"}}>
+   <UplotReact options={options} data={data}/>
  </div>;
};
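The new y-axis `range` callback pads each auto-computed bound by 5% of its own value (not 5% of the span). A small sketch of that arithmetic (the helper name is illustrative):

```
// Mirrors the offsetFactor logic from the uPlot options above.
const padRange = (min: number, max: number, offsetFactor = 0.05): [number, number] =>
  [min - min * offsetFactor, max + max * offsetFactor];

console.log(padRange(10, 100)); // [9.5, 105] - a little headroom on both ends
```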
@@ -1,11 +0,0 @@ (file deleted)
-.uplot .u-legend {
-  display: grid;
-  align-items: center;
-  justify-content: start;
-  text-align: left;
-  margin-top: 25px;
-}

-.uplot .u-legend .u-series {
-  font-size: 12px;
-}
@@ -21,6 +21,12 @@
  flex-wrap: wrap;
  align-items: center;
+ font-size: 11px;
+ line-height: 150%;
}

+.u-tooltip-data__value {
+  padding: 4px;
+  font-weight: bold;
+}

.u-tooltip__info {
@@ -21,19 +21,29 @@ code {
.cm-activeLine {
  background-color: inherit !important;
}
-.cm-wrap {
+.cm-editor {
  border-radius: 4px;
  border-color: #b9b9b9;
  border-style: solid;
  border-width: 1px;
  font-size: 10px;
}

-.one-line-scroll .cm-wrap {
+.one-line-scroll .cm-editor {
  height: 24px;
}

-.cm-content, .cm-gutter { min-height: 51px; }
+.cm-gutters {
+  border-radius: 4px 0 0 4px;
+  height: 100%;
+}

+.multi-line-scroll .cm-content,
+.multi-line-scroll .cm-gutters {
+  min-height: 51px !important;
+}

.one-line-scroll .cm-content,
-.one-line-scroll .cm-gutter {
+.one-line-scroll .cm-gutters {
  min-height: auto;
}
@@ -9,4 +9,12 @@ export const getColorFromString = (str: string): string => {
    colour += ("00" + value.toString(16)).substr(-2);
  }
  return colour;
};

+export const hexToRGB = (hex: string): string => {
+  if (hex.length != 7) return "0, 0, 0";
+  const r = parseInt(hex.slice(1, 3), 16);
+  const g = parseInt(hex.slice(3, 5), 16);
+  const b = parseInt(hex.slice(5, 7), 16);
+  return `${r}, ${g}, ${b}`;
+};
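The new `hexToRGB` helper feeds the translucent legend marker background above. A quick standalone check of its contract (duplicated here so the snippet runs on its own):

```
const hexToRGB = (hex: string): string => {
  if (hex.length != 7) return "0, 0, 0";
  const r = parseInt(hex.slice(1, 3), 16);
  const g = parseInt(hex.slice(3, 5), 16);
  const b = parseInt(hex.slice(5, 7), 16);
  return `${r}, ${g}, ${b}`;
};

console.log(hexToRGB("#ff8000")); // "255, 128, 0"
console.log(hexToRGB("#fff"));    // "0, 0, 0" - only the long "#rrggbb" form is parsed
```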
@@ -1,23 +1,29 @@
-import uPlot, {AlignedData, Series} from "uplot";
+import uPlot, {Series} from "uplot";
import {getColorFromString} from "./color";
import dayjs from "dayjs";
import {MetricResult} from "../api/types";
-import {getNameForMetric} from "./metric";

interface SetupTooltip {
  u: uPlot,
- data: MetricResult[],
+ metrics: MetricResult[],
  series: Series[],
  tooltip: HTMLDivElement,
  tooltipOffset: {left: number, top: number},
  tooltipIdx: {seriesIdx: number, dataIdx: number}
}

-export const setTooltip = ({ u, tooltipIdx, data, series, tooltip, tooltipOffset }: SetupTooltip) : void => {
+interface HideSeriesArgs {
+  hideSeries: string[],
+  label: string,
+  metaKey: boolean,
+  series: Series[]
+}

+export const setTooltip = ({ u, tooltipIdx, metrics, series, tooltip, tooltipOffset }: SetupTooltip) : void => {
  const {seriesIdx, dataIdx} = tooltipIdx;
  const dataSeries = u.data[seriesIdx][dataIdx];
  const dataTime = u.data[0][dataIdx];
- const metric = data[seriesIdx - 1]?.metric || {};
+ const metric = metrics[seriesIdx - 1]?.metric || {};
  const color = getColorFromString(series[seriesIdx].label || "");

  const {width, height} = u.over.getBoundingClientRect();

@@ -35,30 +41,18 @@ export const setTooltip = ({ u, tooltipIdx, data, series, tooltip, tooltipOffset
  const marker = `<div class="u-tooltip__marker" style="background: ${color}"></div>`;
  tooltip.innerHTML = `<div>${date}</div>
    <div class="u-tooltip-data">
-     ${marker}${metric.__name__ || ""}: <b>${dataSeries}</b>
+     ${marker}${metric.__name__ || ""}: <b class="u-tooltip-data__value">${dataSeries}</b>
    </div>
    <div class="u-tooltip__info">${info}</div>`;
};

-export const getSeries = (data: MetricResult[]): Series[] => [{}, ...data.map(d => ({
-  label: getNameForMetric(d),
-  width: 1.5,
-  stroke: getColorFromString(getNameForMetric(d))
-}))];

-export const getLimitsTimes = (data: MetricResult[]): [number, number] => {
-  const allTimes = data.map(d => d.values.map(v => v[0])).flat().sort((a,b) => a-b);
-  return [allTimes[0], allTimes[allTimes.length - 1]];
-};

-export const getLimitsYaxis = (data: MetricResult[]): [number, number] => {
-  const allValues = data.map(d => d.values.map(v => +v[1])).flat().sort((a,b) => a-b);
-  return [allValues[0], allValues[allValues.length - 1]];
-};

-export const getDataChart = (data: MetricResult[], times: number[]): AlignedData => {
-  return [times, ...data.map(d => times.map(t => {
-    const v = d.values.find(v => v[0] === t);
-    return v ? +v[1] : null;
-  }))];
+export const getHideSeries = ({hideSeries, label, metaKey, series}: HideSeriesArgs): string[] => {
+  const include = hideSeries.includes(label);
+  const labels = series.map(s => s.label || "").filter(l => l);
+  if (metaKey && include) {
+    return [...labels.filter(l => l !== label)];
+  } else if (metaKey && !include) {
+    return hideSeries.length === series.length - 2 ? [] : [...labels.filter(l => l !== label)];
+  }
+  return include ? hideSeries.filter(l => l !== label) : [...hideSeries, label];
+};
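The legend click semantics land in `getHideSeries`: a plain click toggles one series, while a ctrl/meta-click isolates it. A minimal sketch (the local `Series` type is narrowed to the single field the helper reads, and the data is illustrative; the leading `{}` mirrors how the chart reserves the x-axis slot):

```
type Series = { label?: string };
interface HideSeriesArgs { hideSeries: string[], label: string, metaKey: boolean, series: Series[] }

// Copy of the helper above, for a standalone check.
const getHideSeries = ({hideSeries, label, metaKey, series}: HideSeriesArgs): string[] => {
  const include = hideSeries.includes(label);
  const labels = series.map(s => s.label || "").filter(l => l);
  if (metaKey && include) {
    return [...labels.filter(l => l !== label)];
  } else if (metaKey && !include) {
    return hideSeries.length === series.length - 2 ? [] : [...labels.filter(l => l !== label)];
  }
  return include ? hideSeries.filter(l => l !== label) : [...hideSeries, label];
};

const series: Series[] = [{}, {label: "a"}, {label: "b"}, {label: "c"}];
console.log(getHideSeries({hideSeries: [], label: "a", metaKey: false, series}));    // ["a"] - "a" toggled off
console.log(getHideSeries({hideSeries: [], label: "a", metaKey: true, series}));     // ["b", "c"] - isolate "a"
console.log(getHideSeries({hideSeries: ["a"], label: "a", metaKey: false, series})); // [] - "a" toggled back on
```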
@@ -18,7 +18,7 @@
    "resolveJsonModule": true,
    "isolatedModules": true,
    "noEmit": true,
-   "jsx": "react"
+   "jsx": "react-jsx"
  },
  "include": [
    "src"
@@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics

ROOT_IMAGE ?= alpine:3.14.2
CERTS_IMAGE := alpine:3.14.2
-GO_BUILDER_IMAGE := golang:1.17.2-alpine
+GO_BUILDER_IMAGE := golang:1.17.3-alpine
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr : _)
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr : _)-$(shell echo $(CERTS_IMAGE) | tr : _)
@@ -86,6 +86,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
### Tutorials, guides and how-to articles

* [PromQL tutorial for beginners and humans](https://valyala.medium.com/promql-tutorial-for-beginners-9ab455142085)
+* [How to optimize PromQL and MetricsQL queries](https://valyala.medium.com/how-to-optimize-promql-and-metricsql-queries-85a1b75bf986)
* [Analyzing Prometheus data with external tools](https://valyala.medium.com/analyzing-prometheus-data-with-external-tools-5f3e5e147639)
* [Prometheus Subqueries in VictoriaMetrics](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3)
* [How to migrate data from Prometheus to VictoriaMetrics](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043)
@@ -6,11 +6,29 @@ sort: 15

## tip

+* FEATURE: vmalert: allow groups with empty rules list like Prometheus does. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1742).
+* FEATURE: vmalert: allow groups with default `tenant` in `-clusterMode`. Default `tenant` values can be specified via `-defaultTenant.prometheus` and `-defaultTenant.graphite`. See [these docs](https://docs.victoriametrics.com/vmalert.html#multitenancy).
+* FEATURE: vmagent: add `collapse` and `expand` buttons per each group of targets with the same `job_name` at `http://vmagent:8429/targets` page.
+* FEATURE: automatically detect timestamp precision (ns, us, ms or s) for the data ingested into VictoriaMetrics via [InfluxDB line protocol](https://docs.victoriametrics.com/#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf).
+* FEATURE: vmagent: add ability to protect `/config` page with auth key via `-configAuthKey` command-line flag. This page may contain sensitive config information, so it may be good to restrict access to this page. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1764).
+* FEATURE: vmagent: hide passwords and auth tokens at `/config` page like Prometheus does. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1764).
+* FEATURE: vmagent: add `-promscrape.maxResponseHeadersSize` command-line flag for tuning the maximum HTTP response headers size for Prometheus scrape targets.
+* FEATURE: vmagent: send data to multiple configured remote storage systems in parallel (e.g. when multiple `-remoteWrite.url` flag values are specified). This should improve data ingestion speed.
+* FEATURE: vmagent: add `-remoteWrite.maxRowsPerBlock` command-line flag for tuning the number of samples to send to remote storage per each block. Bigger values may improve data ingestion performance at the cost of higher memory usage.
+* FEATURE: vmagent: distribute Kafka messages among all the partitions when [writing data to Kafka](https://docs.victoriametrics.com/vmagent.html#writing-metrics-to-kafka).
+* FEATURE: add [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric names.
+* FEATURE: add [duration_over_time](https://docs.victoriametrics.com/MetricsQL.html#duration_over_time) function for calculating the actual lifetime of the time series with possible gaps. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1780).
+* FEATURE: add [limit_offset](https://docs.victoriametrics.com/MetricsQL.html#limit_offset) function, which can be used for implementing simple paging over big number of time series. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1778).

+* BUGFIX: vmagent: reduce the increased memory usage when scraping targets with big number of metrics which periodically change. The memory usage has been increased in v1.68.0 after vmagent started generating staleness markers in [stream parse mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1745).
+* BUGFIX: vmagent: properly display `proxy_url` config option at `http://vmagent:8429/config` page. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1755).
+* BUGFIX: fix tests for Apple M1. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1653).

## [v1.68.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.68.0)

* FEATURE: vmagent: expose `-promscrape.config` contents at `/config` page as Prometheus does. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1695).
-* FEATURE: vmagent: add `show original labels` button per each scrape target displayed at `http://vmagent;8429/targets` page. This should improve debuggability for service discovery and relabeling issues similar to [this one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1664). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1698).
+* FEATURE: vmagent: add `show original labels` button per each scrape target displayed at `http://vmagent:8429/targets` page. This should improve debuggability for service discovery and relabeling issues similar to [this one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1664). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1698).
* FEATURE: vmagent: shard targets among cluster nodes after the relabeling is applied. This should guarantee that targets with the same set of labels go to the same `vmagent` node in the cluster. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1687).
* FEATURE: vmagent: automatically switch to [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode) if the response from the given target exceeds the command-line flag value `-promscrape.minResponseSizeForStreamParse`. This should reduce memory usage when `vmagent` scrapes targets with non-uniform response sizes (this is the case in Kubernetes monitoring).
* FEATURE: vmagent: send Prometheus-like staleness marks in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). Previously staleness marks weren't sent in stream parsing mode. See [these docs](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) for details.
@@ -7,7 +7,7 @@ sort: 11
Below please find public case studies and talks from VictoriaMetrics users. You can also join our [community Slack channel](https://slack.victoriametrics.com/)
where you can chat with VictoriaMetrics users to get additional references, reviews and case studies.

-* [AbiosGaming](#aboisgaming)
+* [AbiosGaming](#abiosgaming)
* [adidas](#adidas)
* [Adsterra](#adsterra)
* [ARNES](#arnes)

@@ -494,7 +494,7 @@ Numbers:

Numbers:

-* The number of active time series per VictoriaMetrics instance is 50 millios.
+* The number of active time series per VictoriaMetrics instance is 50 millions.
* The total number of time series per VictoriaMetrics instance is 5000 million.
* Ingestion rate per VictoriaMetrics instance is 1.1 millions data points per second.
* The total number of datapoints per VictoriaMetrics instance is 8.5 trillion.
@@ -307,7 +307,7 @@ Data replication can be used for increasing storage durability. See [these docs]

VictoriaMetrics uses lower amounts of CPU, RAM and storage space on production workloads compared to competing solutions (Prometheus, Thanos, Cortex, TimescaleDB, InfluxDB, QuestDB, M3DB) according to [our case studies](https://docs.victoriametrics.com/CaseStudies.html).

-Each node type - `vminsert`, `vmselect` and `vmstorage` - can run on the most suitable hardware. Cluster capacity scales linearly with the available resources. The needed amounts of CPU and RAM per each node type highly depends on the workload - the number of active time series, series churn rate, query types, query qps, etc. It is recommended setting up a test VictoriaMetrics cluster for your production workload and iteratively scaling per-node resources and the number of nodes per node type until the cluster becomes stable. It is recommended setting up [monitoring for the cluster](#monitoring). It helps determining bottlenecks in cluster setup. It is also recommended following [the troubleshooting docs](https://docs.victoriametrics.com/#troubleshooting).
+Each node type - `vminsert`, `vmselect` and `vmstorage` - can run on the most suitable hardware. Cluster capacity scales linearly with the available resources. The needed amounts of CPU and RAM per each node type highly depends on the workload - the number of [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-active-time-series), [series churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate), query types, query qps, etc. It is recommended setting up a test VictoriaMetrics cluster for your production workload and iteratively scaling per-node resources and the number of nodes per node type until the cluster becomes stable. It is recommended setting up [monitoring for the cluster](#monitoring). It helps determining bottlenecks in cluster setup. It is also recommended following [the troubleshooting docs](https://docs.victoriametrics.com/#troubleshooting).

The needed storage space for the given retention (the retention is set via `-retentionPeriod` command-line flag at `vmstorage`) can be extrapolated from disk space usage in a test run. For example, if the storage space usage is 10GB after a day-long test run on a production workload, then it will need at least `10GB*100=1TB` of disk space for `-retentionPeriod=100d` (100-days retention period). Storage space usage can be monitored with [the official Grafana dashboard for VictoriaMetrics cluster](#monitoring).
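The extrapolation described above is plain linear arithmetic; as a sketch (the function name is illustrative):

```
// Measured usage over a test day, scaled to the configured -retentionPeriod.
const estimateDiskGB = (gbPerDay: number, retentionDays: number): number => gbPerDay * retentionDays;

console.log(estimateDiskGB(10, 100)); // 1000 GB, i.e. ~1TB for -retentionPeriod=100d
```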
@@ -320,7 +320,7 @@ It is recommended leaving the following amounts of spare resources:
Some capacity planning tips for VictoriaMetrics cluster:

* The [replication](#replication-and-data-safety) increases the amounts of needed resources for the cluster by up to `N` times where `N` is replication factor.
-* Cluster capacity for active time series (e.g. time series with recently ingested samples) can be increased by adding more `vmstorage` nodes and/or by increasing RAM and CPU resources per each `vmstorage` node.
+* Cluster capacity for [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-active-time-series) can be increased by adding more `vmstorage` nodes and/or by increasing RAM and CPU resources per each `vmstorage` node.
* Query latency can be reduced by increasing the number of `vmstorage` nodes and/or by increasing RAM and CPU resources per each `vmselect` node.
* The total number of CPU cores needed for all the `vminsert` nodes can be calculated from the ingestion rate: `CPUs = ingestion_rate / 100K` (see the sketch after this list).
* The `-rpc.disableCompression` command-line flag at `vminsert` nodes can increase ingestion capacity at the cost of higher network bandwidth usage between `vminsert` and `vmstorage`.
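The `CPUs = ingestion_rate / 100K` rule of thumb from the list above, as a runnable sketch (the function name is illustrative):

```
const vminsertCPUs = (samplesPerSec: number): number => Math.ceil(samplesPerSec / 100_000);

console.log(vminsertCPUs(1_100_000)); // 11 cores across all vminsert nodes for 1.1M samples/s
```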
docs/FAQ.md (16 changed lines)

@@ -95,6 +95,7 @@ VictoriaMetrics also [uses less RAM than Thanos components](https://github.com/t
- QuestDB needs more than 20x storage space compared to VictoriaMetrics. This translates to higher storage costs and slower queries over historical data, which must be read from the disk.
- QuestDB is much harder to setup and operate than VictoriaMetrics. Compare [setup instructions for QuestDB](https://questdb.io/docs/get-started/binaries) to [setup instructions for VictoriaMetrics](https://docs.victoriametrics.com/#how-to-start-victoriametrics).
- VictoriaMetrics provides [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) query language, which is better suited for typical queries over time series data than SQL-like query language provided by QuestDB. See [this article](https://valyala.medium.com/promql-tutorial-for-beginners-9ab455142085) for details.
+- VictoriaMetrics can be queried via [Prometheus querying API](https://docs.victoriametrics.com/#prometheus-querying-api-usage) and via [Graphite API](https://docs.victoriametrics.com/#graphite-api-usage).
- Thanks to PromQL support, VictoriaMetrics [can be used as a drop-in replacement for Prometheus in Grafana](https://docs.victoriametrics.com/#grafana-setup), while QuestDB needs full rewrite of existing dashboards in Grafana.
- Thanks to Prometheus remote_write API support, VictoriaMetrics can be used as a long-term storage for Prometheus or for [vmagent](https://docs.victoriametrics.com/vmagent.html), while QuestDB has no integration with Prometheus.
- QuestDB [supports smaller range of popular data ingestion protocols](https://questdb.io/docs/develop/insert-data) compared to VictoriaMetrics (compare to [the list of supported data ingestion protocols for VictoriaMetrics](https://docs.victoriametrics.com/#how-to-import-time-series-data)).

@@ -104,6 +105,7 @@ VictoriaMetrics also [uses less RAM than Thanos components](https://github.com/t
## What is the difference between VictoriaMetrics and [Cortex](https://github.com/cortexproject/cortex)?

VictoriaMetrics is similar to Cortex in the following aspects:

- Both systems accept data from [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus
  via standard [remote_write API](https://prometheus.io/docs/practices/remote_write/), i.e. there is no need in running sidecars
  unlike in [Thanos](https://github.com/thanos-io/thanos) case.

@@ -111,9 +113,10 @@ VictoriaMetrics is similar to Cortex in the following aspects:
- Both systems support data replication. See [replication in Cortex](https://github.com/cortexproject/cortex/blob/fe56f1420099aa1bf1ce09316c186e05bddee879/docs/architecture.md#hashing) and [replication in VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#replication-and-data-safety).
- Both systems scale horizontally to multiple nodes. See [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-resizing-and-scalability) for details.
- Both systems support alerting and recording rules via the corresponding tools such as [vmalert](https://docs.victoriametrics.com/vmalert.html).
+- Both systems can be queried via [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/) and integrate perfectly with Grafana.

The main differences between Cortex and VictoriaMetrics:

- Cortex re-uses Prometheus source code, while VictoriaMetrics is written from scratch.
- Cortex heavily relies on third-party services such as Consul, Memcache, DynamoDB, BigTable, Cassandra, etc.
  This may increase operational complexity and reduce system reliability comparing to VictoriaMetrics' case,

@@ -127,6 +130,8 @@ The main differences between Cortex and VictoriaMetrics:
- Cortex is usually slower and requires more CPU and RAM than VictoriaMetrics. See [this talk from adidas at PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/) and [other case studies](https://docs.victoriametrics.com/CaseStudies.html).
- VictoriaMetrics accepts data in multiple popular data ingestion protocols additionally to Prometheus remote_write protocol - InfluxDB, OpenTSDB, Graphite, CSV, JSON, native binary.
  See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-time-series-data) for details.
- VictoriaMetrics provides [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) query language, while Cortex provides [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) query language.
+- VictoriaMetrics can be queried via [Graphite API](https://docs.victoriametrics.com/#graphite-api-usage).

## What is the difference between VictoriaMetrics and [Thanos](https://github.com/thanos-io/thanos)?

@@ -148,6 +153,8 @@ The main differences between Cortex and VictoriaMetrics:
- Thanos is usually slower and requires more CPU and RAM than VictoriaMetrics. See [this talk from adidas at PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
- VictoriaMetrics accepts data in multiple popular data ingestion protocols additionally to Prometheus remote_write protocol - InfluxDB, OpenTSDB, Graphite, CSV, JSON, native binary.
  See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-time-series-data) for details.
- VictoriaMetrics provides [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) query language, while Thanos provides [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) query language.
+- VictoriaMetrics can be queried via [Graphite API](https://docs.victoriametrics.com/#graphite-api-usage).

## How does VictoriaMetrics compare to [InfluxDB](https://www.influxdata.com/time-series-platform/influxdb/)?

@@ -157,6 +164,7 @@ The main differences between Cortex and VictoriaMetrics:
- VictoriaMetrics provides better query language - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) - than InfluxQL or Flux. See [this tutorial](https://medium.com/@valyala/promql-tutorial-for-beginners-9ab455142085) for details.
- VictoriaMetrics accepts data in multiple popular data ingestion protocols additionally to InfluxDB - Prometheus remote_write, OpenTSDB, Graphite, CSV, JSON, native binary.
  See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-time-series-data) for details.
+- VictoriaMetrics can be queried via [Graphite API](https://docs.victoriametrics.com/#graphite-api-usage).

## How does VictoriaMetrics compare to [TimescaleDB](https://www.timescale.com/)?

@@ -166,6 +174,7 @@ The main differences between Cortex and VictoriaMetrics:
- VictoriaMetrics requires up to 10x less CPU and RAM resources than TimescaleDB for processing production data. See [this article](https://abiosgaming.com/press/high-cardinality-aggregations/) for details.
- TimescaleDB is [harder to setup, configure and operate](https://docs.timescale.com/timescaledb/latest/how-to-guides/install-timescaledb/self-hosted/ubuntu/installation-apt-ubuntu/) than VictoriaMetrics (see [how to run VictoriaMetrics](https://docs.victoriametrics.com/#how-to-start-victoriametrics)).
- VictoriaMetrics accepts data in multiple popular data ingestion protocols - InfluxDB, OpenTSDB, Graphite, CSV, while TimescaleDB supports only SQL inserts.
+- VictoriaMetrics can be queried via [Graphite API](https://docs.victoriametrics.com/#graphite-api-usage).

## Does VictoriaMetrics use Prometheus technologies like other clustered TSDBs built on top of Prometheus such as [Thanos](https://github.com/thanos-io/thanos) or [Cortex](https://github.com/cortexproject/cortex)?

@@ -286,6 +295,11 @@ High cardinality usually means high number of [active time series](#what-is-acti
VictoriaMetrics maintains in-memory cache for mapping of [active time series](#what-is-active-time-series) into internal series ids. The cache size depends on the available memory for VictoriaMetrics in the host system. If the information about all the active time series doesn't fit the cache, then VictoriaMetrics needs to read and unpack the information from disk on every incoming sample for time series missing in the cache. This operation is much slower than the cache lookup, so such insert is named `slow insert`. High percentage of slow inserts on the [official dashboard for VictoriaMetrics](https://docs.victoriametrics.com/#monitoring) indicates a memory shortage for the current number of [active time series](#what-is-active-time-series). Such a condition usually leads to significant slowdown for data ingestion and to significantly increased disk IO and CPU usage. The solution is to add more memory or to reduce the number of [active time series](#what-is-active-time-series). The `/api/v1/status/tsdb` page can be helpful for locating the source of high number of active time series - see [these docs](https://docs.victoriametrics.com/#tsdb-stats).

+## How to optimize MetricsQL query?

+See [this article](https://valyala.medium.com/how-to-optimize-promql-and-metricsql-queries-85a1b75bf986).

## Why MetricsQL isn't 100% compatible with PromQL?

[MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) provides a better user experience than PromQL. It fixes a few annoying issues in PromQL. This prevents MetricsQL from being 100% compatible with PromQL. See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
@@ -18,7 +18,7 @@ The following functionality is implemented differently in MetricsQL compared to
* MetricsQL returns the expected non-empty responses for [rate](#rate) with `step` values smaller than scrape interval. This addresses [this issue from Grafana](https://github.com/grafana/grafana/issues/11451). See also [this blog post](https://www.percona.com/blog/2020/02/28/better-prometheus-rate-function-with-victoriametrics/).
* MetricsQL treats `scalar` type the same as `instant vector` without labels, since subtle differences between these types usually confuse users. See [the corresponding Prometheus docs](https://prometheus.io/docs/prometheus/latest/querying/basics/#expression-language-data-types) for details.
* MetricsQL removes all the `NaN` values from the output, so some queries like `(-1)^0.5` return empty results in VictoriaMetrics, while returning a series of `NaN` values in Prometheus. Note that Grafana doesn't draw any lines or dots for `NaN` values, so the end result looks the same for both VictoriaMetrics and Prometheus.
-* MetricsQL keeps metric names after applying functions, which don't change the meaining of the original time series. For example, [min_over_time(foo)](#min_over_time) or [round(foo)](#round) leaves `foo` metric name in the result. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/674) for details.
+* MetricsQL keeps metric names after applying functions, which don't change the meaning of the original time series. For example, [min_over_time(foo)](#min_over_time) or [round(foo)](#round) leaves `foo` metric name in the result. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/674) for details.

Read more about the differences between PromQL and MetricsQL in [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e).
@@ -30,7 +30,7 @@ MetricsQL implements [PromQL](https://medium.com/@valyala/promql-tutorial-for-be

This functionality can be evaluated at [an editable Grafana dashboard](https://play-grafana.victoriametrics.com/d/4ome8yJmz/node-exporter-on-victoriametrics-demo) or at your own [VictoriaMetrics instance](https://docs.victoriametrics.com/#how-to-start-victoriametrics).

-- Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax. This is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but usually works faster and is easier to use when migrating from Graphite. VictoriaMetrics also can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
+- Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax. This is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but usually works faster and is easier to use when migrating from Graphite. VictoriaMetrics also can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details. See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
- Lookbehind window in square brackets may be omitted. VictoriaMetrics automatically selects the lookbehind window depending on the current step used for building the graph (e.g. `step` query arg passed to [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries)). For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`. It is equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
- [Aggregate functions](#aggregate-functions) accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point across time series returned by `q1`, `q2` and `q3`.
- [offset](https://prometheus.io/docs/prometheus/latest/querying/basics/#offset-modifier), lookbehind window in square brackets and `step` value for [subquery](#subqueries) may refer to the current step aka `$__interval` value from Grafana with `[Ni]` syntax. For instance, `rate(metric[10i] offset 5i)` would return per-second rate over a range covering 10 previous steps with the offset of 5 steps.

@@ -142,6 +142,10 @@ See also [implicit query conversions](#implicit-query-conversions).

`distinct_over_time(series_selector[d])` returns the number of distinct raw sample values on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). Metric names are stripped from the resulting rollups.

+#### duration_over_time

+`duration_over_time(series_selector[d], max_interval)` returns the duration in seconds when time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) were present over the given lookbehind window `d`. It is expected that intervals between adjacent samples per each series don't exceed the `max_interval`. Otherwise such intervals are considered as gaps and aren't counted. See also [lifetime](#lifetime) and [lag](#lag).
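For example, assuming a typical `up` metric (the selector is illustrative), the following query would return how many seconds each series was actually present during the last day, treating sample gaps longer than 10 minutes as downtime:

```
duration_over_time(up[1d], 10m)
```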
#### first_over_time

`first_over_time(series_selector[d])` returns the first raw sample value on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). See also [last_over_time](#last_over_time) and [tfirst_over_time](#tfirst_over_time).

@@ -196,7 +200,7 @@ See also [implicit query conversions](#implicit-query-conversions).

#### lag

-`lag(series_selector[d])` returns the duration in seconds between the last sample on the given lookbehind window `d` and the timestamp of the current point. It is calculated independently per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). Metric names are stripped from the resulting rollups. See also [lifetime](#lifetime).
+`lag(series_selector[d])` returns the duration in seconds between the last sample on the given lookbehind window `d` and the timestamp of the current point. It is calculated independently per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). Metric names are stripped from the resulting rollups. See also [lifetime](#lifetime) and [duration_over_time](#duration_over_time).

#### last_over_time

@@ -204,7 +208,7 @@ See also [implicit query conversions](#implicit-query-conversions).

#### lifetime

-`lifetime(series_selector[d])` calculates the lifetime in seconds per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). The returned lifetime is limited by the given lookbehind window `d`. Metric names are stripped from the resulting rollups. See also [lag](#lag).
+`lifetime(series_selector[d])` returns the duration in seconds between the last and the first sample on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). Metric names are stripped from the resulting rollups. See also [duration_over_time](#duration_over_time) and [lag](#lag).

#### max_over_time
@ -689,6 +693,16 @@ See also [implicit query conversions](#implicit-query-conversions).
|
|||
|
||||
`label_del(q, "label1", ..., "labelN")` deletes the given `label*` labels from all the time series returned by `q`.
|
||||
|
||||
#### label_graphite_group
|
||||
|
||||
`label_graphite_group(q, groupNum1, ... groupNumN)` replaces metric names returned from `q` with the given Graphite group values concatenated via `.` char. For example, `label_graphite_group({__graphite__="foo*.bar.*"}, 0, 2)` would substitute `foo<any_value>.bar.<other_value>` metric names with `foo<any_value>.<other_value>`. This function is useful for aggregating Graphite metrics with [aggregate functions](#aggregate-functions). For example, the following query would return per-app memory usage:
|
||||
|
||||
```
|
||||
sum by (__name__) (
|
||||
label_graphite_group({__graphite__="app*.host*.memory_usage"}, 0)
|
||||
)
|
||||
```
|
||||
|
||||
#### label_join
|
||||
|
||||
`label_join(q, "dst_label", "separator", "src_label1", ..., "src_labelN")` joins `src_label*` values with the given `separator` and stores the result in `dst_label`. This is performed individually per each time series returned by `q`. For example, `label_join(up{instance="xxx",job="yyy"}, "foo", "-", "instance", "job")` would store `xxx-yyy` label value into `foo` label. This function is supported by PromQL.
|
||||
|
@ -805,9 +819,14 @@ See also [implicit query conversions](#implicit-query-conversions).
|
|||
|
||||
`histogram(q)` calculates [VictoriaMetrics histogram](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) per each group of points with the same timestamp. Useful for visualizing big number of time series via a heatmap. See [this article](https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) for more details.
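A minimal sketch, assuming a `process_resident_memory_bytes` metric: the following query builds `vmrange` buckets across all matching series at each timestamp, which can be fed into a Grafana heatmap panel:

```
histogram(process_resident_memory_bytes)
```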
|
||||
|
||||
#### limit_offset
|
||||
|
||||
`limit_offset(limit, offset, q)` skips `offset` time series from series returned by `q` and then returns up to `limit` of the remaining time series. This allows implementing simple paging for `q` time series. See also [limitk](#limitk).
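A minimal paging sketch (the metric name is illustrative): the first expression returns the first page of 10 series, while the second returns the next page:

```
limit_offset(10, 0, process_resident_memory_bytes)
limit_offset(10, 10, process_resident_memory_bytes)
```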
|
||||
|
||||
|
||||
#### limitk
|
||||
|
||||
`limitk(k, q) by (group_labels)` returns up to `k` time series per each `group_labels` out of time series returned by `q`. The returned set of time series remain the same across calls.
|
||||
`limitk(k, q) by (group_labels)` returns up to `k` time series per each `group_labels` out of time series returned by `q`. The returned set of time series remain the same across calls. See also [limit_offset](#limit_offset).
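A hedged sketch, assuming the illustrative metric carries a `job` label: return up to 3 series per each `job`:

```
limitk(3, process_resident_memory_bytes) by (job)
```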
|
||||
|
||||
#### mad
|
||||
|
||||
|
|
|
@ -418,7 +418,7 @@ The `/api/v1/export` endpoint should return the following response:
|
|||
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
||||
|
||||
* [Graphite API](#graphite-api-usage)
|
||||
* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics.
|
||||
* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. VictoriaMetrics also supports the [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric names.
|
||||
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
||||
|
||||
## How to send data from OpenTSDB-compatible agents
|
||||
|
@ -561,8 +561,7 @@ visible to the given tenant. It is expected that the `extra_label` query arg is
|
|||
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
||||
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
|
||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster
|
||||
and it is easier to use when migrating from Graphite to VictoriaMetrics.
|
||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function.
|
||||
|
||||
|
||||
### Graphite Render API usage
|
||||
|
@ -1213,6 +1212,7 @@ Consider setting the following command-line flags:
|
|||
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
||||
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
||||
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
||||
* `-configAuthKey` for protecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||
|
||||
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
||||
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
||||
|
@ -1368,6 +1368,7 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
|
|||
|
||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then use `{__graphite__="foo.*.baz"}` syntax for selecting such metrics.
|
||||
This expression is equivalent to `{__name__=~"foo[.][^.]*[.]baz"}`, but it works faster and it is easier to use when migrating from Graphite.
|
||||
See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function, which allows extracting the given groups from Graphite metric names.
|
||||
|
||||
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
||||
|
||||
|
@ -1514,7 +1515,7 @@ Report bugs and propose new features [here](https://github.com/VictoriaMetrics/V
|
|||
|
||||
## VictoriaMetrics Logo
|
||||
|
||||
[Zip](VM_logo.zip) contains three folders with different image orientations (main color and inverted version).
|
||||
[Zip](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/VM_logo.zip) contains three folders with different image orientations (main color and inverted version).
|
||||
|
||||
Files included in each folder:
|
||||
|
||||
|
@ -1549,6 +1550,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
```
|
||||
-bigMergeConcurrency int
|
||||
The maximum number of CPU cores to use for big merges. Default value is used if set to 0
|
||||
-configAuthKey string
|
||||
Authorization key for accessing /config page. It must be passed via authKey query arg
|
||||
-csvTrimTimestamp duration
|
||||
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
-datadog.maxInsertRequestSize size
|
||||
|
@ -1648,7 +1651,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-memory.allowedPercent float
|
||||
Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
|
||||
-metricsAuthKey string
|
||||
Auth key for /metrics. It overrides httpAuth settings
|
||||
Auth key for /metrics. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||
-opentsdbHTTPListenAddr string
|
||||
TCP address to listen for OpenTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty
|
||||
-opentsdbListenAddr string
|
||||
|
@ -1661,7 +1664,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-opentsdbhttpTrimTimestamp duration
|
||||
Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
-pprofAuthKey string
|
||||
Auth key for /debug/pprof. It overrides httpAuth settings
|
||||
Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||
-precisionBits int
|
||||
The number of precision bits to store per each value. Lower precision bits improves data compression at the cost of precision loss (default 64)
|
||||
-promscrape.cluster.memberNum int
|
||||
|
@ -1716,6 +1719,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s)
|
||||
-promscrape.maxDroppedTargets int
|
||||
The maximum number of droppedTargets to show at /api/v1/targets page. Increase this value if your setup drops more scrape targets during relabeling and you need to investigate labels for all the dropped targets. Note that the increased number of tracked dropped targets may result in increased memory usage (default 1000)
|
||||
-promscrape.maxResponseHeadersSize size
|
||||
The maximum size of http response headers from Prometheus scrape targets
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 4096)
|
||||
-promscrape.maxScrapeSize size
|
||||
The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
|
||||
|
|
|
@ -7,10 +7,12 @@ sort: 17
|
|||
## Release version and Docker images
|
||||
|
||||
0. Document all the changes for new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md).
|
||||
1. Create release tag with `git tag v1.xx.y` in `master` branch and `git tag v1.xx.y-cluster` in `cluster` branch.
|
||||
2. Run `make release` for creating `*.tar.gz` release archive with the corresponding `_checksums.txt` inside `bin` directory.
|
||||
3. Run `make publish` for creating and publishing Docker images.
|
||||
4. Repeat steps 3-4 for `cluster` branch.
|
||||
1. Create the following release tags:
|
||||
* `git tag v1.xx.y` in `master` branch
|
||||
* `git tag v1.xx.y-cluster` in `cluster` branch
|
||||
* `git tag v1.xx.y-enterprise` in `enterprise` branch
|
||||
* `git tag v1.xx.y-enterprise-cluster` in `enterprise-cluster` branch
|
||||
2. Run `TAG=v1.xx.y make publish-release`. It will create `*.tar.gz` release archives with the corresponding `_checksums.txt` files inside `bin` directory and publish Docker images for the given `TAG`, `TAG-cluster`, `TAG-enterprise` and `TAG-enterprise-cluster`.
|
||||
3. Push release tag to https://github.com/VictoriaMetrics/VictoriaMetrics : `git push origin v1.xx.y`.
|
||||
4. Go to https://github.com/VictoriaMetrics/VictoriaMetrics/releases , create new release from the tag pushed on step 3 and upload `*.tar.gz` archive with the corresponding `_checksums.txt` from step 2.
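The steps above can be condensed into the following shell sketch; `v1.xx.y` is a placeholder, and each `git tag` command must be run with the corresponding branch checked out:

```
TAG=v1.xx.y
git tag ${TAG}                      # with master checked out
git tag ${TAG}-cluster              # with cluster checked out
git tag ${TAG}-enterprise           # with enterprise checked out
git tag ${TAG}-enterprise-cluster   # with enterprise-cluster checked out
TAG=${TAG} make publish-release
git push origin ${TAG}
```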
|
||||
|
||||
|
|
|
@ -422,7 +422,7 @@ The `/api/v1/export` endpoint should return the following response:
|
|||
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
||||
|
||||
* [Graphite API](#graphite-api-usage)
|
||||
* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics.
|
||||
* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. VictoriaMetrics also supports the [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric names.
|
||||
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
||||
|
||||
## How to send data from OpenTSDB-compatible agents
|
||||
|
@ -565,8 +565,7 @@ visible to the given tenant. It is expected that the `extra_label` query arg is
|
|||
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
||||
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
|
||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster
|
||||
and it is easier to use when migrating from Graphite to VictoriaMetrics.
|
||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function.
|
||||
|
||||
|
||||
### Graphite Render API usage
|
||||
|
@ -1217,6 +1216,7 @@ Consider setting the following command-line flags:
|
|||
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
||||
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
||||
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
||||
* `-configAuthKey` for protecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||
|
||||
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
||||
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
||||
|
@ -1372,6 +1372,7 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
|
|||
|
||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then use `{__graphite__="foo.*.baz"}` syntax for selecting such metrics.
|
||||
This expression is equivalent to `{__name__=~"foo[.][^.]*[.]baz"}`, but it works faster and it is easier to use when migrating from Graphite.
|
||||
See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function, which allows extracting the given groups from Graphite metric names.
|
||||
|
||||
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
||||
|
||||
|
@ -1518,7 +1519,7 @@ Report bugs and propose new features [here](https://github.com/VictoriaMetrics/V
|
|||
|
||||
## VictoriaMetrics Logo
|
||||
|
||||
[Zip](VM_logo.zip) contains three folders with different image orientations (main color and inverted version).
|
||||
[Zip](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/VM_logo.zip) contains three folders with different image orientations (main color and inverted version).
|
||||
|
||||
Files included in each folder:
|
||||
|
||||
|
@ -1553,6 +1554,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
```
|
||||
-bigMergeConcurrency int
|
||||
The maximum number of CPU cores to use for big merges. Default value is used if set to 0
|
||||
-configAuthKey string
|
||||
Authorization key for accessing /config page. It must be passed via authKey query arg
|
||||
-csvTrimTimestamp duration
|
||||
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
-datadog.maxInsertRequestSize size
|
||||
|
@ -1652,7 +1655,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-memory.allowedPercent float
|
||||
Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
|
||||
-metricsAuthKey string
|
||||
Auth key for /metrics. It overrides httpAuth settings
|
||||
Auth key for /metrics. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||
-opentsdbHTTPListenAddr string
|
||||
TCP address to listen for OpenTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty
|
||||
-opentsdbListenAddr string
|
||||
|
@ -1665,7 +1668,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-opentsdbhttpTrimTimestamp duration
|
||||
Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
-pprofAuthKey string
|
||||
Auth key for /debug/pprof. It overrides httpAuth settings
|
||||
Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||
-precisionBits int
|
||||
The number of precision bits to store per each value. Lower precision bits improves data compression at the cost of precision loss (default 64)
|
||||
-promscrape.cluster.memberNum int
|
||||
|
@ -1720,6 +1723,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s)
|
||||
-promscrape.maxDroppedTargets int
|
||||
The maximum number of droppedTargets to show at /api/v1/targets page. Increase this value if your setup drops more scrape targets during relabeling and you need to investigate labels for all the dropped targets. Note that the increased number of tracked dropped targets may result in increased memory usage (default 1000)
|
||||
-promscrape.maxResponseHeadersSize size
|
||||
The maximum size of http response headers from Prometheus scrape targets
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 4096)
|
||||
-promscrape.maxScrapeSize size
|
||||
The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
|
||||
|
|
|
@ -484,11 +484,11 @@ cat <<EOF | helm install my-grafana grafana/grafana -f -
|
|||
default:
|
||||
victoriametrics:
|
||||
gnetId: 11176
|
||||
revision: 15
|
||||
revision: 16
|
||||
datasource: victoriametrics
|
||||
vmagent:
|
||||
gnetId: 12683
|
||||
revision: 5
|
||||
revision: 6
|
||||
datasource: victoriametrics
|
||||
kubernetes:
|
||||
gnetId: 14205
|
||||
|
|
|
@ -282,7 +282,7 @@ cat <<EOF | helm install my-grafana grafana/grafana -f -
|
|||
default:
|
||||
victoriametrics:
|
||||
gnetId: 10229
|
||||
revision: 20
|
||||
revision: 21
|
||||
datasource: victoriametrics
|
||||
kubernetes:
|
||||
gnetId: 14205
|
||||
|
|
|
@ -456,10 +456,11 @@ It may be useful to perform `vmagent` rolling update without any scrape loss.
|
|||
|
||||
* If `vmagent` uses too much memory, then the following options can help:
|
||||
* Disabling staleness tracking with `-promscrape.noStaleMarkers` option. See [these docs](#prometheus-staleness-markers).
|
||||
* Enabling stream parsing mode. See [these docs](#stream-parsing-mode).
|
||||
* Enabling stream parsing mode if `vmagent` scrapes targets with millions of metrics per target. See [these docs](#stream-parsing-mode).
|
||||
* Reducing the number of output queues with `-remoteWrite.queues` command-line option.
|
||||
* Reducing the amount of RAM vmagent can use for in-memory buffering with `-memory.allowedPercent` or `-memory.allowedBytes` command-line option. Another option is to reduce memory limits in Docker and/or Kubernetes if `vmagent` runs under these systems.
|
||||
* Reducing the number of CPU cores vmagent can use by passing `GOMAXPROCS=N` environment variable to `vmagent`, where `N` is the desired limit on CPU cores. Another option is to reduce CPU limits in Docker or Kubernetes if `vmagent` runs under these systems.
|
||||
* Passing `-promscrape.dropOriginalLabels` command-line option to `vmagent`, so it drops `"discoveredLabels"` and `"droppedTargets"` lists at `/api/v1/targets` page. This reduces memory usage when scraping a big number of targets at the cost of reduced debuggability for improperly configured per-target relabeling.
|
||||
|
||||
* When `vmagent` scrapes many unreliable targets, it can flood the error log with scrape errors. These errors can be suppressed
|
||||
by passing `-promscrape.suppressScrapeErrors` command-line flag to `vmagent`. The most recent scrape error per each target can be observed at `http://vmagent-host:8429/targets`
|
||||
|
@ -468,17 +469,9 @@ It may be useful to perform `vmagent` rolling update without any scrape loss.
|
|||
* The `/api/v1/targets` page could be useful for debugging relabeling process for scrape targets.
|
||||
This page contains original labels for targets dropped during relabeling (see "droppedTargets" section in the page output). By default up to `-promscrape.maxDroppedTargets` targets are shown here. If your setup drops more targets during relabeling, then increase `-promscrape.maxDroppedTargets` command-line flag value to see all the dropped targets. Note that tracking each dropped target requires up to 10Kb of RAM. Therefore big values for `-promscrape.maxDroppedTargets` may result in increased memory usage if a big number of scrape targets are dropped during relabeling.
|
||||
|
||||
* If `vmagent` scrapes a big number of targets then the `-promscrape.dropOriginalLabels` command-line option may be passed to `vmagent` in order to reduce memory usage.
|
||||
This option drops `"discoveredLabels"` and `"droppedTargets"` lists at `/api/v1/targets` page, which may result in reduced debuggability for improperly configured per-target relabeling.
|
||||
* We recommend you increase `-remoteWrite.queues` if `vmagent_remotewrite_pending_data_bytes` metric exported at `http://vmagent-host:8429/metrics` page grows constantly. It is also recommended increasing `-remoteWrite.maxBlockSize` and `-remoteWrite.maxRowsPerBlock` command-line options in this case. This can improve data ingestion performance to the configured remote storage systems at the cost of higher memory usage.
|
||||
|
||||
* If `vmagent` scrapes targets with millions of metrics per target (for example, when scraping [federation endpoints](https://prometheus.io/docs/prometheus/latest/federation/)),
|
||||
we recommend enabling [stream parsing mode](#stream-parsing-mode) in order to reduce memory usage during scraping.
|
||||
|
||||
* We recommend you increase `-remoteWrite.queues` if `vmagent_remotewrite_pending_data_bytes` metric exported at `http://vmagent-host:8429/metrics` page grows constantly.
|
||||
|
||||
* If you see gaps in the data pushed by `vmagent` to remote storage when `-remoteWrite.maxDiskUsagePerURL` is set, try increasing `-remoteWrite.queues`.
|
||||
Such gaps may appear because `vmagent` cannot keep up with sending the collected data to remote storage. Therefore it starts dropping the buffered data
|
||||
if the on-disk buffer size exceeds `-remoteWrite.maxDiskUsagePerURL`.
|
||||
* If you see gaps in the data pushed by `vmagent` to remote storage when `-remoteWrite.maxDiskUsagePerURL` is set, try increasing `-remoteWrite.queues`. Such gaps may appear because `vmagent` cannot keep up with sending the collected data to remote storage. Therefore it starts dropping the buffered data if the on-disk buffer size exceeds `-remoteWrite.maxDiskUsagePerURL`.
|
||||
|
||||
* `vmagent` drops data blocks if remote storage replies with `400 Bad Request` and `409 Conflict` HTTP responses. The number of dropped blocks can be monitored via `vmagent_remotewrite_packets_dropped_total` metric exported at [/metrics page](#monitoring).
|
||||
|
||||
|
@ -711,6 +704,8 @@ vmagent collects metrics data via popular data ingestion protocols and routes th
|
|||
|
||||
See the docs at https://docs.victoriametrics.com/vmagent.html .
|
||||
|
||||
-configAuthKey string
|
||||
Authorization key for accessing /config page. It must be passed via authKey query arg
|
||||
-csvTrimTimestamp duration
|
||||
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
-datadog.maxInsertRequestSize size
|
||||
|
@ -794,7 +789,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
-memory.allowedPercent float
|
||||
Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
|
||||
-metricsAuthKey string
|
||||
Auth key for /metrics. It overrides httpAuth settings
|
||||
Auth key for /metrics. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||
-opentsdbHTTPListenAddr string
|
||||
TCP address to listen for OpenTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty
|
||||
-opentsdbListenAddr string
|
||||
|
@ -807,7 +802,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
-opentsdbhttpTrimTimestamp duration
|
||||
Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
-pprofAuthKey string
|
||||
Auth key for /debug/pprof. It overrides httpAuth settings
|
||||
Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||
-promscrape.cluster.memberNum int
|
||||
The member number in the cluster of scrapers. It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster
|
||||
-promscrape.cluster.membersCount int
|
||||
|
@ -860,6 +855,9 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s)
|
||||
-promscrape.maxDroppedTargets int
|
||||
The maximum number of droppedTargets to show at /api/v1/targets page. Increase this value if your setup drops more scrape targets during relabeling and you need to investigate labels for all the dropped targets. Note that the increased number of tracked dropped targets may result in increased memory usage (default 1000)
|
||||
-promscrape.maxResponseHeadersSize size
|
||||
The maximum size of http response headers from Prometheus scrape targets
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 4096)
|
||||
-promscrape.maxScrapeSize size
|
||||
The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
|
||||
|
@ -899,7 +897,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-remoteWrite.maxBlockSize size
|
||||
The maximum size in bytes of unpacked request to send to remote storage. It shouldn't exceed -maxInsertRequestSize from VictoriaMetrics
|
||||
The maximum block size to send to remote storage. Bigger blocks may improve performance at the cost of the increased memory usage. See also -remoteWrite.maxRowsPerBlock
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 8388608)
|
||||
-remoteWrite.maxDailySeries int
|
||||
The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
|
||||
|
@ -908,6 +906,8 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||
-remoteWrite.maxHourlySeries int
|
||||
The maximum number of unique series vmagent can send to remote storage systems during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
|
||||
-remoteWrite.maxRowsPerBlock int
|
||||
The maximum number of samples to send in each block to remote storage. Higher number may improve performance at the cost of the increased memory usage. See also -remoteWrite.maxBlockSize (default 10000)
|
||||
-remoteWrite.multitenantURL array
|
||||
Base path for multitenant remote storage URL to write data to. See https://docs.victoriametrics.com/vmagent.html#multitenancy for details. Example url: http://<vminsert>:8480 . Pass multiple -remoteWrite.multitenantURL flags in order to replicate data to multiple remote storage systems. See also -remoteWrite.url
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
|
|
|
@ -25,8 +25,8 @@ implementation and aims to be compatible with its syntax.
|
|||
* `vmalert` executes queries against a remote datasource, which has reliability risks because of the network.
|
||||
It is recommended to configure alert thresholds and rule expressions with the understanding that a network request
|
||||
may fail;
|
||||
* by default, rules execution is sequential within one group, but persisting of execution results to remote
|
||||
storage is asynchronous. Hence, user shouldn't rely on recording rules chaining when result of previous
|
||||
* by default, rules execution is sequential within one group, but persistence of execution results to remote
|
||||
storage is asynchronous. Hence, the user shouldn't rely on chaining of recording rules when the result of the previous
|
||||
recording rule is reused in the next one;
|
||||
|
||||
## QuickStart
|
||||
|
@ -59,7 +59,7 @@ Then configure `vmalert` accordingly:
|
|||
-external.label=replica=a # Multiple external labels may be set
|
||||
```
|
||||
|
||||
See the fill list of configuration flags in [configuration](#configuration) section.
|
||||
See the full list of configuration flags in [configuration](#configuration) section.
|
||||
|
||||
If you run multiple `vmalert` services for the same datastore or AlertManager, do not forget
|
||||
to specify different `external.label` flags so it is possible to identify which `vmalert` instance generated each rule or alert.
|
||||
|
@ -89,7 +89,7 @@ name: <string>
|
|||
[ concurrency: <integer> | default = 1 ]
|
||||
|
||||
# Optional type for expressions inside the rules. Supported values: "graphite" and "prometheus".
|
||||
# By default "prometheus" rule type is used.
|
||||
# By default "prometheus" type is used.
|
||||
[ type: <string> ]
|
||||
|
||||
# Optional list of label filters applied to every rule's
|
||||
|
@ -117,14 +117,14 @@ expression and then act according to the Rule type.
|
|||
|
||||
There are two types of Rules:
|
||||
* [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) -
|
||||
Alerting rules allows to define alert conditions via `expr` field and to send notifications
|
||||
Alerting rules allow defining alert conditions via the `expr` field and sending notifications to
|
||||
[Alertmanager](https://github.com/prometheus/alertmanager) if execution result is not empty.
|
||||
* [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) -
|
||||
Recording rules allows to define `expr` which result will be than backfilled to configured
|
||||
Recording rules allow defining an `expr` whose result will then be backfilled to the configured
|
||||
`-remoteWrite.url`. Recording rules are used to precompute frequently needed or computationally
|
||||
expensive expressions and save their result as a new set of time series.
|
||||
|
||||
`vmalert` forbids to define duplicates - rules with the same combination of name, expression and labels
|
||||
`vmalert` forbids defining duplicates - rules with the same combination of name, expression and labels
|
||||
within one group.
|
||||
|
||||
#### Alerting rules
|
||||
|
@ -134,12 +134,8 @@ The syntax for alerting rule is the following:
|
|||
# The name of the alert. Must be a valid metric name.
|
||||
alert: <string>
|
||||
|
||||
# Optional type for the rule. Supported values: "graphite", "prometheus".
|
||||
# By default "prometheus" rule type is used.
|
||||
[ type: <string> ]
|
||||
|
||||
# The expression to evaluate. The expression language depends on the type value.
|
||||
# By default PromQL/MetricsQL expression is used. If type="graphite", then the expression
|
||||
# By default PromQL/MetricsQL expression is used. If group.type="graphite", then the expression
|
||||
# must contain valid Graphite expression.
|
||||
expr: <string>
|
||||
|
||||
|
@ -170,12 +166,8 @@ The syntax for recording rules is following:
|
|||
# The name of the time series to output to. Must be a valid metric name.
|
||||
record: <string>
|
||||
|
||||
# Optional type for the rule. Supported values: "graphite", "prometheus".
|
||||
# By default "prometheus" rule type is used.
|
||||
[ type: <string> ]
|
||||
|
||||
# The expression to evaluate. The expression language depends on the type value.
|
||||
# By default MetricsQL expression is used. If type="graphite", then the expression
|
||||
# By default MetricsQL expression is used. If group.type="graphite", then the expression
|
||||
# must contain valid Graphite expression.
|
||||
expr: <string>
|
||||
|
||||
|
@ -194,18 +186,18 @@ the process alerts state will be lost. To avoid this situation, `vmalert` should
|
|||
* `-remoteWrite.url` - URL to VictoriaMetrics (Single) or vminsert (Cluster). `vmalert` will persist alerts state
|
||||
into the configured address in the form of time series named `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol.
|
||||
These are regular time series and may be queried from VM just as any other time series.
|
||||
The state stored to the configured address on every rule evaluation.
|
||||
The state is stored to the configured address on every rule evaluation.
|
||||
* `-remoteRead.url` - URL to VictoriaMetrics (Single) or vmselect (Cluster). `vmalert` will try to restore alerts state
|
||||
from configured address by querying time series with name `ALERTS_FOR_STATE`.
|
||||
|
||||
Both flags are required for the proper state restoring. Restore process may fail if time series are missing
|
||||
Both flags are required for proper state restoring. Restore process may fail if time series are missing
|
||||
in configured `-remoteRead.url`, weren't updated in the last `1h` (controlled by `-remoteRead.lookback`)
|
||||
or received state doesn't match current `vmalert` rules configuration.
|
||||
|
||||
|
||||
### Multitenancy
|
||||
|
||||
There are the following approaches for alerting and recording rules across
|
||||
The following are the approaches for alerting and recording rules across
|
||||
[multiple tenants](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy):
|
||||
|
||||
* To run a separate `vmalert` instance per each tenant.
|
||||
|
@ -238,12 +230,15 @@ If `-clusterMode` is enabled, then `-datasource.url`, `-remoteRead.url` and `-re
|
|||
contain only the hostname without tenant id. For example: `-datasource.url=http://vmselect:8481`.
|
||||
`vmalert` automatically adds the specified tenant to urls per each recording rule in this case.
|
||||
|
||||
If `-clusterMode` is enabled and the `tenant` in a particular group is missing, then the tenant value
|
||||
is obtained from `-defaultTenant.prometheus` or `-defaultTenant.graphite` depending on the `type` of the group.
|
||||
|
||||
The enterprise version of vmalert is available in `vmutils-*-enterprise.tar.gz` files
|
||||
at [release page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) and in `*-enterprise`
|
||||
tags at [Docker Hub](https://hub.docker.com/r/victoriametrics/vmalert/tags).
|
||||
|
||||
|
||||
### WEB
|
||||
### Web
|
||||
|
||||
`vmalert` runs a web-server (`-httpListenAddr`) for serving metrics and alerts endpoints:
|
||||
* `http://<vmalert-addr>` - UI;
|
||||
|
@ -266,7 +261,7 @@ to set `-datasource.appendTypePrefix` flag to `true`, so vmalert can adjust URL
|
|||
## Rules backfilling
|
||||
|
||||
vmalert supports alerting and recording rules backfilling (aka `replay`). In replay mode vmalert
|
||||
can read the same rules configuration as normally, evaluate them on the given time range and backfill
|
||||
can read the same rules configuration as normal, evaluate them on the given time range and backfill
|
||||
results via remote write to the configured storage. vmalert supports any PromQL/MetricsQL compatible
|
||||
data source for backfilling.
|
||||
|
||||
|
@ -312,8 +307,8 @@ max range per request: 8h20m0s
|
|||
In `replay` mode all groups are executed sequentially one-by-one. Rules within the group are
|
||||
executed sequentially as well (`concurrency` setting is ignored). Vmalert sends rule's expression
|
||||
to [/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries) endpoint
|
||||
of the configured `-datasource.url`. Returned data then processed according to the rule type and
|
||||
backfilled to `-remoteWrite.url` via [Remote Write protocol](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations).
|
||||
of the configured `-datasource.url`. Returned data is then processed according to the rule type and
|
||||
backfilled to `-remoteWrite.url` via [Remote Write protocol](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations).
|
||||
Vmalert respects `evaluationInterval` value set by flag or per-group during the replay.
|
||||
Vmalert automatically disables caching on the VictoriaMetrics side by sending the `nocache=1` param. This helps
|
||||
prevent cache pollution and unwanted adjustment of time range boundaries during backfilling.
|
||||
|
|
|
@ -121,7 +121,8 @@ users:
|
|||
- src_paths: ["/api/v1/write"]
|
||||
url_prefix: "http://vminsert:8480/insert/42/prometheus"
|
||||
headers:
|
||||
- "X-Scope-OrgID: abc"```
|
||||
- "X-Scope-OrgID: abc"
|
||||
```
|
||||
|
||||
The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
|
||||
This may be useful for passing secrets to the config.
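A minimal sketch, assuming the secret is passed via a hypothetical `FOO_PASSWORD` environment variable:

```
users:
- username: "foo"
  password: "%{FOO_PASSWORD}"
  url_prefix: "http://victoriametrics:8428"
```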
|
||||
|
|
|
@ -13,8 +13,8 @@ Supported storage systems for backups:
|
|||
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/docs/mimic/radosgw/s3/) or [Swift](https://www.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
|
||||
* Local filesystem. Example: `fs://</absolute/path/to/backup>`
|
||||
|
||||
`vmbackup` supports incremental and full backups. Incremental backups created automatically if the destination path already contains data from the previous backup.
|
||||
Full backups can be sped up with `-origin` pointing to already existing backup on the same remote storage. In this case `vmbackup` makes server-side copy for the shared
|
||||
`vmbackup` supports incremental and full backups. Incremental backups are created automatically if the destination path already contains data from the previous backup.
|
||||
Full backups can be sped up with `-origin` pointing to an already existing backup on the same remote storage. In this case `vmbackup` makes server-side copy for the shared
|
||||
data between the existing backup and new backup. It saves time and costs on data transfer.
|
||||
|
||||
Backup process can be interrupted at any time. It is automatically resumed from the interruption point when restarting `vmbackup` with the same args.
|
||||
|
@ -38,7 +38,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-
|
|||
```
|
||||
|
||||
* `</path/to/victoria-metrics-data>` - path to VictoriaMetrics data pointed by `-storageDataPath` command-line flag in single-node VictoriaMetrics or in cluster `vmstorage`.
|
||||
There is no need to stop VictoriaMetrics for creating backups, since they are performed from immutable [instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots).
|
||||
There is no need to stop VictoriaMetrics for creating backups since they are performed from immutable [instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots).
|
||||
* `<local-snapshot>` is the snapshot to back up. See [how to create instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots). `vmbackup` can create the snapshot on itself if `-snapshot.createURL` command-line flag is set to an url for creating snapshots. In this case `-snapshotName` flag isn't needed.
|
||||
* `<bucket>` is an already existing name for [GCS bucket](https://cloud.google.com/storage/docs/creating-buckets).
|
||||
* `<path/to/new/backup>` is the destination path where new backup will be placed.
|
||||
|
@ -58,7 +58,7 @@ It saves time and network bandwidth costs by performing server-side copy for the
|
|||
|
||||
### Incremental backups
|
||||
|
||||
Incremental backups performed if `-dst` points to an already existing backup. In this case only new data uploaded to remote storage.
|
||||
Incremental backups are performed if `-dst` points to an already existing backup. In this case only new data is uploaded to remote storage.
|
||||
It saves time and network bandwidth costs when working with big backups:
|
||||
|
||||
```
|
||||
|
@ -91,7 +91,7 @@ Where `<daily-snapshot>` is the snapshot for the last day `<YYYYMMDD>`.
|
|||
This approach saves network bandwidth costs on hourly backups (since they are incremental) and allows recovering data from either the last hour (`latest` backup)
|
||||
or from any day (`YYYYMMDD` backups). Note that the hourly backup shouldn't run while the daily backup is being created.
|
||||
|
||||
Do not forget removing old snapshots and backups when they are no longer needed for saving storage costs.
|
||||
Do not forget to remove old snapshots and backups when they are no longer needed in order to save storage costs.
|
||||
|
||||
See also [vmbackupmanager tool](https://docs.victoriametrics.com/vmbackupmanager.html) for automating smart backups.
|
||||
|
||||
|
@ -101,19 +101,19 @@ See also [vmbackupmanager tool](https://docs.victoriametrics.com/vmbackupmanager
|
|||
The backup algorithm is the following:
|
||||
|
||||
1. Collect information about files in the `-snapshotName`, in the `-dst` and in the `-origin`.
|
||||
2. Determine files in `-dst`, which are missing in `-snapshotName`, and delete them. These are usually small files, which are already merged into bigger files in the snapshot.
|
||||
3. Determine files from `-snapshotName`, which are missing in `-dst`. These are usually small new files and bigger merged files.
|
||||
4. Determine files from step 3, which exist in the `-origin`, and perform server-side copy of these files from `-origin` to `-dst`.
|
||||
2. Determine which files in `-dst` are missing in `-snapshotName`, and delete them. These are usually small files, which are already merged into bigger files in the snapshot.
|
||||
3. Determine which files in `-snapshotName` are missing in `-dst`. These are usually small new files and bigger merged files.
|
||||
4. Determine which files from step 3 exist in the `-origin`, and perform server-side copy of these files from `-origin` to `-dst`.
|
||||
These are usually the biggest and the oldest files, which are shared between backups.
|
||||
5. Upload the remaining files from step 3 from `-snapshotName` to `-dst`.
|
||||
|
||||
The algorithm splits source files into 100 MB chunks in the backup. Each chunk stored as a separate file in the backup.
|
||||
The algorithm splits source files into 1 GiB chunks in the backup. Each chunk is stored as a separate file in the backup.
|
||||
Such splitting minimizes the amount of data which needs to be re-transferred after temporary errors.
|
||||
|
||||
`vmbackup` relies on [instant snapshot](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) properties:
|
||||
|
||||
- All the files in the snapshot are immutable.
|
||||
- Old files periodically merged into new files.
|
||||
- Old files are periodically merged into new files.
|
||||
- Smaller files have a higher probability of being merged.
|
||||
- Consecutive snapshots share many identical files.
|
||||
|
||||
|
|
|
@ -102,11 +102,11 @@ The result on the GCS bucket
|
|||
|
||||
- The root folder
|
||||
|
||||

|
||||

|
||||
|
||||
- The latest folder
|
||||
|
||||

|
||||

|
||||
|
||||
## Backup Retention Policy
|
||||
|
||||
|
@ -121,7 +121,7 @@ Backup retention policy is controlled by:
|
|||
|
||||
Let’s assume we have a backup manager collecting daily backups for the past 10 days.
|
||||
|
||||

|
||||

|
||||
|
||||
|
||||
We enable backup retention policy for backup manager by using following configuration:
|
||||
|
@ -147,4 +147,4 @@ info app/vmbackupmanager/retention.go:106 daily backups to delete [daily/2
|
|||
|
||||
The result on the GCS bucket. We see only 3 daily backups:
|
||||
|
||||

|
||||

|
||||
|
|
23
go.mod
|
@ -7,13 +7,16 @@ require (
|
|||
// Do not use the original github.com/valyala/fasthttp because of issues
|
||||
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
|
||||
github.com/VictoriaMetrics/fasthttp v1.1.0
|
||||
github.com/VictoriaMetrics/metrics v1.18.0
|
||||
github.com/VictoriaMetrics/metricsql v0.27.0
|
||||
github.com/VictoriaMetrics/metrics v1.18.1
|
||||
github.com/VictoriaMetrics/metricsql v0.30.0
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.41.8
|
||||
github.com/aws/aws-sdk-go v1.41.19
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2
|
||||
github.com/cheggaaa/pb/v3 v3.0.8
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
|
||||
github.com/envoyproxy/go-control-plane v0.10.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/go-kit/kit v0.12.0
|
||||
github.com/golang/snappy v0.0.4
|
||||
|
@ -28,14 +31,14 @@ require (
|
|||
github.com/valyala/fastjson v1.6.3
|
||||
github.com/valyala/fastrand v1.1.0
|
||||
github.com/valyala/fasttemplate v1.2.1
|
||||
github.com/valyala/gozstd v1.14.1
|
||||
github.com/valyala/gozstd v1.14.2
|
||||
github.com/valyala/quicktemplate v1.7.0
|
||||
golang.org/x/net v0.0.0-20211020060615-d418f374d309
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1
|
||||
golang.org/x/sys v0.0.0-20211020174200-9d6173849985
|
||||
google.golang.org/api v0.59.0
|
||||
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c // indirect
|
||||
google.golang.org/grpc v1.41.0 // indirect
|
||||
golang.org/x/net v0.0.0-20211105192438-b53810dc28af
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
|
||||
golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42
|
||||
google.golang.org/api v0.60.0
|
||||
google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247 // indirect
|
||||
google.golang.org/grpc v1.42.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
|
|
64
go.sum
|
@ -106,10 +106,10 @@ github.com/VictoriaMetrics/fastcache v1.7.0/go.mod h1:n7Sl+ioh/HlWeYHLSIBIE8TcZF
|
|||
github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a/ueoLdLL0=
|
||||
github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ=
|
||||
github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
|
||||
github.com/VictoriaMetrics/metrics v1.18.0 h1:vov5NxDHRSXFbdiH4dYLYEjKLoAXXSQ7hcnG8TSD9JQ=
|
||||
github.com/VictoriaMetrics/metrics v1.18.0/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
|
||||
github.com/VictoriaMetrics/metricsql v0.27.0 h1:S6xWFKEyu+EbPS3tYr1cWeRza61L3e4tYcbBqMakuX0=
|
||||
github.com/VictoriaMetrics/metricsql v0.27.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
|
||||
github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
|
||||
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
|
||||
github.com/VictoriaMetrics/metricsql v0.30.0 h1:MGLqMcnVU2PTg+eOhqxpizQni6YwsYPbfv7rsHs+kU0=
|
||||
github.com/VictoriaMetrics/metricsql v0.30.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
|
||||
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
|
||||
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
|
||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||
|
@ -154,8 +154,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
|
|||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go v1.41.8 h1:j6imzwVyWQYuQxbkPmg2MdMmLB+Zw+U3Ewi59YF8Rwk=
|
||||
github.com/aws/aws-sdk-go v1.41.8/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go v1.41.19 h1:9QR2WTNj5bFdrNjRY9SeoG+3hwQmKXGX16851vdh+N8=
|
||||
github.com/aws/aws-sdk-go v1.41.19/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
|
||||
|
@ -180,6 +180,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
|
|||
github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
|
||||
|
@ -199,8 +201,14 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
|||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -253,7 +261,11 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.0 h1:WVt4HEPbdRbRD/PKKPbPnIVavO6gk/h673jWyIJ016k=
github.com/envoyproxy/go-control-plane v0.10.0/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.2 h1:JiO+kJTpmYGjEodY7O1Zk8oZcNz1+f30UtwtXoFUPzE=
github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -504,6 +516,7 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -578,6 +591,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@@ -650,6 +664,7 @@ github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH6
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -666,6 +681,7 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -812,6 +828,7 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -908,6 +925,8 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -959,8 +978,8 @@ github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/gozstd v1.14.1 h1:xkPAeHe8U/w/ocS6PywjkH406lKdratZuxhb1UTgO/s=
github.com/valyala/gozstd v1.14.1/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
github.com/valyala/gozstd v1.14.2 h1:mtK5+UU774dXzuWtCqukhLyVOCM5NClDU3wUDazx90w=
github.com/valyala/gozstd v1.14.2/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
github.com/valyala/histogram v1.1.2/go.mod h1:CZAr6gK9dbD7hYx2s8WSPh0p5x5wETjC+2b3PJVtEdg=
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
@@ -1042,6 +1061,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -1097,6 +1117,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1155,9 +1176,10 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI=
golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211105192438-b53810dc28af h1:SMeNJG/vclJ5wyBBd4xupMsSJIHTd1coW9g7q6KOjmY=
golang.org/x/net v0.0.0-20211105192438-b53810dc28af/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1173,8 +1195,9 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1276,14 +1299,15 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211020174200-9d6173849985 h1:LOlKVhfDyahgmqa97awczplwkjzNaELFg3zRIJ13RYo=
golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42 h1:G2DDmludOQZoWbpCr7OKDxnl478ZBGMcOhrv+ooX/Q4=
golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1431,8 +1455,8 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
google.golang.org/api v0.59.0 h1:fPfFO7gttlXYo2ALuD3HxJzh8vaF++4youI0BkFL6GE=
google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
google.golang.org/api v0.60.0 h1:eq/zs5WPH4J9undYM9IP1O7dSr7Yh8Y0GtSCpzGzIUk=
google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1504,10 +1528,10 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c h1:FqrtZMB5Wr+/RecOM3uPJNPfWR8Upb5hAPnt7PU6i4k=
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247 h1:ZONpjmFT5e+I/0/xE3XXbG5OIvX2hRYzol04MhKBl2E=
google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1540,8 +1564,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=

@@ -99,7 +99,9 @@ func TestPositiveFloatToDecimal(t *testing.T) {
	f(1<<55, 3602879701896396, 1)
	f(1<<62, 4611686018427387, 3)
	f(1<<63, 9223372036854775, 3)
	f(1<<64, 18446744073709548, 3)
	// Skip this test, since M1 returns 18446744073709551 instead of 18446744073709548
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1653
	// f(1<<64, 18446744073709548, 3)
	f(1<<65, 368934881474191, 5)
	f(1<<66, 737869762948382, 5)
	f(1<<67, 1475739525896764, 5)
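The skipped case comes from platform-dependent floating-point rounding: 2^64 is exactly representable in float64, but converting it to a decimal mantissa/exponent pair goes through float arithmetic whose last digit can differ across CPUs. A minimal sketch of the values involved (the mantissas quoted in the comments are taken from the test and the linked issue; the conversion path inside the decimal package is not reproduced here):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// 2^64 == 18446744073709551616 is exactly representable in float64.
	f := math.Ldexp(1, 64)
	// 17 significant digits capture everything a float64 carries:
	fmt.Println(strconv.FormatFloat(f, 'e', 16, 64)) // 1.8446744073709552e+19
	// The test stores the value as mantissa*10^exponent with a 17-digit mantissa;
	// the rescaling arithmetic rounds the last digit differently per CPU:
	// 18446744073709548 (amd64) vs 18446744073709551 (Apple M1), per issue 1653.
}
```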
@@ -40,8 +40,8 @@ var (
		"See https://www.robustperception.io/using-external-urls-and-proxies-with-prometheus")
	httpAuthUsername = flag.String("httpAuth.username", "", "Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password")
	httpAuthPassword = flag.String("httpAuth.password", "", "Password for HTTP Basic Auth. The authentication is disabled if -httpAuth.username is empty")
	metricsAuthKey = flag.String("metricsAuthKey", "", "Auth key for /metrics. It overrides httpAuth settings")
	pprofAuthKey = flag.String("pprofAuthKey", "", "Auth key for /debug/pprof. It overrides httpAuth settings")
	metricsAuthKey = flag.String("metricsAuthKey", "", "Auth key for /metrics. It must be passed via authKey query arg. It overrides httpAuth.* settings")
	pprofAuthKey = flag.String("pprofAuthKey", "", "Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings")

	disableResponseCompression = flag.Bool("http.disableResponseCompression", false, "Disable compression of HTTP responses to save CPU resources. By default compression is enabled to save network bandwidth")
	maxGracefulShutdownDuration = flag.Duration("http.maxGracefulShutdownDuration", 7*time.Second, `The maximum duration for a graceful shutdown of the HTTP server. A highly loaded server may require increased value for a graceful shutdown`)
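As the updated flag descriptions state, these per-endpoint auth keys are passed via the authKey query arg rather than via Basic Auth headers. A minimal probe, assuming a single-node instance on the default localhost:8428 started with -metricsAuthKey=top-secret (both the address and the key value here are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// The auth key travels in the authKey query arg, overriding httpAuth.* settings.
	resp, err := http.Get("http://localhost:8428/metrics?authKey=top-secret")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("status=%d, %d bytes of metrics\n", resp.StatusCode, len(body))
}
```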
@@ -11,20 +11,12 @@ import (
func logAllFlags() {
	Infof("build version: %s", buildinfo.Version)
	Infof("command line flags")
	isSetMap := make(map[string]bool)
	flag.Visit(func(f *flag.Flag) {
		isSetMap[f.Name] = true
	})
	flag.VisitAll(func(f *flag.Flag) {
		lname := strings.ToLower(f.Name)
		value := f.Value.String()
		if flagutil.IsSecretFlag(lname) {
			value = "secret"
		}
		isSet := "false"
		if isSetMap[f.Name] {
			isSet = "true"
		}
		Infof("flag %q=%q (is_set=%s)", f.Name, value, isSet)
		Infof("flag %q=%q", f.Name, value)
	})
}
@@ -17,6 +17,51 @@ import (
	"golang.org/x/oauth2/clientcredentials"
)

// Secret represents a string secret such as password or auth token.
//
// It is marshaled to "<secret>" string in yaml.
//
// This is needed for hiding secret strings in /config page output.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1764
type Secret struct {
	s string
}

// NewSecret returns new secret for s.
func NewSecret(s string) *Secret {
	if s == "" {
		return nil
	}
	return &Secret{
		s: s,
	}
}

// MarshalYAML implements yaml.Marshaler interface.
//
// It substitutes the secret with "<secret>" string.
func (s *Secret) MarshalYAML() (interface{}, error) {
	return "<secret>", nil
}

// UnmarshalYAML implements yaml.Unmarshaler interface.
func (s *Secret) UnmarshalYAML(f func(interface{}) error) error {
	var secret string
	if err := f(&secret); err != nil {
		return fmt.Errorf("cannot parse secret: %w", err)
	}
	s.s = secret
	return nil
}

// String returns the secret in plaintext.
func (s *Secret) String() string {
	if s == nil {
		return ""
	}
	return s.s
}

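The effect of MarshalYAML is easiest to see end to end. A minimal, self-contained sketch using a local stand-in for the Secret type above (gopkg.in/yaml.v2 is the library the surrounding config code already uses):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// secret mirrors promauth.Secret for illustration only.
type secret struct{ s string }

func (s *secret) MarshalYAML() (interface{}, error) { return "<secret>", nil }

type basicAuthConfig struct {
	Username string  `yaml:"username"`
	Password *secret `yaml:"password,omitempty"`
}

func main() {
	cfg := basicAuthConfig{Username: "user", Password: &secret{s: "top-secret"}}
	out, err := yaml.Marshal(&cfg)
	if err != nil {
		panic(err)
	}
	// The plaintext never reaches the output, so /config pages stay safe to share:
	// username: user
	// password: <secret>
	fmt.Print(string(out))
}
```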
// TLSConfig represents TLS config.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config
@@ -32,23 +77,23 @@ type TLSConfig struct {
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/
type Authorization struct {
	Type            string `yaml:"type,omitempty"`
	Credentials     string `yaml:"credentials,omitempty"`
	CredentialsFile string `yaml:"credentials_file,omitempty"`
	Type            string  `yaml:"type,omitempty"`
	Credentials     *Secret `yaml:"credentials,omitempty"`
	CredentialsFile string  `yaml:"credentials_file,omitempty"`
}

// BasicAuthConfig represents basic auth config.
type BasicAuthConfig struct {
	Username     string `yaml:"username"`
	Password     string `yaml:"password,omitempty"`
	PasswordFile string `yaml:"password_file,omitempty"`
	Username     string  `yaml:"username"`
	Password     *Secret `yaml:"password,omitempty"`
	PasswordFile string  `yaml:"password_file,omitempty"`
}

// HTTPClientConfig represents http client config.
type HTTPClientConfig struct {
	Authorization   *Authorization   `yaml:"authorization,omitempty"`
	BasicAuth       *BasicAuthConfig `yaml:"basic_auth,omitempty"`
	BearerToken     string `yaml:"bearer_token,omitempty"`
	BearerToken     *Secret `yaml:"bearer_token,omitempty"`
	BearerTokenFile string        `yaml:"bearer_token_file,omitempty"`
	OAuth2          *OAuth2Config `yaml:"oauth2,omitempty"`
	TLSConfig       *TLSConfig    `yaml:"tls_config,omitempty"`
@@ -58,7 +103,7 @@ type HTTPClientConfig struct {
type ProxyClientConfig struct {
	Authorization   *Authorization   `yaml:"proxy_authorization,omitempty"`
	BasicAuth       *BasicAuthConfig `yaml:"proxy_basic_auth,omitempty"`
	BearerToken     string `yaml:"proxy_bearer_token,omitempty"`
	BearerToken     *Secret `yaml:"proxy_bearer_token,omitempty"`
	BearerTokenFile string     `yaml:"proxy_bearer_token_file,omitempty"`
	TLSConfig       *TLSConfig `yaml:"proxy_tls_config,omitempty"`
}
@@ -66,11 +111,11 @@ type ProxyClientConfig struct {
// OAuth2Config represent OAuth2 configuration
type OAuth2Config struct {
	ClientID         string   `yaml:"client_id"`
	ClientSecret     string   `yaml:"client_secret"`
	ClientSecretFile string   `yaml:"client_secret_file"`
	Scopes           []string `yaml:"scopes"`
	ClientSecret     *Secret  `yaml:"client_secret,omitempty"`
	ClientSecretFile string   `yaml:"client_secret_file,omitempty"`
	Scopes           []string `yaml:"scopes,omitempty"`
	TokenURL         string   `yaml:"token_url"`
	EndpointParams   map[string]string `yaml:"endpoint_params"`
	EndpointParams   map[string]string `yaml:"endpoint_params,omitempty"`
}

// String returns string representation of o.
@@ -83,10 +128,10 @@ func (o *OAuth2Config) validate() error {
	if o.ClientID == "" {
		return fmt.Errorf("client_id cannot be empty")
	}
	if o.ClientSecret == "" && o.ClientSecretFile == "" {
	if o.ClientSecret == nil && o.ClientSecretFile == "" {
		return fmt.Errorf("ClientSecret or ClientSecretFile must be set")
	}
	if o.ClientSecret != "" && o.ClientSecretFile != "" {
	if o.ClientSecret != nil && o.ClientSecretFile != "" {
		return fmt.Errorf("ClientSecret and ClientSecretFile cannot be set simultaneously")
	}
	if o.TokenURL == "" {
@@ -109,7 +154,7 @@ func newOAuth2ConfigInternal(baseDir string, o *OAuth2Config) (*oauth2ConfigInte
	oi := &oauth2ConfigInternal{
		cfg: &clientcredentials.Config{
			ClientID:       o.ClientID,
			ClientSecret:   o.ClientSecret,
			ClientSecret:   o.ClientSecret.String(),
			TokenURL:       o.TokenURL,
			Scopes:         o.Scopes,
			EndpointParams: urlValuesFromMap(o.EndpointParams),
@@ -238,12 +283,12 @@ func (ac *Config) NewTLSConfig() *tls.Config {

// NewConfig creates auth config for the given hcc.
func (hcc *HTTPClientConfig) NewConfig(baseDir string) (*Config, error) {
	return NewConfig(baseDir, hcc.Authorization, hcc.BasicAuth, hcc.BearerToken, hcc.BearerTokenFile, hcc.OAuth2, hcc.TLSConfig)
	return NewConfig(baseDir, hcc.Authorization, hcc.BasicAuth, hcc.BearerToken.String(), hcc.BearerTokenFile, hcc.OAuth2, hcc.TLSConfig)
}

// NewConfig creates auth config for the given pcc.
func (pcc *ProxyClientConfig) NewConfig(baseDir string) (*Config, error) {
	return NewConfig(baseDir, pcc.Authorization, pcc.BasicAuth, pcc.BearerToken, pcc.BearerTokenFile, nil, pcc.TLSConfig)
	return NewConfig(baseDir, pcc.Authorization, pcc.BasicAuth, pcc.BearerToken.String(), pcc.BearerTokenFile, nil, pcc.TLSConfig)
}

// NewConfig creates auth config from the given args.
@@ -256,7 +301,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
		azType = az.Type
	}
	if az.CredentialsFile != "" {
		if az.Credentials != "" {
		if az.Credentials != nil {
			return nil, fmt.Errorf("both `credentials`=%q and `credentials_file`=%q are set", az.Credentials, az.CredentialsFile)
		}
		filePath := getFilepath(baseDir, az.CredentialsFile)
@@ -271,7 +316,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
		authDigest = fmt.Sprintf("custom(type=%q, credsFile=%q)", az.Type, filePath)
	} else {
		getAuthHeader = func() string {
			return azType + " " + az.Credentials
			return azType + " " + az.Credentials.String()
		}
		authDigest = fmt.Sprintf("custom(type=%q, creds=%q)", az.Type, az.Credentials)
	}
@@ -284,7 +329,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
		return nil, fmt.Errorf("missing `username` in `basic_auth` section")
	}
	if basicAuth.PasswordFile != "" {
		if basicAuth.Password != "" {
		if basicAuth.Password != nil {
			return nil, fmt.Errorf("both `password`=%q and `password_file`=%q are set in `basic_auth` section", basicAuth.Password, basicAuth.PasswordFile)
		}
		filePath := getFilepath(baseDir, basicAuth.PasswordFile)
@@ -303,7 +348,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
	} else {
		getAuthHeader = func() string {
			// See https://en.wikipedia.org/wiki/Basic_access_authentication
			token := basicAuth.Username + ":" + basicAuth.Password
			token := basicAuth.Username + ":" + basicAuth.Password.String()
			token64 := base64.StdEncoding.EncodeToString([]byte(token))
			return "Basic " + token64
		}
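The Basic header construction above is easy to verify by hand; this snippet reproduces the exact value the tests below expect for user/password:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// RFC 7617: the header value is "Basic " + base64(username + ":" + password).
	token := base64.StdEncoding.EncodeToString([]byte("user:password"))
	fmt.Println("Basic " + token) // Basic dXNlcjpwYXNzd29yZA==
}
```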
@@ -27,7 +27,7 @@ func TestNewConfig(t *testing.T) {
			args: args{
				oauth: &OAuth2Config{
					ClientID:     "some-id",
					ClientSecret: "some-secret",
					ClientSecret: NewSecret("some-secret"),
					TokenURL:     "http://localhost:8511",
				},
			},
@@ -49,7 +49,7 @@ func TestNewConfig(t *testing.T) {
			args: args{
				oauth: &OAuth2Config{
					ClientID:         "some-id",
					ClientSecret:     "some-secret",
					ClientSecret:     NewSecret("some-secret"),
					ClientSecretFile: "testdata/test_secretfile.txt",
					TokenURL:         "http://localhost:8511",
				},
@@ -61,7 +61,7 @@ func TestNewConfig(t *testing.T) {
			args: args{
				basicAuth: &BasicAuthConfig{
					Username: "user",
					Password: "password",
					Password: NewSecret("password"),
				},
			},
			expectHeader: "Basic dXNlcjpwYXNzd29yZA==",
@@ -81,7 +81,7 @@ func TestNewConfig(t *testing.T) {
			args: args{
				az: &Authorization{
					Type:        "Bearer",
					Credentials: "Value",
					Credentials: NewSecret("Value"),
				},
			},
			expectHeader: "Bearer Value",
@@ -23,7 +23,8 @@ import (
var (
	maxScrapeSize = flagutil.NewBytes("promscrape.maxScrapeSize", 16*1024*1024, "The maximum size of scrape response in bytes to process from Prometheus targets. "+
		"Bigger responses are rejected")
	disableCompression = flag.Bool("promscrape.disableCompression", false, "Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. "+
	maxResponseHeadersSize = flagutil.NewBytes("promscrape.maxResponseHeadersSize", 4096, "The maximum size of http response headers from Prometheus scrape targets")
	disableCompression     = flag.Bool("promscrape.disableCompression", false, "Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. "+
		"This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. "+
		"It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control")
	disableKeepAlive = flag.Bool("promscrape.disableKeepAlive", false, "Whether to disable HTTP keep-alive connections when scraping all the targets. "+
@@ -77,10 +78,11 @@ func newClient(sw *ScrapeWork) *client {
		if isTLS {
			tlsCfg = sw.ProxyAuthConfig.NewTLSConfig()
		}
		proxyURLOrig := proxyURL
		getProxyAuthHeader = func() string {
			return proxyURL.GetAuthHeader(sw.ProxyAuthConfig)
			return proxyURLOrig.GetAuthHeader(sw.ProxyAuthConfig)
		}
		proxyURL = proxy.URL{}
		proxyURL = &proxy.URL{}
	}
	if !strings.Contains(host, ":") {
		if !isTLS {
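The proxyURLOrig snapshot matters because Go closures capture variables, not values: the statement right after the closure clears proxyURL, and without the copy the auth-header closure would see the cleared value. A minimal illustration of the pitfall and the fix, using plain strings instead of proxy.URL:

```go
package main

import "fmt"

func main() {
	proxyURL := "http://proxy:3128"
	getHeader := func() string { return proxyURL } // captures the variable itself
	proxyURL = ""                                  // the closure now sees ""
	fmt.Printf("buggy: %q\n", getHeader())

	proxyURL = "http://proxy:3128"
	proxyURLOrig := proxyURL // snapshot the value before clearing
	getHeaderFixed := func() string { return proxyURLOrig }
	proxyURL = ""
	fmt.Printf("fixed: %q\n", getHeaderFixed()) // still "http://proxy:3128"
}
```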
@@ -104,22 +106,24 @@ func newClient(sw *ScrapeWork) *client {
		WriteTimeout:                 10 * time.Second,
		MaxResponseBodySize:          maxScrapeSize.N,
		MaxIdempotentRequestAttempts: 1,
		ReadBufferSize:               maxResponseHeadersSize.N,
	}
	var sc *http.Client
	var proxyURLFunc func(*http.Request) (*url.URL, error)
	if proxyURL := sw.ProxyURL.URL(); proxyURL != nil {
		proxyURLFunc = http.ProxyURL(proxyURL)
	if pu := sw.ProxyURL.URL(); pu != nil {
		proxyURLFunc = http.ProxyURL(pu)
	}
	sc = &http.Client{
		Transport: &http.Transport{
			TLSClientConfig:     tlsCfg,
			Proxy:               proxyURLFunc,
			TLSHandshakeTimeout: 10 * time.Second,
			IdleConnTimeout:     2 * sw.ScrapeInterval,
			DisableCompression:  *disableCompression || sw.DisableCompression,
			DisableKeepAlives:   *disableKeepAlive || sw.DisableKeepAlive,
			DialContext:         statStdDial,
			MaxIdleConnsPerHost: 100,
			TLSClientConfig:        tlsCfg,
			Proxy:                  proxyURLFunc,
			TLSHandshakeTimeout:    10 * time.Second,
			IdleConnTimeout:        2 * sw.ScrapeInterval,
			DisableCompression:     *disableCompression || sw.DisableCompression,
			DisableKeepAlives:      *disableKeepAlive || sw.DisableKeepAlive,
			DialContext:            statStdDial,
			MaxIdleConnsPerHost:    100,
			MaxResponseHeaderBytes: int64(maxResponseHeadersSize.N),

			// Set timeout for receiving the first response byte,
			// since the duration for reading the full response can be much bigger because of stream parsing.
@@ -125,7 +125,7 @@ type ScrapeConfig struct {
	Scheme           string                    `yaml:"scheme,omitempty"`
	Params           map[string][]string       `yaml:"params,omitempty"`
	HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
	ProxyURL proxy.URL `yaml:"proxy_url,omitempty"`
	ProxyURL *proxy.URL `yaml:"proxy_url,omitempty"`
	RelabelConfigs       []promrelabel.RelabelConfig `yaml:"relabel_configs,omitempty"`
	MetricRelabelConfigs []promrelabel.RelabelConfig `yaml:"metric_relabel_configs,omitempty"`
	SampleLimit          int                         `yaml:"sample_limit,omitempty"`
@@ -240,27 +240,29 @@ func loadStaticConfigs(path string) ([]StaticConfig, error) {
}

// loadConfig loads Prometheus config from the given path.
func loadConfig(path string) (*Config, error) {
func loadConfig(path string) (*Config, []byte, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("cannot read Prometheus config from %q: %w", path, err)
		return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %w", path, err)
	}
	var c Config
	if err := c.parseData(data, path); err != nil {
		return nil, fmt.Errorf("cannot parse Prometheus config from %q: %w", path, err)
	dataNew, err := c.parseData(data, path)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot parse Prometheus config from %q: %w", path, err)
	}
	return &c, nil
	return &c, dataNew, nil
}

func loadScrapeConfigFiles(baseDir string, scrapeConfigFiles []string) ([]ScrapeConfig, error) {
func loadScrapeConfigFiles(baseDir string, scrapeConfigFiles []string) ([]ScrapeConfig, []byte, error) {
	var scrapeConfigs []ScrapeConfig
	var scsData []byte
	for _, filePath := range scrapeConfigFiles {
		filePath := getFilepath(baseDir, filePath)
		paths := []string{filePath}
		if strings.Contains(filePath, "*") {
			ps, err := filepath.Glob(filePath)
			if err != nil {
				return nil, fmt.Errorf("invalid pattern %q in `scrape_config_files`: %w", filePath, err)
				return nil, nil, fmt.Errorf("invalid pattern %q: %w", filePath, err)
			}
			sort.Strings(ps)
			paths = ps
@@ -268,17 +270,19 @@ func loadScrapeConfigFiles(baseDir string, scrapeConfigFiles []string) ([]Scrape
		for _, path := range paths {
			data, err := ioutil.ReadFile(path)
			if err != nil {
				return nil, fmt.Errorf("cannot load %q from `scrape_config_files`: %w", filePath, err)
				return nil, nil, fmt.Errorf("cannot load %q: %w", path, err)
			}
			data = envtemplate.Replace(data)
			var scs []ScrapeConfig
			if err = yaml.UnmarshalStrict(data, &scs); err != nil {
				return nil, fmt.Errorf("cannot parse %q from `scrape_config_files`: %w", filePath, err)
				return nil, nil, fmt.Errorf("cannot parse %q: %w", path, err)
			}
			scrapeConfigs = append(scrapeConfigs, scs...)
			scsData = append(scsData, '\n')
			scsData = append(scsData, data...)
		}
	}
	return scrapeConfigs, nil
	return scrapeConfigs, scsData, nil
}

// IsDryRun returns true if -promscrape.config.dryRun command-line flag is set
@@ -286,30 +290,31 @@ func IsDryRun() bool {
	return *dryRun
}

func (cfg *Config) parseData(data []byte, path string) error {
func (cfg *Config) parseData(data []byte, path string) ([]byte, error) {
	if err := unmarshalMaybeStrict(data, cfg); err != nil {
		return fmt.Errorf("cannot unmarshal data: %w", err)
		return nil, fmt.Errorf("cannot unmarshal data: %w", err)
	}
	absPath, err := filepath.Abs(path)
	if err != nil {
		return fmt.Errorf("cannot obtain abs path for %q: %w", path, err)
		return nil, fmt.Errorf("cannot obtain abs path for %q: %w", path, err)
	}
	cfg.baseDir = filepath.Dir(absPath)

	// Load cfg.ScrapeConfigFiles into c.ScrapeConfigs
	scs, err := loadScrapeConfigFiles(cfg.baseDir, cfg.ScrapeConfigFiles)
	scs, scsData, err := loadScrapeConfigFiles(cfg.baseDir, cfg.ScrapeConfigFiles)
	if err != nil {
		return fmt.Errorf("cannot load `scrape_config_files` from %q: %w", path, err)
		return nil, fmt.Errorf("cannot load `scrape_config_files` from %q: %w", path, err)
	}
	cfg.ScrapeConfigFiles = nil
	cfg.ScrapeConfigs = append(cfg.ScrapeConfigs, scs...)
	dataNew := append(data, scsData...)

	// Check that all the scrape configs have unique JobName
	m := make(map[string]struct{}, len(cfg.ScrapeConfigs))
	for i := range cfg.ScrapeConfigs {
		jobName := cfg.ScrapeConfigs[i].JobName
		if _, ok := m[jobName]; ok {
			return fmt.Errorf("duplicate `job_name` in `scrape_configs` loaded from %q: %q", path, jobName)
			return nil, fmt.Errorf("duplicate `job_name` in `scrape_configs` loaded from %q: %q", path, jobName)
		}
		m[jobName] = struct{}{}
	}
@@ -319,11 +324,11 @@ func (cfg *Config) parseData(data []byte, path string) error {
		sc := &cfg.ScrapeConfigs[i]
		swc, err := getScrapeWorkConfig(sc, cfg.baseDir, &cfg.Global)
		if err != nil {
			return fmt.Errorf("cannot parse `scrape_config` #%d: %w", i+1, err)
			return nil, fmt.Errorf("cannot parse `scrape_config` #%d: %w", i+1, err)
		}
		sc.swc = swc
	}
	return nil
	return dataNew, nil
}

func unmarshalMaybeStrict(data []byte, dst interface{}) error {
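loadConfig and parseData now also return the effective configuration bytes — the main file concatenated with the contents of every matched `scrape_config_files` entry — so a single comparison can tell whether anything relevant changed. A hypothetical sketch of how a caller might use those bytes for reload decisions (the helper below is illustrative only, not the actual -promscrape.config reload code):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// configChanged hashes the concatenated config data returned by loadConfig,
// so edits inside scrape_config_files are caught too, not only edits in the
// main config file.
func configChanged(prevSum [32]byte, data []byte) (bool, [32]byte) {
	sum := sha256.Sum256(data)
	return sum != prevSum, sum
}

func main() {
	var last [32]byte
	changed, sum := configChanged(last, []byte("scrape_configs: []\n"))
	fmt.Println(changed) // true on first load
	last = sum
	changed, _ = configChanged(last, []byte("scrape_configs: []\n"))
	fmt.Println(changed) // false: identical bytes, no reload needed
}
```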
@@ -796,7 +801,7 @@ type scrapeWorkConfig struct {
	metricsPath     string
	scheme          string
	params          map[string][]string
	proxyURL        proxy.URL
	proxyURL        *proxy.URL
	proxyAuthConfig *promauth.Config
	authConfig      *promauth.Config
	honorLabels     bool
@@ -1,6 +1,7 @@
package promscrape

import (
	"bytes"
	"fmt"
	"reflect"
	"strconv"
@@ -67,39 +68,51 @@ func TestLoadStaticConfigs(t *testing.T) {
}

func TestLoadConfig(t *testing.T) {
	cfg, err := loadConfig("testdata/prometheus.yml")
	cfg, data, err := loadConfig("testdata/prometheus.yml")
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if cfg == nil {
		t.Fatalf("expecting non-nil config")
	}
	if data == nil {
		t.Fatalf("expecting non-nil data")
	}

	cfg, err = loadConfig("testdata/prometheus-with-scrape-config-files.yml")
	cfg, data, err = loadConfig("testdata/prometheus-with-scrape-config-files.yml")
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if cfg == nil {
		t.Fatalf("expecting non-nil config")
	}
	if data == nil {
		t.Fatalf("expecting non-nil data")
	}

	// Try loading non-existing file
	cfg, err = loadConfig("testdata/non-existing-file")
	cfg, data, err = loadConfig("testdata/non-existing-file")
	if err == nil {
		t.Fatalf("expecting non-nil error")
	}
	if cfg != nil {
		t.Fatalf("unexpected non-nil config: %#v", cfg)
	}
	if data != nil {
		t.Fatalf("unexpected data wit length=%d: %q", len(data), data)
	}

	// Try loading invalid file
	cfg, err = loadConfig("testdata/file_sd_1.yml")
	cfg, data, err = loadConfig("testdata/file_sd_1.yml")
	if err == nil {
		t.Fatalf("expecting non-nil error")
	}
	if cfg != nil {
		t.Fatalf("unexpected non-nil config: %#v", cfg)
	}
	if data != nil {
		t.Fatalf("unexpected data wit length=%d: %q", len(data), data)
	}
}

func TestBlackboxExporter(t *testing.T) {
@@ -122,9 +135,13 @@ scrape_configs:
      replacement: black:9115 # The blackbox exporter's real hostname:port.%
`
	var cfg Config
	if err := cfg.parseData([]byte(data), "sss"); err != nil {
	allData, err := cfg.parseData([]byte(data), "sss")
	if err != nil {
		t.Fatalf("cannot parase data: %s", err)
	}
	if string(allData) != data {
		t.Fatalf("invalid data returned from parseData;\ngot\n%s\nwant\n%s", allData, data)
	}
	sws := cfg.getStaticScrapeWork()
	resetNonEssentialFields(sws)
	swsExpected := []*ScrapeWork{{
@@ -187,9 +204,13 @@ scrape_configs:
  - files: [testdata/file_sd.json]
`
	var cfg Config
	if err := cfg.parseData([]byte(data), "sss"); err != nil {
	allData, err := cfg.parseData([]byte(data), "sss")
	if err != nil {
		t.Fatalf("cannot parase data: %s", err)
	}
	if string(allData) != data {
		t.Fatalf("invalid data returned from parseData;\ngot\n%s\nwant\n%s", allData, data)
	}
	sws := cfg.getFileSDScrapeWork(nil)
	if !equalStaticConfigForScrapeWorks(sws, sws) {
		t.Fatalf("unexpected non-equal static configs;\nsws:\n%#v", sws)
@@ -203,9 +224,13 @@ scrape_configs:
  - files: [testdata/file_sd_1.yml]
`
	var cfgNew Config
	if err := cfgNew.parseData([]byte(dataNew), "sss"); err != nil {
	allData, err = cfgNew.parseData([]byte(dataNew), "sss")
	if err != nil {
		t.Fatalf("cannot parse data: %s", err)
	}
	if string(allData) != dataNew {
		t.Fatalf("invalid data returned from parseData;\ngot\n%s\nwant\n%s", allData, dataNew)
	}
	swsNew := cfgNew.getFileSDScrapeWork(sws)
	if equalStaticConfigForScrapeWorks(swsNew, sws) {
		t.Fatalf("unexpected equal static configs;\nswsNew:\n%#v\nsws:\n%#v", swsNew, sws)
@@ -218,9 +243,13 @@ scrape_configs:
  file_sd_configs:
  - files: [testdata/prometheus.yml]
`
	if err := cfg.parseData([]byte(data), "sss"); err != nil {
	allData, err = cfg.parseData([]byte(data), "sss")
	if err != nil {
		t.Fatalf("cannot parse data: %s", err)
	}
	if string(allData) != data {
		t.Fatalf("invalid data returned from parseData;\ngot\n%s\nwant\n%s", allData, data)
	}
	sws = cfg.getFileSDScrapeWork(swsNew)
	if len(sws) != 0 {
		t.Fatalf("unexpected non-empty sws:\n%#v", sws)
@@ -233,9 +262,13 @@ scrape_configs:
  file_sd_configs:
  - files: [testdata/empty_target_file_sd.yml]
`
	if err := cfg.parseData([]byte(data), "sss"); err != nil {
	allData, err = cfg.parseData([]byte(data), "sss")
	if err != nil {
		t.Fatalf("cannot parse data: %s", err)
	}
	if string(allData) != data {
		t.Fatalf("invalid data returned from parseData;\ngot\n%s\nwant\n%s", allData, data)
	}
	sws = cfg.getFileSDScrapeWork(swsNew)
	if len(sws) != 0 {
		t.Fatalf("unexpected non-empty sws:\n%#v", sws)
@@ -244,17 +277,25 @@ scrape_configs:

func getFileSDScrapeWork(data []byte, path string) ([]*ScrapeWork, error) {
	var cfg Config
	if err := cfg.parseData(data, path); err != nil {
	allData, err := cfg.parseData(data, path)
	if err != nil {
		return nil, fmt.Errorf("cannot parse data: %w", err)
	}
	if !bytes.Equal(allData, data) {
		return nil, fmt.Errorf("invalid data returned from parseData;\ngot\n%s\nwant\n%s", allData, data)
	}
	return cfg.getFileSDScrapeWork(nil), nil
}

func getStaticScrapeWork(data []byte, path string) ([]*ScrapeWork, error) {
	var cfg Config
	if err := cfg.parseData(data, path); err != nil {
	allData, err := cfg.parseData(data, path)
	if err != nil {
		return nil, fmt.Errorf("cannot parse data: %w", err)
	}
	if !bytes.Equal(allData, data) {
		return nil, fmt.Errorf("invalid data returned from parseData;\ngot\n%s\nwant\n%s", allData, data)
	}
	return cfg.getStaticScrapeWork(), nil
}
@@ -44,10 +44,10 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
		return nil, err
	}
	if token != "" {
		if hcc.BearerToken != "" {
		if hcc.BearerToken != nil {
			return nil, fmt.Errorf("cannot set both token and bearer_token configs")
		}
		hcc.BearerToken = token
		hcc.BearerToken = promauth.NewSecret(token)
	}
	if len(sdc.Username) > 0 {
		if hcc.BasicAuth != nil {
@@ -104,9 +104,9 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
	return cfg, nil
}

func getToken(token *string) (string, error) {
func getToken(token *promauth.Secret) (string, error) {
	if token != nil {
		return *token, nil
		return token.String(), nil
	}
	if tokenFile := os.Getenv("CONSUL_HTTP_TOKEN_FILE"); tokenFile != "" {
		data, err := ioutil.ReadFile(tokenFile)
@@ -11,17 +11,17 @@ import (
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config
type SDConfig struct {
	Server     string  `yaml:"server,omitempty"`
	Token      *string `yaml:"token"`
	Datacenter string  `yaml:"datacenter"`
	Server     string           `yaml:"server,omitempty"`
	Token      *promauth.Secret `yaml:"token"`
	Datacenter string           `yaml:"datacenter"`
	// Namespace only supported at enterprise consul.
	// https://www.consul.io/docs/enterprise/namespaces
	Namespace string `yaml:"namespace,omitempty"`
	Scheme    string `yaml:"scheme,omitempty"`
	Username  string `yaml:"username"`
	Password string `yaml:"password"`
	Password *promauth.Secret `yaml:"password"`
	HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
	ProxyURL proxy.URL `yaml:"proxy_url,omitempty"`
	ProxyURL *proxy.URL `yaml:"proxy_url,omitempty"`
	ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"`
	Services []string `yaml:"services,omitempty"`
	Tags     []string `yaml:"tags,omitempty"`
@@ -24,7 +24,7 @@ var SDCheckInterval = flag.Duration("promscrape.digitaloceanSDCheckInterval", ti
type SDConfig struct {
	Server           string                    `yaml:"server,omitempty"`
	HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
	ProxyURL proxy.URL `yaml:"proxy_url,omitempty"`
	ProxyURL *proxy.URL `yaml:"proxy_url,omitempty"`
	ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"`
	Port int `yaml:"port,omitempty"`
}