Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Aliaksandr Valialkin 2022-01-07 12:36:24 +02:00
commit 3f5b1084eb
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
170 changed files with 2911 additions and 4175 deletions


@@ -4,3 +4,4 @@ gocache-for-docker
 victoria-metrics-data
 vmstorage-data
 vmselect-cache
+.vscode

.gitignore vendored

@@ -4,6 +4,7 @@
 *.pprof
 /bin
 .idea
+.vscode
 *.test
 *.swp
 /gocache-for-docker


@@ -175,7 +175,7 @@
 END OF TERMS AND CONDITIONS
-Copyright 2019-2021 VictoriaMetrics, Inc.
+Copyright 2019-2022 VictoriaMetrics, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.


@@ -19,7 +19,7 @@ Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-set
 Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
-[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/enterprise.html). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
+[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/products/enterprise/). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).

 ## Prominent features
@@ -56,7 +56,7 @@ VictoriaMetrics has the following prominent features:
 * [Native binary format](#how-to-import-data-in-native-format).
 * It supports metrics' relabeling. See [these docs](#relabeling) for details.
 * It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
-* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/enterprise.html).
+* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/products/enterprise/).
 * It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

 See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
@@ -570,7 +570,7 @@ VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series w
 ### Graphite Render API usage
-[VictoriaMetrics Enterprise](https://victoriametrics.com/enterprise.html) supports [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) subset
+[VictoriaMetrics Enterprise](https://victoriametrics.com/products/enterprise/) supports [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) subset
 at `/render` endpoint, which is used by [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/).
 When configuring Graphite datasource in Grafana, the `Storage-Step` http request header must be set to a step between Graphite data points stored in VictoriaMetrics. For example, `Storage-Step: 10s` would mean 10 seconds distance between Graphite datapoints stored in VictoriaMetrics.
 Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
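
For a concrete picture of the `Storage-Step` requirement mentioned in this hunk, here is a minimal Go sketch of a `/render` request against a local single-node VictoriaMetrics. The address, target and time range are illustrative assumptions, not taken from the docs above:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Assumed local VictoriaMetrics address; adjust for your setup.
	params := url.Values{}
	params.Set("target", "system.load5") // Graphite Render API series selector
	params.Set("from", "-1h")
	params.Set("format", "json")
	req, err := http.NewRequest("GET", "http://localhost:8428/render?"+params.Encode(), nil)
	if err != nil {
		panic(err)
	}
	// Storage-Step must match the distance between Graphite datapoints stored in VictoriaMetrics.
	req.Header.Set("Storage-Step", "10s")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```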
@@ -632,7 +632,7 @@ to your needs or when testing bugfixes.
 ### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
 2. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `victoria-metrics` binary and puts it into the `bin` folder.
@@ -648,7 +648,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
 ### Development ARM build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
 2. Run `make victoria-metrics-arm` or `make victoria-metrics-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `victoria-metrics-arm` or `victoria-metrics-arm64` binary respectively and puts it into the `bin` folder.
@@ -662,7 +662,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
 `Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
 2. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
@@ -1171,13 +1171,13 @@ See [these docs](https://docs.victoriametrics.com/guides/guide-vmcluster-multipl
 ## Downsampling
-[VictoriaMetrics Enterprise](https://victoriametrics.com/enterprise.html) supports multi-level downsampling with `-downsampling.period` command-line flag. For example:
+[VictoriaMetrics Enterprise](https://victoriametrics.com/products/enterprise/) supports multi-level downsampling with `-downsampling.period` command-line flag. For example:
 * `-downsampling.period=30d:5m` instructs VictoriaMetrics to [deduplicate](#deduplication) samples older than 30 days with 5 minutes interval.
 * `-downsampling.period=30d:5m,180d:1h` instructs VictoriaMetrics to deduplicate samples older than 30 days with 5 minutes interval and to deduplicate samples older than 180 days with 1 hour interval.
-Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series.
+Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series. It is possible to use recording rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) in order to reduce the number of time series. See [these docs](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).
 The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
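
As a rough illustration of what `-downsampling.period=30d:5m` means for a single series, here is a toy Go sketch that keeps only the last sample per 5-minute interval. This is a simplified model of the deduplication idea, not VictoriaMetrics' actual downsampling code:

```go
package main

import "fmt"

// downsample keeps the last sample in each interval (in milliseconds).
// Timestamps must be sorted in ascending order.
func downsample(timestamps []int64, values []float64, intervalMs int64) ([]int64, []float64) {
	var outTs []int64
	var outVals []float64
	for i, ts := range timestamps {
		bucket := ts - ts%intervalMs
		if len(outTs) > 0 && outTs[len(outTs)-1]-outTs[len(outTs)-1]%intervalMs == bucket {
			// Same interval - keep only the most recent sample.
			outTs[len(outTs)-1] = ts
			outVals[len(outVals)-1] = values[i]
			continue
		}
		outTs = append(outTs, ts)
		outVals = append(outVals, values[i])
	}
	return outTs, outVals
}

func main() {
	ts := []int64{0, 100_000, 200_000, 310_000, 320_000}
	vals := []float64{1, 2, 3, 4, 5}
	// 5m interval, as in `-downsampling.period=30d:5m`.
	gotTs, gotVals := downsample(ts, vals, 5*60*1000)
	fmt.Println(gotTs, gotVals) // [200000 320000] [3 5]
}
```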


@@ -301,7 +301,6 @@ You can read more about relabeling in the following articles:
 * If the metric disappears from the list of scraped metrics, then stale marker is sent to this particular metric.
 * If the scrape target becomes temporarily unavailable, then stale markers are sent for all the metrics scraped from this target.
 * If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.
-* Stale markers are sent for all the scraped metrics on graceful shutdown of `vmagent`.

 Prometheus staleness markers' tracking needs additional memory, since it must store the previous response body per each scrape target in order to compare it to the current response body. The memory usage may be reduced by passing `-promscrape.noStaleMarkers` command-line flag to `vmagent`. This disables staleness tracking. This also disables tracking the number of new time series per each scrape with the auto-generated `scrape_series_added` metric. See [these docs](https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series) for details.
@@ -521,7 +520,7 @@ It may be useful to perform `vmagent` rolling update without any scrape loss.
 ## Kafka integration
-[Enterprise version](https://victoriametrics.com/enterprise.html) of `vmagent` can read and write metrics from / to Kafka:
+[Enterprise version](https://victoriametrics.com/products/enterprise/) of `vmagent` can read and write metrics from / to Kafka:
 * [Reading metrics from Kafka](#reading-metrics-from-kafka)
 * [Writing metrics to Kafka](#writing-metrics-to-kafka)
@@ -531,7 +530,7 @@ The enterprise version of vmagent is available for evaluation at [releases](http
 ### Reading metrics from Kafka
-[Enterprise version](https://victoriametrics.com/enterprise.html) of `vmagent` can read metrics in various formats from Kafka messages. These formats can be configured with `-kafka.consumer.topic.defaultFormat` or `-kafka.consumer.topic.format` command-line options. The following formats are supported:
+[Enterprise version](https://victoriametrics.com/products/enterprise/) of `vmagent` can read metrics in various formats from Kafka messages. These formats can be configured with `-kafka.consumer.topic.defaultFormat` or `-kafka.consumer.topic.format` command-line options. The following formats are supported:
 * `promremotewrite` - [Prometheus remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). Messages in this format can be sent by vmagent - see [these docs](#writing-metrics-to-kafka).
 * `influx` - [InfluxDB line protocol format](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/).
@@ -567,7 +566,7 @@ data_format = "influx"
 #### Command-line flags for Kafka consumer
-These command-line flags are available only in [enterprise](https://victoriametrics.com/enterprise.html) version of `vmagent`, which can be downloaded for evaluation from [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page (see `vmutils-*-enteprise.tar.gz` archives) and from [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.
+These command-line flags are available only in [enterprise](https://victoriametrics.com/products/enterprise/) version of `vmagent`, which can be downloaded for evaluation from [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page (see `vmutils-*-enteprise.tar.gz` archives) and from [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.
 ```
 -kafka.consumer.topic array
@@ -600,7 +599,7 @@ These command-line flags are available only in [enterprise](https://victoriametr
 ### Writing metrics to Kafka
-[Enterprise version](https://victoriametrics.com/enterprise.html) of `vmagent` writes data to Kafka with `at-least-once` semantics if `-remoteWrite.url` contains e.g. Kafka url. For example, if `vmagent` is started with `-remoteWrite.url=kafka://localhost:9092/?topic=prom-rw`, then it would send Prometheus remote_write messages to Kafka bootstrap server at `localhost:9092` with the topic `prom-rw`. These messages can be read later from Kafka by another `vmagent` - see [these docs](#reading-metrics-from-kafka) for details.
+[Enterprise version](https://victoriametrics.com/products/enterprise/) of `vmagent` writes data to Kafka with `at-least-once` semantics if `-remoteWrite.url` contains e.g. Kafka url. For example, if `vmagent` is started with `-remoteWrite.url=kafka://localhost:9092/?topic=prom-rw`, then it would send Prometheus remote_write messages to Kafka bootstrap server at `localhost:9092` with the topic `prom-rw`. These messages can be read later from Kafka by another `vmagent` - see [these docs](#reading-metrics-from-kafka) for details.
 Additional Kafka options can be passed as query params to `-remoteWrite.url`. For instance, `kafka://localhost:9092/?topic=prom-rw&client.id=my-favorite-id` sets `client.id` Kafka option to `my-favorite-id`. The full list of Kafka options is available [here](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
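
A small Go sketch of how such a `kafka://` URL decomposes into bootstrap server, topic and extra librdkafka options. This is illustrative parsing with `net/url` only, not vmagent's actual implementation:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	raw := "kafka://localhost:9092/?topic=prom-rw&client.id=my-favorite-id"
	u, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	opts := u.Query()
	topic := opts.Get("topic")
	opts.Del("topic")
	fmt.Println("bootstrap server:", u.Host) // localhost:9092
	fmt.Println("topic:", topic)             // prom-rw
	// Remaining query params map to librdkafka options, e.g. client.id.
	for k, v := range opts {
		fmt.Printf("kafka option %s=%s\n", k, v[0])
	}
}
```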
@@ -629,7 +628,7 @@ We recommend using [binary releases](https://github.com/VictoriaMetrics/Victoria
 ### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
 2. Run `make vmagent` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds the `vmagent` binary and puts it into the `bin` folder.
@@ -658,7 +657,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
 ### Development ARM build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
 2. Run `make vmagent-arm` or `make vmagent-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
    It builds `vmagent-arm` or `vmagent-arm64` binary respectively and puts it into the `bin` folder.


@@ -295,6 +295,17 @@ again:
 	}
 	metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_requests_total{url=%q, status_code="%d"}`, c.sanitizedURL, statusCode)).Inc()
 	if statusCode == 409 || statusCode == 400 {
+		body, err := ioutil.ReadAll(resp.Body)
+		_ = resp.Body.Close()
+		l := logger.WithThrottler("remoteWriteRejected", 5*time.Second)
+		if err != nil {
+			l.Errorf("sending a block with size %d bytes to %q was rejected (skipping the block): status code %d; "+
+				"failed to read response body: %s",
+				len(block), c.sanitizedURL, statusCode, err)
+		} else {
+			l.Errorf("sending a block with size %d bytes to %q was rejected (skipping the block): status code %d; response body: %s",
+				len(block), c.sanitizedURL, statusCode, string(body))
+		}
 		// Just drop block on 409 and 400 status codes like Prometheus does.
 		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/873
 		// and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1149


@@ -390,6 +390,8 @@ var labelsHashBufPool bytesutil.ByteBufferPool
 func logSkippedSeries(labels []prompbmarshal.Label, flagName string, flagValue int) {
 	select {
 	case <-logSkippedSeriesTicker.C:
+		// Do not use logger.WithThrottler() here, since this will increase CPU usage
+		// because every call to logSkippedSeries will result to a call to labelsToString.
 		logger.Warnf("skip series %s because %s=%d reached", labelsToString(labels), flagName, flagValue)
 	default:
 	}
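
The ticker-based pattern in this hunk can be seen in isolation below: a standalone sketch where the expensive formatting work happens only when the ticker has fired, so throttled calls stay cheap. All names here are illustrative, not from the repository:

```go
package main

import (
	"fmt"
	"time"
)

// throttleTicker fires at most once per 5 seconds; messages arriving
// in between are dropped before any expensive formatting happens.
var throttleTicker = time.NewTicker(5 * time.Second)

func logThrottled(format string, args ...interface{}) {
	select {
	case <-throttleTicker.C:
		// Only here do we pay the cost of formatting the message.
		fmt.Printf(format+"\n", args...)
	default:
		// Ticker hasn't fired yet - drop the message cheaply.
	}
}

func main() {
	for i := 0; i < 100; i++ {
		logThrottled("skip series #%d", i) // at most one line per 5s
		time.Sleep(100 * time.Millisecond)
	}
}
```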


@@ -229,7 +229,7 @@ There are the following approaches exist for alerting and recording rules across
 rules to `AccountID=123`.
 * To specify `tenant` parameter per each alerting and recording group if
-  [enterprise version of vmalert](https://victoriametrics.com/enterprise.html) is used
+  [enterprise version of vmalert](https://victoriametrics.com/products/enterprise/) is used
   with `-clusterMode` command-line flag. For example:

 ```yaml


@@ -97,6 +97,9 @@ func main() {
 	if err != nil {
 		logger.Fatalf("failed to init remoteWrite: %s", err)
 	}
+	if rw == nil {
+		logger.Fatalf("remoteWrite.url can't be empty in replay mode")
+	}
 	notifier.InitTemplateFunc(eu)
 	groupsCfg, err := config.Parse(*rulePath, *validateTemplates, *validateExpressions)
 	if err != nil {


@@ -159,7 +159,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
 ### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
 2. Run `make vmauth` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `vmauth` binary and puts it into the `bin` folder.


@@ -267,7 +267,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
 ### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
 2. Run `make vmbackup` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `vmbackup` binary and puts it into the `bin` folder.


@@ -1,6 +1,6 @@
 ## vmbackupmanager
-***vmbackupmanager is a part of [enterprise package](https://victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
+***vmbackupmanager is a part of [enterprise package](https://victoriametrics.com/products/enterprise/). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
 The VictoriaMetrics backup manager automates regular backup procedures. It supports the following backup intervals: **hourly**, **daily**, **weekly** and **monthly**. Multiple backup intervals may be configured simultaneously. I.e. the backup manager creates hourly backups every hour, while it creates daily backups every day, etc. Backup manager must have read access to the storage data, so best practice is to install it on the same machine (or as a sidecar) where the storage node is installed.
 The backup service makes a backup every hour and puts it to the latest folder and then copies data to the folders which represent the backup intervals (hourly, daily, weekly and monthly)


@@ -560,6 +560,15 @@ results such as `average`, `rate`, etc.
 If multiple labels needs to be added, set flag for each label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`.
 If timeseries already have label, that must be added with `--vm-extra-label` flag, flag has priority and will override label value from timeseries.
+
+### Rate limiting
+
+Limiting the rate of data transfer could help to reduce pressure on disk or on destination database.
+The rate limit may be set in bytes-per-second via `--vm-rate-limit` flag.
+Please note, you can also use [vmagent](https://docs.victoriametrics.com/vmagent.html)
+as a proxy between `vmctl` and destination with `-remoteWrite.rateLimit` flag enabled.

 ## How to build

 It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmctl` is located in `vmutils-*` archives there.
@@ -567,7 +576,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
 ### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
 2. Run `make vmctl` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `vmctl` binary and puts it into the `bin` folder.
@@ -596,7 +605,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
 #### Development ARM build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
 2. Run `make vmctl-arm` or `make vmctl-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `vmctl-arm` or `vmctl-arm64` binary respectively and puts it into the `bin` folder.


@@ -8,6 +8,7 @@ import (
 const (
 	globalSilent = "s"
+	globalVerbose = "verbose"
 )

 var (
@@ -17,6 +18,11 @@ var (
 		Value: false,
 		Usage: "Whether to run in silent mode. If set to true no confirmation prompts will appear.",
 	},
+	&cli.BoolFlag{
+		Name:  globalVerbose,
+		Value: false,
+		Usage: "Whether to enable verbosity in logs output.",
+	},
 }
 )
@@ -30,7 +36,10 @@ const (
 	vmBatchSize = "vm-batch-size"
 	vmSignificantFigures = "vm-significant-figures"
 	vmRoundDigits = "vm-round-digits"
+	// also used in vm-native
 	vmExtraLabel = "vm-extra-label"
+	vmRateLimit = "vm-rate-limit"
 )

 var (
@@ -95,6 +104,11 @@ var (
 		Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag" +
 			"will have priority. Flag can be set multiple times, to add few additional labels.",
 	},
+	&cli.Int64Flag{
+		Name: vmRateLimit,
+		Usage: "Optional data transfer rate limit in bytes per second.\n" +
+			"By default the rate limit is disabled. It can be useful for limiting load on configured via '--vmAddr' destination.",
+	},
 }
 )
@@ -354,6 +368,11 @@ var (
 		Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag" +
 			"will have priority. Flag can be set multiple times, to add few additional labels.",
 	},
+	&cli.Int64Flag{
+		Name: vmRateLimit,
+		Usage: "Optional data transfer rate limit in bytes per second.\n" +
+			"By default the rate limit is disabled. It can be useful for limiting load on source or destination databases.",
+	},
 }
 )
) )


@@ -30,7 +30,7 @@ func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator st
 	}
 }
-func (ip *influxProcessor) run(silent bool) error {
+func (ip *influxProcessor) run(silent, verbose bool) error {
 	series, err := ip.ic.Explore()
 	if err != nil {
 		return fmt.Errorf("explore query failed: %s", err)
@@ -70,7 +70,7 @@ func (ip *influxProcessor) run(silent bool) error {
 		case infErr := <-errCh:
 			return fmt.Errorf("influx error: %s", infErr)
 		case vmErr := <-ip.im.Errors():
-			return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr))
+			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
 		case seriesCh <- s:
 		}
 	}
@@ -80,7 +80,9 @@ func (ip *influxProcessor) run(silent bool) error {
 	ip.im.Close()
 	// drain import errors channel
 	for vmErr := range ip.im.Errors() {
-		return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr))
+		if vmErr.Err != nil {
+			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
+		}
 	}
 	bar.Finish()
 	log.Println("Import finished!")


@@ -0,0 +1,53 @@
+package limiter
+
+import (
+	"sync"
+	"time"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
+)
+
+// NewLimiter creates a Limiter object
+// for the given perSecondLimit
+func NewLimiter(perSecondLimit int64) *Limiter {
+	return &Limiter{perSecondLimit: perSecondLimit}
+}
+
+// Limiter controls the amount of budget
+// that can be spent according to configured perSecondLimit
+type Limiter struct {
+	perSecondLimit int64
+
+	// mu protects budget and deadline from concurrent access.
+	mu sync.Mutex
+	// The current budget. It is increased by perSecondLimit every second.
+	budget int64
+	// The next deadline for increasing the budget by perSecondLimit
+	deadline time.Time
+}
+
+// Register blocks for amount of time
+// needed to process the given dataLen according
+// to the configured perSecondLimit.
+func (l *Limiter) Register(dataLen int) {
+	limit := l.perSecondLimit
+	if limit <= 0 {
+		return
+	}
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	for l.budget <= 0 {
+		if d := time.Until(l.deadline); d > 0 {
+			t := timerpool.Get(d)
+			<-t.C
+			timerpool.Put(t)
+		}
+		l.budget += limit
+		l.deadline = time.Now().Add(time.Second)
+	}
+	l.budget -= int64(dataLen)
+}
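
A minimal usage sketch for the new `Limiter`, assuming the `app/vmctl/limiter` import path from this repository; the numbers are arbitrary:

```go
package main

import (
	"log"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
)

func main() {
	// Allow roughly 1 KiB of registered work per second.
	l := limiter.NewLimiter(1024)
	start := time.Now()
	for i := 0; i < 4; i++ {
		// Each Register(512) spends half of the per-second budget;
		// once the budget is exhausted, the call blocks until the
		// next one-second deadline refills it.
		l.Register(512)
		log.Printf("chunk %d registered after %v", i, time.Since(start))
	}
}
```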


@@ -0,0 +1,37 @@
+package limiter
+
+import (
+	"io"
+)
+
+// NewWriteLimiter creates a new WriteLimiter object
+// for the give writer and Limiter.
+func NewWriteLimiter(w io.Writer, limiter *Limiter) *WriteLimiter {
+	return &WriteLimiter{
+		writer:  w,
+		limiter: limiter,
+	}
+}
+
+// WriteLimiter limits the amount of bytes written
+// per second via Write() method.
+// Must be created via NewWriteLimiter.
+type WriteLimiter struct {
+	writer  io.Writer
+	limiter *Limiter
+}
+
+// Close implements io.Closer
+// also calls Close for wrapped io.WriteCloser
+func (wl *WriteLimiter) Close() error {
+	if c, ok := wl.writer.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
+
+// Write implements io.Writer
+func (wl *WriteLimiter) Write(p []byte) (n int, err error) {
+	wl.limiter.Register(len(p))
+	return wl.writer.Write(p)
+}


@@ -18,6 +18,11 @@ import (
 )

 func main() {
+	var (
+		err      error
+		importer *vm.Importer
+	)
 	start := time.Now()
 	app := &cli.App{
 		Name: "vmctl",
@@ -53,7 +58,7 @@ func main() {
 			}
 			otsdbProcessor := newOtsdbProcessor(otsdbClient, importer, c.Int(otsdbConcurrency))
-			return otsdbProcessor.run(c.Bool(globalSilent))
+			return otsdbProcessor.run(c.Bool(globalSilent), c.Bool(globalVerbose))
 		},
 	},
 	{
@@ -82,14 +87,14 @@ func main() {
 			}
 			vmCfg := initConfigVM(c)
-			importer, err := vm.NewImporter(vmCfg)
+			importer, err = vm.NewImporter(vmCfg)
 			if err != nil {
 				return fmt.Errorf("failed to create VM importer: %s", err)
 			}
 			processor := newInfluxProcessor(influxClient, importer,
 				c.Int(influxConcurrency), c.String(influxMeasurementFieldSeparator))
-			return processor.run(c.Bool(globalSilent))
+			return processor.run(c.Bool(globalSilent), c.Bool(globalVerbose))
 		},
 	},
 	{
@@ -100,7 +105,7 @@ func main() {
 			fmt.Println("Prometheus import mode")
 			vmCfg := initConfigVM(c)
-			importer, err := vm.NewImporter(vmCfg)
+			importer, err = vm.NewImporter(vmCfg)
 			if err != nil {
 				return fmt.Errorf("failed to create VM importer: %s", err)
 			}
@@ -123,7 +128,7 @@ func main() {
 				im: importer,
 				cc: c.Int(promConcurrency),
 			}
-			return pp.run(c.Bool(globalSilent))
+			return pp.run(c.Bool(globalSilent), c.Bool(globalVerbose))
 		},
 	},
 	{
@@ -138,6 +143,7 @@ func main() {
 			}
 			p := vmNativeProcessor{
+				rateLimit: c.Int64(vmRateLimit),
 				filter: filter{
 					match: c.String(vmNativeFilterMatch),
 					timeStart: c.String(vmNativeFilterTimeStart),
@@ -166,12 +172,14 @@ func main() {
 	go func() {
 		<-c
 		fmt.Println("\r- Execution cancelled")
-		os.Exit(0)
+		if importer != nil {
+			importer.Close()
+		}
 	}()

-	err := app.Run(os.Args)
+	err = app.Run(os.Args)
 	if err != nil {
-		log.Fatal(err)
+		log.Println(err)
 	}
 	log.Printf("Total time: %v", time.Since(start))
 }
@@ -188,5 +196,6 @@ func initConfigVM(c *cli.Context) vm.Config {
 		SignificantFigures: c.Int(vmSignificantFigures),
 		RoundDigits: c.Int(vmRoundDigits),
 		ExtraLabels: c.StringSlice(vmExtraLabel),
+		RateLimit: c.Int64(vmRateLimit),
 	}
 }


@@ -35,7 +35,7 @@ func newOtsdbProcessor(oc *opentsdb.Client, im *vm.Importer, otsdbcc int) *otsdb
 	}
 }
-func (op *otsdbProcessor) run(silent bool) error {
+func (op *otsdbProcessor) run(silent, verbose bool) error {
 	log.Println("Loading all metrics from OpenTSDB for filters: ", op.oc.Filters)
 	var metrics []string
 	for _, filter := range op.oc.Filters {
@@ -111,7 +111,7 @@ func (op *otsdbProcessor) run(silent bool) error {
 			case otsdbErr := <-errCh:
 				return fmt.Errorf("opentsdb error: %s", otsdbErr)
 			case vmErr := <-op.im.Errors():
-				return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr))
+				return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
 			case seriesCh <- queryObj{
 				Tr: tr, StartTime: startTime,
 				Series: series, Rt: opentsdb.RetentionMeta{
@@ -133,7 +133,9 @@ func (op *otsdbProcessor) run(silent bool) error {
 	}
 	op.im.Close()
 	for vmErr := range op.im.Errors() {
-		return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr))
+		if vmErr.Err != nil {
+			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
+		}
 	}
 	log.Println("Import finished!")
 	log.Print(op.im.Stats())
@@ -143,7 +145,7 @@ func (op *otsdbProcessor) run(silent bool) error {
 func (op *otsdbProcessor) do(s queryObj) error {
 	start := s.StartTime - s.Tr.Start
 	end := s.StartTime - s.Tr.End
-	data, err := op.oc.GetData(s.Series, s.Rt, start, end)
+	data, err := op.oc.GetData(s.Series, s.Rt, start, end, op.oc.MsecsTime)
 	if err != nil {
 		return fmt.Errorf("failed to collect data for %v in %v:%v :: %v", s.Series, s.Rt, s.Tr, err)
 	}


@@ -46,6 +46,7 @@ type Client struct {
 	Filters []string
 	Normalize bool
 	HardTS int64
+	MsecsTime bool
 }

 // Config contains fields required
@@ -82,9 +83,9 @@ type MetaResults struct {
 // Meta A meta object about a metric
 // only contain the tags/etc. and no data
 type Meta struct {
-	//tsuid string
 	Metric string `json:"metric"`
 	Tags map[string]string `json:"tags"`
+	//tsuid string
 }

 // OtsdbMetric is a single series in OpenTSDB's returned format
@@ -152,7 +153,7 @@ func (c Client) FindSeries(metric string) ([]Meta, error) {
 // GetData actually retrieves data for a series at a specified time range
 // e.g. /api/query?start=1&end=200&m=sum:1m-avg-none:system.load5{host=host1}
-func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64) (Metric, error) {
+func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64, mSecs bool) (Metric, error) {
 	/*
 		First, build our tag string.
 		It's literally just key=value,key=value,...
@@ -195,7 +196,7 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64) (
 		3. bad format of response body
 	*/
 	if resp.StatusCode != 200 {
-		log.Println(fmt.Sprintf("bad response code from OpenTSDB query %v...skipping", resp.StatusCode))
+		log.Println(fmt.Sprintf("bad response code from OpenTSDB query %v for %q...skipping", resp.StatusCode, q))
 		return Metric{}, nil
 	}
 	defer func() { _ = resp.Body.Close() }()
@@ -272,7 +273,11 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64) (
 		then convert the timestamp back to something reasonable.
 	*/
 	for ts, val := range output[0].Dps {
+		if !mSecs {
+			data.Timestamps = append(data.Timestamps, ts*1000)
+		} else {
 			data.Timestamps = append(data.Timestamps, ts)
+		}
 		data.Values = append(data.Values, val)
 	}
 	return data, nil
@@ -283,6 +288,7 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64) (
 func NewClient(cfg Config) (*Client, error) {
 	var retentions []Retention
 	offsetPrint := int64(time.Now().Unix())
+	// convert a number of days to seconds
 	offsetSecs := cfg.Offset * 24 * 60 * 60
 	if cfg.MsecsTime {
 		// 1000000 == Nanoseconds -> Milliseconds difference
@@ -318,6 +324,7 @@ func NewClient(cfg Config) (*Client, error) {
 		Filters: cfg.Filters,
 		Normalize: cfg.Normalize,
 		HardTS: cfg.HardTS,
+		MsecsTime: cfg.MsecsTime,
 	}
 	return client, nil
 }


@@ -87,6 +87,34 @@ func convertRetention(retention string, offset int64, msecTime bool) (Retention,
 	if len(chunks) != 3 {
 		return Retention{}, fmt.Errorf("invalid retention string: %q", retention)
 	}
+	queryLengthDuration, err := convertDuration(chunks[2])
+	if err != nil {
+		return Retention{}, fmt.Errorf("invalid ttl (second order) duration string: %q: %s", chunks[2], err)
+	}
+	// set ttl in milliseconds, unless we aren't using millisecond time in OpenTSDB...then use seconds
+	queryLength := queryLengthDuration.Milliseconds()
+	if !msecTime {
+		queryLength = queryLength / 1000
+	}
+	queryRange := queryLength
+	// bump by the offset so we don't look at empty ranges any time offset > ttl
+	queryLength += offset
+
+	// first/second order aggregations for queries defined in chunk 0...
+	aggregates := strings.Split(chunks[0], "-")
+	if len(aggregates) != 3 {
+		return Retention{}, fmt.Errorf("invalid aggregation string: %q", chunks[0])
+	}
+	aggTimeDuration, err := convertDuration(aggregates[1])
+	if err != nil {
+		return Retention{}, fmt.Errorf("invalid aggregation time duration string: %q: %s", aggregates[1], err)
+	}
+	aggTime := aggTimeDuration.Milliseconds()
+	if !msecTime {
+		aggTime = aggTime / 1000
+	}
+
 	rowLengthDuration, err := convertDuration(chunks[1])
 	if err != nil {
 		return Retention{}, fmt.Errorf("invalid row length (first order) duration string: %q: %s", chunks[1], err)
@@ -96,27 +124,35 @@ func convertRetention(retention string, offset int64, msecTime bool) (Retention,
 	if !msecTime {
 		rowLength = rowLength / 1000
 	}
-	ttlDuration, err := convertDuration(chunks[2])
-	if err != nil {
-		return Retention{}, fmt.Errorf("invalid ttl (second order) duration string: %q: %s", chunks[2], err)
+	var querySize int64
+	/*
+		The idea here is to ensure each individual query sent to OpenTSDB is *at least*
+		large enough to ensure no single query requests essentially 0 data.
+	*/
+	if rowLength > aggTime {
+		/*
+			We'll look at 2x the row size for each query we perform
+			This is a strange function, but the logic works like this:
+			1. we discover the "number" of ranges we should split the time range into
+			   This is found with queryRange / (rowLength * 4)...kind of a percentage query
+			2. we discover the actual size of each "chunk"
+			   This is second division step
+		*/
+		querySize = int64(queryRange / (queryRange / (rowLength * 4)))
+	} else {
+		/*
+			Unless the aggTime (how long a range of data we're requesting per individual point)
+			is greater than the row size. Then we'll need to use that to determine
+			how big each individual query should be
+		*/
+		querySize = int64(queryRange / (queryRange / (aggTime * 4)))
 	}
-	// set ttl in milliseconds, unless we aren't using millisecond time in OpenTSDB...then use seconds
-	ttl := ttlDuration.Milliseconds()
-	if !msecTime {
-		ttl = ttl / 1000
-	}
-	// bump by the offset so we don't look at empty ranges any time offset > ttl
-	ttl += offset
 	var timeChunks []TimeRange
 	var i int64
-	for i = offset; i <= ttl; i = i + rowLength {
-		timeChunks = append(timeChunks, TimeRange{Start: i + rowLength, End: i})
+	for i = offset; i <= queryLength; i = i + querySize {
+		timeChunks = append(timeChunks, TimeRange{Start: i + querySize, End: i})
 	}
-	// first/second order aggregations for queries defined in chunk 0...
-	aggregates := strings.Split(chunks[0], "-")
-	if len(aggregates) != 3 {
-		return Retention{}, fmt.Errorf("invalid aggregation string: %q", chunks[0])
-	}

 	ret := Retention{FirstOrder: aggregates[0],
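
To make the chunk-size arithmetic concrete, the sketch below reproduces it for the retention string `sum-1m-avg:1h:30d` with second-resolution timestamps and zero offset, which is the same case the updated test below checks:

```go
package main

import "fmt"

func main() {
	const (
		aggTime    int64 = 60      // "1m" aggregation window, in seconds
		rowLength  int64 = 3600    // "1h" row length, in seconds
		queryRange int64 = 2592000 // "30d" ttl, in seconds
		offset     int64 = 0
	)
	// rowLength > aggTime, so querySize derives from the row length.
	querySize := queryRange / (queryRange / (rowLength * 4))
	fmt.Println("querySize:", querySize) // 14400 seconds, i.e. 4h per query

	// Count the generated time chunks, mirroring the loop in convertRetention.
	n := 0
	for i := offset; i <= queryRange+offset; i += querySize {
		n++
	}
	fmt.Println("query ranges:", n) // 181: 2592000/14400 = 180, plus one
}
```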


@@ -8,7 +8,7 @@ func TestConvertRetention(t *testing.T) {
 	/*
 		2592000 seconds in 30 days
 		3600 in one hour
-		2592000 / 3600 = 720 individual query "ranges" should exist, plus one because time ranges can be weird
+		2592000 / 14400 = 180 individual query "ranges" should exist, plus one because time ranges can be weird
 		First order should == "sum"
 		Second order should == "avg"
 		AggTime should == "1m"
@@ -17,8 +17,8 @@ func TestConvertRetention(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Error parsing valid retention string: %v", err)
 	}
-	if len(res.QueryRanges) != 721 {
-		t.Fatalf("Found %v query ranges. Should have found 720", len(res.QueryRanges))
+	if len(res.QueryRanges) != 181 {
+		t.Fatalf("Found %v query ranges. Should have found 181", len(res.QueryRanges))
 	}
 	if res.FirstOrder != "sum" {
 		t.Fatalf("Incorrect first order aggregation %q. Should have been 'sum'", res.FirstOrder)


@@ -25,7 +25,7 @@ type prometheusProcessor struct {
 	cc int
 }
-func (pp *prometheusProcessor) run(silent bool) error {
+func (pp *prometheusProcessor) run(silent, verbose bool) error {
 	blocks, err := pp.cl.Explore()
 	if err != nil {
 		return fmt.Errorf("explore failed: %s", err)
@@ -66,7 +66,7 @@ func (pp *prometheusProcessor) run(silent bool) error {
 			return fmt.Errorf("prometheus error: %s", promErr)
 		case vmErr := <-pp.im.Errors():
 			close(blockReadersCh)
-			return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr))
+			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
 		case blockReadersCh <- br:
 		}
 	}
@@ -77,7 +77,9 @@ func (pp *prometheusProcessor) run(silent bool) error {
 	pp.im.Close()
 	// drain import errors channel
 	for vmErr := range pp.im.Errors() {
-		return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr))
+		if vmErr.Err != nil {
+			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
+		}
 	}
 	bar.Finish()
 	log.Println("Import finished!")


@@ -23,11 +23,29 @@ func prompt(question string) bool {
 	return false
 }
-func wrapErr(vmErr *vm.ImportError) error {
+func wrapErr(vmErr *vm.ImportError, verbose bool) error {
 	var errTS string
+	var maxTS, minTS int64
 	for _, ts := range vmErr.Batch {
+		if minTS < ts.Timestamps[0] || minTS == 0 {
+			minTS = ts.Timestamps[0]
+		}
+		if maxTS < ts.Timestamps[len(ts.Timestamps)-1] {
+			maxTS = ts.Timestamps[len(ts.Timestamps)-1]
+		}
+		if verbose {
 			errTS += fmt.Sprintf("%s for timestamps range %d - %d\n",
 				ts.String(), ts.Timestamps[0], ts.Timestamps[len(ts.Timestamps)-1])
+		}
 	}
-	return fmt.Errorf("%s with error: %s", errTS, vmErr.Err)
+	var verboseMsg string
+	if !verbose {
+		verboseMsg = "(enable `--verbose` output to get more details)"
+	}
+	if vmErr.Err == nil {
+		return fmt.Errorf("%s\n\tLatest delivered batch for timestamps range %d - %d %s\n%s",
+			vmErr.Err, minTS, maxTS, verboseMsg, errTS)
+	}
+	return fmt.Errorf("%s\n\tImporting batch failed for timestamps range %d - %d %s\n%s",
+		vmErr.Err, minTS, maxTS, verboseMsg, errTS)
 }


@@ -13,6 +13,7 @@ import (
 	"sync"
 	"time"

+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
 )
@@ -47,6 +48,9 @@ type Config struct {
 	RoundDigits int
 	// ExtraLabels that will be added to all imported series. Must be in label=value format.
 	ExtraLabels []string
+	// RateLimit defines a data transfer speed in bytes per second.
+	// Is applied to each worker (see Concurrency) independently.
+	RateLimit int64
 }

 // Importer performs insertion of timeseries
@@ -63,6 +67,8 @@ type Importer struct {
 	input chan *TimeSeries
 	errors chan *ImportError
+	rl *limiter.Limiter
+
 	wg sync.WaitGroup
 	once sync.Once
@@ -123,6 +129,7 @@ func NewImporter(cfg Config) (*Importer, error) {
 		compress: cfg.Compress,
 		user: cfg.User,
 		password: cfg.Password,
+		rl: limiter.NewLimiter(cfg.RateLimit),
 		close: make(chan struct{}),
 		input: make(chan *TimeSeries, cfg.Concurrency*4),
 		errors: make(chan *ImportError, cfg.Concurrency),
@@ -149,9 +156,11 @@ func NewImporter(cfg Config) (*Importer, error) {
 // ImportError is type of error generated
 // in case of unsuccessful import request
 type ImportError struct {
-	// The batch of timeseries that failed
+	// The batch of timeseries processed by importer at the moment
 	Batch []*TimeSeries
 	// The error that appeared during insert
+	// If err is nil - no error happened and Batch
+	// Is the latest delivered Batch.
 	Err error
 }
@@ -180,12 +189,13 @@ func (im *Importer) startWorker(batchSize, significantFigures, roundDigits int)
 	for {
 		select {
 		case <-im.close:
-			if err := im.Import(batch); err != nil {
-				im.errors <- &ImportError{
+			exitErr := &ImportError{
 				Batch: batch,
-					Err: err,
 			}
+			if err := im.Import(batch); err != nil {
+				exitErr.Err = err
 			}
+			im.errors <- exitErr
 			return
 		case ts := <-im.input:
 			// init waitForBatch when first
@@ -301,12 +311,13 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
 	w := io.Writer(pw)
 	if im.compress {
-		zw, err := gzip.NewWriterLevel(pw, 1)
+		zw, err := gzip.NewWriterLevel(w, 1)
 		if err != nil {
 			return fmt.Errorf("unexpected error when creating gzip writer: %s", err)
 		}
 		w = zw
 	}
+	w = limiter.NewWriteLimiter(w, im.rl)
 	bw := bufio.NewWriterSize(w, 16*1024)

 	var totalSamples, totalBytes int
@@ -321,8 +332,8 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
 	if err := bw.Flush(); err != nil {
 		return err
 	}
-	if im.compress {
-		err := w.(*gzip.Writer).Close()
+	if closer, ok := w.(io.Closer); ok {
+		err := closer.Close()
 		if err != nil {
 			return err
 		}
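
The resulting writer chain in `Import` can be sketched standalone as follows. The file destination stands in for the importer's HTTP pipe writer, and the 1 MiB/s limit is an arbitrary example; note the limiter wraps the gzip writer here, so it sees the uncompressed payload flowing into gzip:

```go
package main

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"io"
	"os"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
)

func main() {
	// Illustrative destination; the importer writes to an HTTP request body pipe.
	dst, err := os.Create("/tmp/import.gz")
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	w := io.Writer(dst)
	zw, err := gzip.NewWriterLevel(w, 1) // fast compression, as in the diff
	if err != nil {
		panic(err)
	}
	w = zw
	// Rate-limit the bytes flowing into gzip (uncompressed payload) to 1 MiB/s.
	rl := limiter.NewLimiter(1024 * 1024)
	w = limiter.NewWriteLimiter(w, rl)
	bw := bufio.NewWriterSize(w, 16*1024)

	fmt.Fprintln(bw, `{"metric":{"__name__":"foo"},"values":[1],"timestamps":[0]}`)

	// Flush the buffer, then close the gzip stream via WriteLimiter's Close,
	// mirroring the io.Closer type assertion in the diff above.
	if err := bw.Flush(); err != nil {
		panic(err)
	}
	if closer, ok := w.(io.Closer); ok {
		if err := closer.Close(); err != nil {
			panic(err)
		}
	}
}
```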


@@ -7,12 +7,15 @@ import (
 	"log"
 	"net/http"

-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
 	"github.com/cheggaaa/pb/v3"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
 )

 type vmNativeProcessor struct {
 	filter filter
+	rateLimit int64

 	dst *vmNativeClient
 	src *vmNativeClient
@@ -84,7 +87,12 @@ func (p *vmNativeProcessor) run() error {
 	bar := pb.ProgressBarTemplate(barTpl).Start64(0)
 	barReader := bar.NewProxyReader(exportReader)

-	_, err = io.Copy(pw, barReader)
+	w := io.Writer(pw)
+	if p.rateLimit > 0 {
+		rl := limiter.NewLimiter(p.rateLimit)
+		w = limiter.NewWriteLimiter(pw, rl)
+	}
+	_, err = io.Copy(w, barReader)
 	if err != nil {
 		return fmt.Errorf("failed to write into %q: %s", p.dst.addr, err)
 	}


@@ -1,6 +1,6 @@
 # vmgateway
-***vmgateway is a part of [enterprise package](https://victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
+***vmgateway is a part of [enterprise package](https://victoriametrics.com/products/enterprise/). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***

 <img alt="vmgateway" src="vmgateway-overview.jpeg">
@@ -14,7 +14,7 @@
 * Provides access by tenantID in the Cluster version
 * Allows for separate write/read/admin access to data
-`vmgateway` is included in our [enterprise packages](https://victoriametrics.com/enterprise.html).
+`vmgateway` is included in our [enterprise packages](https://victoriametrics.com/products/enterprise/).

 ## Access Control


@@ -163,7 +163,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
 ### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
 2. Run `make vmrestore` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `vmrestore` binary and puts it into the `bin` folder.

View file

@ -423,7 +423,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#querying-exemplars // Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#querying-exemplars
queryExemplarsRequests.Inc() queryExemplarsRequests.Inc()
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
fmt.Fprintf(w, "%s", `{"status":"success","data":null}`) fmt.Fprintf(w, "%s", `{"status":"success","data":[]}`)
return true return true
case "/api/v1/admin/tsdb/delete_series": case "/api/v1/admin/tsdb/delete_series":
deleteRequests.Inc() deleteRequests.Inc()

View file

@ -1,12 +1,12 @@
{ {
"files": { "files": {
"main.css": "./static/css/main.a33903a8.css", "main.css": "./static/css/main.79ff1ad2.css",
"main.js": "./static/js/main.23f635e5.js", "main.js": "./static/js/main.22df0342.js",
"static/js/27.85f0e2b0.chunk.js": "./static/js/27.85f0e2b0.chunk.js", "static/js/27.85f0e2b0.chunk.js": "./static/js/27.85f0e2b0.chunk.js",
"index.html": "./index.html" "index.html": "./index.html"
}, },
"entrypoints": [ "entrypoints": [
"static/css/main.a33903a8.css", "static/css/main.79ff1ad2.css",
"static/js/main.23f635e5.js" "static/js/main.22df0342.js"
] ]
} }

View file

@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.23f635e5.js"></script><link href="./static/css/main.a33903a8.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html> <!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.22df0342.js"></script><link href="./static/css/main.79ff1ad2.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

View file

@ -0,0 +1 @@
body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif}code{font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace}.MuiAccordionSummary-content{margin:0!important}.uplot,.uplot *,.uplot :after,.uplot :before{box-sizing:border-box}.uplot{font-family:system-ui,-apple-system,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;line-height:1.5;width:-webkit-min-content;width:min-content}.u-title{font-size:18px;font-weight:700;text-align:center}.u-wrap{position:relative;-webkit-user-select:none;-ms-user-select:none;user-select:none}.u-over,.u-under{position:absolute}.u-under{overflow:hidden}.uplot canvas{display:block;height:100%;position:relative;width:100%}.u-axis{position:absolute}.u-legend{font-size:14px;margin:auto;text-align:center}.u-inline{display:block}.u-inline *{display:inline-block}.u-inline tr{margin-right:16px}.u-legend th{font-weight:600}.u-legend th>*{display:inline-block;vertical-align:middle}.u-legend .u-marker{background-clip:padding-box!important;height:1em;margin-right:4px;width:1em}.u-inline.u-live th:after{content:":";vertical-align:middle}.u-inline:not(.u-live) .u-value{display:none}.u-series>*{padding:4px}.u-series th{cursor:pointer}.u-legend .u-off>*{opacity:.3}.u-select{background:rgba(0,0,0,.07)}.u-cursor-x,.u-cursor-y,.u-select{pointer-events:none;position:absolute}.u-cursor-x,.u-cursor-y{left:0;top:0;will-change:transform;z-index:100}.u-hz .u-cursor-x,.u-vt .u-cursor-y{border-right:1px dashed #607d8b;height:100%}.u-hz .u-cursor-y,.u-vt .u-cursor-x{border-bottom:1px dashed #607d8b;width:100%}.u-cursor-pt{background-clip:padding-box!important;border:0 solid;border-radius:50%;left:0;pointer-events:none;position:absolute;top:0;will-change:transform;z-index:100}.u-axis.u-off,.u-cursor-pt.u-off,.u-cursor-x.u-off,.u-cursor-y.u-off,.u-select.u-off,.u-tooltip{display:none}.u-tooltip{grid-gap:12px;word-wrap:break-word;background:rgba(57,57,57,.9);border-radius:4px;color:#fff;font-family:monospace;font-size:10px;font-weight:500;line-height:1.4em;max-width:300px;padding:8px;pointer-events:none;position:absolute;z-index:100}.u-tooltip-data{align-items:center;display:flex;flex-wrap:wrap;font-size:11px;line-height:150%}.u-tooltip-data__value{font-weight:700;padding:4px}.u-tooltip__info{grid-gap:4px;display:grid}.u-tooltip__marker{height:12px;margin-right:4px;width:12px}.legendWrapper{grid-gap:20px;cursor:default;display:grid;grid-template-columns:repeat(auto-fit,minmax(400px,1fr));margin-top:20px;position:relative}.legendGroup{margin-bottom:24px}.legendGroupTitle{align-items:center;display:grid;font-size:11px;grid-template-columns:43px auto;padding:10px}.legendGroupQuery{grid-column:1/3;opacity:.6}.legendGroupLine{margin-right:10px}.legendItem{grid-gap:6px;align-items:start;background-color:#fff;cursor:pointer;display:inline-grid;grid-template-columns:auto auto;justify-content:start;padding:5px 10px;transition:.2s ease}.legendItemHide{opacity:.5;text-decoration:line-through}.legendItem:hover{background-color:rgba(0,0,0,.1)}.legendMarker{border-style:solid;border-width:2px;box-sizing:border-box;height:12px;margin:3px 0;transition:.2s ease;width:12px}.legendLabel{font-size:11px;font-weight:400}.legendWrapperHotkey{align-items:center;display:flex;font-size:11px}.legendWrapperHotkey p{margin-right:20px}.legendWrapperHotkey 
code{word-wrap:break-word;background-color:#f2f2f2;border:1px solid #dedede;border-radius:2px;color:#0a0a0a;display:inline;font-size:10px;font-weight:400;max-width:100%;padding:4px 6px}

View file

@ -1 +0,0 @@
body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif}code{font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace}.MuiAccordionSummary-content{margin:0!important}.cm-activeLine{background-color:inherit!important}.cm-editor{border:none;border-radius:4px;font-size:10px}.cm-gutters{border:none!important;border-radius:4px 0 0 4px;height:100%;overflow:hidden}.cm-activeLineGutter,.cm-gutters{background-color:#fff!important}.query-editor .cm-scroller{align-items:center!important}.query-editor .cm-editor.cm-focused{outline:none}.query-editor-container{border:1px solid #b9b9b9;border-radius:4px;padding:12px;position:relative}.query-editor-container_focus{border:1px solid #3f51b5}.query-editor-container_error{border-color:#ff4141}.query-editor-container-one-line .query-editor .cm-editor{height:22px}.query-editor-container-one-line{padding:6px}.query-editor-label{background-color:#fff;color:rgba(0,0,0,.6);font-size:12px;font-weight:400;left:4px;letter-spacing:normal;line-height:1;max-width:calc(133% - 24px);overflow:hidden;padding:0 5px;position:absolute;text-overflow:ellipsis;top:-.71875em;-webkit-transform:scale(.75);transform:scale(.75);white-space:nowrap;z-index:1}.query-editor-container_error .query-editor-label{color:#ff4141}.uplot,.uplot *,.uplot :after,.uplot :before{box-sizing:border-box}.uplot{font-family:system-ui,-apple-system,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;line-height:1.5;width:-webkit-min-content;width:min-content}.u-title{font-size:18px;font-weight:700;text-align:center}.u-wrap{position:relative;-webkit-user-select:none;-ms-user-select:none;user-select:none}.u-over,.u-under{position:absolute}.u-under{overflow:hidden}.uplot canvas{display:block;height:100%;position:relative;width:100%}.u-axis{position:absolute}.u-legend{font-size:14px;margin:auto;text-align:center}.u-inline{display:block}.u-inline *{display:inline-block}.u-inline tr{margin-right:16px}.u-legend th{font-weight:600}.u-legend th>*{display:inline-block;vertical-align:middle}.u-legend .u-marker{background-clip:padding-box!important;height:1em;margin-right:4px;width:1em}.u-inline.u-live th:after{content:":";vertical-align:middle}.u-inline:not(.u-live) .u-value{display:none}.u-series>*{padding:4px}.u-series th{cursor:pointer}.u-legend .u-off>*{opacity:.3}.u-select{background:rgba(0,0,0,.07)}.u-cursor-x,.u-cursor-y,.u-select{pointer-events:none;position:absolute}.u-cursor-x,.u-cursor-y{left:0;top:0;will-change:transform;z-index:100}.u-hz .u-cursor-x,.u-vt .u-cursor-y{border-right:1px dashed #607d8b;height:100%}.u-hz .u-cursor-y,.u-vt .u-cursor-x{border-bottom:1px dashed #607d8b;width:100%}.u-cursor-pt{background-clip:padding-box!important;border:0 
solid;border-radius:50%;left:0;pointer-events:none;position:absolute;top:0;will-change:transform;z-index:100}.u-axis.u-off,.u-cursor-pt.u-off,.u-cursor-x.u-off,.u-cursor-y.u-off,.u-select.u-off,.u-tooltip{display:none}.u-tooltip{grid-gap:12px;word-wrap:break-word;background:rgba(57,57,57,.9);border-radius:4px;color:#fff;font-family:monospace;font-size:10px;font-weight:500;line-height:1.4em;max-width:300px;padding:8px;pointer-events:none;position:absolute;z-index:100}.u-tooltip-data{align-items:center;display:flex;flex-wrap:wrap;font-size:11px;line-height:150%}.u-tooltip-data__value{font-weight:700;padding:4px}.u-tooltip__info{grid-gap:4px;display:grid}.u-tooltip__marker{height:12px;margin-right:4px;width:12px}.legendWrapper{grid-gap:20px;cursor:default;display:grid;grid-template-columns:repeat(auto-fit,minmax(400px,1fr));margin-top:20px}.legendGroup{margin-bottom:24px}.legendGroupTitle{align-items:center;display:flex;font-size:11px;padding:10px 0 5px}.legendGroupLine{margin:0 10px}.legendItem{grid-gap:6px;align-items:start;background-color:#fff;cursor:pointer;display:inline-grid;grid-template-columns:auto auto;justify-content:start;padding:5px 10px;transition:.2s ease}.legendItemHide{opacity:.5;text-decoration:line-through}.legendItem:hover{background-color:rgba(0,0,0,.1)}.legendMarker{border-style:solid;border-width:2px;box-sizing:border-box;height:12px;margin:3px 0;transition:.2s ease;width:12px}.legendLabel{font-size:11px;font-weight:400}

File diff suppressed because one or more lines are too long

View file

@ -20,7 +20,7 @@ object-assign
* @license MIT * @license MIT
*/ */
/** @license MUI v5.2.4 /** @license MUI v5.2.6
* *
* This source code is licensed under the MIT license found in the * This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree. * LICENSE file in the root directory of this source tree.

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View file

@ -4,17 +4,11 @@
"private": true, "private": true,
"homepage": "./", "homepage": "./",
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^0.19.9",
"@codemirror/basic-setup": "^0.19.1",
"@codemirror/commands": "^0.19.6",
"@codemirror/highlight": "^0.19.6",
"@codemirror/state": "^0.19.6",
"@codemirror/view": "^0.19.29",
"@date-io/dayjs": "^2.11.0", "@date-io/dayjs": "^2.11.0",
"@emotion/react": "^11.7.1", "@emotion/react": "^11.7.1",
"@emotion/styled": "^11.6.0", "@emotion/styled": "^11.6.0",
"@mui/icons-material": "^5.2.4", "@mui/icons-material": "^5.2.5",
"@mui/lab": "^5.0.0-alpha.60", "@mui/lab": "^5.0.0-alpha.62",
"@mui/material": "^5.2.4", "@mui/material": "^5.2.4",
"@mui/styles": "^5.2.3", "@mui/styles": "^5.2.3",
"@testing-library/jest-dom": "^5.16.1", "@testing-library/jest-dom": "^5.16.1",
@ -24,13 +18,12 @@
"@types/lodash.debounce": "^4.0.6", "@types/lodash.debounce": "^4.0.6",
"@types/lodash.get": "^4.4.6", "@types/lodash.get": "^4.4.6",
"@types/lodash.throttle": "^4.1.6", "@types/lodash.throttle": "^4.1.6",
"@types/node": "^17.0.1", "@types/node": "^17.0.6",
"@types/numeral": "^2.0.2", "@types/numeral": "^2.0.2",
"@types/qs": "^6.9.7", "@types/qs": "^6.9.7",
"@types/react": "^17.0.37", "@types/react": "^17.0.38",
"@types/react-dom": "^17.0.11", "@types/react-dom": "^17.0.11",
"@types/react-measure": "^2.0.8", "@types/react-measure": "^2.0.8",
"codemirror-promql": "^0.18.0",
"dayjs": "^1.10.7", "dayjs": "^1.10.7",
"lodash.debounce": "^4.0.8", "lodash.debounce": "^4.0.8",
"lodash.get": "^4.4.2", "lodash.get": "^4.4.2",
@ -73,11 +66,11 @@
] ]
}, },
"devDependencies": { "devDependencies": {
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.5", "@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.7",
"@typescript-eslint/eslint-plugin": "^5.7.0", "@typescript-eslint/eslint-plugin": "^5.8.1",
"@typescript-eslint/parser": "^5.7.0", "@typescript-eslint/parser": "^5.8.0",
"customize-cra": "^1.0.0", "customize-cra": "^1.0.0",
"eslint-plugin-react": "^7.27.1", "eslint-plugin-react": "^7.28.0",
"react-app-rewired": "^2.1.8" "react-app-rewired": "^2.1.11"
} }
} }

View file

@ -5,3 +5,5 @@ export const getQueryRangeUrl = (server: string, query: string, period: TimePara
export const getQueryUrl = (server: string, query: string, period: TimeParams): string => export const getQueryUrl = (server: string, query: string, period: TimeParams): string =>
`${server}/api/v1/query?query=${encodeURIComponent(query)}&start=${period.start}&end=${period.end}&step=${period.step}`; `${server}/api/v1/query?query=${encodeURIComponent(query)}&start=${period.start}&end=${period.end}&step=${period.step}`;
export const getQueryOptions = (server: string) => `${server}/api/v1/label/__name__/values`;
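
`getQueryOptions` points the new vmui autocomplete at the Prometheus-compatible label-values endpoint, which returns all metric names as `{"status":"success","data":[...]}`. A minimal Go sketch of the same request; `http://localhost:8428` is a placeholder for the server URL:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type labelValuesResponse struct {
	Status string   `json:"status"`
	Data   []string `json:"data"` // metric names used as autocomplete options
}

func main() {
	resp, err := http.Get("http://localhost:8428/api/v1/label/__name__/values")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var lv labelValuesResponse
	if err := json.NewDecoder(resp.Body).Decode(&lv); err != nil {
		panic(err)
	}
	fmt.Printf("%d metric names, e.g. %v\n", len(lv.Data), lv.Data[:min(3, len(lv.Data))])
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
```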

View file

@ -0,0 +1,67 @@
import React, {FC} from "react";
import {AppBar, Box, Link, Toolbar, Typography} from "@mui/material";
import {ExecutionControls} from "../Home/Configurator/Time/ExecutionControls";
import {DisplayTypeSwitch} from "../Home/Configurator/DisplayTypeSwitch";
import Logo from "../common/Logo";
import makeStyles from "@mui/styles/makeStyles";
import {setQueryStringWithoutPageReload} from "../../utils/query-string";
const useStyles = makeStyles({
logo: {
position: "relative",
display: "flex",
alignItems: "center",
color: "#fff",
cursor: "pointer",
"&:hover": {
textDecoration: "underline"
}
},
issueLink: {
position: "absolute",
bottom: "6px",
textAlign: "center",
fontSize: "10px",
opacity: ".4",
color: "inherit",
textDecoration: "underline",
transition: ".2s opacity",
"&:hover": {
opacity: ".8",
}
}
});
const Header: FC = () => {
const classes = useStyles();
const onClickLogo = () => {
setQueryStringWithoutPageReload("");
window.location.reload();
};
return <AppBar position="static">
<Toolbar>
<Box display="grid" alignItems="center" justifyContent="center">
<Box onClick={onClickLogo} className={classes.logo}>
<Logo style={{color: "inherit", marginRight: "6px"}}/>
<Typography variant="h5">
<span style={{fontWeight: "bolder"}}>VM</span>
<span style={{fontWeight: "lighter"}}>UI</span>
</Typography>
</Box>
<Link className={classes.issueLink} target="_blank"
href="https://github.com/VictoriaMetrics/VictoriaMetrics/issues/new">
create an issue
</Link>
</Box>
<Box ml={4} flexGrow={1}>
<ExecutionControls/>
</Box>
<DisplayTypeSwitch/>
</Toolbar>
</AppBar>;
};
export default Header;

View file

@ -16,11 +16,12 @@ import {ErrorTypes} from "../../../../types";
export interface QueryConfiguratorProps { export interface QueryConfiguratorProps {
error?: ErrorTypes | string; error?: ErrorTypes | string;
queryOptions: string[]
} }
const QueryConfigurator: FC<QueryConfiguratorProps> = ({error}) => { const QueryConfigurator: FC<QueryConfiguratorProps> = ({error, queryOptions}) => {
const {serverUrl, query, queryHistory, time: {duration}, queryControls: {autocomplete}} = useAppState(); const {serverUrl, query, queryHistory, queryControls: {autocomplete}} = useAppState();
const dispatch = useAppDispatch(); const dispatch = useAppDispatch();
const [expanded, setExpanded] = useState(true); const [expanded, setExpanded] = useState(true);
const queryContainer = useRef<HTMLDivElement>(null); const queryContainer = useRef<HTMLDivElement>(null);
@ -91,10 +92,8 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({error}) => {
{query.map((q, i) => {query.map((q, i) =>
<Box key={i} display="grid" gridTemplateColumns="1fr auto" gap="4px" width="100%" <Box key={i} display="grid" gridTemplateColumns="1fr auto" gap="4px" width="100%"
mb={i === query.length - 1 ? 0 : 2}> mb={i === query.length - 1 ? 0 : 2}>
<QueryEditor server={serverUrl} query={query[i]} index={i} oneLiner={!expanded} <QueryEditor query={query[i]} index={i} autocomplete={autocomplete} queryOptions={queryOptions}
autocomplete={autocomplete} queryHistory={queryHistory[i]} error={error} error={error} setHistoryIndex={setHistoryIndex} runQuery={onRunQuery} setQuery={onSetQuery}/>
setHistoryIndex={setHistoryIndex} runQuery={onRunQuery}
setQuery={onSetQuery}/>
{i === 0 && <Tooltip title="Execute Query"> {i === 0 && <Tooltip title="Execute Query">
<IconButton onClick={onRunQuery}> <IconButton onClick={onRunQuery}>
<PlayCircleOutlineIcon/> <PlayCircleOutlineIcon/>
@ -123,7 +122,7 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({error}) => {
</Box>} </Box>}
</Grid> </Grid>
<Grid item xs> <Grid item xs>
<TimeSelector setDuration={onSetDuration} duration={duration}/> <TimeSelector setDuration={onSetDuration}/>
</Grid> </Grid>
<Grid item xs={12} pt={1}> <Grid item xs={12} pt={1}>
<AdditionalSettings/> <AdditionalSettings/>

View file

@ -1,11 +1,7 @@
import {EditorState} from "@codemirror/state"; import React, {FC, useEffect, useState} from "react";
import {EditorView, keymap} from "@codemirror/view";
import {defaultKeymap} from "@codemirror/commands";
import React, {FC, useEffect, useRef, useState} from "react";
import {PromQLExtension} from "codemirror-promql";
import {basicSetup} from "@codemirror/basic-setup";
import {QueryHistory} from "../../../../state/common/reducer";
import {ErrorTypes} from "../../../../types"; import {ErrorTypes} from "../../../../types";
import {Autocomplete, TextField} from "@mui/material";
import {queryToBreakLine} from "../../../../utils/query-string";
export interface QueryEditorProps { export interface QueryEditorProps {
setHistoryIndex: (step: number, index: number) => void; setHistoryIndex: (step: number, index: number) => void;
@ -13,71 +9,37 @@ export interface QueryEditorProps {
runQuery: () => void; runQuery: () => void;
query: string; query: string;
index: number; index: number;
queryHistory: QueryHistory;
server: string;
oneLiner?: boolean; oneLiner?: boolean;
autocomplete: boolean; autocomplete: boolean;
error?: ErrorTypes | string; error?: ErrorTypes | string;
queryOptions: string[];
} }
const QueryEditor: FC<QueryEditorProps> = ({ const QueryEditor: FC<QueryEditorProps> = ({
index, index,
query, query,
queryHistory,
setHistoryIndex, setHistoryIndex,
setQuery, setQuery,
runQuery, runQuery,
server,
oneLiner = false,
autocomplete, autocomplete,
error error,
queryOptions
}) => { }) => {
const ref = useRef<HTMLDivElement>(null); const [value, setValue] = useState(query);
const [downMetaKeys, setDownMetaKeys] = useState<string[]>([]);
const [editorView, setEditorView] = useState<EditorView>();
const [focusEditor, setFocusEditor] = useState(false);
// init editor view on load
useEffect(() => { useEffect(() => {
if (ref.current) { setValue(queryToBreakLine(query));
setEditorView(new EditorView( }, [query]);
{
parent: ref.current
})
);
}
return () => editorView?.destroy();
}, []);
// update state on change of autocomplete server const handleKeyDown = (e: React.KeyboardEvent<HTMLDivElement>): void => {
useEffect(() => { if (e.ctrlKey || e.metaKey) setDownMetaKeys([...downMetaKeys, e.key]);
const promQL = new PromQLExtension(); };
promQL.activateCompletion(autocomplete);
promQL.setComplete({remote: {url: server}});
const listenerExtension = EditorView.updateListener.of(editorUpdate => { const handleKeyUp = (e: React.KeyboardEvent<HTMLDivElement>): void => {
if (editorUpdate.focusChanged) {
setFocusEditor(editorView?.hasFocus || false);
}
if (editorUpdate.docChanged) {
setQuery(editorUpdate.state.doc.toJSON().map(el => el.trim()).join(""), index);
}
});
editorView?.setState(EditorState.create({
doc: query,
extensions: [
basicSetup,
keymap.of(defaultKeymap),
listenerExtension,
promQL.asExtension(),
]
}));
}, [server, editorView, autocomplete, queryHistory]);
const onKeyUp = (e: React.KeyboardEvent<HTMLDivElement>): void => {
const {key, ctrlKey, metaKey} = e; const {key, ctrlKey, metaKey} = e;
if (downMetaKeys.includes(key)) setDownMetaKeys(downMetaKeys.filter(k => k !== key));
const ctrlMetaKey = ctrlKey || metaKey; const ctrlMetaKey = ctrlKey || metaKey;
if (key === "Enter" && ctrlMetaKey) { if (key === "Enter" && ctrlMetaKey) {
runQuery(); runQuery();
@ -88,14 +50,25 @@ const QueryEditor: FC<QueryEditorProps> = ({
} }
}; };
return <div className={`query-editor-container return <Autocomplete
${focusEditor ? "query-editor-container_focus" : ""} freeSolo
query-editor-container-${oneLiner ? "one-line" : "multi-line"} fullWidth
${error === ErrorTypes.validQuery ? "query-editor-container_error" : ""}`}> disableClearable
{/*Class one-line-scroll and other codemirror styles are declared in index.css*/} options={autocomplete && !downMetaKeys.length ? queryOptions : []}
<label className="query-editor-label">Query</label> onChange={(event, value) => setQuery(value, index)}
<div className="query-editor" ref={ref} onKeyUp={onKeyUp}/> onKeyDown={handleKeyDown}
</div>; onKeyUp={handleKeyUp}
value={value}
renderInput={(params) =>
<TextField
{...params}
label={`Query ${index + 1}`}
multiline
error={!!error}
onChange={(e) => setQuery(e.target.value, index)}
/>
}
/>;
}; };
export default QueryEditor; export default QueryEditor;

View file

@ -1,5 +1,5 @@
import {useEffect, useMemo, useState} from "react"; import {useEffect, useMemo, useState} from "react";
import {getQueryRangeUrl, getQueryUrl} from "../../../../api/query-range"; import {getQueryOptions, getQueryRangeUrl, getQueryUrl} from "../../../../api/query-range";
import {useAppState} from "../../../../state/common/StateContext"; import {useAppState} from "../../../../state/common/StateContext";
import {InstantMetricResult, MetricBase, MetricResult} from "../../../../api/types"; import {InstantMetricResult, MetricBase, MetricResult} from "../../../../api/types";
import {isValidHttpUrl} from "../../../../utils/url"; import {isValidHttpUrl} from "../../../../utils/url";
@ -17,12 +17,14 @@ export const useFetchQuery = (): {
graphData?: MetricResult[], graphData?: MetricResult[],
liveData?: InstantMetricResult[], liveData?: InstantMetricResult[],
error?: ErrorTypes | string, error?: ErrorTypes | string,
queryOptions: string[],
} => { } => {
const {query, displayType, serverUrl, time: {period}, queryControls: {nocache, autoRefresh}} = useAppState(); const {query, displayType, serverUrl, time: {period}, queryControls: {nocache, autoRefresh}} = useAppState();
const {basicData, bearerData, authMethod} = useAuthState(); const {basicData, bearerData, authMethod} = useAuthState();
const {customStep} = useGraphState(); const {customStep} = useGraphState();
const [queryOptions, setQueryOptions] = useState([]);
const [isLoading, setIsLoading] = useState(false); const [isLoading, setIsLoading] = useState(false);
const [graphData, setGraphData] = useState<MetricResult[]>(); const [graphData, setGraphData] = useState<MetricResult[]>();
const [liveData, setLiveData] = useState<InstantMetricResult[]>(); const [liveData, setLiveData] = useState<InstantMetricResult[]>();
@ -83,6 +85,21 @@ export const useFetchQuery = (): {
setIsLoading(false); setIsLoading(false);
}; };
const fetchOptions = async () => {
if (!serverUrl) return;
const url = getQueryOptions(serverUrl);
try {
const response = await fetch(url);
const resp = await response.json();
if (response.ok) {
setQueryOptions(resp.data);
}
} catch (e) {
if (e instanceof Error) setError(`${e.name}: ${e.message}`);
}
};
const fetchUrl = useMemo(() => { const fetchUrl = useMemo(() => {
const server = appModeEnable ? appServerUrl : serverUrl; const server = appModeEnable ? appServerUrl : serverUrl;
if (!period) return; if (!period) return;
@ -103,27 +120,22 @@ export const useFetchQuery = (): {
}, },
[serverUrl, period, displayType, customStep]); [serverUrl, period, displayType, customStep]);
useEffect(() => {
fetchOptions();
}, [serverUrl]);
useEffect(() => { useEffect(() => {
setPrevPeriod(undefined); setPrevPeriod(undefined);
}, [query]); }, [query]);
// TODO: this should depend on query as well, but need to decide when to do the request. // TODO: this should depend on query as well, but need to decide when to do the request. Doing it on each query change - looks to be a bad idea. Probably can be done on blur
// Doing it on each query change - looks to be a bad idea. Probably can be done on blur
useEffect(() => { useEffect(() => {
fetchData(); fetchData();
}, [serverUrl, displayType, customStep]); }, [serverUrl, displayType, customStep]);
useEffect(() => { useEffect(() => {
if (needUpdateData) { if (needUpdateData) fetchData();
fetchData();
}
}, [period]); }, [period]);
return { return { fetchUrl, isLoading, graphData, liveData, error, queryOptions: queryOptions };
fetchUrl,
isLoading,
graphData,
liveData,
error
};
}; };

View file

@ -0,0 +1,99 @@
import React, {FC, useEffect, useState} from "react";
import {Box, Popover, TextField, Typography} from "@mui/material";
import {checkDurationLimit} from "../../../../utils/time";
import {TimeDurationPopover} from "./TimeDurationPopover";
import {InlineBtn} from "../../../common/InlineBtn";
import {useAppState} from "../../../../state/common/StateContext";
interface TimeDurationSelector {
setDuration: (str: string) => void;
}
const TimeDurationSelector: FC<TimeDurationSelector> = ({setDuration}) => {
const {time: {duration}} = useAppState();
const [anchorEl, setAnchorEl] = React.useState<Element | null>(null);
const [durationString, setDurationString] = useState<string>(duration);
const [durationStringFocused, setFocused] = useState(false);
const open = Boolean(anchorEl);
const handleDurationChange = (event: React.ChangeEvent<HTMLInputElement>) => {
setDurationString(event.target.value);
};
const handlePopoverOpen = (event: React.MouseEvent<Element, MouseEvent>) => {
setAnchorEl(event.currentTarget);
};
const handlePopoverClose = () => {
setAnchorEl(null);
};
const onKeyUp = (event: React.KeyboardEvent<HTMLInputElement>) => {
if (event.key !== "Enter") return;
const target = event.target as HTMLInputElement;
target.blur();
setDurationString(target.value);
};
useEffect(() => {
setDurationString(duration);
}, [duration]);
useEffect(() => {
if (!durationStringFocused) {
const value = checkDurationLimit(durationString);
setDurationString(value);
setDuration(value);
}
}, [durationString, durationStringFocused]);
return <>
<Box>
<TextField label="Duration" value={durationString} onChange={handleDurationChange}
variant="standard"
fullWidth={true}
onKeyUp={onKeyUp}
onBlur={() => {
setFocused(false);
}}
onFocus={() => {
setFocused(true);
}}
/>
</Box>
<Box mt={2}>
<Typography variant="body2">
<span aria-owns={open ? "mouse-over-popover" : undefined}
aria-haspopup="true"
style={{cursor: "pointer"}}
onMouseEnter={handlePopoverOpen}
onMouseLeave={handlePopoverClose}>
Possible options:&nbsp;
</span>
<Popover
open={open}
anchorEl={anchorEl}
anchorOrigin={{
vertical: "bottom",
horizontal: "left",
}}
transformOrigin={{
vertical: "top",
horizontal: "left",
}}
style={{pointerEvents: "none"}} // important
onClose={handlePopoverClose}
disableRestoreFocus
>
<TimeDurationPopover/>
</Popover>
<InlineBtn handler={() => setDurationString("5m")} text="5m"/>,&nbsp;
<InlineBtn handler={() => setDurationString("1h")} text="1h"/>,&nbsp;
<InlineBtn handler={() => setDurationString("1h 30m")} text="1h 30m"/>
</Typography>
</Box>
</>;
};
export default TimeDurationSelector;

View file

@ -1,128 +1,85 @@
import React, {FC, useEffect, useState} from "react"; import React, {FC, useEffect, useState} from "react";
import {Box, Popover, TextField, Typography} from "@mui/material"; import {Box, TextField, Typography} from "@mui/material";
import DateTimePicker from "@mui/lab/DateTimePicker"; import DateTimePicker from "@mui/lab/DateTimePicker";
import {TimeDurationPopover} from "./TimeDurationPopover";
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext"; import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
import {checkDurationLimit, dateFromSeconds, formatDateForNativeInput} from "../../../../utils/time"; import {dateFromSeconds, formatDateForNativeInput} from "../../../../utils/time";
import {InlineBtn} from "../../../common/InlineBtn"; import {InlineBtn} from "../../../common/InlineBtn";
import makeStyles from "@mui/styles/makeStyles"; import makeStyles from "@mui/styles/makeStyles";
import TimeDurationSelector from "./TimeDurationSelector";
import dayjs from "dayjs";
interface TimeSelectorProps { interface TimeSelectorProps {
setDuration: (str: string) => void; setDuration: (str: string) => void;
duration: string;
} }
const useStyles = makeStyles({ const useStyles = makeStyles({
container: { container: {
display: "grid", display: "grid",
gridTemplateColumns: "auto auto", gridTemplateColumns: "200px 1fr",
gridGap: "20px",
height: "100%", height: "100%",
padding: "18px 14px", padding: "20px",
borderRadius: "4px", borderRadius: "4px",
borderColor: "#b9b9b9", borderColor: "#b9b9b9",
borderStyle: "solid", borderStyle: "solid",
borderWidth: "1px" borderWidth: "1px"
} },
timeControls: {
display: "grid",
gridTemplateColumns: "1fr",
gridTemplateRows: "auto 1fr",
gridGap: "16px 0",
},
datePickers: {
display: "grid",
gridTemplateColumns: "repeat(auto-fit, 200px)",
gridGap: "16px 0",
},
datePickerItem: {
minWidth: "200px",
},
}); });
export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => { export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
const classes = useStyles(); const classes = useStyles();
const [durationStringFocused, setFocused] = useState(false);
const [anchorEl, setAnchorEl] = React.useState<Element | null>(null);
const [until, setUntil] = useState<string>(); const [until, setUntil] = useState<string>();
const [from, setFrom] = useState<string>();
const {time: {period: {end}, duration}} = useAppState(); const {time: {period: {end, start}}} = useAppState();
const dispatch = useAppDispatch(); const dispatch = useAppDispatch();
const [durationString, setDurationString] = useState<string>(duration);
useEffect(() => {
setDurationString(duration);
}, [duration]);
useEffect(() => { useEffect(() => {
setUntil(formatDateForNativeInput(dateFromSeconds(end))); setUntil(formatDateForNativeInput(dateFromSeconds(end)));
}, [end]); }, [end]);
useEffect(() => { useEffect(() => {
if (!durationStringFocused) { setFrom(formatDateForNativeInput(dateFromSeconds(start)));
const value = checkDurationLimit(durationString); }, [start]);
setDurationString(value);
setDuration(value);
}
}, [durationString, durationStringFocused]);
const handleDurationChange = (event: React.ChangeEvent<HTMLInputElement>) => {
setDurationString(event.target.value);
};
const handlePopoverOpen = (event: React.MouseEvent<Element, MouseEvent>) => {
setAnchorEl(event.currentTarget);
};
const handlePopoverClose = () => {
setAnchorEl(null);
};
const onKeyUp = (event: React.KeyboardEvent<HTMLInputElement>) => {
if (event.key !== "Enter") return;
const target = event.target as HTMLInputElement;
target.blur();
setDurationString(target.value);
};
const open = Boolean(anchorEl);
return <Box className={classes.container}> return <Box className={classes.container}>
{/*setup duration*/} {/*setup duration*/}
<Box px={1}>
<Box> <Box>
<TextField label="Duration" value={durationString} onChange={handleDurationChange} <TimeDurationSelector setDuration={setDuration}/>
variant="standard"
fullWidth={true}
onKeyUp={onKeyUp}
onBlur={() => {setFocused(false);}}
onFocus={() => {setFocused(true);}}
/>
</Box>
<Box mt={2}>
<Typography variant="body2">
<span aria-owns={open ? "mouse-over-popover" : undefined}
aria-haspopup="true"
style={{cursor: "pointer"}}
onMouseEnter={handlePopoverOpen}
onMouseLeave={handlePopoverClose}>
Possible options:&nbsp;
</span>
<Popover
open={open}
anchorEl={anchorEl}
anchorOrigin={{
vertical: "bottom",
horizontal: "left",
}}
transformOrigin={{
vertical: "top",
horizontal: "left",
}}
style={{pointerEvents: "none"}} // important
onClose={handlePopoverClose}
disableRestoreFocus
>
<TimeDurationPopover/>
</Popover>
<InlineBtn handler={() => setDurationString("5m")} text="5m"/>,&nbsp;
<InlineBtn handler={() => setDurationString("1h")} text="1h"/>,&nbsp;
<InlineBtn handler={() => setDurationString("1h 30m")} text="1h 30m"/>
</Typography>
</Box>
</Box> </Box>
{/*setup end time*/} {/*setup end time*/}
<Box px={1}> <Box className={classes.timeControls}>
<Box> <Box className={classes.datePickers}>
<Box className={classes.datePickerItem}>
<DateTimePicker
label="From"
ampm={false}
value={from}
onChange={date => dispatch({type: "SET_FROM", payload: date as unknown as Date})}
onError={console.log}
inputFormat="DD/MM/YYYY HH:mm:ss"
mask="__/__/____ __:__:__"
renderInput={(params) => <TextField {...params} variant="standard"/>}
maxDate={dayjs(until)}
/>
</Box>
<Box className={classes.datePickerItem}>
<DateTimePicker <DateTimePicker
label="Until" label="Until"
ampm={false} ampm={false}
@ -134,8 +91,8 @@ export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
renderInput={(params) => <TextField {...params} variant="standard"/>} renderInput={(params) => <TextField {...params} variant="standard"/>}
/> />
</Box> </Box>
</Box>
<Box mt={2}> <Box>
<Typography variant="body2"> <Typography variant="body2">
Will be changed to current time for auto-refresh mode.&nbsp; Will be changed to current time for auto-refresh mode.&nbsp;
<InlineBtn handler={() => dispatch({type: "RUN_QUERY_TO_NOW"})} text="Switch to now"/> <InlineBtn handler={() => dispatch({type: "RUN_QUERY_TO_NOW"})} text="Switch to now"/>

View file

@ -1,55 +1,25 @@
import React, {FC} from "react"; import React, {FC} from "react";
import {Alert, AppBar, Box, CircularProgress, Fade, Link, Toolbar, Typography} from "@mui/material"; import {Alert, Box, CircularProgress, Fade} from "@mui/material";
import {ExecutionControls} from "./Configurator/Time/ExecutionControls";
import {DisplayTypeSwitch} from "./Configurator/DisplayTypeSwitch";
import GraphView from "./Views/GraphView"; import GraphView from "./Views/GraphView";
import TableView from "./Views/TableView"; import TableView from "./Views/TableView";
import {useAppState} from "../../state/common/StateContext"; import {useAppState} from "../../state/common/StateContext";
import QueryConfigurator from "./Configurator/Query/QueryConfigurator"; import QueryConfigurator from "./Configurator/Query/QueryConfigurator";
import {useFetchQuery} from "./Configurator/Query/useFetchQuery"; import {useFetchQuery} from "./Configurator/Query/useFetchQuery";
import JsonView from "./Views/JsonView"; import JsonView from "./Views/JsonView";
import Header from "../Header/Header";
const HomeLayout: FC = () => { const HomeLayout: FC = () => {
const {displayType, time: {period}} = useAppState(); const {displayType, time: {period}} = useAppState();
const {isLoading, liveData, graphData, error} = useFetchQuery(); const {isLoading, liveData, graphData, error, queryOptions} = useFetchQuery();
return ( return (
<Box id="homeLayout"> <Box id="homeLayout">
<AppBar position="static"> <Header/>
<Toolbar>
<Box display="flex">
<Typography variant="h5">
<span style={{fontWeight: "bolder"}}>VM</span>
<span style={{fontWeight: "lighter"}}>UI</span>
</Typography>
<div style={{
fontSize: "10px",
marginTop: "-2px"
}}>
<div>BETA</div>
</div>
</Box>
<div style={{
fontSize: "10px",
position: "absolute",
top: "40px",
opacity: ".4"
}}>
<Link color="inherit" href="https://github.com/VictoriaMetrics/VictoriaMetrics/issues/new" target="_blank">
Create an issue
</Link>
</div>
<Box ml={4} flexGrow={1}>
<ExecutionControls/>
</Box>
<DisplayTypeSwitch/>
</Toolbar>
</AppBar>
<Box p={4} display="grid" gridTemplateRows="auto 1fr" gap={"20px"} style={{minHeight: "calc(100vh - 64px)"}}> <Box p={4} display="grid" gridTemplateRows="auto 1fr" gap={"20px"} style={{minHeight: "calc(100vh - 64px)"}}>
<Box> <Box>
<QueryConfigurator error={error}/> <QueryConfigurator error={error} queryOptions={queryOptions}/>
</Box> </Box>
<Box height={"100%"}> <Box height={"100%"}>
{isLoading && <Fade in={isLoading} style={{ {isLoading && <Fade in={isLoading} style={{

View file

@ -17,15 +17,17 @@ const Legend: FC<LegendProps> = ({labels, onChange}) => {
return Array.from(new Set(labels.map(l => l.group))); return Array.from(new Set(labels.map(l => l.group)));
}, [labels]); }, [labels]);
return <div className="legendWrapper"> return <>
<div className="legendWrapper">
{groups.map((group) => <div className="legendGroup" key={group}> {groups.map((group) => <div className="legendGroup" key={group}>
<div className="legendGroupTitle"> <div className="legendGroupTitle">
<span className="legendGroupQuery">Query {group}</span>
<svg className="legendGroupLine" width="33" height="3" version="1.1" xmlns="http://www.w3.org/2000/svg"> <svg className="legendGroupLine" width="33" height="3" version="1.1" xmlns="http://www.w3.org/2000/svg">
<line strokeWidth="3" x1="0" y1="0" x2="33" y2="0" stroke="#363636" <line strokeWidth="3" x1="0" y1="0" x2="33" y2="0" stroke="#363636"
strokeDasharray={getDashLine(group).join(",")} strokeDasharray={getDashLine(group).join(",")}
/> />
</svg> </svg>
<b>&quot;{query[group - 1]}&quot;</b>: <b>&quot;{query[group - 1]}&quot;:</b>
</div> </div>
<div> <div>
{labels.filter(l => l.group === group).map((legendItem: LegendItem) => {labels.filter(l => l.group === group).map((legendItem: LegendItem) =>
@ -42,7 +44,13 @@ const Legend: FC<LegendProps> = ({labels, onChange}) => {
)} )}
</div> </div>
</div>)} </div>)}
</div>; </div>
<div className="legendWrapperHotkey">
<p><code>Left click</code> - select series</p>
<p><code>Ctrl</code> + <code>Left click</code> - toggle multiple series</p>
</div>
</>;
}; };
export default Legend; export default Legend;

View file

@ -1,4 +1,5 @@
.legendWrapper { .legendWrapper {
position: relative;
display: grid; display: grid;
grid-template-columns: repeat(auto-fit, minmax(400px, 1fr)); grid-template-columns: repeat(auto-fit, minmax(400px, 1fr));
grid-gap: 20px; grid-gap: 20px;
@ -11,14 +12,20 @@
} }
.legendGroupTitle { .legendGroupTitle {
display: flex; display: grid;
grid-template-columns: 43px auto;
align-items: center; align-items: center;
padding: 10px 0 5px; padding: 10px;
font-size: 11px; font-size: 11px;
} }
.legendGroupQuery {
grid-column: 1/3;
opacity: 0.6;
}
.legendGroupLine { .legendGroupLine {
margin: 0 10px; margin-right: 10px;
} }
.legendItem { .legendItem {
@ -56,3 +63,26 @@
font-size: 11px; font-size: 11px;
font-weight: normal; font-weight: normal;
} }
.legendWrapperHotkey {
display: flex;
align-items: center;
font-size: 11px;
}
.legendWrapperHotkey p {
margin-right: 20px;
}
.legendWrapperHotkey code {
display: inline;
max-width: 100%;
padding: 4px 6px;
border: 1px solid #dedede;
background-color: #f2f2f2;
border-radius: 2px;
font-weight: 400;
font-size: 10px;
color: #0a0a0a;
word-wrap: break-word;
}

View file

@ -0,0 +1,19 @@
import React, {FC} from "react";
import {SvgIcon} from "@mui/material";
interface LogoProps {
style?: React.CSSProperties
}
const Logo: FC<LogoProps> = ({style}) => (
<SvgIcon style={style} viewBox="0 0 20 24">
<path
d="M8.27 10.58a2.8 2.8 0 0 0 1.7.59h.07c.65-.01 1.3-.26 1.69-.6 2.04-1.73 7.95-7.15 7.95-7.15C21.26 1.95 16.85.48 10.04.47h-.08C3.15.48-1.26 1.95.32 3.42c0 0 5.91 5.42 7.95 7.16"/>
<path
d="M11.73 13.51a2.8 2.8 0 0 1-1.7.6h-.06a2.8 2.8 0 0 1-1.7-.6C6.87 12.31 1.87 7.8 0 6.08v2.61c0 .29.11.67.3.85 1.28 1.17 6.2 5.67 7.97 7.18a2.8 2.8 0 0 0 1.7.6h.06c.66-.02 1.3-.27 1.7-.6 1.77-1.5 6.69-6.01 7.96-7.18.2-.18.3-.56.3-.85V6.08a615.27 615.27 0 0 1-8.26 7.43"/>
<path
d="M11.73 19.66a2.8 2.8 0 0 1-1.7.59h-.06a2.8 2.8 0 0 1-1.7-.6c-1.4-1.2-6.4-5.72-8.27-7.43v2.62c0 .28.11.66.3.84 1.28 1.17 6.2 5.68 7.97 7.19a2.8 2.8 0 0 0 1.7.59h.06c.66-.01 1.3-.26 1.7-.6 1.77-1.5 6.69-6 7.96-7.18.2-.18.3-.56.3-.84v-2.62a614.96 614.96 0 0 1-8.26 7.44"/>
</SvgIcon>
);
export default Logo;

View file

@ -15,81 +15,3 @@ code {
.MuiAccordionSummary-content { .MuiAccordionSummary-content {
margin: 0 !important; margin: 0 !important;
} }
/*Codemirror classes*/
/* TODO: find better way to override codemirror styles */
.cm-activeLine {
background-color: inherit !important;
}
.cm-editor {
border: none;
border-radius: 4px;
font-size: 10px;
}
.cm-gutters {
border-radius: 4px 0 0 4px;
height: 100%;
overflow: hidden;
background-color: #FFFFFF !important;
border: none !important;
}
.cm-activeLineGutter {
background-color: #FFFFFF !important;
}
.query-editor .cm-scroller {
align-items: center !important;
}
.query-editor .cm-editor.cm-focused {
outline: none;
}
.query-editor-container {
position: relative;
padding: 12px;
border: 1px solid #b9b9b9;
border-radius: 4px;
}
.query-editor-container_focus {
border: 1px solid #3F51B5;
}
.query-editor-container_error {
border-color: #FF4141;
}
.query-editor-container-one-line .query-editor .cm-editor {
height: 22px;
}
.query-editor-container-one-line {
padding: 6px;
}
.query-editor-label {
font-weight: 400;
font-size: 12px;
line-height: 1;
letter-spacing: normal;
color: rgba(0, 0, 0, 0.6);
padding: 0 5px;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
max-width: calc(133% - 24px);
position: absolute;
left: 4px;
top: -0.71875em;
z-index: 1;
background-color: #FFFFFF;
transform: scale(0.75);
}
.query-editor-container_error .query-editor-label {
color: #FF4141;
}

View file

@ -1,10 +1,18 @@
/* eslint max-lines: 0 */ /* eslint max-lines: 0 */
import {DisplayType} from "../../components/Home/Configurator/DisplayTypeSwitch"; import {DisplayType} from "../../components/Home/Configurator/DisplayTypeSwitch";
import {TimeParams, TimePeriod} from "../../types"; import {TimeParams, TimePeriod} from "../../types";
import {dateFromSeconds, formatDateToLocal, getDateNowUTC, getDurationFromPeriod, getTimeperiodForDuration} from "../../utils/time"; import {
dateFromSeconds,
formatDateToLocal,
getDateNowUTC,
getDurationFromPeriod,
getTimeperiodForDuration,
getDurationFromMilliseconds
} from "../../utils/time";
import {getFromStorage} from "../../utils/storage"; import {getFromStorage} from "../../utils/storage";
import {getDefaultServer} from "../../utils/default-server-url"; import {getDefaultServer} from "../../utils/default-server-url";
import {getQueryArray, getQueryStringValue} from "../../utils/query-string"; import {breakLineToQuery, getQueryArray, getQueryStringValue} from "../../utils/query-string";
import dayjs from "dayjs";
export interface TimeState { export interface TimeState {
duration: string; duration: string;
@ -37,6 +45,7 @@ export type Action =
| { type: "SET_QUERY_HISTORY", payload: QueryHistory[] } | { type: "SET_QUERY_HISTORY", payload: QueryHistory[] }
| { type: "SET_DURATION", payload: string } | { type: "SET_DURATION", payload: string }
| { type: "SET_UNTIL", payload: Date } | { type: "SET_UNTIL", payload: Date }
| { type: "SET_FROM", payload: Date }
| { type: "SET_PERIOD", payload: TimePeriod } | { type: "SET_PERIOD", payload: TimePeriod }
| { type: "RUN_QUERY"} | { type: "RUN_QUERY"}
| { type: "RUN_QUERY_TO_NOW"} | { type: "RUN_QUERY_TO_NOW"}
@ -79,7 +88,7 @@ export function reducer(state: AppState, action: Action): AppState {
case "SET_QUERY": case "SET_QUERY":
return { return {
...state, ...state,
query: action.payload query: action.payload.map(q => breakLineToQuery(q))
}; };
case "SET_QUERY_HISTORY": case "SET_QUERY_HISTORY":
return { return {
@ -109,6 +118,21 @@ export function reducer(state: AppState, action: Action): AppState {
period: getTimeperiodForDuration(state.time.duration, action.payload) period: getTimeperiodForDuration(state.time.duration, action.payload)
} }
}; };
case "SET_FROM":
// eslint-disable-next-line no-case-declarations
const durationFrom = getDurationFromMilliseconds(state.time.period.end*1000 - action.payload.valueOf());
return {
...state,
queryControls: {
...state.queryControls,
autoRefresh: false // since we consider this action to be fired from period selection on the chart
},
time: {
...state.time,
duration: durationFrom,
period: getTimeperiodForDuration(durationFrom, dayjs(state.time.period.end*1000).toDate())
}
};
case "SET_PERIOD": case "SET_PERIOD":
// eslint-disable-next-line no-case-declarations // eslint-disable-next-line no-case-declarations
const duration = getDurationFromPeriod(action.payload); const duration = getDurationFromPeriod(action.payload);

View file

@ -2,7 +2,7 @@ import {MetricBase} from "../api/types";
export const getNameForMetric = (result: MetricBase): string => { export const getNameForMetric = (result: MetricBase): string => {
if (Object.keys(result.metric).length === 0) { if (Object.keys(result.metric).length === 0) {
return "Query result"; // a bit better than just {} for case of aggregation functions return `Query ${result.group} result`; // a bit better than just {} for case of aggregation functions
} }
const { __name__: name, ...freeFormFields } = result.metric; const { __name__: name, ...freeFormFields } = result.metric;
return `${name || ""} {${Object.entries(freeFormFields).map(e => `${e[0]}: ${e[1]}`).join(", ")}}`; return `${name || ""} {${Object.entries(freeFormFields).map(e => `${e[0]}: ${e[1]}`).join(", ")}}`;

View file

@ -48,7 +48,7 @@ export const setQueryStringValue = (newValue: Record<string, unknown>): void =>
newQsValue.push(`g${i}.${queryKey}=${valueEncoded}`); newQsValue.push(`g${i}.${queryKey}=${valueEncoded}`);
} }
}); });
newQsValue.push(`g${i}.expr=${q}`); newQsValue.push(`g${i}.expr=${breakLineToQuery(q)}`);
}); });
setQueryStringWithoutPageReload(newQsValue.join("&")); setQueryStringWithoutPageReload(newQsValue.join("&"));
@ -69,3 +69,7 @@ export const getQueryArray = (): string[] => {
return getQueryStringValue(`g${i}.expr`, "") as string; return getQueryStringValue(`g${i}.expr`, "") as string;
}); });
}; };
export const breakLineToQuery = (q: string): string => q.replace(/\n/g, "%20");
export const queryToBreakLine = (q: string): string => q.replace(/%20/g, "\n");

View file

@ -75,7 +75,7 @@ export const formatDateForNativeInput = (date: Date): string => dayjs(date).form
export const getDateNowUTC = (): Date => new Date(dayjs().utc().format(dateIsoFormat)); export const getDateNowUTC = (): Date => new Date(dayjs().utc().format(dateIsoFormat));
const getDurationFromMilliseconds = (ms: number): string => { export const getDurationFromMilliseconds = (ms: number): string => {
const milliseconds = Math.floor(ms % 1000); const milliseconds = Math.floor(ms % 1000);
const seconds = Math.floor((ms / 1000) % 60); const seconds = Math.floor((ms / 1000) % 60);
const minutes = Math.floor((ms / 1000 / 60) % 60); const minutes = Math.floor((ms / 1000 / 60) % 60);
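
`getDurationFromMilliseconds` is exported above so the new `SET_FROM` action can turn `end*1000 - from` into a duration string. The visible lines are plain modular arithmetic; here is the same decomposition rendered in Go, with the final string composition an assumption since the diff cuts off after the first fields:

```go
package main

import "fmt"

// durationFromMilliseconds mirrors the modular-arithmetic decomposition
// shown in the hunk above; the "Xd Xh Xm Xs Xms" formatting is assumed.
func durationFromMilliseconds(ms int64) string {
	milliseconds := ms % 1000
	seconds := (ms / 1000) % 60
	minutes := (ms / 1000 / 60) % 60
	hours := (ms / 1000 / 60 / 60) % 24
	days := ms / 1000 / 60 / 60 / 24
	return fmt.Sprintf("%dd %dh %dm %ds %dms", days, hours, minutes, seconds, milliseconds)
}

func main() {
	// 90061001 ms = 1 day + 1 hour + 1 minute + 1 second + 1 millisecond
	fmt.Println(durationFromMilliseconds(90061001)) // 1d 1h 1m 1s 1ms
}
```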

View file

@ -32,12 +32,13 @@ export const getHideSeries = ({hideSeries, legend, metaKey, series}: HideSeriesA
const label = `${legend.group}.${legend.label}`; const label = `${legend.group}.${legend.label}`;
const include = includesHideSeries(legend.label, legend.group, hideSeries); const include = includesHideSeries(legend.label, legend.group, hideSeries);
const labels = series.map(s => `${s.scale}.${s.label}`); const labels = series.map(s => `${s.scale}.${s.label}`);
if (metaKey && include) { if (metaKey) {
return [...labels.filter(l => l !== label)];
} else if (metaKey && !include) {
return hideSeries.length >= series.length - 1 ? [] : [...labels.filter(l => l !== label)];
}
return include ? hideSeries.filter(l => l !== label) : [...hideSeries, label]; return include ? hideSeries.filter(l => l !== label) : [...hideSeries, label];
} else if (hideSeries.length) {
return include ? [...labels.filter(l => l !== label)] : [];
} else {
return [...labels.filter(l => l !== label)];
}
}; };
export const includesHideSeries = (label: string, group: string | number, hideSeries: string[]): boolean => { export const includesHideSeries = (label: string, group: string | number, hideSeries: string[]): boolean => {

View file

@ -0,0 +1,9 @@
RELEASE_NAME := vm-oneclick-droplet
VM_VERSION ?= $(shell git describe --abbrev=0 --tags)
.PHONY: $(MAKECMDGOALS)
release-victoria-metrics-digitalocean-oneclick-droplet:
cp ./files/etc/update-motd.d/99-one-click.tpl ./files/etc/update-motd.d/99-one-click
sed -i -e "s/VM_VERSION/${VM_VERSION}/g" ./files/etc/update-motd.d/99-one-click
packer build template.pkr.hcl

View file

@ -0,0 +1,54 @@
## Application summary
VictoriaMetrics is a fast and scalable open source time series database and monitoring solution.
## Description
VictoriaMetrics is a free [open source time series database](https://en.wikipedia.org/wiki/Time_series_database) (TSDB) and monitoring solution, designed to collect, store and process real-time metrics.
It supports the [Prometheus](https://en.wikipedia.org/wiki/Prometheus_(software)) pull model and various push protocols ([Graphite](https://en.wikipedia.org/wiki/Graphite_(software)), [InfluxDB](https://en.wikipedia.org/wiki/InfluxDB), OpenTSDB) for data ingestion. It is optimized for storage with high-latency IO, low IOPS and time series with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
For reading the data and evaluating alerting rules, VictoriaMetrics supports the PromQL, [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) and Graphite query languages. VictoriaMetrics Single is fully autonomous and can be used as a long-term storage for time series.
[VictoriaMetrics Single](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html) = Hassle-free monitoring solution. Easily handles 10M+ active time series on a single instance. Perfect for small and medium environments.
## Getting started after deploying VictoriaMetrics Single
### Config
VictoriaMetrics configuration is located at `/etc/victoriametrics/single/scrape.yml` on the droplet.
This One Click app uses ports 8428, 2003, 4242 and 8089 to accept metrics over different protocols. It's recommended to disable the ports for protocols which are not needed. The [Ubuntu firewall](https://help.ubuntu.com/community/UFW) can be used to easily disable access to specific ports.
### Scraping metrics
VictoriaMetrics supports metrics scraping in the same way as Prometheus does. Check the configuration file to edit scraping targets. See more details about scraping at [How to scrape Prometheus exporters](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-scrape-prometheus-exporters-such-as-node-exporter).
### Sending metrics
Besides scraping, VictoriaMetrics accepts write requests for various ingestion protocols. This One Click app supports the following protocols:
- [Datadog](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent), [Influx (Telegraf)](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf), [JSON](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format), [CSV](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-csv-data), [Prometheus](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-prometheus-exposition-format) on port :8428
- [Graphite (statsd)](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) on port :2003 tcp/udp
- [OpenTSDB](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents) on port :4242
- Influx (Telegraf) on port :8089 tcp/udp
See more details and examples in the [official documentation](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html).
### UI
VictoriaMetrics provides a [User Interface (UI)](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#vmui) for query troubleshooting and exploration. The UI is available at `http://your_droplet_public_ipv4:8428/vmui`. It lets users explore query results via graphs and tables.
To check it, open `http://your_droplet_public_ipv4:8428/vmui` in your browser, then enter `vm_app_uptime_seconds` in the query field and execute the query.
Run the following command to query and retrieve a result from VictoriaMetrics Single with `curl`:
```bash
curl -sg http://your_droplet_public_ipv4:8428/api/v1/query_range?query=vm_app_uptime_seconds | jq
```
### Accessing
Once the Droplet is created, you can use DigitalOcean's web console to start a session or SSH directly to the server as root:
```bash
ssh root@your_droplet_public_ipv4
```

View file

@ -0,0 +1,28 @@
## Release guide for DigitalOcean 1-Click App Droplet
### Build image
To build the snapshot in a DigitalOcean account you will need an API token and [packer](https://learn.hashicorp.com/tutorials/packer/get-started-install-cli).
An API token can be generated at [https://cloud.digitalocean.com/account/api/tokens](https://cloud.digitalocean.com/account/api/tokens) or an already generated one can be taken from OnePassword.
Set the `DIGITALOCEAN_API_TOKEN` environment variable:
```bash
export DIGITALOCEAN_API_TOKEN="your_token_here"
```
or pass it directly to `make`:
```bash
make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="your_token_here"
```
### Update information on Vendor Portal
After the packer build finishes, you need to update the product page.
1. Go to [https://cloud.digitalocean.com/vendorportal](https://cloud.digitalocean.com/vendorportal).
2. Choose a product that you need to update.
3. Enter the new information for this release and choose the droplet snapshot which was built recently.
4. Submit the updates for approval on the DigitalOcean Marketplace.

View file

@ -0,0 +1,29 @@
[Unit]
Description=VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
# https://docs.victoriametrics.com
After=network.target
[Service]
Type=simple
User=victoriametrics
Group=victoriametrics
WorkingDirectory=/var/lib/victoria-metrics-data
StartLimitBurst=5
StartLimitInterval=0
Restart=on-failure
RestartSec=5
EnvironmentFile=-/etc/victoriametrics/single/victoriametrics.conf
ExecStart=/usr/bin/victoria-metrics-prod $ARGS
ExecStop=/bin/kill -s SIGTERM $MAINPID
ExecReload=/bin/kill -HUP $MAINPID
# See docs https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#tuning
ProtectSystem=full
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=vmsingle
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,36 @@
#!/bin/sh
#
# Configured as part of the DigitalOcean 1-Click Image build process
myip=$(hostname -I | awk '{print$1}')
cat <<EOF
********************************************************************************
Welcome to DigitalOcean's 1-Click VictoriaMetrics Droplet.
To keep this Droplet secure, the UFW firewall is enabled.
All ports are BLOCKED except 22 (SSH), 80 (HTTP), 443 (HTTPS), 8428 (VictoriaMetrics HTTP), 8089 (VictoriaMetrics Influx),
4242 (VictoriaMetrics OpenTSDB) and 2003 (VictoriaMetrics Graphite).
In a web browser, you can view:
* The VictoriaMetrics 1-Click Quickstart guide: https://kutt.it/1click-quickstart
On the server:
* The default VictoriaMetrics root is located at /var/lib/victoria-metrics-data
* VictoriaMetrics is running on ports: 8428, 8089, 4242, 2003 and they are bound to the local interface.
********************************************************************************
# This image includes version VM_VERSION of VictoriaMetrics.
# See Release notes https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/VM_VERSION
# Welcome to VictoriaMetrics droplet!
# Website: https://victoriametrics.com
# Documentation: https://docs.victoriametrics.com
# VictoriaMetrics Github : https://github.com/VictoriaMetrics/VictoriaMetrics
# VictoriaMetrics Slack Community: https://slack.victoriametrics.com
# VictoriaMetrics Telegram Community: https://t.me/VictoriaMetrics_en
# VictoriaMetrics config: /etc/victoriametrics/single/victoriametrics.conf
********************************************************************************
EOF

View file

@@ -0,0 +1,36 @@
#!/bin/sh
#
# Configured as part of the DigitalOcean 1-Click Image build process
myip=$(hostname -I | awk '{print$1}')
cat <<EOF
********************************************************************************
Welcome to DigitalOcean's 1-Click VictoriaMetrics Droplet.
To keep this Droplet secure, the UFW firewall is enabled.
All ports are BLOCKED except 22 (SSH), 80 (HTTP), 443 (HTTPS), 8428 (VictoriaMetrics HTTP), 8089 (VictoriaMetrics Influx),
4242 (VictoriaMetrics OpenTSDB) and 2003 (VictoriaMetrics Graphite).
In a web browser, you can view:
* The VictoriaMetrics 1-Click Quickstart guide: https://kutt.it/1click-quickstart
On the server:
* The default VictoriaMetrics root is located at /var/lib/victoria-metrics-data
* VictoriaMetrics is running on ports: 8428, 8089, 4242, 2003 and they are bound to the local interface.
********************************************************************************
# This image includes version VM_VERSION of VictoriaMetrics.
# See Release notes https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/VM_VERSION
# Welcome to VictoriaMetrics droplet!
# Website: https://victoriametrics.com
# Documentation: https://docs.victoriametrics.com
# VictoriaMetrics Github : https://github.com/VictoriaMetrics/VictoriaMetrics
# VictoriaMetrics Slack Community: https://slack.victoriametrics.com
# VictoriaMetrics Telegram Community: https://t.me/VictoriaMetrics_en
# VictoriaMetrics config: /etc/victoriametrics/single/victoriametrics.conf
********************************************************************************
EOF

View file

@@ -0,0 +1,7 @@
# Scrape config example
#
scrape_configs:
- job_name: self_scrape
scrape_interval: 10s
static_configs:
- targets: ['127.0.0.1:8428']

View file

@@ -0,0 +1 @@
ARGS="-promscrape.config=/etc/victoriametrics/single/scrape.yml -storageDataPath=/var/lib/victoria-metrics-data -retentionPeriod=12 -httpListenAddr=:8428 -graphiteListenAddr=:2003 -opentsdbListenAddr=:4242 -influxListenAddr=:8089 -enableTCP6"

View file

@@ -0,0 +1,10 @@
#!/bin/sh
systemctl start vmsingle.service
# Remove the ssh force logout command
sed -e '/Match User root/d' \
-e '/.*ForceCommand.*droplet.*/d' \
-i /etc/ssh/sshd_config
systemctl restart ssh

View file

@@ -0,0 +1,15 @@
#!/bin/sh
# Create victoriametrics user
groupadd -r victoriametrics
useradd -g victoriametrics -d /var/lib/victoria-metrics-data -s /sbin/nologin --system victoriametrics
mkdir -p /var/lib/victoria-metrics-data
chown -R victoriametrics:victoriametrics /var/lib/victoria-metrics-data
rm -rf /var/lib/apt/lists/*
apt update
DEBIAN_FRONTEND=noninteractive apt -y full-upgrade
DEBIAN_FRONTEND=noninteractive apt -y install curl git wget software-properties-common
rm -rf /var/log/kern.log
rm -rf /var/log/ufw.log

View file

@@ -0,0 +1,16 @@
#!/bin/sh
sed -e 's|DEFAULT_FORWARD_POLICY=.*|DEFAULT_FORWARD_POLICY="ACCEPT"|g' \
-i /etc/default/ufw
ufw allow ssh comment "SSH port"
ufw allow http comment "HTTP port"
ufw allow https comment "HTTPS port"
ufw allow 8428 comment "VictoriaMetrics Single HTTP port"
ufw allow 8089/tcp comment "TCP Influx Listen port for VictoriaMetrics"
ufw allow 8089/udp comment "UDP Influx Listen port for VictoriaMetrics"
ufw allow 2003/tcp comment "TCP Graphite Listen port for VictoriaMetrics"
ufw allow 2003/udp comment "UDP Graphite Listen port for VictoriaMetrics"
ufw allow 4242 comment "OpenTSDB Listen port for VictoriaMetrics"
ufw --force enable
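A quick check of the resulting ruleset after this script runs (standard `ufw` tooling, shown as a sketch):
```bash
# Should list 22, 80, 443, 8428, 8089, 2003 and 4242 as allowed
ufw status verbose
```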

View file

@@ -0,0 +1,13 @@
#!/bin/sh
# Wait for cloud-init
cloud-init status --wait
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/${VM_VER}/victoria-metrics-amd64-${VM_VER}.tar.gz -O /tmp/victoria-metrics.tar.gz
tar xvf /tmp/victoria-metrics.tar.gz -C /usr/bin
chmod +x /usr/bin/victoria-metrics-prod
chown root:root /usr/bin/victoria-metrics-prod
# Enable VictoriaMetrics on boot
systemctl enable vmsingle.service
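A sketch for sanity-checking this step on the build droplet (`-version` is a standard VictoriaMetrics flag):
```bash
/usr/bin/victoria-metrics-prod -version   # confirm the expected release was unpacked
systemctl is-enabled vmsingle.service     # should print "enabled"
```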

View file

@@ -0,0 +1,7 @@
#!/bin/bash
#
# Remove log files generated during instance creation and
# configuration.
rm -rf /var/log/kern.log
rm -rf /var/log/ufw.log

View file

@@ -0,0 +1,44 @@
#!/bin/bash
# Ensure /tmp exists and has the proper permissions before
# checking for security updates
# https://github.com/digitalocean/marketplace-partners/issues/94
if [[ ! -d /tmp ]]; then
mkdir /tmp
fi
chmod 1777 /tmp
apt-get -y update
apt-get -y upgrade
rm -rf /tmp/* /var/tmp/*
history -c
cat /dev/null > /root/.bash_history
unset HISTFILE
apt-get -y autoremove
apt-get -y autoclean
find /var/log -mtime -1 -type f -exec truncate -s 0 {} \;
rm -rf /var/log/*.gz /var/log/*.[0-9] /var/log/*-????????
rm -rf /var/lib/cloud/instances/*
rm -f /root/.ssh/authorized_keys /etc/ssh/*key*
touch /etc/ssh/revoked_keys
chmod 600 /etc/ssh/revoked_keys
# Securely erase the unused portion of the filesystem
GREEN='\033[0;32m'
NC='\033[0m'
printf "\n${GREEN}Writing zeros to the remaining disk space to securely
erase the unused portion of the file system.
Depending on your disk size this may take several minutes.
The secure erase will complete successfully when you see:${NC}
dd: writing to '/zerofile': No space left on device\n
Beginning secure erase now\n"
dd if=/dev/zero of=/zerofile &
PID=$!
while [ -d /proc/$PID ]
do
printf "."
sleep 5
done
sync; rm /zerofile; sync
cat /dev/null > /var/log/lastlog; cat /dev/null > /var/log/wtmp

View file

@@ -0,0 +1,682 @@
#!/bin/bash
# DigitalOcean Marketplace Image Validation Tool
# © 2021 DigitalOcean LLC.
# This code is licensed under Apache 2.0 license (see LICENSE.md for details)
VERSION="v. 1.6"
RUNDATE=$( date )
# Script should be run with SUDO
if [ "$EUID" -ne 0 ]
then echo "[Error] - This script must be run with sudo or as the root user."
exit 1
fi
STATUS=0
PASS=0
WARN=0
FAIL=0
# $1 == command to check for
# returns: 0 == true, 1 == false
cmdExists() {
if command -v "$1" > /dev/null 2>&1; then
return 0
else
return 1
fi
}
function getDistro {
if [ -f /etc/os-release ]; then
# freedesktop.org and systemd
. /etc/os-release
OS=$NAME
VER=$VERSION_ID
elif type lsb_release >/dev/null 2>&1; then
# linuxbase.org
OS=$(lsb_release -si)
VER=$(lsb_release -sr)
elif [ -f /etc/lsb-release ]; then
# For some versions of Debian/Ubuntu without lsb_release command
. /etc/lsb-release
OS=$DISTRIB_ID
VER=$DISTRIB_RELEASE
elif [ -f /etc/debian_version ]; then
# Older Debian/Ubuntu/etc.
OS=Debian
VER=$(cat /etc/debian_version)
elif [ -f /etc/SuSe-release ]; then
# Older SuSE/etc.
:
elif [ -f /etc/redhat-release ]; then
# Older Red Hat, CentOS, etc.
VER=$( cat /etc/redhat-release | cut -d" " -f3 | cut -d "." -f1)
d=$( cat /etc/redhat-release | cut -d" " -f1 | cut -d "." -f1)
if [[ $d == "CentOS" ]]; then
OS="CentOS Linux"
fi
else
# Fall back to uname, e.g. "Linux <version>", also works for BSD, etc.
OS=$(uname -s)
VER=$(uname -r)
fi
}
function loadPasswords {
SHADOW=$(cat /etc/shadow)
}
function checkAgent {
# Check for the presence of the do-agent in the filesystem
if [ -d /var/opt/digitalocean/do-agent ];then
echo -en "\e[41m[FAIL]\e[0m DigitalOcean Monitoring Agent detected.\n"
((FAIL++))
STATUS=2
if [[ $OS == "CentOS Linux" ]]; then
echo "The agent can be removed with 'sudo yum remove do-agent' "
elif [[ $OS == "Ubuntu" ]]; then
echo "The agent can be removed with 'sudo apt-get purge do-agent' "
fi
else
echo -en "\e[32m[PASS]\e[0m DigitalOcean Monitoring agent was not found\n"
((PASS++))
fi
}
function checkLogs {
cp_ignore="/var/log/cpanel-install.log"
echo -en "\nChecking for log files in /var/log\n\n"
# Check if there are log archives or log files that have not been recently cleared.
for f in /var/log/*-????????; do
[[ -e $f ]] || break
if [ $f != $cp_ignore ]; then
echo -en "\e[93m[WARN]\e[0m Log archive ${f} found\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
done
for f in /var/log/*.[0-9];do
[[ -e $f ]] || break
echo -en "\e[93m[WARN]\e[0m Log archive ${f} found\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
done
for f in /var/log/*.log; do
[[ -e $f ]] || break
if [[ "${f}" = '/var/log/lfd.log' && "$( cat "${f}" | egrep -v '/var/log/messages has been reset| Watching /var/log/messages' | wc -c)" -gt 50 ]]; then
if [ $f != $cp_ignore ]; then
echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
elif [[ "${f}" != '/var/log/lfd.log' && "$( cat "${f}" | wc -c)" -gt 50 ]]; then
if [ $f != $cp_ignore ]; then
echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
fi
done
}
function checkTMP {
# Check the /tmp directory to ensure it is empty. Warn on any files found.
return 1
}
function checkRoot {
user="root"
uhome="/root"
for usr in $SHADOW
do
IFS=':' read -r -a u <<< "$usr"
if [[ "${u[0]}" == "${user}" ]]; then
if [[ ${u[1]} == "!" ]] || [[ ${u[1]} == "!!" ]] || [[ ${u[1]} == "*" ]]; then
echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account.\n"
((FAIL++))
STATUS=2
fi
fi
done
if [ -d ${uhome}/ ]; then
if [ -d ${uhome}/.ssh/ ]; then
if ls ${uhome}/.ssh/*> /dev/null 2>&1; then
for key in ${uhome}/.ssh/*
do
if [ "${key}" == "${uhome}/.ssh/authorized_keys" ]; then
if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then
echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a populated authorized_keys file in \e[93m${key}\e[0m\n"
akey=$(cat ${key})
echo "File Contents:"
echo $akey
echo "--------------"
((FAIL++))
STATUS=2
fi
elif [ "${key}" == "${uhome}/.ssh/id_rsa" ]; then
if [ "$( cat "${key}" | wc -c)" -gt 0 ]; then
echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a private key file in \e[93m${key}\e[0m\n"
akey=$(cat ${key})
echo "File Contents:"
echo $akey
echo "--------------"
((FAIL++))
STATUS=2
else
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has empty private key file in \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
elif [ "${key}" != "${uhome}/.ssh/known_hosts" ]; then
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a file in their .ssh directory at \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
else
if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a populated known_hosts file in \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
fi
done
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m has no SSH keys present\n"
fi
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have an .ssh directory\n"
fi
if [ -f /root/.bash_history ];then
BH_S=$( cat /root/.bash_history | wc -c)
if [[ $BH_S -lt 200 ]]; then
echo -en "\e[32m[PASS]\e[0m ${user}'s Bash History appears to have been cleared\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m ${user}'s Bash History should be cleared to prevent sensitive information from leaking\n"
((FAIL++))
STATUS=2
fi
return 1;
else
echo -en "\e[32m[PASS]\e[0m The Root User's Bash History is not present\n"
((PASS++))
fi
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have a directory in /home\n"
fi
echo -en "\n\n"
return 1
}
function checkUsers {
# Check each user-created account
for user in $(awk -F: '$3 >= 1000 && $1 != "nobody" {print $1}' /etc/passwd;)
do
# Skip some other non-user system accounts
if [[ $user == "centos" ]]; then
:
elif [[ $user == "nfsnobody" ]]; then
:
else
echo -en "\nChecking user: ${user}...\n"
for usr in $SHADOW
do
IFS=':' read -r -a u <<< "$usr"
if [[ "${u[0]}" == "${user}" ]]; then
if [[ ${u[1]} == "!" ]] || [[ ${u[1]} == "!!" ]] || [[ ${u[1]} == "*" ]]; then
echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account. Only system users are allowed on the image.\n"
((FAIL++))
STATUS=2
fi
fi
done
#echo "User Found: ${user}"
uhome="/home/${user}"
if [ -d "${uhome}/" ]; then
if [ -d "${uhome}/.ssh/" ]; then
if ls "${uhome}/.ssh/*"> /dev/null 2>&1; then
for key in ${uhome}/.ssh/*
do
if [ "${key}" == "${uhome}/.ssh/authorized_keys" ]; then
if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then
echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a populated authorized_keys file in \e[93m${key}\e[0m\n"
akey=$(cat ${key})
echo "File Contents:"
echo $akey
echo "--------------"
((FAIL++))
STATUS=2
fi
elif [ "${key}" == "${uhome}/.ssh/id_rsa" ]; then
if [ "$( cat "${key}" | wc -c)" -gt 0 ]; then
echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a private key file in \e[93m${key}\e[0m\n"
akey=$(cat ${key})
echo "File Contents:"
echo $akey
echo "--------------"
((FAIL++))
STATUS=2
else
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has empty private key file in \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
elif [ "${key}" != "${uhome}/.ssh/known_hosts" ]; then
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a file in their .ssh directory named \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
else
if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a known_hosts file in \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
fi
done
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m has no SSH keys present\n"
fi
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have an .ssh directory\n"
fi
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have a directory in /home\n"
fi
# Check for an uncleared .bash_history for this user
if [ -f "${uhome}/.bash_history" ]; then
BH_S=$( cat "${uhome}/.bash_history" | wc -c )
if [[ $BH_S -lt 200 ]]; then
echo -en "\e[32m[PASS]\e[0m ${user}'s Bash History appears to have been cleared\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m ${user}'s Bash History should be cleared to prevent sensitive information from leaking\n"
((FAIL++))
STATUS=2
fi
echo -en "\n\n"
fi
fi
done
}
function checkFirewall {
if [[ $OS == "Ubuntu" ]]; then
fw="ufw"
ufwa=$(ufw status |head -1| sed -e "s/^Status:\ //")
if [[ $ufwa == "active" ]]; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
elif [[ $OS == "CentOS Linux" ]]; then
if [ -f /usr/lib/systemd/system/csf.service ]; then
fw="csf"
if systemctl status $fw >/dev/null 2>&1; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
elif cmdExists "firewall-cmd"; then
if systemctl is-active firewalld >/dev/null 2>&1; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
else
fw="firewalld"
if systemctl is-active firewalld >/dev/null 2>&1; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
fi
elif [[ "$OS" =~ Debian.* ]]; then
# user could be using a number of different services for managing their firewall
# we will check some of the most common
if cmdExists 'ufw'; then
fw="ufw"
ufwa=$(ufw status |head -1| sed -e "s/^Status:\ //")
if [[ $ufwa == "active" ]]; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
elif cmdExists "firewall-cmd"; then
fw="firewalld"
if systemctl is-active --quiet $fw; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
else
# user could be using vanilla iptables, check if kernel module is loaded
fw="iptables"
if lsmod | grep -q '^ip_tables' 2>/dev/null; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
fi
fi
}
function checkUpdates {
if [[ $OS == "Ubuntu" ]] || [[ "$OS" =~ Debian.* ]]; then
# Ensure /tmp exists and has the proper permissions before
# checking for security updates
# https://github.com/digitalocean/marketplace-partners/issues/94
if [[ ! -d /tmp ]]; then
mkdir /tmp
fi
chmod 1777 /tmp
echo -en "\nUpdating apt package database to check for security updates, this may take a minute...\n\n"
apt-get -y update > /dev/null
uc=$(apt-get --just-print upgrade | grep -i "security" | wc -l)
if [[ $uc -gt 0 ]]; then
update_count=$(( ${uc} / 2 ))
else
update_count=0
fi
if [[ $update_count -gt 0 ]]; then
echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n"
echo -en "\n"
echo -en "Here is a list of the security updates that are not installed:\n"
sleep 2
apt-get --just-print upgrade | grep -i security | awk '{print $2}' | awk '!seen[$0]++'
echo -en "\n"
((FAIL++))
STATUS=2
else
echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n\n"
fi
elif [[ $OS == "CentOS Linux" ]]; then
echo -en "\nChecking for available security updates, this may take a minute...\n\n"
update_count=$(yum check-update --security --quiet | wc -l)
if [[ $update_count -gt 0 ]]; then
echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n"
((FAIL++))
STATUS=2
else
echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n"
((PASS++))
fi
else
echo "Error encountered"
exit 1
fi
return 1;
}
function checkCloudInit {
if hash cloud-init 2>/dev/null; then
CI="\e[32m[PASS]\e[0m Cloud-init is installed.\n"
((PASS++))
else
CI="\e[41m[FAIL]\e[0m No valid verison of cloud-init was found.\n"
((FAIL++))
STATUS=2
fi
return 1
}
function checkMongoDB {
# Check if MongoDB is installed
# If it is, verify the version is allowed (non-SSPL)
if [[ $OS == "Ubuntu" ]] || [[ "$OS" =~ Debian.* ]]; then
if [[ -f "/usr/bin/mongod" ]]; then
version=$(/usr/bin/mongod --version --quiet | grep "db version" | sed -e "s/^db\ version\ v//")
if version_gt $version 4.0.0; then
if version_gt $version 4.0.3; then
echo -en "\e[41m[FAIL]\e[0m An SSPL version of MongoDB is present, ${version}"
((FAIL++))
STATUS=2
else
echo -en "\e[32m[PASS]\e[0m The version of MongoDB installed, ${version} is not under the SSPL"
((PASS++))
fi
else
if version_gt $version 3.6.8; then
echo -en "\e[41m[FAIL]\e[0m An SSPL version of MongoDB is present, ${version}"
((FAIL++))
STATUS=2
else
echo -en "\e[32m[PASS]\e[0m The version of MongoDB installed, ${version} is not under the SSPL"
((PASS++))
fi
fi
else
echo -en "\e[32m[PASS]\e[0m MongoDB is not installed"
((PASS++))
fi
elif [[ $OS == "CentOS Linux" ]]; then
if [[ -f "/usr/bin/mongod" ]]; then
version=$(/usr/bin/mongod --version --quiet | grep "db version" | sed -e "s/^db\ version\ v//")
if version_gt $version 4.0.0; then
if version_gt $version 4.0.3; then
echo -en "\e[41m[FAIL]\e[0m An SSPL version of MongoDB is present"
((FAIL++))
STATUS=2
else
echo -en "\e[32m[PASS]\e[0m The version of MongoDB installed is not under the SSPL"
((PASS++))
fi
else
if version_gt $version 3.6.8; then
echo -en "\e[41m[FAIL]\e[0m An SSPL version of MongoDB is present"
((FAIL++))
STATUS=2
else
echo -en "\e[32m[PASS]\e[0m The version of MongoDB installed is not under the SSPL"
((PASS++))
fi
fi
else
echo -en "\e[32m[PASS]\e[0m MongoDB is not installed"
((PASS++))
fi
else
echo "ERROR: Unable to identify distribution"
((FAIL++))
STATUS=2
return 1
fi
}
function version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
clear
echo "DigitalOcean Marketplace Image Validation Tool ${VERSION}"
echo "Executed on: ${RUNDATE}"
echo "Checking local system for Marketplace compatibility..."
getDistro
echo -en "\n\e[1mDistribution:\e[0m ${OS}\n"
echo -en "\e[1mVersion:\e[0m ${VER}\n\n"
ost=0
osv=0
if [[ $OS == "Ubuntu" ]]; then
ost=1
if [[ $VER == "20.04" ]]; then
osv=1
elif [[ $VER == "18.04" ]]; then
osv=1
elif [[ $VER == "16.04" ]]; then
osv=1
else
osv=0
fi
elif [[ "$OS" =~ Debian.* ]]; then
ost=1
case "$VER" in
9)
osv=1
;;
10)
osv=1
;;
*)
osv=2
;;
esac
elif [[ $OS == "CentOS Linux" ]]; then
ost=1
if [[ $VER == "8" ]]; then
osv=1
elif [[ $VER == "7" ]]; then
osv=1
elif [[ $VER == "6" ]]; then
osv=1
else
osv=2
fi
else
ost=0
fi
if [[ $ost == 1 ]]; then
echo -en "\e[32m[PASS]\e[0m Supported Operating System Detected: ${OS}\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m ${OS} is not a supported Operating System\n"
((FAIL++))
STATUS=2
fi
if [[ $osv == 1 ]]; then
echo -en "\e[32m[PASS]\e[0m Supported Release Detected: ${VER}\n"
((PASS++))
elif [[ $ost == 1 ]]; then
echo -en "\e[41m[FAIL]\e[0m ${OS} ${VER} is not a supported Operating System Version\n"
((FAIL++))
STATUS=2
else
echo "Exiting..."
exit 1
fi
checkCloudInit
echo -en "${CI}"
checkFirewall
echo -en "${FW_VER}"
checkUpdates
loadPasswords
checkLogs
echo -en "\n\nChecking all user-created accounts...\n"
checkUsers
echo -en "\n\nChecking the root account...\n"
checkRoot
checkAgent
checkMongoDB
# Summary
echo -en "\n\n---------------------------------------------------------------------------------------------------\n"
if [[ $STATUS == 0 ]]; then
echo -en "Scan Complete.\n\e[32mAll Tests Passed!\e[0m\n"
elif [[ $STATUS == 1 ]]; then
echo -en "Scan Complete. \n\e[93mSome non-critical tests failed. Please review these items.\e[0m\e[0m\n"
else
echo -en "Scan Complete. \n\e[41mOne or more tests failed. Please review these items and re-test.\e[0m\n"
fi
echo "---------------------------------------------------------------------------------------------------"
echo -en "\e[1m${PASS} Tests PASSED\e[0m\n"
echo -en "\e[1m${WARN} WARNINGS\e[0m\n"
echo -en "\e[1m${FAIL} Tests FAILED\e[0m\n"
echo -en "---------------------------------------------------------------------------------------------------\n"
if [[ $STATUS == 0 ]]; then
echo -en "We did not detect any issues with this image. Please be sure to manually ensure that all software installed on the base system is functional, secure and properly configured (or facilities for configuration on first-boot have been created).\n\n"
exit 0
elif [[ $STATUS == 1 ]]; then
echo -en "Please review all [WARN] items above and ensure they are intended or resolved. If you do not have a specific requirement, we recommend resolving these items before image submission\n\n"
exit 0
else
echo -en "Some critical tests failed. These items must be resolved and this scan re-run before you submit your image to the DigitalOcean Marketplace.\n\n"
exit 1
fi
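A sketch of how this tool is typically run during image preparation; as the code above shows, it must run as root and exits with a non-zero status only when critical checks fail:
```bash
sudo bash 99-img-check.sh
echo "exit code: $?"   # 0: passed (warnings possible), 1: critical failures found
```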

View file

@@ -0,0 +1,73 @@
{
"variables": {
"do_api_token": "{{env `DIGITALOCEAN_API_TOKEN`}}",
"image_name": "vm-single-20-04-snapshot-{{timestamp}}",
"apt_packages": "curl git wget software-properties-common net-tools",
"application_name": "vm-single",
"application_version": "{{ env `VM_VERSION` }}"
},
"sensitive-variables": ["do_api_token"],
"builders": [
{
"type": "digitalocean",
"api_token": "{{user `do_api_token`}}",
"image": "ubuntu-20-04-x64",
"region": "nyc3",
"size": "s-1vcpu-1gb",
"ssh_username": "root",
"snapshot_name": "{{user `image_name`}}"
}
],
"provisioners": [
{
"type": "shell",
"inline": [
"cloud-init status --wait"
]
},
{
"type": "file",
"source": "files/etc/",
"destination": "/etc/"
},
{
"type": "file",
"source": "files/var/",
"destination": "/var/"
},
{
"type": "shell",
"environment_vars": [
"DEBIAN_FRONTEND=noninteractive",
"LC_ALL=C",
"LANG=en_US.UTF-8",
"LC_CTYPE=en_US.UTF-8"
],
"inline": [
"apt -qqy update",
"apt -qqy -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' install {{user `apt_packages`}}",
"apt-get -qqy clean"
]
},
{
"type": "shell",
"environment_vars": [
"application_name={{user `application_name`}}",
"application_version={{user `application_version`}}",
"DEBIAN_FRONTEND=noninteractive",
"LC_ALL=C",
"LANG=en_US.UTF-8",
"LC_CTYPE=en_US.UTF-8"
],
"scripts": [
"scripts/01-setup.sh",
"scripts/02-firewall.sh",
"scripts/04-install-victoriametrics.sh",
"scripts/89-cleanup-logs.sh",
"scripts/90-cleanup.sh",
"scripts/99-img-check.sh"
]
}
]
}

View file

@@ -0,0 +1,80 @@
variable "token" {
type = string
default = "${env("DIGITALOCEAN_API_TOKEN")}"
description = "DigitalOcean API token used to create droplets."
}
variable "image_id" {
type = string
default = "ubuntu-20-04-x64"
description = "DigitalOcean linux image ID."
}
variable "victoriametrics_version" {
type = string
default = "${env("VM_VERSION")}"
description = "Version number of the desired VictoriaMetrics binary."
}
variable "image_name" {
type = string
default = "victoriametrics-snapshot-{{timestamp}}"
description = "Name of the snapshot created on DigitalOcean."
}
source "digitalocean" "default" {
api_token = "${var.token}"
image = "${var.image_id}"
region = "nyc3"
size = "s-1vcpu-1gb"
snapshot_name = "${var.image_name}"
ssh_username = "root"
}
build {
sources = ["source.digitalocean.default"]
provisioner "file" {
destination = "/etc/"
source = "files/etc/"
}
provisioner "file" {
destination = "/var/"
source = "files/var/"
}
# Setup instance configuration
provisioner "shell" {
environment_vars = [
"DEBIAN_FRONTEND=noninteractive"
]
scripts = [
"scripts/01-setup.sh",
"scripts/02-firewall.sh",
]
}
# Install VictoriaMetrics
provisioner "shell" {
environment_vars = [
"VM_VER=${var.victoriametrics_version}",
"DEBIAN_FRONTEND=noninteractive"
]
scripts = [
"scripts/04-install-victoriametrics.sh",
]
}
# Cleanup and validate instance
provisioner "shell" {
environment_vars = [
"DEBIAN_FRONTEND=noninteractive"
]
scripts = [
"scripts/89-cleanup-logs.sh",
"scripts/90-cleanup.sh",
"scripts/99-img-check.sh"
]
}
}
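A sketch of validating and building the HCL variant of the template; the file name is an assumption and may differ in the repository:
```bash
export DIGITALOCEAN_API_TOKEN="your_token_here"
export VM_VERSION="v1.71.0"        # release to bake into the snapshot
packer validate template.pkr.hcl   # hypothetical file name
packer build template.pkr.hcl
```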

View file

@@ -62,4 +62,4 @@ There are the following channels for providing technical support for VictoriaMetrics:
* [Slack channel](https://slack.victoriametrics.com/)
* [Telegram channel](https://t.me/VictoriaMetrics_en)
We also provide [Enterprise support](https://victoriametrics.com/products/enterprise/).

View file

@@ -6,10 +6,22 @@ sort: 15
## tip
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): log an error message when the remote storage returns 400 or 409 HTTP errors. This should simplify detection and debugging of this case. See [this issue](vmagent_remotewrite_packets_dropped_total).
* FEATURE: [vmrestore](https://docs.victoriametrics.com/vmrestore.html): store `restore-in-progress` file in `-dst` directory while `vmrestore` is running. This file is automatically deleted when `vmrestore` is successfully finished. This helps detect incompletely restored data on VictoriaMetrics start. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1958).
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): print the last sample timestamp when the data migration is interrupted either by the user or by an error. This helps continue the data migration from the point of interruption. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1236).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): make sure that `vmagent` replicas scrape the same targets at different time offsets when [replication is enabled in vmagent clustering mode](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets). This guarantees that the [deduplication](https://docs.victoriametrics.com/#deduplication) consistently leaves samples from the same `vmagent` replica.
* BUGFIX: return the proper response stub from `/api/v1/query_exemplars` handler, which is needed for Grafana v8+. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1999).
* BUGFIX: [vmctl](https://docs.victoriametrics.com/vmctl.html): fix a few edge cases and improve migration speed for OpenTSDB importer. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2019).
* BUGFIX: fix possible data race when searching for time series matching `{key=~"value|"}` filter over a time range covering multiple days. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2032). Thanks to @waldoweng for the provided fix.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not send staleness markers on graceful shutdown. This follows Prometheus behavior. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2013#issuecomment-1006994079).
## [v1.71.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.71.0)
Released at 20-12-2021
* FEATURE: [VictoriaMetrics enterprise](https://victoriametrics.com/products/enterprise/): add multi-level downsampling support. See [these docs](https://docs.victoriametrics.com/#downsampling) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/36).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add ability to analyze the correlation between two queries on a single graph. Just click the `+Query` button, enter the second query in the newly appeared input field and press `Ctrl+Enter`. Results for both queries should be displayed simultaneously on the same graph. Every query has its own vertical scale, which is displayed on the left and the right side of the graph. Lines for the second query are dashed. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1916).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add ability to override the interval between returned datapoints. By default it is automatically calculated depending on the selected time range and horizontal resolution of the graph. Now it is possible to override it with custom values. This may be useful during data exploration and debugging.
* FEATURE: accept optional `extra_filters[]=series_selector` query args at Prometheus query APIs additionally to `extra_label` query args. This allows enforcing additional filters for all the Prometheus query APIs by using [vmgateway](https://docs.victoriametrics.com/vmgateway.html) or [vmauth](https://docs.victoriametrics.com/vmauth.html). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1863).
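A sketch of the new `extra_filters[]` arg on the query API (host and label values are illustrative):
```bash
# Enforce an additional series selector on a Prometheus query API call;
# -G with --data-urlencode keeps the selector URL-safe.
curl -sG 'http://localhost:8428/api/v1/query' \
  --data-urlencode 'query=up' \
  --data-urlencode 'extra_filters[]={env="prod"}'
```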
@@ -34,6 +46,8 @@ sort: 15
## [v1.70.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.70.0)
Released at 02-12-2021
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add ability to pass arbitrary query args to `-datasource.url` on a per-group basis via `params` option. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1892).
* FEATURE: add `now()` function to MetricsQL. This function returns the current timestamp in seconds. See [these docs](https://docs.victoriametrics.com/MetricsQL.html#now).
* FEATURE: vmauth: allow using optional `name` field in configs. This field is then used as `username` label value for `vmauth_user_requests_total` metric. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1805).
@@ -50,7 +64,7 @@ sort: 15
* FEATURE: expose `/-/healthy` and `/-/ready` endpoints as Prometheus does. This is needed for improving integration with third-party solutions, which rely on these endpoints. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1833).
* BUGFIX: vmagent: prevent from scraping duplicate targets if `-promscrape.dropOriginalLabels` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1830). Thanks to @guidao for the fix.
* BUGFIX: vmstorage [enterprise](https://victoriametrics.com/products/enterprise/): added missing `vm_tenant_used_tenant_bytes` metric, which shows the approximate per-tenant disk usage. See [these docs](https://docs.victoriametrics.com/PerTenantStatistic.html) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1605).
* BUGFIX: vmauth: properly take into account the value passed to `-maxIdleConnsPerBackend` command-line flag. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1300).
* BUGFIX: vmagent: fix [reading data from Kafka](https://docs.victoriametrics.com/vmagent.html#reading-metrics-from-kafka).
* BUGFIX: vmalert: fix [replay mode](https://docs.victoriametrics.com/vmalert.html#rules-backfilling) in enterprise version.
@@ -62,6 +76,8 @@ sort: 15
## [v1.69.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.69.0)
Released at 08-11-2021
* FEATURE: vmalert: allow groups with empty rules list like Prometheus does. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1742).
* FEATURE: vmalert: allow groups with default `tenant` in `-clusterMode`. Default `tenant` values can be specified via `-defaultTenant.prometheus` and `-defaultTenant.graphite`. See [these docs](https://docs.victoriametrics.com/vmalert.html#multitenancy).
* FEATURE: vmagent: add `collapse` and `expand` buttons per each group of targets with the same `job_name` at `http://vmagent:8429/targets` page.
@@ -83,6 +99,8 @@ sort: 15
## [v1.68.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.68.0)
Released at 22-10-2021
* FEATURE: vmagent: expose `-promscrape.config` contents at `/config` page as Prometheus does. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1695).
* FEATURE: vmagent: add `show original labels` button per each scrape target displayed at `http://vmagent:8429/targets` page. This should improve debuggability for service discovery and relabeling issues similar to [this one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1664). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1698).
* FEATURE: vmagent: shard targets among cluster nodes after the relabeling is applied. This should guarantee that targets with the same set of labels go to the same `vmagent` node in the cluster. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1687).
@@ -110,8 +128,10 @@ sort: 15
## [v1.67.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.67.0)
Released at 08-10-2021
* FEATURE: add ability to accept metrics from [DataDog agent](https://docs.datadoghq.com/agent/) and [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent). This option simplifies the migration path from DataDog to VictoriaMetrics. See also [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/206).
* FEATURE: vmagent [enterprise](https://victoriametrics.com/products/enterprise/): add support for data reading and writing from/to [Apache Kafka](https://kafka.apache.org/). See [these docs](https://docs.victoriametrics.com/vmagent.html#kafka-integration).
* FEATURE: vmui: switch to [μPlot](https://github.com/leeoniya/uPlot) and add ability to naturally scroll and zoom graphs. See [these docs](https://docs.victoriametrics.com/#vmui). Thanks to @Loori-R.
* FEATURE: vmstorage: stop accepting new data if `-storageDataPath` directory contains less than `-storage.minFreeDiskSpaceBytes` of free space. This should prevent from `out of disk space` crashes. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/269).
* FEATURE: calculate quantiles in the same way as Prometheus does in such functions as [quantile_over_time](https://docs.victoriametrics.com/MetricsQL.html#quantile_over_time) and [quantile](https://docs.victoriametrics.com/MetricsQL.html#quantile). Previously results from VictoriaMetrics could be slightly different than results from Prometheus. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1625) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1612) issues.
@@ -126,6 +146,8 @@ sort: 15
## [v1.66.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.66.2)
Released at 23-09-2021
* FEATURE: vmagent: add `vm_promscrape_max_scrape_size_exceeded_errors_total` metric for counting of the failed scrapes due to the exceeded response size (the response size limit can be configured via `-promscrape.maxScrapeSize` command-line flag). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1639).
* BUGFIX: vmalert: properly reload rule groups if only the `interval` config option is changed. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1641).
@@ -134,6 +156,8 @@ sort: 15
## [v1.66.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.66.1)
Released at 22-09-2021
* FEATURE: add `-cluster` and/or `-enterprise` suffixes to `short_version` label at `vm_app_version` metric exposed at `/metrics` page of every VictoriaMetrics component. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1635).
* BUGFIX: vmselect: fix accessing [Graphite APIs](https://docs.victoriametrics.com/#graphite-api-usage). The access has been broken in v1.66.0, because `/graphite/*` path prefix accidentally clashed with `/graph*` path prefix used for VictoriaMetrics UI (aka `vmui`).
@@ -142,6 +166,8 @@ sort: 15
## [v1.66.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.66.0)
Released at 20-09-2021
* FEATURE: vmalert: add web UI with the list of alerting groups, alerts and alert statuses. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1602).
* FEATURE: vmalert: add `-rule.maxResolveDuration` command-line flag, which could be used for limiting the auto-resolve duration for the alerting rule. By default it is limited to 3x evaluation interval. This could be too high for big evaluation intervals. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1586).
* FEATURE: vmalert: add support for Bearer token authorization for `-datasource.url`, `-remoteRead.url` and `-remoteWrite.url`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1608).
@@ -164,7 +190,7 @@ sort: 15
* FEATURE: add [outliers_mad(tolerance, q)](https://docs.victoriametrics.com/MetricsQL.html#outliers_mad) function to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). It returns time series with peaks outside the [Median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation) multiplied by `tolerance`.
* FEATURE: add `histogram_quantiles("phiLabel", phi1, ..., phiN, buckets)` function to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). It calculates the given `phi*`-quantiles over the given `buckets` and returns time series per each quantile with the corresponding `{phiLabel="phi*"}` label.
* FEATURE: add `quantiles_over_time("phiLabel", phi1, ..., phiN, series_selector[d])` function to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). It calculates the given `phi*`-quantiles over raw samples selected by `series_selector` on the given lookbehind window `d`. It returns time series per each quantile with the corresponding `{phiLabel="phi*"}` label. See the sketch after this list.
* FEATURE: [enterprise](https://victoriametrics.com/products/enterprise/): do not ask for `-eula` flag if `-version` flag is passed to enterprise app. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1621).
* BUGFIX: properly handle queries with multiple filters matching empty labels such as `metric{label1=~"foo|",label2="bar|"}`. This filter must match the following series: `metric`, `metric{label1="foo"}`, `metric{label2="bar"}` and `metric{label1="foo",label2="bar"}`. Previously it was matching only `metric{label1="foo",label2="bar"}`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601).
* BUGFIX: vmselect: reset connection timeouts after each request to `vmstorage`. This should prevent from `cannot read data in 0.000 seconds: unexpected EOF` warning in logs. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1562). Thanks to @mxlxm.
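A sketch of the `quantiles_over_time` function mentioned in the list above (the metric name is illustrative):
```bash
# Median and p99 of raw samples over the trailing 5 minutes; one series
# per quantile, labeled {phi="0.5"} and {phi="0.99"}.
curl -sG 'http://localhost:8428/api/v1/query' \
  --data-urlencode 'query=quantiles_over_time("phi", 0.5, 0.99, vm_request_duration_seconds[5m])'
```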
@@ -178,6 +204,8 @@ sort: 15
## [v1.65.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.65.0)
Released at 01-09-2021
* FEATURE: vmagent: add ability to read scrape configs from multiple files specified in `scrape_config_files` section. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1559).
* FEATURE: vmagent: reduce memory usage and CPU usage when [Prometheus staleness tracking](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) is enabled for metrics exported from the deleted or disappeared scrape targets.
* FEATURE: vmagent: add the ability to limit the number of unique time series scraped per each target. This can be done either globally via `-promscrape.seriesLimitPerTarget` command-line option or on per-target basis via `series_limit` option at `scrape_config` section. See [the updated docs on cardinality limiter](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1561).
@@ -201,6 +229,8 @@ sort: 15
## [v1.64.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.64.1)
Released at 19-08-2021
* FEATURE: add `bitmap_and(q, mask)`, `bitmap_or(q, mask)` and `bitmap_xor(q, mask)` functions to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). These functions allow performing bitwise operations over data points in time series; see the sketch after this list. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1541).
* FEATURE: vmalert: add `-remoteWrite.disablePathAppend` command-line flag, which can be used when custom `-remoteWrite.url` must be specified. For example, `./vmalert -disablePathAppend -remoteWrite.url='http://foo.bar/a/b/c?d=e'` would write data to `http://foo.bar/a/b/c?d=e` instead of `http://foo.bar/a/b/c?d=e/api/v1/write`. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1536).
* FEATURE: vmagent: add `-promscrape.noStaleMarkers` command-line flag for disabling sending Prometheus stale markers for metrics from disappeared scrape targets. This option may be used for reducing memory usage when scraping big number of metrics with big number of labels and when stale markers aren't needed.
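A sketch of the bitmap functions from the list above:
```bash
# Keep only the low byte of every sample via bitmap_and()
curl -sG 'http://localhost:8428/api/v1/query' \
  --data-urlencode 'query=bitmap_and(vm_app_uptime_seconds, 255)'
```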
@ -214,6 +244,8 @@ sort: 15
## [v1.64.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.64.0) ## [v1.64.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.64.0)
Released at 15-08-2021
* FEATURE: add support for Prometheus staleness markers. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1526). * FEATURE: add support for Prometheus staleness markers. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1526).
* FEATURE: vmagent: automatically generate Prometheus staleness markers for the scraped metrics when scrape targets disappear in the same way as Prometheus does. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1526). * FEATURE: vmagent: automatically generate Prometheus staleness markers for the scraped metrics when scrape targets disappear in the same way as Prometheus does. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1526).
* FEATURE: add `present_over_time(m[d])` function, which returns 1 if `m` has at least a single sample over the previous duration `d`. This function has also been added to [Prometheus 2.29](https://github.com/prometheus/prometheus/releases/tag/v2.29.0). * FEATURE: add `present_over_time(m[d])` function, which returns 1 if `m` has at least a single sample over the previous duration `d`. This function has also been added to [Prometheus 2.29](https://github.com/prometheus/prometheus/releases/tag/v2.29.0).
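For illustration, a hypothetical instant query against a local single-node instance; `up` stands in for any metric:

```bash
# Returns 1 for each series of `up` with at least one sample during the last 10 minutes.
curl 'http://localhost:8428/api/v1/query' --data-urlencode 'query=present_over_time(up[10m])'
```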
@ -239,6 +271,8 @@ sort: 15
## [v1.63.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.63.0) ## [v1.63.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.63.0)
Released at 15-07-2021
* FEATURE: reduce memory usage by up to 30% on production workloads. * FEATURE: reduce memory usage by up to 30% on production workloads.
* FEATURE: vmselect: embed [vmui](https://github.com/VictoriaMetrics/vmui) into a single-node VictoriaMetrics and into `vmselect` component of cluster version. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1413). The web interface is available at the following paths: * FEATURE: vmselect: embed [vmui](https://github.com/VictoriaMetrics/vmui) into a single-node VictoriaMetrics and into `vmselect` component of cluster version. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1413). The web interface is available at the following paths:
* `/vmui/` for a single-node VictoriaMetrics * `/vmui/` for a single-node VictoriaMetrics
@ -259,6 +293,8 @@ sort: 15
## [v1.62.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.62.0) ## [v1.62.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.62.0)
Released at 25-06-2021
* FEATURE: vmagent: add service discovery for Docker (aka [docker_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#docker_sd_config)). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1402). * FEATURE: vmagent: add service discovery for Docker (aka [docker_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#docker_sd_config)). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1402).
* FEATURE: vmagent: add service discovery for DigitalOcean (aka [digitalocean_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config)). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1367). * FEATURE: vmagent: add service discovery for DigitalOcean (aka [digitalocean_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config)). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1367).
* FEATURE: vmagent: change the default value for `-remoteWrite.queues` from 4 to `2 * numCPUs`. This should reduce scrape duration for highly loaded vmagent, which scrapes tens of thousands of targets. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1385). * FEATURE: vmagent: change the default value for `-remoteWrite.queues` from 4 to `2 * numCPUs`. This should reduce scrape duration for highly loaded vmagent, which scrapes tens of thousands of targets. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1385).
@ -281,12 +317,16 @@ sort: 15
## [v1.61.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.61.1) ## [v1.61.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.61.1)
Released at 11-06-2021
* BUGFIX: vmalert: fix recording rules, which were broken in v1.61.0. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1369). * BUGFIX: vmalert: fix recording rules, which were broken in v1.61.0. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1369).
* BUGFIX: reset the on-disk cache for mapping from the full metric name to an internal metric id (e.g. `metric_name{labels} -> internal_metric_id`) after deleting metrics via [delete API](https://docs.victoriametrics.com/#how-to-delete-time-series). This should prevent possible inconsistent state after unclean shutdown. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1347). * BUGFIX: reset the on-disk cache for mapping from the full metric name to an internal metric id (e.g. `metric_name{labels} -> internal_metric_id`) after deleting metrics via [delete API](https://docs.victoriametrics.com/#how-to-delete-time-series). This should prevent possible inconsistent state after unclean shutdown. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1347).
## [v1.61.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.61.0) ## [v1.61.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.61.0)
Released at 09-06-2021
* FEATURE: vmalert: add support for backfilling (aka replay) of recording and alerting rules. See [these docs](https://docs.victoriametrics.com/vmalert.html#rules-backfilling) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/836). * FEATURE: vmalert: add support for backfilling (aka replay) of recording and alerting rules. See [these docs](https://docs.victoriametrics.com/vmalert.html#rules-backfilling) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/836).
* FEATURE: vmalert: add a command-line flag `-rule.configCheckInterval` for automatic re-reading of `-rule` files without the need to send SIGHUP signal. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/512). * FEATURE: vmalert: add a command-line flag `-rule.configCheckInterval` for automatic re-reading of `-rule` files without the need to send SIGHUP signal. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/512).
* FEATURE: vmagent: respect the `sample_limit` and `-promscrape.maxScrapeSize` values when scraping targets in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1331). * FEATURE: vmagent: respect the `sample_limit` and `-promscrape.maxScrapeSize` values when scraping targets in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1331).
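A hedged sketch of combining both limits in stream parsing mode; job names, values and addresses are illustrative:

```bash
cat <<'EOF' > scrape.yml
scrape_configs:
- job_name: big-target
  stream_parse: true     # enable stream parsing mode for this target
  sample_limit: 100000   # drop the scrape if it returns more samples
  static_configs:
  - targets: ['exporter:8080']
EOF

./vmagent -promscrape.config=scrape.yml \
  -promscrape.maxScrapeSize=64MB \
  -remoteWrite.url=http://victoria-metrics:8428/api/v1/write
```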
@ -304,6 +344,8 @@ sort: 15
## [v1.60.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.60.0) ## [v1.60.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.60.0)
Released at 24-05-2021
* FEATURE: add ability to limit the number of unique time series, which can be added to storage per hour and per day. This can help with high cardinality and high churn rate issues. See [these docs](https://docs.victoriametrics.com/#cardinality-limiter). * FEATURE: add ability to limit the number of unique time series, which can be added to storage per hour and per day. This can help with high cardinality and high churn rate issues. See [these docs](https://docs.victoriametrics.com/#cardinality-limiter). A command-line sketch is shown after this list.
* FEATURE: vmagent: add ability to limit the number of unique time series, which can be sent to remote storage systems per hour and per day. This can help dealing with high cardinality and high churn rate issues. See [these docs](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter). * FEATURE: vmagent: add ability to limit the number of unique time series, which can be sent to remote storage systems per hour and per day. This can help dealing with high cardinality and high churn rate issues. See [these docs](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
* FEATURE: vmalert: add ability to run alerting and recording rules for multiple tenants. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/740) and [these docs](https://docs.victoriametrics.com/vmalert.html#multitenancy). * FEATURE: vmalert: add ability to run alerting and recording rules for multiple tenants. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/740) and [these docs](https://docs.victoriametrics.com/vmalert.html#multitenancy).
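The per-hour and per-day series limits introduced above boil down to a pair of command-line flags on each side; a sketch with illustrative values (flag names as described in the linked cardinality limiter docs):

```bash
# Single-node VictoriaMetrics: cap unique series registered per hour and per day.
./victoria-metrics -storage.maxHourlySeries=100000 -storage.maxDailySeries=1000000

# vmagent: cap unique series sent to remote storage per hour and per day.
./vmagent -remoteWrite.maxHourlySeries=100000 -remoteWrite.maxDailySeries=1000000 \
  -remoteWrite.url=http://victoria-metrics:8428/api/v1/write
```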
@ -339,6 +381,8 @@ sort: 15
## [v1.59.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.59.0) ## [v1.59.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.59.0)
Released at 01-05-2021
* FEATURE: improved new time series registration speed on systems with many CPU cores. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1244). Thanks to @waldoweng for the idea and [draft implementation](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1243). * FEATURE: improved new time series registration speed on systems with many CPU cores. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1244). Thanks to @waldoweng for the idea and [draft implementation](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1243).
* FEATURE: vmalert: use the same technique as Grafana for determining evaluation timestamps for recording rules. This should make consistent graphs for series generated by recording rules compared to graphs generated for queries from recording rules in Grafana. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1232). * FEATURE: vmalert: use the same technique as Grafana for determining evaluation timestamps for recording rules. This should make consistent graphs for series generated by recording rules compared to graphs generated for queries from recording rules in Grafana. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1232).
* FEATURE: vmauth: add ability to set mandatory query args in `url_prefix`. For example, `url_prefix: http://vm:8428/?extra_label=team=dev` would add `extra_label=team=dev` query arg to all the incoming requests. See [the example](https://docs.victoriametrics.com/vmauth.html#auth-config) for more details. * FEATURE: vmauth: add ability to set mandatory query args in `url_prefix`. For example, `url_prefix: http://vm:8428/?extra_label=team=dev` would add `extra_label=team=dev` query arg to all the incoming requests. See [the example](https://docs.victoriametrics.com/vmauth.html#auth-config) for more details.
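A minimal vmauth config sketch for the mandatory query args described above; user credentials and the upstream address are illustrative:

```bash
cat <<'EOF' > auth.yml
users:
- username: "dev-reader"
  password: "secret"
  # every proxied request gets extra_label=team=dev appended
  url_prefix: "http://vm:8428/?extra_label=team=dev"
EOF

./vmauth -auth.config=auth.yml
```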
@ -355,6 +399,8 @@ Thanks to @johnseekins!
## [v1.58.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.58.0) ## [v1.58.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.58.0)
Released at 08-04-2021
* FEATURE: vminsert and vmagent: add `-sortLabels` command-line flag for sorting metric labels before pushing them to `vmstorage`. This should reduce the size of `MetricName -> internal_series_id` cache (aka `vm_cache_size_bytes{type="storage/tsid"}`) when ingesting samples for the same time series with distinct order of labels. For example, `foo{k1="v1",k2="v2"}` and `foo{k2="v2",k1="v1"}` represent a single time series. Labels sorting is disabled by default, since the majority of established exporters preserve the order of labels for the exported metrics. * FEATURE: vminsert and vmagent: add `-sortLabels` command-line flag for sorting metric labels before pushing them to `vmstorage`. This should reduce the size of `MetricName -> internal_series_id` cache (aka `vm_cache_size_bytes{type="storage/tsid"}`) when ingesting samples for the same time series with distinct order of labels. For example, `foo{k1="v1",k2="v2"}` and `foo{k2="v2",k1="v1"}` represent a single time series. Labels sorting is disabled by default, since the majority of established exporters preserve the order of labels for the exported metrics.
* FEATURE: allow specifying label value alongside label name for the `others sum` time series returned from `topk_*` and `bottomk_*` functions from [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `topk_avg(3, max(process_resident_memory_bytes) by (instance), "instance=other_sum")` would return top 3 series from `max(process_resident_memory_bytes) by (instance)` plus a series containing the sum of other series. The `others sum` series will have `{instance="other_sum"}` label. * FEATURE: allow specifying label value alongside label name for the `others sum` time series returned from `topk_*` and `bottomk_*` functions from [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `topk_avg(3, max(process_resident_memory_bytes) by (instance), "instance=other_sum")` would return top 3 series from `max(process_resident_memory_bytes) by (instance)` plus a series containing the sum of other series. The `others sum` series will have `{instance="other_sum"}` label.
* FEATURE: do not delete `dst_label` when applying `label_copy(q, "src_label", "dst_label")` and `label_move(q, "src_label", "dst_label")` to series without `src_label` and with non-empty `dst_label`. See more details at [MetricsQL docs](https://docs.victoriametrics.com/MetricsQL.html). * FEATURE: do not delete `dst_label` when applying `label_copy(q, "src_label", "dst_label")` and `label_move(q, "src_label", "dst_label")` to series without `src_label` and with non-empty `dst_label`. See more details at [MetricsQL docs](https://docs.victoriametrics.com/MetricsQL.html).
@ -381,6 +427,8 @@ Thanks to @johnseekins!
## [v1.57.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.57.1) ## [v1.57.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.57.1)
Released at 30-03-2021
* FEATURE: publish vmutils for `GOOS=arm` on [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). * FEATURE: publish vmutils for `GOOS=arm` on [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
* BUGFIX: prevent from possible incomplete query results after timed out query. * BUGFIX: prevent from possible incomplete query results after timed out query.
@ -390,6 +438,8 @@ Thanks to @johnseekins!
## [v1.57.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.57.0) ## [v1.57.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.57.0)
Released at 29-03-2021
* FEATURE: optimize query performance by up to 10x on systems with many CPU cores. See [this tweet](https://twitter.com/MetricsVictoria/status/1375064484860067840). * FEATURE: optimize query performance by up to 10x on systems with many CPU cores. See [this tweet](https://twitter.com/MetricsVictoria/status/1375064484860067840).
* FEATURE: add the following metrics at `/metrics` page for every VictoriaMetrics app: * FEATURE: add the following metrics at `/metrics` page for every VictoriaMetrics app:
* `process_resident_memory_anon_bytes` - RSS share for memory allocated by the process itself. This share cannot be freed by the OS, so it must be taken into account by OOM killer. * `process_resident_memory_anon_bytes` - RSS share for memory allocated by the process itself. This share cannot be freed by the OS, so it must be taken into account by OOM killer.
@ -413,6 +463,8 @@ Thanks to @johnseekins!
## [v1.56.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.56.0) ## [v1.56.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.56.0)
Released at 17-03-2021
* FEATURE: add the following functions to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): * FEATURE: add the following functions to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html):
- `histogram_avg(buckets)` - returns the average value for the given buckets. - `histogram_avg(buckets)` - returns the average value for the given buckets.
- `histogram_stdvar(buckets)` - returns standard variance for the given buckets. - `histogram_stdvar(buckets)` - returns standard variance for the given buckets.
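For illustration, one of the new functions applied to a VictoriaMetrics-style histogram grouped by `vmrange` buckets; the metric name is hypothetical:

```bash
# Average request duration computed from histogram buckets.
curl 'http://localhost:8428/api/v1/query' \
  --data-urlencode 'query=histogram_avg(sum(rate(request_duration_seconds_bucket[5m])) by (vmrange))'
```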
@ -442,12 +494,15 @@ Thanks to @johnseekins!
## [v1.55.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.55.1) ## [v1.55.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.55.1)
Released at 03-03-2021
* BUGFIX: vmagent: fix a panic in Kubernetes service discovery when a target is filtered out with relabeling. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1107 * BUGFIX: vmagent: fix a panic in Kubernetes service discovery when a target is filtered out with relabeling. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1107
* BUGFIX: vmagent: fix Kubernetes service discovery for `role: ingress`. See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1110 * BUGFIX: vmagent: fix Kubernetes service discovery for `role: ingress`. See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1110
## [v1.55.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.55.0) ## [v1.55.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.55.0)
Released at 02-03-2021
* FEATURE: add `sign(q)` and `clamp(q, min, max)` functions, which are planned to be added in [the upcoming Prometheus release](https://twitter.com/roidelapluie/status/1363428376162295811). The `last_over_time(m[d])` function is already supported in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). * FEATURE: add `sign(q)` and `clamp(q, min, max)` functions, which are planned to be added in [the upcoming Prometheus release](https://twitter.com/roidelapluie/status/1363428376162295811). The `last_over_time(m[d])` function is already supported in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
* FEATURE: vmagent: add `scrape_align_interval` config option, which can be used for aligning scrapes to the beginning of the configured interval. See [these docs](https://docs.victoriametrics.com/vmagent.html#troubleshooting) for details. * FEATURE: vmagent: add `scrape_align_interval` config option, which can be used for aligning scrapes to the beginning of the configured interval. See [these docs](https://docs.victoriametrics.com/vmagent.html#troubleshooting) for details.
@ -484,11 +539,15 @@ Thanks to @johnseekins!
## [v1.54.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.1) ## [v1.54.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.1)
Released at 18-02-2021
* BUGFIX: properly handle queries containing a filter on metric name plus any number of negative filters and zero non-negative filters. For example, `node_cpu_seconds_total{mode!="idle"}`. The bug was introduced in [v1.54.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.0). * BUGFIX: properly handle queries containing a filter on metric name plus any number of negative filters and zero non-negative filters. For example, `node_cpu_seconds_total{mode!="idle"}`. The bug was introduced in [v1.54.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.0).
## [v1.54.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.0) ## [v1.54.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.0)
Released at 18-02-2021
* FEATURE: optimize searching for matching metrics for `metric{<label_filters>}` queries if `<label_filters>` contains at least a single filter. For example, the query `up{job="foobar"}` should find the matching time series much faster than previously. * FEATURE: optimize searching for matching metrics for `metric{<label_filters>}` queries if `<label_filters>` contains at least a single filter. For example, the query `up{job="foobar"}` should find the matching time series much faster than previously.
* FEATURE: reduce execution times for `q1 <binary_op> q2` queries by executing `q1` and `q2` in parallel. * FEATURE: reduce execution times for `q1 <binary_op> q2` queries by executing `q1` and `q2` in parallel.
* FEATURE: switch from Go1.15 to [Go1.16](https://golang.org/doc/go1.16) for building prod binaries. * FEATURE: switch from Go1.15 to [Go1.16](https://golang.org/doc/go1.16) for building prod binaries.
@ -499,7 +558,7 @@ Thanks to @johnseekins!
* FEATURE: remove dependency on external programs such as `cat`, `grep` and `cut` when detecting cpu and memory limits inside Docker or LXC container. * FEATURE: remove dependency on external programs such as `cat`, `grep` and `cut` when detecting cpu and memory limits inside Docker or LXC container.
* FEATURE: vmagent: add `__meta_kubernetes_endpoints_label_*`, `__meta_kubernetes_endpoints_labelpresent_*`, `__meta_kubernetes_endpoints_annotation_*` and `__meta_kubernetes_endpoints_annotationpresent_*` labels for `role: endpoints` in Kubernetes service discovery. These labels were added in Prometheus 2.25. * FEATURE: vmagent: add `__meta_kubernetes_endpoints_label_*`, `__meta_kubernetes_endpoints_labelpresent_*`, `__meta_kubernetes_endpoints_annotation_*` and `__meta_kubernetes_endpoints_annotationpresent_*` labels for `role: endpoints` in Kubernetes service discovery. These labels were added in Prometheus 2.25.
* FEATURE: reduce the minimum supported retention period for inverted index (aka `indexdb`) from one month to one day. This should reduce disk space usage for `<-storageDataPath>/indexdb` folder if `-retentionPeriod` is set to values smaller than one month. * FEATURE: reduce the minimum supported retention period for inverted index (aka `indexdb`) from one month to one day. This should reduce disk space usage for `<-storageDataPath>/indexdb` folder if `-retentionPeriod` is set to values smaller than one month.
* FEATURE: vmselect: export per-tenant metrics `vm_vmselect_http_requests_total` and `vm_vmselect_http_requests_duration_ms_total`. Other per-tenant metrics are available as a part of [enterprise package](https://victoriametrics.com/enterprise.html). See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/932 for details. * FEATURE: vmselect: export per-tenant metrics `vm_vmselect_http_requests_total` and `vm_vmselect_http_requests_duration_ms_total`. Other per-tenant metrics are available as a part of [enterprise package](https://victoriametrics.com/products/enterprise/). See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/932 for details.
* BUGFIX: properly convert regexp tag filters containing escaped dots to non-regexp tag filters. For example, `{foo=~"bar\.baz"}` should be converted to `{foo="bar.baz"}`. Previously it was incorrectly converted to `{foo="bar\.baz"}`, which could result in missing time series for this tag filter. * BUGFIX: properly convert regexp tag filters containing escaped dots to non-regexp tag filters. For example, `{foo=~"bar\.baz"}` should be converted to `{foo="bar.baz"}`. Previously it was incorrectly converted to `{foo="bar\.baz"}`, which could result in missing time series for this tag filter.
* BUGFIX: do not spam error logs when discovering Docker Swarm targets without dedicated IP. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1028 . * BUGFIX: do not spam error logs when discovering Docker Swarm targets without dedicated IP. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1028 .
@ -510,11 +569,15 @@ Thanks to @johnseekins!
## [v1.53.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.53.1) ## [v1.53.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.53.1)
Released at 03-02-2021
* BUGFIX: vmselect: fix the bug preventing proper searching by Graphite filter with wildcards such as `{__graphite__="foo.*.bar"}`. * BUGFIX: vmselect: fix the bug preventing proper searching by Graphite filter with wildcards such as `{__graphite__="foo.*.bar"}`.
## [v1.53.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.53.0) ## [v1.53.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.53.0)
Released at 03-02-2021
* FEATURE: added [vmctl tool](https://docs.victoriametrics.com/vmctl.html) to VictoriaMetrics release process. Now it is packaged in `vmutils-*.tar.gz` archive on [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). Source code for `vmctl` tool has been moved from [github.com/VictoriaMetrics/vmctl](https://github.com/VictoriaMetrics/vmctl) to [github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmctl). * FEATURE: added [vmctl tool](https://docs.victoriametrics.com/vmctl.html) to VictoriaMetrics release process. Now it is packaged in `vmutils-*.tar.gz` archive on [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). Source code for `vmctl` tool has been moved from [github.com/VictoriaMetrics/vmctl](https://github.com/VictoriaMetrics/vmctl) to [github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmctl).
* FEATURE: added `-loggerTimezone` command-line flag for adjusting time zone for timestamps in log messages. By default UTC is used. * FEATURE: added `-loggerTimezone` command-line flag for adjusting time zone for timestamps in log messages. By default UTC is used.
* FEATURE: added `-search.maxStepForPointsAdjustment` command-line flag, which can be used for disabling adjustment for points returned by `/api/v1/query_range` handler if such points have timestamps closer than `-search.latencyOffset` to the current time. Such points may contain incomplete data, so they are substituted by the previous values for `step` query args smaller than one minute by default. * FEATURE: added `-search.maxStepForPointsAdjustment` command-line flag, which can be used for disabling adjustment for points returned by `/api/v1/query_range` handler if such points have timestamps closer than `-search.latencyOffset` to the current time. Such points may contain incomplete data, so they are substituted by the previous values for `step` query args smaller than one minute by default.
@ -540,6 +603,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.52.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.52.0) ## [v1.52.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.52.0)
Released at 13-01-2021
* FEATURE: provide a sample list of alerting rules for VictoriaMetrics components. It is available [here](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml). * FEATURE: provide a sample list of alerting rules for VictoriaMetrics components. It is available [here](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml).
* FEATURE: disable final merge for data for the previous month at the beginning of a new month, since it may result in high disk IO and CPU usage. Final merge can be enabled by setting the `-finalMergeDelay` command-line flag to a positive duration. * FEATURE: disable final merge for data for the previous month at the beginning of a new month, since it may result in high disk IO and CPU usage. Final merge can be enabled by setting the `-finalMergeDelay` command-line flag to a positive duration.
* FEATURE: add `tfirst_over_time(m[d])` and `tlast_over_time(m[d])` functions to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) for returning timestamps for the first and the last data point in `m` over `d` duration. * FEATURE: add `tfirst_over_time(m[d])` and `tlast_over_time(m[d])` functions to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) for returning timestamps for the first and the last data point in `m` over `d` duration.
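For illustration, a hypothetical query returning the timestamp of the most recent `up` sample within the last hour:

```bash
curl 'http://localhost:8428/api/v1/query' --data-urlencode 'query=tlast_over_time(up[1h])'
```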
@ -560,6 +625,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.51.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.51.0) ## [v1.51.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.51.0)
Released at 27-12-2020
* FEATURE: add `/api/v1/status/top_queries` handler, which returns the most frequently executed queries and queries that took the most time for execution. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/907 * FEATURE: add `/api/v1/status/top_queries` handler, which returns the most frequently executed queries and queries that took the most time for execution. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/907 (a curl sketch is shown after this list)
* FEATURE: vmagent: add support for `proxy_url` config option in Prometheus scrape configs. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/503 * FEATURE: vmagent: add support for `proxy_url` config option in Prometheus scrape configs. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/503
* FEATURE: remove parts with stale data as soon as they go outside the configured `-retentionPeriod`. Previously such parts could remain active for long periods of time. This should help reduce disk usage for `-retentionPeriod` smaller than one month. * FEATURE: remove parts with stale data as soon as they go outside the configured `-retentionPeriod`. Previously such parts could remain active for long periods of time. This should help reduce disk usage for `-retentionPeriod` smaller than one month.
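A quick way to inspect the new `top_queries` handler mentioned above; the `topN` arg and local address are illustrative:

```bash
curl 'http://localhost:8428/api/v1/status/top_queries?topN=10'
```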
@ -572,6 +639,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.50.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.2) ## [v1.50.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.2)
Released at 19-12-2020
* FEATURE: do not publish duplicate Docker images with `-cluster` tag suffix for [vmagent](https://docs.victoriametrics.com/vmagent.html), [vmalert](https://docs.victoriametrics.com/vmalert.html), [vmauth](https://docs.victoriametrics.com/vmauth.html), [vmbackup](https://docs.victoriametrics.com/vmbackup.html) and [vmrestore](https://docs.victoriametrics.com/vmrestore.html), since they are identical to images without `-cluster` tag suffix. * FEATURE: do not publish duplicate Docker images with `-cluster` tag suffix for [vmagent](https://docs.victoriametrics.com/vmagent.html), [vmalert](https://docs.victoriametrics.com/vmalert.html), [vmauth](https://docs.victoriametrics.com/vmauth.html), [vmbackup](https://docs.victoriametrics.com/vmbackup.html) and [vmrestore](https://docs.victoriametrics.com/vmrestore.html), since they are identical to images without `-cluster` tag suffix.
* BUGFIX: vmalert: properly populate template variables. This has been broken in v1.50.0. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/974 * BUGFIX: vmalert: properly populate template variables. This has been broken in v1.50.0. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/974
@ -581,6 +650,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.50.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.1) ## [v1.50.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.1)
Released at 15-12-2020
* FEATURE: vmagent: export `vmagent_remotewrite_blocks_sent_total` and `vmagent_remotewrite_bytes_sent_total` metrics for each `-remoteWrite.url`. * FEATURE: vmagent: export `vmagent_remotewrite_blocks_sent_total` and `vmagent_remotewrite_bytes_sent_total` metrics for each `-remoteWrite.url`.
* BUGFIX: vmagent: properly delete unregistered scrape targets from `/targets` and `/api/v1/targets` pages. They weren't deleted due to the bug in `v1.50.0`. * BUGFIX: vmagent: properly delete unregistered scrape targets from `/targets` and `/api/v1/targets` pages. They weren't deleted due to the bug in `v1.50.0`.
@ -588,6 +659,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.50.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.0) ## [v1.50.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.0)
Released at 15-12-2020
* FEATURE: automatically reset response cache when samples with timestamps older than `now - search.cacheTimestampOffset` are ingested to VictoriaMetrics. This makes it unnecessary to disable the response cache during data backfilling or to reset it after backfilling is complete, as described [in these docs](https://docs.victoriametrics.com/#backfilling). This feature applies only to single-node VictoriaMetrics. It doesn't apply to cluster version of VictoriaMetrics because `vminsert` nodes don't know about `vmselect` nodes where the response cache must be reset. * FEATURE: automatically reset response cache when samples with timestamps older than `now - search.cacheTimestampOffset` are ingested to VictoriaMetrics. This makes it unnecessary to disable the response cache during data backfilling or to reset it after backfilling is complete, as described [in these docs](https://docs.victoriametrics.com/#backfilling). This feature applies only to single-node VictoriaMetrics. It doesn't apply to cluster version of VictoriaMetrics because `vminsert` nodes don't know about `vmselect` nodes where the response cache must be reset.
* FEATURE: vmalert: add `query`, `first` and `value` functions to alert templates. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/539 * FEATURE: vmalert: add `query`, `first` and `value` functions to alert templates. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/539
* FEATURE: vmagent: return user-friendly HTML page when requesting `/targets` page from web browser. The page is returned in the old plaintext format when requesting via curl or similar tool. * FEATURE: vmagent: return user-friendly HTML page when requesting `/targets` page from web browser. The page is returned in the old plaintext format when requesting via curl or similar tool.
@ -613,6 +686,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.49.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.49.0) ## [v1.49.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.49.0)
Released at 05-12-2020
* FEATURE: optimize Consul service discovery speed when discovering big number of services. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/574 * FEATURE: optimize Consul service discovery speed when discovering big number of services. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/574
* FEATURE: add `label_uppercase(q, label1, ... labelN)` and `label_lowercase(q, label1, ... labelN)` functions to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) * FEATURE: add `label_uppercase(q, label1, ... labelN)` and `label_lowercase(q, label1, ... labelN)` functions to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html)
for uppercasing and lowercasing values for the given labels. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/936 for uppercasing and lowercasing values for the given labels. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/936
@ -633,6 +708,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.48.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.48.0) ## [v1.48.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.48.0)
Released at 26-11-2020
* FEATURE: added [Snap package for single-node VictoriaMetrics](https://snapcraft.io/victoriametrics). This simplifies installation under Ubuntu to a single command: * FEATURE: added [Snap package for single-node VictoriaMetrics](https://snapcraft.io/victoriametrics). This simplifies installation under Ubuntu to a single command:
```bash ```bash
snap install victoriametrics snap install victoriametrics
@ -659,6 +736,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.47.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.47.0) ## [v1.47.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.47.0)
Released at 16-11-2020
* FEATURE: vmselect: return the original error from `vmstorage` node in query response if `-search.denyPartialResponse` is set. * FEATURE: vmselect: return the original error from `vmstorage` node in query response if `-search.denyPartialResponse` is set.
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/891 See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/891
* FEATURE: vmselect: add `"isPartial":{true|false}` field in JSON output for `/api/v1/*` functions * FEATURE: vmselect: add `"isPartial":{true|false}` field in JSON output for `/api/v1/*` functions
@ -689,6 +768,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.46.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.46.0) ## [v1.46.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.46.0)
Released at 07-11-2020
* FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label/<name>/values` when `start` and `end` args are set. * FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label/<name>/values` when `start` and `end` args are set.
* FEATURE: reduce memory usage when query touches big number of time series. * FEATURE: reduce memory usage when query touches big number of time series.
* FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers big number of scrape targets (e.g. hundreds of thousands) and the majority of these targets (99%) * FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers big number of scrape targets (e.g. hundreds of thousands) and the majority of these targets (99%)
@ -707,6 +788,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.45.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.45.0) ## [v1.45.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.45.0)
Released at 02-11-2020
* FEATURE: allow setting `-retentionPeriod` smaller than one month. For example, `-retentionPeriod=3d`, `-retentionPeriod=2w`, etc. are supported now. * FEATURE: allow setting `-retentionPeriod` smaller than one month. For example, `-retentionPeriod=3d`, `-retentionPeriod=2w`, etc. are supported now.
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/173 See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/173
* FEATURE: optimize more cases according to https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization . Now the following cases are optimized too: * FEATURE: optimize more cases according to https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization . Now the following cases are optimized too:
@ -741,6 +824,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.44.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.44.0) ## [v1.44.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.44.0)
Released at 13-10-2020
* FEATURE: automatically add missing label filters to binary operands as described at https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization . * FEATURE: automatically add missing label filters to binary operands as described at https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization .
This should improve performance for queries with missing label filters in binary operands. For example, the following query should work faster now, because it shouldn't This should improve performance for queries with missing label filters in binary operands. For example, the following query should work faster now, because it shouldn't
fetch and discard time series for `node_filesystem_files_free` metric without matching labels for the left side of the expression: fetch and discard time series for `node_filesystem_files_free` metric without matching labels for the left side of the expression:
@ -801,6 +886,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.43.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.43.0) ## [v1.43.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.43.0)
Released at 06-10-2020
* FEATURE: reduce CPU usage for repeated queries over sliding time window when no new time series are added to the database. * FEATURE: reduce CPU usage for repeated queries over sliding time window when no new time series are added to the database.
Typical use cases: repeated evaluation of alerting rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) or dashboard auto-refresh in Grafana. Typical use cases: repeated evaluation of alerting rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) or dashboard auto-refresh in Grafana.
* FEATURE: vmagent: add OpenStack service discovery aka [openstack_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config). * FEATURE: vmagent: add OpenStack service discovery aka [openstack_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config).
@ -820,6 +907,8 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
## [v1.42.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0) ## [v1.42.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0)
Released at 30-09-2020
* FEATURE: use all the available CPU cores when accepting data via a single TCP connection * FEATURE: use all the available CPU cores when accepting data via a single TCP connection
for [all the supported protocols](https://docs.victoriametrics.com/#how-to-import-time-series-data). for [all the supported protocols](https://docs.victoriametrics.com/#how-to-import-time-series-data).
Previously data ingested via a single TCP connection could use only a single CPU core. This could limit data ingestion performance. Previously data ingested via a single TCP connection could use only a single CPU core. This could limit data ingestion performance.

View file

@ -101,7 +101,7 @@ vmstorage-prod
### Development Builds ### Development Builds
1. [Install go](https://golang.org/doc/install). The minimum supported version is Go 1.16. 1. [Install go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make` from [the repository root](https://github.com/VictoriaMetrics/VictoriaMetrics). It should build `vmstorage`, `vmselect` 2. Run `make` from [the repository root](https://github.com/VictoriaMetrics/VictoriaMetrics). It should build `vmstorage`, `vmselect`
and `vminsert` binaries and put them into the `bin` folder. and `vminsert` binaries and put them into the `bin` folder.
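A sketch of the development build flow described above, assuming a standard Go toolchain is already installed:

```bash
git clone https://github.com/VictoriaMetrics/VictoriaMetrics
cd VictoriaMetrics
git checkout cluster   # the cluster branch hosts vmstorage/vmselect/vminsert
make                   # requires Go 1.17+
ls bin/                # vminsert, vmselect and vmstorage binaries
```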
@ -225,7 +225,7 @@ It is recommended setting up alerts in [vmalert](https://docs.victoriametrics.co
* URLs for [Graphite Metrics API](https://graphite-api.readthedocs.io/en/latest/api.html#the-metrics-api): `http://<vmselect>:8481/select/<accountID>/graphite/<suffix>`, where: * URLs for [Graphite Metrics API](https://graphite-api.readthedocs.io/en/latest/api.html#the-metrics-api): `http://<vmselect>:8481/select/<accountID>/graphite/<suffix>`, where:
- `<accountID>` is an arbitrary number identifying data namespace for query (aka tenant) - `<accountID>` is an arbitrary number identifying data namespace for query (aka tenant)
- `<suffix>` may have the following values: - `<suffix>` may have the following values:
- `render` - implements Graphite Render API. See [these docs](https://graphite.readthedocs.io/en/stable/render_api.html). This functionality is available in [Enterprise package](https://victoriametrics.com/enterprise.html). - `render` - implements Graphite Render API. See [these docs](https://graphite.readthedocs.io/en/stable/render_api.html). This functionality is available in [Enterprise package](https://victoriametrics.com/products/enterprise/).
- `metrics/find` - searches Graphite metrics. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find). - `metrics/find` - searches Graphite metrics. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find).
- `metrics/expand` - expands Graphite metrics. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-expand). - `metrics/expand` - expands Graphite metrics. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-expand).
- `metrics/index.json` - returns all the metric names. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-index-json). - `metrics/index.json` - returns all the metric names. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-index-json).
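Hypothetical lookups against the endpoints above, assuming tenant `0` and a local vmselect:

```bash
curl 'http://localhost:8481/select/0/graphite/metrics/find?query=foo.*'
curl 'http://localhost:8481/select/0/graphite/metrics/index.json'
```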
@ -410,7 +410,7 @@ Restoring from backup:
## Downsampling ## Downsampling
Downsampling is available in [enterprise version of VictoriaMetrics](https://victoriametrics.com/enterprise.html). It is configured with `-downsampling.period` command-line flag. The same flag value must be passed to both `vmstorage` and `vmselect` nodes. See [these docs](https://docs.victoriametrics.com/#downsampling) for details. Downsampling is available in [enterprise version of VictoriaMetrics](https://victoriametrics.com/products/enterprise/). It is configured with `-downsampling.period` command-line flag. The same flag value must be passed to both `vmstorage` and `vmselect` nodes. See [these docs](https://docs.victoriametrics.com/#downsampling) for details.
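A hedged sketch of the flag format (`offset:interval`); values are illustrative, and the same flag must go to both node types:

```bash
# Keep raw data for 30 days, then leave a single sample per 5 minutes.
./vmstorage -downsampling.period=30d:5m
./vmselect  -downsampling.period=30d:5m
```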
## Profiling ## Profiling

View file

@ -66,6 +66,20 @@ and send data to multiple remote storage systems, vmagent has the following addi
- [Prometheus remote_write proxy](https://docs.victoriametrics.com/vmagent.html#prometheus-remote_write-proxy) - [Prometheus remote_write proxy](https://docs.victoriametrics.com/vmagent.html#prometheus-remote_write-proxy)
## What is the difference between vmagent and Prometheus agent?
Both [vmagent](https://docs.victoriametrics.com/vmagent.html) and [Prometheus agent](https://prometheus.io/blog/2021/11/16/agent/) serve the same purpose - to efficiently scrape Prometheus-compatible targets at the edge. They have the following differences:
- vmagent usually requires lower amounts of CPU, RAM and disk IO compared to Prometheus agent.
- Prometheus agent supports only pull-based data collection (e.g. it can scrape Prometheus-compatible targets), while vmagent supports both pull and push data collection - it can accept data via many popular data ingestion protocols such as InfluxDB line protocol, Graphite protocol, OpenTSDB protocol, DataDog protocol, Prometheus protocol, CSV and JSON - see [these docs](https://docs.victoriametrics.com/vmagent.html#features) (a push example is shown after this list).
- vmagent can easily scale horizontally to multiple instances for scraping a big number of targets - see [these docs](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
- vmagent supports [improved relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling).
- vmagent can limit the number of scraped metrics per target. See [these docs](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
- vmagent supports loading scrape configs from multiple files - see [these docs](https://docs.victoriametrics.com/vmagent.html#loading-scrape-configs-from-multiple-files).
- vmagent supports data reading and data writing from/to Kafka - see [these docs](https://docs.victoriametrics.com/vmagent.html#kafka-integration).
- vmagent can read and update scrape configs from HTTP and HTTPS URLs, while Prometheus agent can read scrape configs only from the local filesystem.
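As an example of the push path mentioned in the list above, a single InfluxDB line protocol sample can be sent to a running vmagent; host, port, measurement and tag names are illustrative:

```bash
curl -d 'room_temp,room=kitchen value=21.5' -X POST 'http://vmagent:8429/write'
```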
## Is it safe to enable [remote write](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage) in Prometheus? ## Is it safe to enable [remote write](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage) in Prometheus?
Yes. Prometheus continues writing data to local storage after enabling remote write, so all the existing local storage data Yes. Prometheus continues writing data to local storage after enabling remote write, so all the existing local storage data
@ -191,9 +205,12 @@ The following versions are open source and free:
We provide commercial support for both versions. [Contact us](mailto:info@victoriametrics.com) for the pricing. We provide commercial support for both versions. [Contact us](mailto:info@victoriametrics.com) for the pricing.
The following commercial versions of VictoriaMetrics are available:
* [Managed VictoriaMetrics at AWS](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc) (aka managed Prometheus).
The following commercial versions of VictoriaMetrics are planned: The following commercial versions of VictoriaMetrics are planned:
* Managed cluster in the Cloud. * Managed VictoriaMetrics at Google Cloud.
* SaaS version. * Cloud monitoring solution based on VictoriaMetrics.
[Contact us](mailto:info@victoriametrics.com) for more information on our plans. [Contact us](mailto:info@victoriametrics.com) for more information on our plans.

View file

@ -4,7 +4,7 @@ sort: 18
# VictoriaMetrics Cluster Per Tenant Statistic # VictoriaMetrics Cluster Per Tenant Statistic
***The per-tenant statistic is a part of [enterprise package](https://victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)*** ***The per-tenant statistic is a part of [enterprise package](https://victoriametrics.com/products/enterprise/). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
<img alt="cluster-per-tenant-stat" src="PerTenantStatistic-stats.jpg"> <img alt="cluster-per-tenant-stat" src="PerTenantStatistic-stats.jpg">

View file

@ -4,32 +4,17 @@ sort: 12
# Quick Start # Quick Start
1. If you run Ubuntu please run the `snap install victoriametrics` command to install and start VictoriaMetrics. Then read [these docs](https://snapcraft.io/victoriametrics). The following commands download the latest available [Docker image of VictoriaMetrics](https://hub.docker.com/r/victoriametrics/victoria-metrics) and start it at port 8428, while storing the ingested data at `victoria-metrics-data` subdirectory under the current directory:
Otherwise you can download the latest VictoriaMetrics release from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
or [Docker hub](https://hub.docker.com/r/victoriametrics/victoria-metrics/)
or [build it from sources](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-build-from-sources).
2. This step isn't needed if you run VictoriaMetrics via `snap install victoriametrics` as described above. ```bash
Otherwise, please run the binary or Docker image with your desired command-line flags. You can look at `-help` to see descriptions of all available flags docker pull victoriametrics/victoria-metrics:latest
and their default values. The default flag values should fit the majority of cases. The minimum required flags that must be configured are: docker run -it --rm -v `pwd`/victoria-metrics-data:/victoria-metrics-data -p 8428:8428 victoriametrics/victoria-metrics:latest
```
* `-storageDataPath` - the path to directory where VictoriaMetrics stores your data. Open `http://localhost:8428` in web browser and read [these docs](https://docs.victoriametrics.com/#operation).
* `-retentionPeriod` - data retention.
For example: VictoriaMetrics is also available in binaries (see [this page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)) and in source code (see [how to build VictoriaMetrics from sources](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-build-from-sources)).
`./victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics-data -retentionPeriod=3` There are also the following versions of VictoriaMetrics available:
* [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) - horizontally scalable VictoriaMetrics, which scales to multiple nodes.
Check [these instructions](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/43) to configure VictoriaMetrics as an OS service. * [Managed VictoriaMetrics at AWS](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc).
We recommend setting up [VictoriaMetrics monitoring](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#monitoring).
3. Configure either [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus to write data to VictoriaMetrics.
We recommend using `vmagent` instead of Prometheus because it is more resource-efficient. If you still prefer Prometheus
see [these instructions](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-setup)
for details on how it may be properly configured.
4. To configure Grafana to query VictoriaMetrics instead of Prometheus
please see [these instructions](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#grafana-setup).
There is also [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster) and [SaaS playground](https://play.victoriametrics.com/signIn).

View file

@@ -19,7 +19,7 @@ Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-set
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
-[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/enterprise.html). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
+[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/products/enterprise/). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
## Prominent features
@@ -56,7 +56,7 @@ VictoriaMetrics has the following prominent features:
* [Native binary format](#how-to-import-data-in-native-format).
* It supports metrics' relabeling. See [these docs](#relabeling) for details.
* It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
-* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/enterprise.html).
+* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/products/enterprise/).
* It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
@@ -570,7 +570,7 @@ VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series w
### Graphite Render API usage
-[VictoriaMetrics Enterprise](https://victoriametrics.com/enterprise.html) supports [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) subset
+[VictoriaMetrics Enterprise](https://victoriametrics.com/products/enterprise/) supports [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) subset
at `/render` endpoint, which is used by [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/).
When configuring Graphite datasource in Grafana, the `Storage-Step` http request header must be set to a step between Graphite data points stored in VictoriaMetrics. For example, `Storage-Step: 10s` would mean 10 seconds distance between Graphite datapoints stored in VictoriaMetrics.
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
@@ -632,7 +632,7 @@ to your needs or when testing bugfixes.
### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `victoria-metrics` binary and puts it into the `bin` folder.
@@ -648,7 +648,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
### Development ARM build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make victoria-metrics-arm` or `make victoria-metrics-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `victoria-metrics-arm` or `victoria-metrics-arm64` binary respectively and puts it into the `bin` folder.
@@ -662,7 +662,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
`Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
@@ -1171,13 +1171,13 @@ See [these docs](https://docs.victoriametrics.com/guides/guide-vmcluster-multipl
## Downsampling
-[VictoriaMetrics Enterprise](https://victoriametrics.com/enterprise.html) supports multi-level downsampling with `-downsampling.period` command-line flag. For example:
+[VictoriaMetrics Enterprise](https://victoriametrics.com/products/enterprise/) supports multi-level downsampling with `-downsampling.period` command-line flag. For example:
* `-downsampling.period=30d:5m` instructs VictoriaMetrics to [deduplicate](#deduplication) samples older than 30 days with 5 minutes interval.
* `-downsampling.period=30d:5m,180d:1h` instructs VictoriaMetrics to deduplicate samples older than 30 days with 5 minutes interval and to deduplicate samples older than 180 days with 1 hour interval.
-Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series.
+Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series. It is possible to use recording rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) in order to reduce the number of time series. See [these docs](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).
The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
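To make the multi-level semantics above concrete, here is a minimal standalone Go sketch (illustrative only, not VictoriaMetrics code; the `level` type and function names are made up) of how a sample's age would select the deduplication interval for `-downsampling.period=30d:5m,180d:1h`:

```go
package main

import (
	"fmt"
	"time"
)

// level mirrors one "offset:interval" entry of -downsampling.period.
type level struct {
	olderThan time.Duration // samples older than this...
	interval  time.Duration // ...are deduplicated to this interval
}

// dedupInterval picks the deduplication interval for a sample of the given age.
// Levels must be sorted by olderThan in ascending order; the last matching level wins.
func dedupInterval(age time.Duration, levels []level) time.Duration {
	d := time.Duration(0) // 0 means "keep raw samples"
	for _, l := range levels {
		if age > l.olderThan {
			d = l.interval
		}
	}
	return d
}

func main() {
	levels := []level{
		{30 * 24 * time.Hour, 5 * time.Minute}, // 30d:5m
		{180 * 24 * time.Hour, time.Hour},      // 180d:1h
	}
	for _, age := range []time.Duration{24 * time.Hour, 60 * 24 * time.Hour, 365 * 24 * time.Hour} {
		fmt.Printf("age=%v -> dedup interval %v\n", age, dedupInterval(age, levels))
	}
}
```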
@@ -23,7 +23,7 @@ Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-set
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
-[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/enterprise.html). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
+[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/products/enterprise/). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
## Prominent features
@@ -60,7 +60,7 @@ VictoriaMetrics has the following prominent features:
* [Native binary format](#how-to-import-data-in-native-format).
* It supports metrics' relabeling. See [these docs](#relabeling) for details.
* It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
-* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/enterprise.html).
+* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/products/enterprise/).
* It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
@@ -574,7 +574,7 @@ VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series w
### Graphite Render API usage
-[VictoriaMetrics Enterprise](https://victoriametrics.com/enterprise.html) supports [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) subset
+[VictoriaMetrics Enterprise](https://victoriametrics.com/products/enterprise/) supports [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) subset
at `/render` endpoint, which is used by [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/).
When configuring Graphite datasource in Grafana, the `Storage-Step` http request header must be set to a step between Graphite data points stored in VictoriaMetrics. For example, `Storage-Step: 10s` would mean 10 seconds distance between Graphite datapoints stored in VictoriaMetrics.
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
@@ -636,7 +636,7 @@ to your needs or when testing bugfixes.
### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `victoria-metrics` binary and puts it into the `bin` folder.
@@ -652,7 +652,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
### Development ARM build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make victoria-metrics-arm` or `make victoria-metrics-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `victoria-metrics-arm` or `victoria-metrics-arm64` binary respectively and puts it into the `bin` folder.
@@ -666,7 +666,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
`Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
@@ -1175,13 +1175,13 @@ See [these docs](https://docs.victoriametrics.com/guides/guide-vmcluster-multipl
## Downsampling
-[VictoriaMetrics Enterprise](https://victoriametrics.com/enterprise.html) supports multi-level downsampling with `-downsampling.period` command-line flag. For example:
+[VictoriaMetrics Enterprise](https://victoriametrics.com/products/enterprise/) supports multi-level downsampling with `-downsampling.period` command-line flag. For example:
* `-downsampling.period=30d:5m` instructs VictoriaMetrics to [deduplicate](#deduplication) samples older than 30 days with 5 minutes interval.
* `-downsampling.period=30d:5m,180d:1h` instructs VictoriaMetrics to deduplicate samples older than 30 days with 5 minutes interval and to deduplicate samples older than 180 days with 1 hour interval.
-Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series.
+Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series. It is possible to use recording rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) in order to reduce the number of time series. See [these docs](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).
The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
@@ -282,11 +282,11 @@ cat <<EOF | helm install my-grafana grafana/grafana -f -
default:
  victoriametrics:
    gnetId: 11176
-    revision: 16
+    revision: 17
    datasource: victoriametrics
  vmagent:
    gnetId: 12683
-    revision: 6
+    revision: 7
    datasource: victoriametrics
  kubernetes:
    gnetId: 14205
@@ -484,11 +484,11 @@ cat <<EOF | helm install my-grafana grafana/grafana -f -
default:
  victoriametrics:
    gnetId: 11176
-    revision: 16
+    revision: 17
    datasource: victoriametrics
  vmagent:
    gnetId: 12683
-    revision: 6
+    revision: 7
    datasource: victoriametrics
  kubernetes:
    gnetId: 14205
@@ -282,7 +282,7 @@ cat <<EOF | helm install my-grafana grafana/grafana -f -
default:
  victoriametrics:
    gnetId: 10229
-    revision: 21
+    revision: 22
    datasource: victoriametrics
  kubernetes:
    gnetId: 14205
@@ -305,7 +305,6 @@ You can read more about relabeling in the following articles:
* If the metric disappears from the list of scraped metrics, then stale marker is sent to this particular metric.
* If the scrape target becomes temporarily unavailable, then stale markers are sent for all the metrics scraped from this target.
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.
-* Stale markers are sent for all the scraped metrics on graceful shutdown of `vmagent`.
Prometheus staleness markers' tracking needs additional memory, since it must store the previous response body per each scrape target in order to compare it to the current response body. The memory usage may be reduced by passing `-promscrape.noStaleMarkers` command-line flag to `vmagent`. This disables staleness tracking. This also disables tracking the number of new time series per each scrape with the auto-generated `scrape_series_added` metric. See [these docs](https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series) for details.
@@ -525,7 +524,7 @@ It may be useful to perform `vmagent` rolling update without any scrape loss.
## Kafka integration
-[Enterprise version](https://victoriametrics.com/enterprise.html) of `vmagent` can read and write metrics from / to Kafka:
+[Enterprise version](https://victoriametrics.com/products/enterprise/) of `vmagent` can read and write metrics from / to Kafka:
* [Reading metrics from Kafka](#reading-metrics-from-kafka)
* [Writing metrics to Kafka](#writing-metrics-to-kafka)
@@ -535,7 +534,7 @@ The enterprise version of vmagent is available for evaluation at [releases](http
### Reading metrics from Kafka
-[Enterprise version](https://victoriametrics.com/enterprise.html) of `vmagent` can read metrics in various formats from Kafka messages. These formats can be configured with `-kafka.consumer.topic.defaultFormat` or `-kafka.consumer.topic.format` command-line options. The following formats are supported:
+[Enterprise version](https://victoriametrics.com/products/enterprise/) of `vmagent` can read metrics in various formats from Kafka messages. These formats can be configured with `-kafka.consumer.topic.defaultFormat` or `-kafka.consumer.topic.format` command-line options. The following formats are supported:
* `promremotewrite` - [Prometheus remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). Messages in this format can be sent by vmagent - see [these docs](#writing-metrics-to-kafka).
* `influx` - [InfluxDB line protocol format](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/).
@@ -571,7 +570,7 @@ data_format = "influx"
#### Command-line flags for Kafka consumer
-These command-line flags are available only in [enterprise](https://victoriametrics.com/enterprise.html) version of `vmagent`, which can be downloaded for evaluation from [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page (see `vmutils-*-enteprise.tar.gz` archives) and from [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.
+These command-line flags are available only in [enterprise](https://victoriametrics.com/products/enterprise/) version of `vmagent`, which can be downloaded for evaluation from [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page (see `vmutils-*-enteprise.tar.gz` archives) and from [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.
```
-kafka.consumer.topic array
@@ -604,7 +603,7 @@ These command-line flags are available only in [enterprise](https://victoriametr
### Writing metrics to Kafka
-[Enterprise version](https://victoriametrics.com/enterprise.html) of `vmagent` writes data to Kafka with `at-least-once` semantics if `-remoteWrite.url` contains e.g. Kafka url. For example, if `vmagent` is started with `-remoteWrite.url=kafka://localhost:9092/?topic=prom-rw`, then it would send Prometheus remote_write messages to Kafka bootstrap server at `localhost:9092` with the topic `prom-rw`. These messages can be read later from Kafka by another `vmagent` - see [these docs](#reading-metrics-from-kafka) for details.
+[Enterprise version](https://victoriametrics.com/products/enterprise/) of `vmagent` writes data to Kafka with `at-least-once` semantics if `-remoteWrite.url` contains e.g. Kafka url. For example, if `vmagent` is started with `-remoteWrite.url=kafka://localhost:9092/?topic=prom-rw`, then it would send Prometheus remote_write messages to Kafka bootstrap server at `localhost:9092` with the topic `prom-rw`. These messages can be read later from Kafka by another `vmagent` - see [these docs](#reading-metrics-from-kafka) for details.
Additional Kafka options can be passed as query params to `-remoteWrite.url`. For instance, `kafka://localhost:9092/?topic=prom-rw&client.id=my-favorite-id` sets `client.id` Kafka option to `my-favorite-id`. The full list of Kafka options is available [here](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
@@ -633,7 +632,7 @@ We recommend using [binary releases](https://github.com/VictoriaMetrics/Victoria
### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make vmagent` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds the `vmagent` binary and puts it into the `bin` folder.
@@ -662,7 +661,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
### Development ARM build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make vmagent-arm` or `make vmagent-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
It builds `vmagent-arm` or `vmagent-arm64` binary respectively and puts it into the `bin` folder.
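As a concrete illustration of the `-remoteWrite.url` composition described above, the following sketch builds such a URL with Go's standard library (the helper name is made up; the `topic` and `client.id` option names come from the docs above):

```go
package main

import (
	"fmt"
	"net/url"
)

// kafkaRemoteWriteURL assembles a kafka:// value for -remoteWrite.url.
// Extra Kafka options are passed as query parameters, as described above.
func kafkaRemoteWriteURL(broker, topic string, opts map[string]string) string {
	q := url.Values{}
	q.Set("topic", topic)
	for k, v := range opts {
		q.Set(k, v)
	}
	u := url.URL{Scheme: "kafka", Host: broker, Path: "/", RawQuery: q.Encode()}
	return u.String()
}

func main() {
	// Prints: kafka://localhost:9092/?client.id=my-favorite-id&topic=prom-rw
	fmt.Println(kafkaRemoteWriteURL("localhost:9092", "prom-rw", map[string]string{"client.id": "my-favorite-id"}))
}
```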
@@ -233,7 +233,7 @@ There are the following approaches exist for alerting and recording rules across
rules to `AccountID=123`.
* To specify `tenant` parameter per each alerting and recording group if
-[enterprise version of vmalert](https://victoriametrics.com/enterprise.html) is used
+[enterprise version of vmalert](https://victoriametrics.com/products/enterprise/) is used
with `-clusterMode` command-line flag. For example:
```yaml
@@ -163,7 +163,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make vmauth` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `vmauth` binary and puts it into the `bin` folder.
@@ -271,7 +271,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make vmbackup` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `vmbackup` binary and puts it into the `bin` folder.
@@ -4,7 +4,7 @@ sort: 10
## vmbackupmanager
-***vmbackupmanager is a part of [enterprise package](https://victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
+***vmbackupmanager is a part of [enterprise package](https://victoriametrics.com/products/enterprise/). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
The VictoriaMetrics backup manager automates regular backup procedures. It supports the following backup intervals: **hourly**, **daily**, **weekly** and **monthly**. Multiple backup intervals may be configured simultaneously. I.e. the backup manager creates hourly backups every hour, while it creates daily backups every day, etc. Backup manager must have read access to the storage data, so best practice is to install it on the same machine (or as a sidecar) where the storage node is installed.
The backup service makes a backup every hour and puts it to the latest folder and then copies data to the folders which represent the backup intervals (hourly, daily, weekly and monthly)
@@ -564,6 +564,15 @@ results such as `average`, `rate`, etc.
If multiple labels needs to be added, set flag for each label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`.
If timeseries already have label, that must be added with `--vm-extra-label` flag, flag has priority and will override label value from timeseries.
+### Rate limiting
+Limiting the rate of data transfer could help to reduce pressure on disk or on destination database.
+The rate limit may be set in bytes-per-second via `--vm-rate-limit` flag.
+Please note, you can also use [vmagent](https://docs.victoriametrics.com/vmagent.html)
+as a proxy between `vmctl` and destination with `-remoteWrite.rateLimit` flag enabled.
## How to build
It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmctl` is located in `vmutils-*` archives there.
@@ -571,7 +580,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make vmctl` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `vmctl` binary and puts it into the `bin` folder.
@@ -600,7 +609,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
#### Development ARM build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make vmctl-arm` or `make vmctl-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `vmctl-arm` or `vmctl-arm64` binary respectively and puts it into the `bin` folder.
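The `--vm-rate-limit` flag added above caps transfer in bytes per second. A minimal sketch of one way such a limiter can work (a simple budget-based pacer; this is illustrative, not vmctl's actual implementation):

```go
package main

import (
	"fmt"
	"time"
)

// rateLimitedCopy sends chunks while keeping throughput under limit bytes/sec:
// it sleeps whenever a chunk would exceed the byte budget accumulated since start.
func rateLimitedCopy(chunks [][]byte, limit float64, send func([]byte)) {
	start := time.Now()
	var sent float64
	for _, c := range chunks {
		sent += float64(len(c))
		// Earliest wall-clock time at which `sent` bytes are allowed to be out.
		allowedAt := start.Add(time.Duration(sent / limit * float64(time.Second)))
		if d := time.Until(allowedAt); d > 0 {
			time.Sleep(d)
		}
		send(c)
	}
}

func main() {
	chunks := [][]byte{make([]byte, 512), make([]byte, 512), make([]byte, 512)}
	rateLimitedCopy(chunks, 1024, func(c []byte) { // 1 KiB/s budget
		fmt.Printf("%s: sent %d bytes\n", time.Now().Format("15:04:05.000"), len(c))
	})
}
```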
@@ -4,7 +4,7 @@ sort: 9
# vmgateway
-***vmgateway is a part of [enterprise package](https://victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
+***vmgateway is a part of [enterprise package](https://victoriametrics.com/products/enterprise/). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
<img alt="vmgateway" src="vmgateway-overview.jpeg">
@@ -18,7 +18,7 @@ sort: 9
* Provides access by tenantID in the Cluster version
* Allows for separate write/read/admin access to data
-`vmgateway` is included in our [enterprise packages](https://victoriametrics.com/enterprise.html).
+`vmgateway` is included in our [enterprise packages](https://victoriametrics.com/products/enterprise/).
## Access Control
@@ -167,7 +167,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
### Development build
-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make vmrestore` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
It builds `vmrestore` binary and puts it into the `bin` folder.
go.mod
@@ -1,5 +1,7 @@
module github.com/VictoriaMetrics/VictoriaMetrics
+go 1.17
require (
	cloud.google.com/go/storage v1.18.2
	github.com/VictoriaMetrics/fastcache v1.8.0
@@ -9,9 +11,7 @@ require (
	github.com/VictoriaMetrics/fasthttp v1.1.0
	github.com/VictoriaMetrics/metrics v1.18.1
	github.com/VictoriaMetrics/metricsql v0.34.0
-	github.com/VividCortex/ewma v1.2.0 // indirect
-	github.com/aws/aws-sdk-go v1.42.23
-	github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
+	github.com/aws/aws-sdk-go v1.42.25
	github.com/cespare/xxhash/v2 v2.1.2
	github.com/cheggaaa/pb/v3 v3.0.8
	github.com/cncf/xds/go v0.0.0-20211216145620-d92e9ce0af51 // indirect
@@ -38,8 +38,41 @@ require (
	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
	google.golang.org/api v0.63.0
-	google.golang.org/grpc v1.43.0 // indirect
	gopkg.in/yaml.v2 v2.4.0
)
-go 1.16
+require (
+	cloud.google.com/go v0.99.0 // indirect
+	github.com/VividCortex/ewma v1.2.0 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
+	github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
+	github.com/go-kit/log v0.2.0 // indirect
+	github.com/go-logfmt/logfmt v0.5.1 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/google/go-cmp v0.5.6 // indirect
+	github.com/googleapis/gax-go/v2 v2.1.1 // indirect
+	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/mattn/go-isatty v0.0.14 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/prometheus/client_golang v1.11.0 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/procfs v0.7.3 // indirect
+	github.com/rivo/uniseg v0.2.0 // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/valyala/bytebufferpool v1.0.0 // indirect
+	github.com/valyala/histogram v1.2.0 // indirect
+	go.opencensus.io v0.23.0 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
+	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/genproto v0.0.0-20211221231510-d629cc9a93d5 // indirect
+	google.golang.org/grpc v1.43.0 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+)
go.sum
@@ -155,8 +155,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
-github.com/aws/aws-sdk-go v1.42.23 h1:V0V5hqMEyVelgpu1e4gMPVCJ+KhmscdNxP/NWP1iCOA=
-github.com/aws/aws-sdk-go v1.42.23/go.mod h1:gyRszuZ/icHmHAVE4gc/r+cfCmhA1AD+vqfWbgI+eHs=
+github.com/aws/aws-sdk-go v1.42.25 h1:BbdvHAi+t9LRiaYUyd53noq9jcaAcfzOhSVbKfr6Avs=
+github.com/aws/aws-sdk-go v1.42.25/go.mod h1:gyRszuZ/icHmHAVE4gc/r+cfCmhA1AD+vqfWbgI+eHs=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
@@ -1536,8 +1536,9 @@ google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ6
google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221231510-d629cc9a93d5 h1:v7aOwCaINsgis88/5e6DEZ6TlP7vXueAw/Ftqd5rm+w=
+google.golang.org/genproto v0.0.0-20211221231510-d629cc9a93d5/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -3,6 +3,8 @@ package actions
import (
	"fmt"
	"io"
+	"os"
+	"path"
	"sync/atomic"
	"time"
@@ -51,6 +53,9 @@ func (r *Restore) Run() error {
	}
	defer fs.MustClose(flockF)
+	if err := createRestoreLock(r.Dst.Dir); err != nil {
+		return err
+	}
	concurrency := r.Concurrency
	src := r.Src
	dst := r.Dst
@@ -189,7 +194,7 @@ func (r *Restore) Run() error {
	logger.Infof("restored %d bytes from backup in %.3f seconds; deleted %d bytes; downloaded %d bytes",
		backupSize, time.Since(startTime).Seconds(), deleteSize, downloadSize)
-	return nil
+	return removeLockFile(r.Dst.Dir)
}
type statWriter struct {
@@ -202,3 +207,20 @@ func (sw *statWriter) Write(p []byte) (int, error) {
	atomic.AddUint64(sw.bytesWritten, uint64(n))
	return n, err
}
+func createRestoreLock(dstDir string) error {
+	lockF := path.Join(dstDir, "restore-in-progress")
+	f, err := os.Create(lockF)
+	if err != nil {
+		return fmt.Errorf("cannot create restore lock file %q: %w", lockF, err)
+	}
+	return f.Close()
+}
+func removeLockFile(dstDir string) error {
+	lockF := path.Join(dstDir, "restore-in-progress")
+	if err := os.Remove(lockF); err != nil {
+		return fmt.Errorf("cannot remove restore lock file %q: %w", lockF, err)
+	}
+	return nil
+}
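The marker added above is removed only after a successful restore, so its presence signals an interrupted restore. A hypothetical standalone check (the helper name and the caller are assumptions, not code from this diff):

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// checkRestoreCompleted refuses to proceed on a partially restored directory:
// the restore-in-progress marker only disappears after Run() succeeds.
func checkRestoreCompleted(dstDir string) error {
	lockF := path.Join(dstDir, "restore-in-progress")
	if _, err := os.Stat(lockF); err == nil {
		return fmt.Errorf("found %q; a previous restore was interrupted, re-run vmrestore", lockF)
	}
	return nil
}

func main() {
	if err := checkRestoreCompleted("victoria-metrics-data"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("no interrupted restore detected")
}
```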
@@ -72,9 +72,8 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
		if name == "." || name == ".." {
			continue
		}
-		if name == "flock.lock" {
-			// Do not take into account flock.lock files, since they are used
-			// for preventing from concurrent access.
+		if isSpecialFile(name) {
+			// Do not take into account special files.
			continue
		}
		path := dir + "/" + name
@@ -135,6 +134,10 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
	return dst, nil
}
+func isSpecialFile(name string) bool {
+	return name == "flock.lock" || name == "restore-in-progress"
+}
// RemoveEmptyDirs recursively removes empty directories under the given dir.
func RemoveEmptyDirs(dir string) error {
	_, err := removeEmptyDirs(dir)
@@ -173,7 +176,6 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
		return false, fmt.Errorf("cannot read directory contents in %q: %w", dir, err)
	}
	dirEntries := 0
-	hasFlock := false
	for _, fi := range fis {
		name := fi.Name()
		if name == "." || name == ".." {
@@ -192,11 +194,10 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
			continue
		}
		if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
-			if name == "flock.lock" {
-				hasFlock = true
+			if isSpecialFile(name) {
+				// Do not take into account special files
				continue
			}
-			// Skip plain files.
			dirEntries++
			continue
		}
@@ -248,14 +249,9 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
	if dirEntries > 0 {
		return false, nil
	}
-	logger.Infof("removing empty dir %q", dir)
-	if hasFlock {
-		flockFilepath := dir + "/flock.lock"
-		if err := os.Remove(flockFilepath); err != nil {
-			return false, fmt.Errorf("cannot remove %q: %w", flockFilepath, err)
-		}
-	}
-	if err := os.Remove(dir); err != nil {
+	// Use os.RemoveAll() instead of os.Remove(), since the dir may contain special files such as flock.lock and restore-in-progress,
+	// which must be ignored.
+	if err := os.RemoveAll(dir); err != nil {
		return false, fmt.Errorf("cannot remove %q: %w", dir, err)
	}
	return true, nil
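For reference on the last hunk: `os.Remove()` fails on a non-empty directory, while `os.RemoveAll()` also deletes leftover special files such as `flock.lock`. A tiny standalone demonstration (the temp dir and file are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "demo")
	if err != nil {
		panic(err)
	}
	// A directory holding only a special file still isn't "empty" for os.Remove().
	if err := os.WriteFile(filepath.Join(dir, "flock.lock"), nil, 0o600); err != nil {
		panic(err)
	}
	fmt.Println("os.Remove:", os.Remove(dir))       // fails: directory not empty
	fmt.Println("os.RemoveAll:", os.RemoveAll(dir)) // succeeds, removing contents too
}
```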
lib/logger/throttler.go (new file)
@@ -0,0 +1,72 @@
package logger

import (
	"sync"
	"time"
)

var (
	logThrottlerRegistryMu = sync.Mutex{}
	logThrottlerRegistry   = make(map[string]*LogThrottler)
)

// WithThrottler returns a logger throttled by time - only one message in throttle duration will be logged.
//
// New logger is created only once for each unique name passed.
// The function is thread-safe.
func WithThrottler(name string, throttle time.Duration) *LogThrottler {
	logThrottlerRegistryMu.Lock()
	defer logThrottlerRegistryMu.Unlock()

	lt, ok := logThrottlerRegistry[name]
	if ok {
		return lt
	}

	lt = newLogThrottler(throttle)
	lt.warnF = Warnf
	lt.errorF = Errorf
	logThrottlerRegistry[name] = lt
	return lt
}

// LogThrottler is a logger, which throttles log messages passed to Warnf and Errorf.
//
// LogThrottler must be created via WithThrottler() call.
type LogThrottler struct {
	ch chan struct{}

	warnF  func(format string, args ...interface{})
	errorF func(format string, args ...interface{})
}

func newLogThrottler(throttle time.Duration) *LogThrottler {
	lt := &LogThrottler{
		ch: make(chan struct{}, 1),
	}
	go func() {
		for {
			<-lt.ch
			time.Sleep(throttle)
		}
	}()
	return lt
}

// Errorf logs error message.
func (lt *LogThrottler) Errorf(format string, args ...interface{}) {
	select {
	case lt.ch <- struct{}{}:
		lt.errorF(format, args...)
	default:
	}
}

// Warnf logs warn message.
func (lt *LogThrottler) Warnf(format string, args ...interface{}) {
	select {
	case lt.ch <- struct{}{}:
		lt.warnF(format, args...)
	default:
	}
}
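Usage of the new throttler is a one-liner; the name keys a process-wide registry, so call sites sharing a name share one throttler. A minimal sketch (the throttler name, period and message are made up; it assumes the logger is initialized the way VictoriaMetrics binaries do):

```go
package main

import (
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

func main() {
	lt := logger.WithThrottler("demo", 5*time.Second)
	for i := 0; i < 100; i++ {
		// Only the first call in each 5s window actually logs; the rest
		// are dropped by the non-blocking send on the internal channel.
		lt.Warnf("iteration %d hit a transient error", i)
	}
}
```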
@@ -0,0 +1,40 @@
package logger

import (
	"testing"
	"time"
)

func TestLoggerWithThrottler(t *testing.T) {
	lName := "test"
	lThrottle := 50 * time.Millisecond

	lt := WithThrottler(lName, lThrottle)
	var i int
	lt.warnF = func(format string, args ...interface{}) {
		i++
	}

	lt.Warnf("")
	lt.Warnf("")
	lt.Warnf("")

	if i != 1 {
		t.Fatalf("expected logger will be throttled to 1; got %d instead", i)
	}
	time.Sleep(lThrottle * 2) // wait for the throttle to fade off
	// the same logger is supposed to be returned for the same name
	WithThrottler(lName, lThrottle).Warnf("")
	if i != 2 {
		t.Fatalf("expected logger to have 2 iterations; got %d instead", i)
	}

	logThrottlerRegistryMu.Lock()
	registeredN := len(logThrottlerRegistry)
	logThrottlerRegistryMu.Unlock()
	if registeredN != 1 {
		t.Fatalf("expected only 1 logger to be registered; got %d", registeredN)
	}
}
@@ -52,22 +52,22 @@ func CheckConfig() error {
//
// Scraped data is passed to pushData.
func Init(pushData func(wr *prompbmarshal.WriteRequest)) {
-	globalStopCh = make(chan struct{})
+	globalStopChan = make(chan struct{})
	scraperWG.Add(1)
	go func() {
		defer scraperWG.Done()
-		runScraper(*promscrapeConfigFile, pushData, globalStopCh)
+		runScraper(*promscrapeConfigFile, pushData, globalStopChan)
	}()
}

// Stop stops Prometheus scraper.
func Stop() {
-	close(globalStopCh)
+	close(globalStopChan)
	scraperWG.Wait()
}

var (
-	globalStopCh chan struct{}
-	scraperWG    sync.WaitGroup
+	globalStopChan chan struct{}
+	scraperWG      sync.WaitGroup
	// PendingScrapeConfigs - zero value means, that
	// all scrapeConfigs are inited and ready for work.
@@ -108,7 +108,7 @@ func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest)
	configData.Store(&marshaledData)
	cfg.mustStart()
-	scs := newScrapeConfigs(pushData)
+	scs := newScrapeConfigs(pushData, globalStopCh)
	scs.add("consul_sd_configs", *consul.SDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getConsulSDScrapeWork(swsPrev) })
	scs.add("digitalocean_sd_configs", *digitalocean.SDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getDigitalOceanDScrapeWork(swsPrev) })
	scs.add("dns_sd_configs", *dns.SDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getDNSSDScrapeWork(swsPrev) })
@@ -184,13 +184,15 @@ type scrapeConfigs struct {
	pushData     func(wr *prompbmarshal.WriteRequest)
	wg           sync.WaitGroup
	stopCh       chan struct{}
+	globalStopCh <-chan struct{}
	scfgs        []*scrapeConfig
}

-func newScrapeConfigs(pushData func(wr *prompbmarshal.WriteRequest)) *scrapeConfigs {
+func newScrapeConfigs(pushData func(wr *prompbmarshal.WriteRequest), globalStopCh <-chan struct{}) *scrapeConfigs {
	return &scrapeConfigs{
		pushData: pushData,
		stopCh:   make(chan struct{}),
+		globalStopCh: globalStopCh,
	}
}
@@ -209,7 +211,7 @@ func (scs *scrapeConfigs) add(name string, checkInterval time.Duration, getScrap
	scs.wg.Add(1)
	go func() {
		defer scs.wg.Done()
-		scfg.run()
+		scfg.run(scs.globalStopCh)
	}()
	scs.scfgs = append(scs.scfgs, scfg)
}
@@ -237,8 +239,8 @@ type scrapeConfig struct {
	discoveryDuration *metrics.Histogram
}

-func (scfg *scrapeConfig) run() {
-	sg := newScraperGroup(scfg.name, scfg.pushData)
+func (scfg *scrapeConfig) run(globalStopCh <-chan struct{}) {
+	sg := newScraperGroup(scfg.name, scfg.pushData, globalStopCh)
	defer sg.stop()

	var tickerCh <-chan time.Time
@@ -283,9 +285,11 @@ type scraperGroup struct {
	activeScrapers  *metrics.Counter
	scrapersStarted *metrics.Counter
	scrapersStopped *metrics.Counter
+
+	globalStopCh <-chan struct{}
}

-func newScraperGroup(name string, pushData func(wr *prompbmarshal.WriteRequest)) *scraperGroup {
+func newScraperGroup(name string, pushData func(wr *prompbmarshal.WriteRequest), globalStopCh <-chan struct{}) *scraperGroup {
	sg := &scraperGroup{
		name: name,
		m:    make(map[string]*scraper),
@@ -295,6 +299,8 @@ func newScraperGroup(name string, pushData func(wr *prompbmarshal.WriteRequest))
		activeScrapers:  metrics.NewCounter(fmt.Sprintf(`vm_promscrape_active_scrapers{type=%q}`, name)),
		scrapersStarted: metrics.NewCounter(fmt.Sprintf(`vm_promscrape_scrapers_started_total{type=%q}`, name)),
		scrapersStopped: metrics.NewCounter(fmt.Sprintf(`vm_promscrape_scrapers_stopped_total{type=%q}`, name)),
+
+		globalStopCh: globalStopCh,
	}
	metrics.NewGauge(fmt.Sprintf(`vm_promscrape_targets{type=%q, status="up"}`, name), func() float64 {
		return float64(tsmGlobal.StatusByGroup(sg.name, true))
@@ -373,7 +379,7 @@ func (sg *scraperGroup) update(sws []*ScrapeWork) {
				sg.wg.Done()
				close(sc.stoppedCh)
			}()
-			sc.sw.run(sc.stopCh)
+			sc.sw.run(sc.stopCh, sg.globalStopCh)
			tsmGlobal.Unregister(sw)
			sg.activeScrapers.Dec()
			sg.scrapersStopped.Inc()
@@ -256,7 +256,7 @@ func (sw *scrapeWork) finalizeLastScrape() {
	}
}

-func (sw *scrapeWork) run(stopCh <-chan struct{}) {
+func (sw *scrapeWork) run(stopCh <-chan struct{}, globalStopCh <-chan struct{}) {
	var randSleep uint64
	scrapeInterval := sw.Config.ScrapeInterval
	scrapeAlignInterval := sw.Config.ScrapeAlignInterval
@@ -270,7 +270,12 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}) {
		// scrape urls and labels.
		// This also makes consistent scrape times across restarts
		// for a target with the same ScrapeURL and labels.
-		key := fmt.Sprintf("ScrapeURL=%s, Labels=%s", sw.Config.ScrapeURL, sw.Config.LabelsString())
+		//
+		// Include clusterMemberNum to the key in order to guarantee that each member in vmagent cluster
+		// scrapes replicated targets at different time offsets. This guarantees that the deduplication consistently leaves samples
+		// received from the same vmagent replica.
+		// See https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets
+		key := fmt.Sprintf("ClusterMemberNum=%d, ScrapeURL=%s, Labels=%s", *clusterMemberNum, sw.Config.ScrapeURL, sw.Config.LabelsString())
		h := xxhash.Sum64(bytesutil.ToUnsafeBytes(key))
		randSleep = uint64(float64(scrapeInterval) * (float64(h) / (1 << 64)))
		sleepOffset := uint64(time.Now().UnixNano()) % uint64(scrapeInterval)
@@ -306,7 +311,13 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}) {
	case <-stopCh:
		t := time.Now().UnixNano() / 1e6
		lastScrape := sw.loadLastScrape()
-		sw.sendStaleSeries(lastScrape, "", t, true)
+		select {
+		case <-globalStopCh:
+			// Do not send staleness markers on graceful shutdown as Prometheus does.
+			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2013#issuecomment-1006994079
+		default:
+			sw.sendStaleSeries(lastScrape, "", t, true)
+		}
		if sw.seriesLimiter != nil {
			job := sw.Config.Job()
			metrics.UnregisterMetric(fmt.Sprintf(`promscrape_series_limit_rows_dropped_total{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
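The effect of mixing `clusterMemberNum` into the hash key can be reproduced in isolation. A standalone sketch mirroring the offset computation above (function name and sample inputs are made up):

```go
package main

import (
	"fmt"
	"time"

	"github.com/cespare/xxhash/v2"
)

// scrapeOffset derives a deterministic per-target, per-member sleep offset,
// mirroring the key hashing in scrapeWork.run above.
func scrapeOffset(clusterMemberNum int, scrapeURL, labels string, interval time.Duration) time.Duration {
	key := fmt.Sprintf("ClusterMemberNum=%d, ScrapeURL=%s, Labels=%s", clusterMemberNum, scrapeURL, labels)
	h := xxhash.Sum64([]byte(key))
	// Map the 64-bit hash onto [0, interval) so each member gets a stable offset.
	return time.Duration(float64(interval) * (float64(h) / (1 << 64)))
}

func main() {
	// Replicated members of a vmagent cluster land on distinct offsets
	// for the same target, which keeps deduplication consistent.
	for member := 0; member < 3; member++ {
		fmt.Println(member, scrapeOffset(member, "http://host:9100/metrics", `{job="node"}`, time.Minute))
	}
}
```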
@@ -2719,6 +2719,7 @@ func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64,
		// Use global search if date isn't set.
		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
	}
+	prefix := kb.B
	kb.B = append(kb.B, tf.prefix[len(commonPrefix):]...)
	tfNew := *tf
	tfNew.isNegative = false // isNegative for the original tf is handled by the caller.
@@ -2735,7 +2736,8 @@
		// This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601
		// See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/395
		maxLoopsCount -= loopsCount
-		if err := tfNew.Init(kb.B, tf.key, []byte(".+"), false, true); err != nil {
+		tfNew = tagFilter{}
+		if err := tfNew.Init(prefix, tf.key, []byte(".+"), false, true); err != nil {
			logger.Panicf(`BUG: cannot init tag filter: {%q=~".+"}: %s`, tf.key, err)
		}
		m, lc, err := is.getMetricIDsForTagFilter(&tfNew, maxMetrics, maxLoopsCount)
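The `prefix := kb.B` capture is the heart of this fix: after the subsequent `append`, `kb.B` no longer holds just the common prefix, so re-initializing the `.+` filter from it would embed the first filter's suffix. A tiny standalone illustration of the slice behavior (names are illustrative):

```go
package main

import "fmt"

func main() {
	var kb []byte
	kb = append(kb, "common-prefix:"...)
	prefix := kb // capture before extending; its length stays at the bare prefix
	kb = append(kb, "first-filter-suffix"...)

	// Initializing a second filter with kb would smuggle in the first
	// filter's suffix; prefix still refers to just "common-prefix:".
	fmt.Printf("prefix=%q\nkb=%q\n", prefix, kb)
}
```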
Some files were not shown because too many files have changed in this diff.