Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

This commit is contained in:
Aliaksandr Valialkin 2022-02-14 17:52:50 +02:00
commit 7917486d78
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
110 changed files with 3416 additions and 1885 deletions


@@ -100,66 +100,82 @@ release: \
 release-victoria-metrics: \
     release-victoria-metrics-amd64 \
     release-victoria-metrics-arm \
-    release-victoria-metrics-arm64
+    release-victoria-metrics-arm64 \
+    release-victoria-metrics-darwin-amd64 \
+    release-victoria-metrics-darwin-arm64
 release-victoria-metrics-amd64:
-    GOARCH=amd64 $(MAKE) release-victoria-metrics-generic
+    OSARCH=amd64 $(MAKE) release-victoria-metrics-generic
 release-victoria-metrics-arm:
-    GOARCH=arm $(MAKE) release-victoria-metrics-generic
+    OSARCH=arm $(MAKE) release-victoria-metrics-generic
 release-victoria-metrics-arm64:
-    GOARCH=arm64 $(MAKE) release-victoria-metrics-generic
+    OSARCH=arm64 $(MAKE) release-victoria-metrics-generic
-release-victoria-metrics-generic: victoria-metrics-$(GOARCH)-prod
+release-victoria-metrics-darwin-amd64:
+    OSARCH=darwin-arm64 $(MAKE) release-victoria-metrics-generic
+release-victoria-metrics-darwin-arm64:
+    OSARCH=darwin-arm64 $(MAKE) release-victoria-metrics-generic
+release-victoria-metrics-generic: victoria-metrics-$(OSARCH)-prod
     cd bin && \
-        tar --transform="flags=r;s|-$(GOARCH)||" -czf victoria-metrics-$(GOARCH)-$(PKG_TAG).tar.gz \
-            victoria-metrics-$(GOARCH)-prod \
-        && sha256sum victoria-metrics-$(GOARCH)-$(PKG_TAG).tar.gz \
-            victoria-metrics-$(GOARCH)-prod \
-        | sed s/-$(GOARCH)-prod/-prod/ > victoria-metrics-$(GOARCH)-$(PKG_TAG)_checksums.txt
+        tar --transform="flags=r;s|-$(OSARCH)||" -czf victoria-metrics-$(OSARCH)-$(PKG_TAG).tar.gz \
+            victoria-metrics-$(OSARCH)-prod \
+        && sha256sum victoria-metrics-$(OSARCH)-$(PKG_TAG).tar.gz \
+            victoria-metrics-$(OSARCH)-prod \
+        | sed s/-$(OSARCH)-prod/-prod/ > victoria-metrics-$(OSARCH)-$(PKG_TAG)_checksums.txt
 release-vmutils: \
     release-vmutils-amd64 \
     release-vmutils-arm64 \
     release-vmutils-arm \
+    release-vmutils-darwin-amd64 \
+    release-vmutils-darwin-arm64 \
     release-vmutils-windows-amd64
 release-vmutils-amd64:
-    GOARCH=amd64 $(MAKE) release-vmutils-generic
+    OSARCH=amd64 $(MAKE) release-vmutils-generic
 release-vmutils-arm64:
-    GOARCH=arm64 $(MAKE) release-vmutils-generic
+    OSARCH=arm64 $(MAKE) release-vmutils-generic
 release-vmutils-arm:
-    GOARCH=arm $(MAKE) release-vmutils-generic
+    OSARCH=arm $(MAKE) release-vmutils-generic
+release-vmutils-darwin-amd64:
+    OSARCH=darwin-amd64 $(MAKE) release-vmutils-generic
+release-vmutils-darwin-arm64:
+    OSARCH=darwin-arm64 $(MAKE) release-vmutils-generic
 release-vmutils-windows-amd64:
     GOARCH=amd64 $(MAKE) release-vmutils-windows-generic
 release-vmutils-generic: \
-    vmagent-$(GOARCH)-prod \
-    vmalert-$(GOARCH)-prod \
-    vmauth-$(GOARCH)-prod \
-    vmbackup-$(GOARCH)-prod \
-    vmrestore-$(GOARCH)-prod \
-    vmctl-$(GOARCH)-prod
+    vmagent-$(OSARCH)-prod \
+    vmalert-$(OSARCH)-prod \
+    vmauth-$(OSARCH)-prod \
+    vmbackup-$(OSARCH)-prod \
+    vmrestore-$(OSARCH)-prod \
+    vmctl-$(OSARCH)-prod
     cd bin && \
-        tar --transform="flags=r;s|-$(GOARCH)||" -czf vmutils-$(GOARCH)-$(PKG_TAG).tar.gz \
-            vmagent-$(GOARCH)-prod \
-            vmalert-$(GOARCH)-prod \
-            vmauth-$(GOARCH)-prod \
-            vmbackup-$(GOARCH)-prod \
-            vmrestore-$(GOARCH)-prod \
-            vmctl-$(GOARCH)-prod \
-        && sha256sum vmutils-$(GOARCH)-$(PKG_TAG).tar.gz \
-            vmagent-$(GOARCH)-prod \
-            vmalert-$(GOARCH)-prod \
-            vmauth-$(GOARCH)-prod \
-            vmbackup-$(GOARCH)-prod \
-            vmrestore-$(GOARCH)-prod \
-            vmctl-$(GOARCH)-prod \
-        | sed s/-$(GOARCH)-prod/-prod/ > vmutils-$(GOARCH)-$(PKG_TAG)_checksums.txt
+        tar --transform="flags=r;s|-$(OSARCH)||" -czf vmutils-$(OSARCH)-$(PKG_TAG).tar.gz \
+            vmagent-$(OSARCH)-prod \
+            vmalert-$(OSARCH)-prod \
+            vmauth-$(OSARCH)-prod \
+            vmbackup-$(OSARCH)-prod \
+            vmrestore-$(OSARCH)-prod \
+            vmctl-$(OSARCH)-prod \
+        && sha256sum vmutils-$(OSARCH)-$(PKG_TAG).tar.gz \
+            vmagent-$(OSARCH)-prod \
+            vmalert-$(OSARCH)-prod \
+            vmauth-$(OSARCH)-prod \
+            vmbackup-$(OSARCH)-prod \
+            vmrestore-$(OSARCH)-prod \
+            vmctl-$(OSARCH)-prod \
+        | sed s/-$(OSARCH)-prod/-prod/ > vmutils-$(OSARCH)-$(PKG_TAG)_checksums.txt
 release-vmutils-windows-generic: \
     vmagent-windows-$(GOARCH)-prod \


@@ -1129,9 +1129,9 @@ with the enabled de-duplication. See [this section](#deduplication) for details.

## Deduplication

VictoriaMetrics de-duplicates data points if the `-dedup.minScrapeInterval` command-line flag is set to a positive duration. For example, `-dedup.minScrapeInterval=60s` would de-duplicate data points on the same time series if they fall within the same discrete 60s bucket. The earliest data point is kept. In the case of equal timestamps, an arbitrary data point is kept. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2112#issuecomment-1032587618) for more details on how downsampling works.

The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. It is safe to use deduplication and downsampling simultaneously.

The recommended value for `-dedup.minScrapeInterval` must equal the `scrape_interval` config from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
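The bucketing behaviour described above can be sketched in Go. The snippet below is an illustrative sketch only, not the actual VictoriaMetrics implementation; the `sample` type and the `deduplicate` helper are made up for the example.

```go
// Illustrative sketch: keep only the earliest sample within every discrete
// interval of the given size. Samples are assumed to be sorted by timestamp.
package main

import (
	"fmt"
	"time"
)

type sample struct {
	ts    int64 // unix timestamp in milliseconds
	value float64
}

func deduplicate(samples []sample, interval time.Duration) []sample {
	if interval <= 0 || len(samples) == 0 {
		return samples
	}
	bucket := interval.Milliseconds()
	result := []sample{samples[0]}
	lastBucket := samples[0].ts / bucket
	for _, s := range samples[1:] {
		if b := s.ts / bucket; b != lastBucket {
			result = append(result, s)
			lastBucket = b
		}
	}
	return result
}

func main() {
	samples := []sample{{1000, 1}, {20000, 2}, {59000, 3}, {61000, 4}}
	fmt.Println(deduplicate(samples, time.Minute)) // [{1000 1} {61000 4}]
}
```

Running it keeps only the first sample of every 60s bucket, matching the `-dedup.minScrapeInterval=60s` example above.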
@@ -1140,15 +1140,49 @@ write data to the same VictoriaMetrics instance. These vmagent or Prometheus ins
`external_labels` section in their configs, so they write data to the same time series.
## Storage
VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like data structures. On insert, VictoriaMetrics accumulates up to 1s of data and dumps it on disk to the `<-storageDataPath>/data/small/YYYY_MM/` subdirectory, forming a `part` with the following name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`. Each part consists of two "columns": values and timestamps. These are sorted and compressed raw time series values. Additionally, each part contains index files for searching for specific series in the values and timestamps files.

`Parts` are periodically merged into bigger parts. The resulting `part` is constructed under the `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` subdirectory. When the resulting `part` is complete, it is atomically moved from `tmp` to its own subdirectory, while the source parts are atomically removed. The end result is that the source parts are substituted by a single resulting bigger `part` in the `<-storageDataPath>/data/{small,big}/YYYY_MM/` directory.

Information about the merging process is available in the [single-node VictoriaMetrics](https://grafana.com/dashboards/10229) and [clustered VictoriaMetrics](https://grafana.com/grafana/dashboards/11176) Grafana dashboards. See more details in the [monitoring docs](#monitoring).

The `merge` process is usually called "compaction", because the resulting `part` size is usually smaller than the sum of the source `parts`. The merge process provides the following benefits:

* it improves query performance, since a lower number of `parts` is inspected with each query;
* it reduces the number of data files, since each `part` contains a fixed number of files;
* it yields a better compression rate for the resulting part.

Newly added `parts` either appear in the storage or fail to appear. The storage never contains partially created parts. The same applies to the merge process: `parts` are either fully merged into a new `part` or fail to merge. There are no partially merged `parts` in MergeTree.

`Part` contents in MergeTree never change. Parts are immutable. They may only be deleted after being merged into a bigger `part` or when the `part` contents go outside the configured `-retentionPeriod`.

See [this article](https://valyala.medium.com/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) for more details.

See also [how to work with snapshots](#how-to-work-with-snapshots).
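The `rowsCount_blocksCount_minTimestamp_maxTimestamp` naming convention can be illustrated with a small parser. This is a hypothetical sketch: the assumption that all four fields are plain decimal integers (with timestamps in milliseconds) is made for illustration only and does not describe the exact on-disk encoding used by VictoriaMetrics.

```go
// Hypothetical sketch: split a part name of the form
// rowsCount_blocksCount_minTimestamp_maxTimestamp into its components.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type partName struct {
	RowsCount    uint64
	BlocksCount  uint64
	MinTimestamp int64
	MaxTimestamp int64
}

func parsePartName(s string) (partName, error) {
	var pn partName
	fields := strings.Split(s, "_")
	if len(fields) != 4 {
		return pn, fmt.Errorf("unexpected part name %q; want 4 fields", s)
	}
	var err error
	if pn.RowsCount, err = strconv.ParseUint(fields[0], 10, 64); err != nil {
		return pn, err
	}
	if pn.BlocksCount, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
		return pn, err
	}
	if pn.MinTimestamp, err = strconv.ParseInt(fields[2], 10, 64); err != nil {
		return pn, err
	}
	if pn.MaxTimestamp, err = strconv.ParseInt(fields[3], 10, 64); err != nil {
		return pn, err
	}
	return pn, nil
}

func main() {
	pn, err := parsePartName("123456_78_1644796800000_1644800400000")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", pn)
}
```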
## Retention

Retention is configured with the `-retentionPeriod` command-line flag. For instance, `-retentionPeriod=3` means that the data will be stored for 3 months and then deleted.

Data is split in per-month partitions inside the `<-storageDataPath>/data/{small,big}` folders. Data partitions outside the configured retention are deleted on the first day of the new month. Each partition consists of one or more data parts with the following name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`. Data parts outside of the configured retention are eventually deleted during [background merge](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).

In order to keep data according to `-retentionPeriod`, the maximum disk space usage is going to be `-retentionPeriod` + 1 month. For example, if `-retentionPeriod` is set to 1, data for January is deleted on March 1st.
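The example above can be made concrete with a small calculation. The sketch below mirrors the "January with `-retentionPeriod=1` is deleted on March 1st" case; the `partitionDeletionDay` helper is illustrative and is not code from VictoriaMetrics.

```go
// Illustrative sketch: for a partition month and a retention of N months,
// compute the first day of the month on which the whole partition falls
// outside the retention window and can be dropped.
package main

import (
	"fmt"
	"time"
)

func partitionDeletionDay(partition time.Time, retentionMonths int) time.Time {
	firstOfPartitionMonth := time.Date(partition.Year(), partition.Month(), 1, 0, 0, 0, 0, time.UTC)
	// The partition is kept while any of its samples may still be inside the
	// retention window, i.e. for retentionMonths full months after it ends.
	return firstOfPartitionMonth.AddDate(0, retentionMonths+1, 0)
}

func main() {
	january := time.Date(2022, time.January, 15, 0, 0, 0, 0, time.UTC)
	fmt.Println(partitionDeletionDay(january, 1)) // 2022-03-01 00:00:00 +0000 UTC
}
```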
@@ -1596,7 +1630,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs to leave a single sample per 10 minutes for samples older than 30 days. See https://docs.victoriametrics.com/#downsampling for details
     Supports an array of values separated by comma or specified via multiple flags.
 -dryRun
-    Whether to check only -promscrape.config and then exit. Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse
+    Whether to check only -promscrape.config and then exit. Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag
 -enableTCP6
     Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
 -envflag.enable
@@ -1604,7 +1638,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
 -envflag.prefix string
     Prefix for environment variables if -envflag.enable is set
 -eula
-    By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/legal/eula/
+    By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
 -finalMergeDelay duration
     The delay before starting final merge for per-month partition after no new data is ingested into it. Final merge may require additional disk IO and CPU resources. Final merge may increase query speed and reduce disk space usage in some cases. Zero value disables final merge
 -forceFlushAuthKey string
@@ -1714,7 +1748,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
 -promscrape.config.dryRun
     Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
 -promscrape.config.strictParse
-    Whether to allow only supported fields in -promscrape.config . By default unsupported fields are silently skipped
+    Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields (default true)
 -promscrape.configCheckInterval duration
     Interval for checking for changes in '-promscrape.config' file. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
 -promscrape.consul.waitTime duration


@@ -27,6 +27,12 @@ victoria-metrics-ppc64le-prod:
 victoria-metrics-386-prod:
     APP_NAME=victoria-metrics $(MAKE) app-via-docker-386
+victoria-metrics-darwin-amd64-prod:
+    APP_NAME=victoria-metrics $(MAKE) app-via-docker-darwin-amd64
+victoria-metrics-darwin-arm64-prod:
+    APP_NAME=victoria-metrics $(MAKE) app-via-docker-darwin-arm64
 package-victoria-metrics:
     APP_NAME=victoria-metrics $(MAKE) package-via-docker


@@ -27,7 +27,7 @@ var (
 	minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Leave only the first sample in every time series per each discrete interval "+
 		"equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling")
 	dryRun = flag.Bool("dryRun", false, "Whether to check only -promscrape.config and then exit. "+
-		"Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse")
+		"Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag")
 	downsamplingPeriods = flagutil.NewArray("downsampling.period", "Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs "+
 		"to leave a single sample per 10 minutes for samples older than 30 days. See https://docs.victoriametrics.com/#downsampling for details")
 )


@@ -27,6 +27,15 @@ vmagent-ppc64le-prod:
 vmagent-386-prod:
     APP_NAME=vmagent $(MAKE) app-via-docker-386
+vmagent-darwin-amd64-prod:
+    APP_NAME=vmagent $(MAKE) app-via-docker-darwin-amd64
+vmagent-darwin-arm64-prod:
+    APP_NAME=vmagent $(MAKE) app-via-docker-darwin-arm64
+vmagent-windows-amd64-prod:
+    APP_NAME=vmagent $(MAKE) app-via-docker-windows-amd64
 package-vmagent:
     APP_NAME=vmagent $(MAKE) package-via-docker
@@ -81,6 +90,3 @@ vmagent-pure:
 vmagent-windows-amd64:
     GOARCH=amd64 APP_NAME=vmagent $(MAKE) app-local-windows-with-goarch
-vmagent-windows-amd64-prod:
-    APP_NAME=vmagent $(MAKE) app-via-docker-windows-amd64


@@ -12,7 +12,7 @@ or any other Prometheus-compatible storage systems that support the `remote_writ
While VictoriaMetrics provides an efficient solution to store and observe metrics, our users needed something fast and RAM friendly to scrape metrics from Prometheus-compatible exporters into VictoriaMetrics. Also, we found that our users' infrastructures are like snowflakes in that no two are alike. Therefore we decided to add more flexibility to `vmagent`, such as the ability to push metrics in addition to pulling them. We did our best and will continue to improve `vmagent`.

## Features

@@ -46,7 +46,7 @@ to `vmagent` such as the ability to push metrics instead of pulling them. We did
Please download the `vmutils-*` archive from the [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it and configure the following flags for the `vmagent` binary in order to start scraping Prometheus targets:

* `-promscrape.config` with the path to a Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`). The path can point either to a local file or to an http url. `vmagent` doesn't support some sections of the Prometheus config file, so you may need either to delete these sections or to run `vmagent` with the additional `-promscrape.config.strictParse=false` command-line flag, so `vmagent` ignores unsupported sections. See [the list of unsupported sections](#unsupported-prometheus-config-sections).
* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics. The `-remoteWrite.url` argument can be specified multiple times to replicate data concurrently to an arbitrary number of remote storage systems.

Example command line:

@@ -237,6 +237,19 @@ Every referred file can contain arbitrary number of [supported scrape configs](#
`vmagent` dynamically reloads these files on `SIGHUP` signal or on the request to `http://vmagent:8429/-/reload`.

## Unsupported Prometheus config sections

`vmagent` doesn't support the following sections in the Prometheus config file passed to the `-promscrape.config` command-line flag:

* [remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). This section is substituted with various `-remoteWrite*` command-line flags. See [the full list of flags](#advanced-usage). The `remote_write` section isn't supported in order to reduce possible confusion when `vmagent` is used for accepting incoming metrics via push protocols such as InfluxDB, Graphite, OpenTSDB, DataDog, etc. In this case the `-promscrape.config` file isn't needed. See [these docs](#features) for details.
* `remote_read`. This section isn't supported at all.
* `rule_files` and `alerting`. These sections are supported by [vmalert](https://docs.victoriametrics.com/vmalert.html).

The list of supported service discovery types is available [here](#how-to-collect-metrics-in-prometheus-format).

Additionally, `vmagent` doesn't support the `refresh_interval` option in service discovery sections. This option is substituted with `-promscrape.*CheckInterval` command-line options, which are specific to each service discovery type. See [the full list of command-line flags for vmagent](#advanced-usage).
## Adding labels to metrics ## Adding labels to metrics
Labels can be added to metrics by the following mechanisms: Labels can be added to metrics by the following mechanisms:
@@ -708,13 +721,15 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
     The maximum size in bytes of a single DataDog POST request to /api/v1/series
     Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 67108864)
 -dryRun
-    Whether to check only config files without running vmagent. The following files are checked: -promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse
+    Whether to check only config files without running vmagent. The following files are checked: -promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . Unknown config entries aren't allowed in -promscrape.config by default. This can be changed by passing -promscrape.config.strictParse=false command-line flag
 -enableTCP6
     Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
 -envflag.enable
     Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
 -envflag.prefix string
     Prefix for environment variables if -envflag.enable is set
+ -eula
+    By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
 -fs.disableMmap
     Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
 -graphiteListenAddr string
@@ -760,6 +775,32 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
     Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
 -insert.maxQueueDuration duration
     The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
-kafka.consumer.topic array
Kafka topic names for data consumption.
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.basicAuth.password array
Optional basic auth password for -kafka.consumer.topic. Must be used in conjunction with any supported auth methods for kafka client, specified by flag -kafka.consumer.topic.options='security.protocol=SASL_SSL;sasl.mechanisms=PLAIN'
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.basicAuth.username array
Optional basic auth username for -kafka.consumer.topic. Must be used in conjunction with any supported auth methods for kafka client, specified by flag -kafka.consumer.topic.options='security.protocol=SASL_SSL;sasl.mechanisms=PLAIN'
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.brokers array
List of brokers to connect for given topic, e.g. -kafka.consumer.topic.broker=host-1:9092;host-2:9092
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.defaultFormat string
Expected data format in the topic if -kafka.consumer.topic.format is skipped. (default "promremotewrite")
-kafka.consumer.topic.format array
data format for corresponding kafka topic. Valid formats: influx, prometheus, promremotewrite, graphite, jsonline
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.groupID array
Defines group.id for topic
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.isGzipped array
Enables gzip setting for topic messages payload. Only prometheus, jsonline and influx formats accept gzipped messages.
Supports array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.options array
Optional key=value;key1=value2 settings for topic consumer. See full configuration options at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md.
Supports an array of values separated by comma or specified via multiple flags.
 -loggerDisableTimestamps
     Whether to disable writing timestamps in logs
 -loggerErrorsPerSecondLimit int
@@ -810,7 +851,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
 -promscrape.config.dryRun
     Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
 -promscrape.config.strictParse
-    Whether to allow only supported fields in -promscrape.config . By default unsupported fields are silently skipped
+    Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields (default true)
 -promscrape.configCheckInterval duration
     Interval for checking for changes in '-promscrape.config' file. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
 -promscrape.consul.waitTime duration


@@ -53,7 +53,7 @@ var (
 	configAuthKey = flag.String("configAuthKey", "", "Authorization key for accessing /config page. It must be passed via authKey query arg")
 	dryRun        = flag.Bool("dryRun", false, "Whether to check only config files without running vmagent. The following files are checked: "+
 		"-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . "+
-		"Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse")
+		"Unknown config entries aren't allowed in -promscrape.config by default. This can be changed by passing -promscrape.config.strictParse=false command-line flag")
 )

 var (


@@ -27,6 +27,15 @@ vmalert-ppc64le-prod:
 vmalert-386-prod:
     APP_NAME=vmalert $(MAKE) app-via-docker-386
+vmalert-darwin-amd64-prod:
+    APP_NAME=vmalert $(MAKE) app-via-docker-darwin-amd64
+vmalert-darwin-arm64-prod:
+    APP_NAME=vmalert $(MAKE) app-via-docker-darwin-arm64
+vmalert-windows-amd64-prod:
+    APP_NAME=vmalert $(MAKE) app-via-docker-windows-amd64
 package-vmalert:
     APP_NAME=vmalert $(MAKE) package-via-docker
@@ -109,6 +118,3 @@ vmalert-pure:
 vmalert-windows-amd64:
     GOARCH=amd64 APP_NAME=vmalert $(MAKE) app-local-windows-with-goarch
-vmalert-windows-amd64-prod:
-    APP_NAME=vmalert $(MAKE) app-via-docker-windows-amd64


@ -494,6 +494,10 @@ command-line flags with their descriptions.
The shortlist of configuration flags is the following: The shortlist of configuration flags is the following:
``` ```
-clusterMode
If clusterMode is enabled, then vmalert automatically adds the tenant specified in config groups to -datasource.url, -remoteWrite.url and -remoteRead.url. See https://docs.victoriametrics.com/vmalert.html#multitenancy
-configCheckInterval duration
Interval for checking for changes in '-rule' or '-notifier.config' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes.
-datasource.appendTypePrefix -datasource.appendTypePrefix
Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL. Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.
-datasource.basicAuth.password string -datasource.basicAuth.password string
@ -526,8 +530,12 @@ The shortlist of configuration flags is the following:
Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
-datasource.url string -datasource.url string
VictoriaMetrics or vmselect url. Required parameter. E.g. http://127.0.0.1:8428 VictoriaMetrics or vmselect url. Required parameter. E.g. http://127.0.0.1:8428
-defaultTenant.graphite string
Default tenant for Graphite alerting groups. See https://docs.victoriametrics.com/vmalert.html#multitenancy
-defaultTenant.prometheus string
Default tenant for Prometheus alerting groups. See https://docs.victoriametrics.com/vmalert.html#multitenancy
-disableAlertgroupLabel -disableAlertgroupLabel
Whether to disable adding group's name as label to generated alerts and time series. Whether to disable adding group's Name as label to generated alerts and time series.
-dryRun -rule -dryRun -rule
Whether to check only config files without running vmalert. The rules file are validated. The -rule flag must be specified. Whether to check only config files without running vmalert. The rules file are validated. The -rule flag must be specified.
-enableTCP6 -enableTCP6
@ -536,13 +544,15 @@ The shortlist of configuration flags is the following:
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string -envflag.prefix string
Prefix for environment variables if -envflag.enable is set Prefix for environment variables if -envflag.enable is set
-eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
-evaluationInterval duration -evaluationInterval duration
How often to evaluate the rules (default 1m0s) How often to evaluate the rules (default 1m0s)
-external.alert.source string -external.alert.source string
External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service.
eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used
-external.label array -external.label array
Optional label in the form 'name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets. Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-external.url string -external.url string
External URL is used as alert's source for sent alerts to the notifier External URL is used as alert's source for sent alerts to the notifier
@ -591,11 +601,15 @@ The shortlist of configuration flags is the following:
Optional basic auth password for -notifier.url Optional basic auth password for -notifier.url
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-notifier.basicAuth.passwordFile array -notifier.basicAuth.passwordFile array
Optional path to basic auth password file for -notifier.url Optional path to basic auth password file for -notifier.url
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-notifier.basicAuth.username array -notifier.basicAuth.username array
Optional basic auth username for -notifier.url Optional basic auth username for -notifier.url
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-notifier.config string
Path to configuration file for notifiers
-notifier.suppressDuplicateTargetErrors
Whether to suppress 'duplicate target' errors during discovery
-notifier.tlsCAFile array -notifier.tlsCAFile array
Optional path to TLS CA file to use for verifying connections to -notifier.url. By default system CA is used Optional path to TLS CA file to use for verifying connections to -notifier.url. By default system CA is used
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
@ -616,6 +630,14 @@ The shortlist of configuration flags is the following:
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-pprofAuthKey string -pprofAuthKey string
Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
-promscrape.consul.waitTime duration
Wait time used by Consul service discovery. Default value is used if not set
-promscrape.consulSDCheckInterval duration
Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s)
-promscrape.discovery.concurrency int
The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100)
-promscrape.discovery.concurrentWaitTime duration
The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s)
-remoteRead.basicAuth.password string -remoteRead.basicAuth.password string
Optional basic auth password for -remoteRead.url Optional basic auth password for -remoteRead.url
-remoteRead.basicAuth.passwordFile string -remoteRead.basicAuth.passwordFile string
@ -695,8 +717,8 @@ The shortlist of configuration flags is the following:
absolute path to all .yaml files in root. absolute path to all .yaml files in root.
Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars. Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-configCheckInterval duration -rule.configCheckInterval duration
Interval for checking for changes in '-rule' or '-notifier.config' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes Interval for checking for changes in '-rule' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead
-rule.maxResolveDuration duration -rule.maxResolveDuration duration
Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group. Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group.
-rule.validateExpressions -rule.validateExpressions
@ -709,14 +731,6 @@ The shortlist of configuration flags is the following:
Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower
-tlsKeyFile string -tlsKeyFile string
Path to file with TLS key. Used only if -tls is set Path to file with TLS key. Used only if -tls is set
-promscrape.consul.waitTime duration
Wait time used by Consul service discovery. Default value is used if not set
-promscrape.consulSDCheckInterval duration
Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s)
-promscrape.discovery.concurrency int
The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100)
-promscrape.discovery.concurrentWaitTime duration
The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s)
-version -version
Show VictoriaMetrics version Show VictoriaMetrics version
``` ```


@@ -17,6 +17,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )

 // Group contains list of Rules grouped into
@@ -25,7 +26,7 @@ type Group struct {
 	Type        datasource.Type `yaml:"type,omitempty"`
 	File        string
 	Name        string `yaml:"name"`
-	Interval    utils.PromDuration `yaml:"interval"`
+	Interval    promutils.Duration `yaml:"interval"`
 	Rules       []Rule `yaml:"rules"`
 	Concurrency int    `yaml:"concurrency"`
 	// ExtraFilterLabels is a list label filters applied to every rule
@@ -129,7 +130,7 @@ type Rule struct {
 	Record      string `yaml:"record,omitempty"`
 	Alert       string `yaml:"alert,omitempty"`
 	Expr        string `yaml:"expr"`
-	For         utils.PromDuration `yaml:"for"`
+	For         promutils.Duration `yaml:"for"`
 	Labels      map[string]string `yaml:"labels,omitempty"`
 	Annotations map[string]string `yaml:"annotations,omitempty"`
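For illustration, here is a simplified, self-contained sketch of how duration fields like `interval` and `for` unmarshal from a rules-group YAML. The `Duration` wrapper below is a stand-in based on `time.ParseDuration`; the real `promutils.Duration` additionally understands Prometheus-style durations such as `1d`, and the structs are trimmed to a few fields.

```go
// Simplified sketch of YAML unmarshalling for duration-typed rule fields.
package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v2"
)

// Duration is a hypothetical stand-in for promutils.Duration.
type Duration struct{ D time.Duration }

func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	parsed, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	d.D = parsed
	return nil
}

type Rule struct {
	Alert string   `yaml:"alert"`
	Expr  string   `yaml:"expr"`
	For   Duration `yaml:"for"`
}

type Group struct {
	Name     string   `yaml:"name"`
	Interval Duration `yaml:"interval"`
	Rules    []Rule   `yaml:"rules"`
}

func main() {
	data := []byte(`
name: example
interval: 30s
rules:
  - alert: ServiceDown
    expr: up == 0
    for: 5m
`)
	var g Group
	if err := yaml.Unmarshal(data, &g); err != nil {
		panic(err)
	}
	fmt.Println(g.Name, g.Interval.D, g.Rules[0].For.D) // example 30s 5m0s
}
```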


@ -11,7 +11,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
) )
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
@ -260,7 +260,7 @@ func TestGroup_Validate(t *testing.T) {
Rules: []Rule{ Rules: []Rule{
{ {
Expr: "sumSeries(time('foo.bar',10))", Expr: "sumSeries(time('foo.bar',10))",
For: utils.NewPromDuration(10 * time.Millisecond), For: promutils.NewDuration(10 * time.Millisecond),
}, },
{ {
Expr: "sum(up == 0 ) by (host)", Expr: "sum(up == 0 ) by (host)",
@ -275,7 +275,7 @@ func TestGroup_Validate(t *testing.T) {
Rules: []Rule{ Rules: []Rule{
{ {
Expr: "sum(up == 0 ) by (host)", Expr: "sum(up == 0 ) by (host)",
For: utils.NewPromDuration(10 * time.Millisecond), For: promutils.NewDuration(10 * time.Millisecond),
}, },
{ {
Expr: "sumSeries(time('foo.bar',10))", Expr: "sumSeries(time('foo.bar',10))",
@ -342,7 +342,7 @@ func TestHashRule(t *testing.T) {
true, true,
}, },
{ {
Rule{Alert: "alert", Expr: "up == 1", For: utils.NewPromDuration(time.Minute)}, Rule{Alert: "alert", Expr: "up == 1", For: promutils.NewDuration(time.Minute)},
Rule{Alert: "alert", Expr: "up == 1"}, Rule{Alert: "alert", Expr: "up == 1"},
true, true,
}, },


@ -9,7 +9,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
) )
func init() { func init() {
@ -34,7 +34,7 @@ func TestUpdateWith(t *testing.T) {
[]config.Rule{{ []config.Rule{{
Alert: "foo", Alert: "foo",
Expr: "up > 0", Expr: "up > 0",
For: utils.NewPromDuration(time.Second), For: promutils.NewDuration(time.Second),
Labels: map[string]string{ Labels: map[string]string{
"bar": "baz", "bar": "baz",
}, },
@ -46,7 +46,7 @@ func TestUpdateWith(t *testing.T) {
[]config.Rule{{ []config.Rule{{
Alert: "foo", Alert: "foo",
Expr: "up > 10", Expr: "up > 10",
For: utils.NewPromDuration(time.Second), For: promutils.NewDuration(time.Second),
Labels: map[string]string{ Labels: map[string]string{
"baz": "bar", "baz": "bar",
}, },


@ -11,11 +11,11 @@ import (
"strings" "strings"
"time" "time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
) )
// Config contains list of supported configuration settings // Config contains list of supported configuration settings
@ -38,7 +38,7 @@ type Config struct {
RelabelConfigs []promrelabel.RelabelConfig `yaml:"relabel_configs,omitempty"` RelabelConfigs []promrelabel.RelabelConfig `yaml:"relabel_configs,omitempty"`
// The timeout used when sending alerts. // The timeout used when sending alerts.
Timeout utils.PromDuration `yaml:"timeout,omitempty"` Timeout promutils.Duration `yaml:"timeout,omitempty"`
// Checksum stores the hash of yaml definition for the config. // Checksum stores the hash of yaml definition for the config.
// May be used to detect any changes to the config file. // May be used to detect any changes to the config file.
@ -71,7 +71,7 @@ func (cfg *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
cfg.Scheme = "http" cfg.Scheme = "http"
} }
if cfg.Timeout.Duration() == 0 { if cfg.Timeout.Duration() == 0 {
cfg.Timeout = utils.NewPromDuration(time.Second * 10) cfg.Timeout = promutils.NewDuration(time.Second * 10)
} }
rCfg, err := promrelabel.ParseRelabelConfigs(cfg.RelabelConfigs, false) rCfg, err := promrelabel.ParseRelabelConfigs(cfg.RelabelConfigs, false)
if err != nil { if err != nil {


@@ -27,7 +27,7 @@ import (
 	textTpl "text/template"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
-	"github.com/VictoriaMetrics/metricsql"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )

 // metric is private copy of datasource.Metric,
@@ -104,12 +104,12 @@
 		},

 		// parseDuration parses a duration string such as "1h" into the number of seconds it represents
-		"parseDuration": func(d string) (float64, error) {
-			ms, err := metricsql.DurationValue(d, 0)
+		"parseDuration": func(s string) (float64, error) {
+			d, err := promutils.ParseDuration(s)
 			if err != nil {
 				return 0, err
 			}
-			return float64(ms) / 1000, nil
+			return d.Seconds(), nil
 		},

 		/* Numbers */
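A minimal, self-contained sketch of the `parseDuration` template helper in use is shown below. `time.ParseDuration` stands in for `promutils.ParseDuration` (which also accepts Prometheus-style durations like `1d`), and the template string is a made-up example.

```go
// Minimal sketch: wire a parseDuration helper into text/template and render
// "1h30m" as the number of seconds it represents.
package main

import (
	"os"
	"text/template"
	"time"
)

func main() {
	funcs := template.FuncMap{
		"parseDuration": func(s string) (float64, error) {
			d, err := time.ParseDuration(s)
			if err != nil {
				return 0, err
			}
			return d.Seconds(), nil
		},
	}
	tpl := template.Must(template.New("t").Funcs(funcs).Parse(
		`{{ parseDuration "1h30m" }} seconds`))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// Output: 5400 seconds
}
```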


@ -8,7 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
) )
type fakeReplayQuerier struct { type fakeReplayQuerier struct {
@ -83,7 +83,7 @@ func TestReplay(t *testing.T) {
to: "2021-01-01T15:02:30.000Z", to: "2021-01-01T15:02:30.000Z",
maxDP: 60, maxDP: 60,
cfg: []config.Group{ cfg: []config.Group{
{Interval: utils.NewPromDuration(time.Minute), Rules: []config.Rule{{Record: "foo", Expr: "sum(up)"}}}, {Interval: promutils.NewDuration(time.Minute), Rules: []config.Rule{{Record: "foo", Expr: "sum(up)"}}},
}, },
qb: &fakeReplayQuerier{ qb: &fakeReplayQuerier{
registry: map[string]map[string]struct{}{ registry: map[string]map[string]struct{}{


@ -1,43 +0,0 @@
package utils
import (
"time"
"github.com/VictoriaMetrics/metricsql"
)
// PromDuration is Prometheus duration.
type PromDuration struct {
milliseconds int64
}
// NewPromDuration returns PromDuration for given d.
func NewPromDuration(d time.Duration) PromDuration {
return PromDuration{
milliseconds: d.Milliseconds(),
}
}
// MarshalYAML implements yaml.Marshaler interface.
func (pd PromDuration) MarshalYAML() (interface{}, error) {
return pd.Duration().String(), nil
}
// UnmarshalYAML implements yaml.Unmarshaler interface.
func (pd *PromDuration) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
}
ms, err := metricsql.DurationValue(s, 0)
if err != nil {
return err
}
pd.milliseconds = ms
return nil
}
// Duration returns duration for pd.
func (pd *PromDuration) Duration() time.Duration {
return time.Duration(pd.milliseconds) * time.Millisecond
}


@@ -25,6 +25,9 @@ var (
 func initLinks() {
 	pathPrefix := httpserver.GetPathPrefix()
+	if pathPrefix == "" {
+		pathPrefix = "/"
+	}
 	apiLinks = [][2]string{
 		{path.Join(pathPrefix, "api/v1/groups"), "list all loaded groups and rules"},
 		{path.Join(pathPrefix, "api/v1/alerts"), "list all active alerts"},
@@ -51,6 +54,11 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
 		initLinks()
 	})
+	pathPrefix := httpserver.GetPathPrefix()
+	if pathPrefix == "" {
+		pathPrefix = "/"
+	}
 	switch r.URL.Path {
 	case "/":
 		if r.Method != "GET" {
@@ -59,7 +67,7 @@
 		WriteWelcome(w)
 		return true
 	case "/alerts":
-		WriteListAlerts(w, rh.groupAlerts())
+		WriteListAlerts(w, pathPrefix, rh.groupAlerts())
 		return true
 	case "/groups":
 		WriteListGroups(w, rh.groups())
@@ -113,7 +121,7 @@
 	}
 	// <groupID>/<alertID>/status
-	WriteAlert(w, alert)
+	WriteAlert(w, pathPrefix, alert)
 	return true
 }
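The reason for defaulting an empty path prefix to `/` can be seen from plain `path.Join` behaviour: an empty prefix yields relative links, while `/` yields rooted ones. The snippet below only demonstrates standard-library behaviour and is not taken from vmalert.

```go
// path.Join with an empty prefix produces a relative path, while "/" (or a
// configured prefix) produces a rooted path suitable for href attributes.
package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Join("", "groups"))         // groups
	fmt.Println(path.Join("/", "groups"))        // /groups
	fmt.Println(path.Join("/vmalert", "groups")) // /vmalert/groups
}
```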


@ -3,6 +3,7 @@
{% import ( {% import (
"time" "time"
"sort" "sort"
"path"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/tpl" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/tpl"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
@ -120,7 +121,7 @@
{% endfunc %} {% endfunc %}
{% func ListAlerts(groupAlerts []GroupAlerts) %} {% func ListAlerts(pathPrefix string, groupAlerts []GroupAlerts) %}
{%= tpl.Header("Alerts", navItems) %} {%= tpl.Header("Alerts", navItems) %}
{% if len(groupAlerts) > 0 %} {% if len(groupAlerts) > 0 %}
<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a> <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
@ -185,7 +186,7 @@
</td> </td>
<td>{%s ar.Value %}</td> <td>{%s ar.Value %}</td>
<td> <td>
<a href="/{%s g.ID %}/{%s ar.ID %}/status">Details</a> <a href="{%s path.Join(pathPrefix, g.ID, ar.ID, "status") %}">Details</a>
</td> </td>
</tr> </tr>
{% endfor %} {% endfor %}
@ -262,7 +263,7 @@
{% endfunc %} {% endfunc %}
{% func Alert(alert *APIAlert) %} {% func Alert(pathPrefix string, alert *APIAlert) %}
{%= tpl.Header("", navItems) %} {%= tpl.Header("", navItems) %}
{%code {%code
var labelKeys []string var labelKeys []string
@ -329,7 +330,7 @@
Group Group
</div> </div>
<div class="col"> <div class="col">
<a target="_blank" href="/groups#group-{%s alert.GroupID %}">{%s alert.GroupID %}</a> <a target="_blank" href="{%s path.Join(pathPrefix,"groups") %}#group-{%s alert.GroupID %}">{%s alert.GroupID %}</a>
</div> </div>
</div> </div>
</div> </div>


@@ -27,6 +27,15 @@ vmauth-ppc64le-prod:
 vmauth-386-prod:
     APP_NAME=vmauth $(MAKE) app-via-docker-386
+vmauth-darwin-amd64-prod:
+    APP_NAME=vmauth $(MAKE) app-via-docker-darwin-amd64
+vmauth-darwin-arm64-prod:
+    APP_NAME=vmauth $(MAKE) app-via-docker-darwin-arm64
+vmauth-windows-amd64-prod:
+    APP_NAME=vmauth $(MAKE) app-via-docker-windows-amd64
 package-vmauth:
     APP_NAME=vmauth $(MAKE) package-via-docker
@@ -80,6 +89,3 @@ vmauth-pure:
 vmauth-windows-amd64:
     GOARCH=amd64 APP_NAME=vmauth $(MAKE) app-local-windows-with-goarch
-vmauth-windows-amd64-prod:
-    APP_NAME=vmauth $(MAKE) app-via-docker-windows-amd64


@@ -223,6 +223,8 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
     Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
 -envflag.prefix string
     Prefix for environment variables if -envflag.enable is set
+ -eula
+    By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
 -fs.disableMmap
     Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
 -http.connTimeout duration


@ -27,6 +27,12 @@ vmbackup-ppc64le-prod:
vmbackup-386-prod: vmbackup-386-prod:
APP_NAME=vmbackup $(MAKE) app-via-docker-386 APP_NAME=vmbackup $(MAKE) app-via-docker-386
vmbackup-darwin-amd64-prod:
APP_NAME=vmbackup $(MAKE) app-via-docker-darwin-amd64
vmbackup-darwin-arm64-prod:
APP_NAME=vmbackup $(MAKE) app-via-docker-darwin-arm64
package-vmbackup: package-vmbackup:
APP_NAME=vmbackup $(MAKE) package-via-docker APP_NAME=vmbackup $(MAKE) package-via-docker


@ -27,6 +27,15 @@ vmctl-ppc64le-prod:
vmctl-386-prod: vmctl-386-prod:
APP_NAME=vmctl $(MAKE) app-via-docker-386 APP_NAME=vmctl $(MAKE) app-via-docker-386
vmctl-darwin-amd64-prod:
APP_NAME=vmctl $(MAKE) app-via-docker-darwin-amd64
vmctl-darwin-arm64-prod:
APP_NAME=vmctl $(MAKE) app-via-docker-darwin-arm64
vmctl-windows-amd64-prod:
APP_NAME=vmctl $(MAKE) app-via-docker-windows-amd64
package-vmctl: package-vmctl:
APP_NAME=vmctl $(MAKE) package-via-docker APP_NAME=vmctl $(MAKE) package-via-docker
@ -75,5 +84,3 @@ vmctl-pure:
vmctl-windows-amd64: vmctl-windows-amd64:
GOARCH=amd64 APP_NAME=vmctl $(MAKE) app-local-windows-with-goarch GOARCH=amd64 APP_NAME=vmctl $(MAKE) app-local-windows-with-goarch
vmctl-windows-amd64-prod:
APP_NAME=vmctl $(MAKE) app-via-docker-windows-amd64


@ -27,6 +27,12 @@ vmrestore-ppc64le-prod:
vmrestore-386-prod: vmrestore-386-prod:
APP_NAME=vmrestore $(MAKE) app-via-docker-386 APP_NAME=vmrestore $(MAKE) app-via-docker-386
vmrestore-darwin-amd64-prod:
APP_NAME=vmrestore $(MAKE) app-via-docker-darwin-amd64
vmrestore-darwin-arm64-prod:
APP_NAME=vmrestore $(MAKE) app-via-docker-darwin-arm64
package-vmrestore: package-vmrestore:
APP_NAME=vmrestore $(MAKE) package-via-docker APP_NAME=vmrestore $(MAKE) package-via-docker


@ -633,6 +633,9 @@ func evalRollupFuncWithoutAt(ec *EvalConfig, funcName string, rf rollupFunc, exp
if err != nil { if err != nil {
return nil, err return nil, err
} }
if funcName == "absent_over_time" {
rvs = aggregateAbsentOverTime(ec, re.Expr, rvs)
}
if offset != 0 && len(rvs) > 0 { if offset != 0 && len(rvs) > 0 {
// Make a copy of timestamps, since they may be used in other values. // Make a copy of timestamps, since they may be used in other values.
srcTimestamps := rvs[0].Timestamps srcTimestamps := rvs[0].Timestamps
@ -647,6 +650,27 @@ func evalRollupFuncWithoutAt(ec *EvalConfig, funcName string, rf rollupFunc, exp
return rvs, nil return rvs, nil
} }
// aggregateAbsentOverTime collapses tss to a single time series with 1 and nan values.
//
// Values of the returned series are set to nan if at least a single series in tss contains nan at that point.
// This means that tss contains a series with non-empty results at that point.
// This follows Prometheus logic - see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2130
func aggregateAbsentOverTime(ec *EvalConfig, expr metricsql.Expr, tss []*timeseries) []*timeseries {
rvs := getAbsentTimeseries(ec, expr)
if len(tss) == 0 {
return rvs
}
for i := range tss[0].Values {
for _, ts := range tss {
if math.IsNaN(ts.Values[i]) {
rvs[0].Values[i] = nan
break
}
}
}
return rvs
}
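// To make the collapsing rule concrete, a small illustration with arbitrary
// example values (not taken from this commit or its tests). The inputs are
// per-series absent_over_time rollups, where 1 means the series is absent in
// the lookbehind window and nan means it has samples there:
//
//   series "one": 1    1    nan  nan
//   series "two": nan  1    1    nan
//
// aggregateAbsentOverTime keeps 1 only where every input series is absent:
//
//   result:       nan  1    nan  nan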
func evalRollupFuncWithSubquery(ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr) ([]*timeseries, error) { func evalRollupFuncWithSubquery(ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr) ([]*timeseries, error) {
// TODO: determine whether to use rollupResultCacheV here. // TODO: determine whether to use rollupResultCacheV here.
step := re.Step.Duration(ec.Step) step := re.Step.Duration(ec.Step)
@ -669,10 +693,6 @@ func evalRollupFuncWithSubquery(ec *EvalConfig, funcName string, rf rollupFunc,
return nil, err return nil, err
} }
if len(tssSQ) == 0 { if len(tssSQ) == 0 {
if funcName == "absent_over_time" {
tss := evalNumber(ec, 1)
return tss, nil
}
return nil, nil return nil, nil
} }
sharedTimestamps := getTimestamps(ec.Start, ec.End, ec.Step) sharedTimestamps := getTimestamps(ec.Start, ec.End, ec.Step)
@ -822,14 +842,7 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
rssLen := rss.Len() rssLen := rss.Len()
if rssLen == 0 { if rssLen == 0 {
rss.Cancel() rss.Cancel()
var tss []*timeseries tss := mergeTimeseries(tssCached, nil, start, ec)
if funcName == "absent_over_time" {
tss = getAbsentTimeseries(ec, me)
}
// Add missing points until ec.End.
// Do not cache the result, since missing points
// may be backfilled in the future.
tss = mergeTimeseries(tssCached, tss, start, ec)
return tss, nil return tss, nil
} }


@ -867,19 +867,37 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r} resultExpected := []netstorage.Result{r}
f(q, resultExpected) f(q, resultExpected)
}) })
t.Run(`absent_over_time(scalar(multi-timeseries))`, func(t *testing.T) { t.Run(`absent_over_time(non-nan)`, func(t *testing.T) {
t.Parallel() t.Parallel()
q := ` q := `
absent_over_time(label_set(scalar(1 or label_set(2, "xx", "foo")), "yy", "foo"))` absent_over_time(time())`
resultExpected := []netstorage.Result{}
f(q, resultExpected)
})
t.Run(`absent_over_time(nan)`, func(t *testing.T) {
t.Parallel()
q := `
absent_over_time((time() < 1500)[300s:])`
r := netstorage.Result{ r := netstorage.Result{
MetricName: metricNameExpected, MetricName: metricNameExpected,
Values: []float64{1, 1, 1, 1, 1, 1}, Values: []float64{nan, nan, nan, nan, 1, 1},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`absent_over_time(multi-ts)`, func(t *testing.T) {
t.Parallel()
q := `
absent_over_time((
alias((time() < 1400)[200s:], "one"),
alias((time() > 1600)[200s:], "two"),
))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{nan, nan, nan, 1, nan, nan},
Timestamps: timestampsExpected, Timestamps: timestampsExpected,
} }
r.MetricName.Tags = []storage.Tag{{
Key: []byte("yy"),
Value: []byte("foo"),
}}
resultExpected := []netstorage.Result{r} resultExpected := []netstorage.Result{r}
f(q, resultExpected) f(q, resultExpected)
}) })


@ -10,6 +10,7 @@ import (
"time" "time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metricsql" "github.com/VictoriaMetrics/metricsql"
) )
@ -48,14 +49,14 @@ func GetTime(r *http.Request, argKey string, defaultMs int64) (int64, error) {
return maxTimeMsecs, nil return maxTimeMsecs, nil
} }
// Try parsing duration relative to the current time // Try parsing duration relative to the current time
d, err1 := metricsql.DurationValue(argValue, 0) d, err1 := promutils.ParseDuration(argValue)
if err1 != nil { if err1 != nil {
return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err) return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
} }
if d > 0 { if d > 0 {
d = -d d = -d
} }
t = time.Now().Add(time.Duration(d) * time.Millisecond) t = time.Now().Add(d)
} }
secs = float64(t.UnixNano()) / 1e9 secs = float64(t.UnixNano()) / 1e9
} }
@ -91,11 +92,11 @@ func GetDuration(r *http.Request, argKey string, defaultValue int64) (int64, err
secs, err := strconv.ParseFloat(argValue, 64) secs, err := strconv.ParseFloat(argValue, 64)
if err != nil { if err != nil {
// Try parsing string format // Try parsing string format
d, err := metricsql.DurationValue(argValue, 0) d, err := promutils.ParseDuration(argValue)
if err != nil { if err != nil {
return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err) return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
} }
secs = float64(d) / 1000 secs = d.Seconds()
} }
msecs := int64(secs * 1e3) msecs := int64(secs * 1e3)
if msecs <= 0 || msecs > maxDurationMsecs { if msecs <= 0 || msecs > maxDurationMsecs {
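For context, a minimal self-contained Go sketch of the relative-time behaviour described above. It uses the standard library's time.ParseDuration as a stand-in only; the promutils.ParseDuration helper used in this commit is not reproduced here, and the exact set of duration units it accepts is an assumption:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Stand-in for promutils.ParseDuration: the stdlib parser accepts "5m",
	// "1h30m", etc., but not Prometheus-only units such as "1d".
	d, err := time.ParseDuration("5m")
	if err != nil {
		panic(err)
	}
	// A positive duration passed via a time query arg is treated as an offset
	// into the past, hence the sign flip before adding it to "now".
	fmt.Println(time.Now().Add(-d))
}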


@ -1,12 +1,12 @@
{ {
"files": { "files": {
"main.css": "./static/css/main.098d452b.css", "main.css": "./static/css/main.098d452b.css",
"main.js": "./static/js/main.7750d578.js", "main.js": "./static/js/main.c945b173.js",
"static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js", "static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js",
"index.html": "./index.html" "index.html": "./index.html"
}, },
"entrypoints": [ "entrypoints": [
"static/css/main.098d452b.css", "static/css/main.098d452b.css",
"static/js/main.7750d578.js" "static/js/main.c945b173.js"
] ]
} }


@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.7750d578.js"></script><link href="./static/css/main.098d452b.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html> <!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.c945b173.js"></script><link href="./static/css/main.098d452b.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -1,11 +1,3 @@
/*! @preserve
* numeral.js
* version : 2.0.6
* author : Adam Draper
* license : MIT
* http://adamwdraper.github.com/Numeral-js/
*/
/** /**
* A better abstraction over CSS. * A better abstraction over CSS.
* *
@ -14,7 +6,7 @@
* @license MIT * @license MIT
*/ */
/** @license MUI v5.3.0 /** @license MUI v5.4.1
* *
* This source code is licensed under the MIT license found in the * This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree. * LICENSE file in the root directory of this source tree.


@ -467,6 +467,9 @@ func registerStorageMetrics() {
metrics.NewGauge(`vm_new_timeseries_created_total`, func() float64 { metrics.NewGauge(`vm_new_timeseries_created_total`, func() float64 {
return float64(idbm().NewTimeseriesCreated) return float64(idbm().NewTimeseriesCreated)
}) })
metrics.NewGauge(`vm_timeseries_repopulated_total`, func() float64 {
return float64(idbm().TimeseriesRepopulated)
})
metrics.NewGauge(`vm_missing_tsids_for_metric_id_total`, func() float64 { metrics.NewGauge(`vm_missing_tsids_for_metric_id_total`, func() float64 {
return float64(idbm().MissingTSIDsForMetricID) return float64(idbm().MissingTSIDsForMetricID)
}) })

File diff suppressed because it is too large


@ -6,32 +6,30 @@
"dependencies": { "dependencies": {
"@date-io/dayjs": "^2.11.0", "@date-io/dayjs": "^2.11.0",
"@emotion/styled": "^11.6.0", "@emotion/styled": "^11.6.0",
"@mui/icons-material": "^5.3.1", "@mui/icons-material": "^5.4.1",
"@mui/lab": "^5.0.0-alpha.66", "@mui/lab": "^5.0.0-alpha.68",
"@mui/material": "^5.3.1", "@mui/material": "^5.4.1",
"@mui/styles": "^5.3.0", "@mui/styles": "^5.4.1",
"@testing-library/jest-dom": "^5.16.1", "@testing-library/jest-dom": "^5.16.2",
"@testing-library/react": "^12.1.2", "@testing-library/react": "^12.1.2",
"@testing-library/user-event": "^13.5.0", "@testing-library/user-event": "^13.5.0",
"@types/jest": "^27.4.0", "@types/jest": "^27.4.0",
"@types/lodash.debounce": "^4.0.6", "@types/lodash.debounce": "^4.0.6",
"@types/lodash.get": "^4.4.6", "@types/lodash.get": "^4.4.6",
"@types/lodash.throttle": "^4.1.6", "@types/lodash.throttle": "^4.1.6",
"@types/node": "^17.0.13", "@types/node": "^17.0.17",
"@types/numeral": "^2.0.2",
"@types/qs": "^6.9.7", "@types/qs": "^6.9.7",
"@types/react": "^17.0.38", "@types/react": "^17.0.39",
"@types/react-dom": "^17.0.11", "@types/react-dom": "^17.0.11",
"@types/react-measure": "^2.0.8", "@types/react-measure": "^2.0.8",
"dayjs": "^1.10.7", "dayjs": "^1.10.7",
"lodash.debounce": "^4.0.8", "lodash.debounce": "^4.0.8",
"lodash.get": "^4.4.2", "lodash.get": "^4.4.2",
"lodash.throttle": "^4.1.1", "lodash.throttle": "^4.1.1",
"numeral": "^2.0.6",
"preact": "^10.6.5", "preact": "^10.6.5",
"qs": "^6.10.3", "qs": "^6.10.3",
"typescript": "~4.5.5", "typescript": "~4.5.5",
"uplot": "^1.6.18", "uplot": "^1.6.19",
"web-vitals": "^2.1.4" "web-vitals": "^2.1.4"
}, },
"scripts": { "scripts": {
@ -62,8 +60,8 @@
}, },
"devDependencies": { "devDependencies": {
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.7", "@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.7",
"@typescript-eslint/eslint-plugin": "^5.10.1", "@typescript-eslint/eslint-plugin": "^5.11.0",
"@typescript-eslint/parser": "^5.10.1", "@typescript-eslint/parser": "^5.11.0",
"customize-cra": "^1.0.0", "customize-cra": "^1.0.0",
"eslint-plugin-react": "^7.28.0", "eslint-plugin-react": "^7.28.0",
"react-app-rewired": "^2.1.11" "react-app-rewired": "^2.1.11"


@ -13,6 +13,21 @@ export interface GraphViewProps {
data?: MetricResult[]; data?: MetricResult[];
} }
const promValueToNumber = (s: string): number => {
// See https://prometheus.io/docs/prometheus/latest/querying/api/#expression-query-result-formats
switch (s) {
case "NaN":
return NaN;
case "Inf":
case "+Inf":
return Infinity;
case "-Inf":
return -Infinity;
default:
return parseFloat(s);
}
};
const GraphView: FC<GraphViewProps> = ({data = []}) => { const GraphView: FC<GraphViewProps> = ({data = []}) => {
const graphDispatch = useGraphDispatch(); const graphDispatch = useGraphDispatch();
const {time: {period}} = useAppState(); const {time: {period}} = useAppState();
@ -43,19 +58,36 @@ const GraphView: FC<GraphViewProps> = ({data = []}) => {
const seriesItem = getSeriesItem(d, hideSeries); const seriesItem = getSeriesItem(d, hideSeries);
tempSeries.push(seriesItem); tempSeries.push(seriesItem);
tempLegend.push(getLegendItem(seriesItem, d.group)); tempLegend.push(getLegendItem(seriesItem, d.group));
let tmpValues = tempValues[d.group];
d.values.forEach(v => { if (!tmpValues) {
tmpValues = [];
}
for (const v of d.values) {
tempTimes.push(v[0]); tempTimes.push(v[0]);
tempValues[d.group] ? tempValues[d.group].push(+v[1]) : tempValues[d.group] = [+v[1]]; tmpValues.push(promValueToNumber(v[1]));
}); }
tempValues[d.group] = tmpValues;
}); });
const timeSeries = getTimeSeries(tempTimes, currentStep, period); const timeSeries = getTimeSeries(tempTimes, currentStep, period);
setDataChart([timeSeries, ...data.map(d => { setDataChart([timeSeries, ...data.map(d => {
return timeSeries.map(t => { const results = [];
const value = d.values.find(v => v[0] === t); const values = d.values;
return value ? +value[1] : null; let j = 0;
}); for (const t of timeSeries) {
while (j < values.length && values[j][0] < t) j++;
let v = null;
if (j < values.length && values[j][0] == t) {
v = promValueToNumber(values[j][1]);
if (!Number.isFinite(v)) {
// Treat special values as nulls in order to satisfy uPlot.
// Otherwise it may draw unexpected graphs.
v = null;
}
}
results.push(v);
}
return results;
})] as uPlotData); })] as uPlotData);
setLimitsYaxis(tempValues); setLimitsYaxis(tempValues);


@ -48,7 +48,7 @@ const LineChart: FC<LineChartProps> = ({data, series, metrics = []}) => {
}; };
const onReadyChart = (u: uPlot) => { const onReadyChart = (u: uPlot) => {
const factor = 0.85; const factor = 0.9;
tooltipOffset.left = parseFloat(u.over.style.left); tooltipOffset.left = parseFloat(u.over.style.left);
tooltipOffset.top = parseFloat(u.over.style.top); tooltipOffset.top = parseFloat(u.over.style.top);
u.root.querySelector(".u-wrap")?.appendChild(tooltip); u.root.querySelector(".u-wrap")?.appendChild(tooltip);


@ -1,17 +1,23 @@
export const getMaxFromArray = (arr: number[]): number => { export const getMaxFromArray = (a: number[]) => {
let len = arr.length; let len = a.length;
let max = -Infinity; let max = -Infinity;
while (len--) { while (len--) {
if (arr[len] > max) max = arr[len]; const v = a[len];
if (Number.isFinite(v) && v > max) {
max = v;
}
} }
return max; return Number.isFinite(max) ? max : null;
}; };
export const getMinFromArray = (arr: number[]): number => { export const getMinFromArray = (a: number[]) => {
let len = arr.length; let len = a.length;
let min = Infinity; let min = Infinity;
while (len--) { while (len--) {
if (arr[len] < min) min = arr[len]; const v = a[len];
if (Number.isFinite(v) && v < min) {
min = v;
}
} }
return min; return Number.isFinite(min) ? min : null;
}; };


@ -2,12 +2,11 @@ import {TimeParams, TimePeriod} from "../types";
import dayjs, {UnitTypeShort} from "dayjs"; import dayjs, {UnitTypeShort} from "dayjs";
import duration from "dayjs/plugin/duration"; import duration from "dayjs/plugin/duration";
import utc from "dayjs/plugin/utc"; import utc from "dayjs/plugin/utc";
import numeral from "numeral";
dayjs.extend(duration); dayjs.extend(duration);
dayjs.extend(utc); dayjs.extend(utc);
const MAX_ITEMS_PER_CHART = window.innerWidth / 2; const MAX_ITEMS_PER_CHART = window.innerWidth / 4;
export const limitsDurations = {min: 1, max: 1.578e+11}; // min: 1 ms, max: 5 years export const limitsDurations = {min: 1, max: 1.578e+11}; // min: 1 ms, max: 5 years
@ -26,7 +25,7 @@ export const supportedDurations = [
const shortDurations = supportedDurations.map(d => d.short); const shortDurations = supportedDurations.map(d => d.short);
export const roundTimeSeconds = (num: number): number => +(numeral(num).format("0.000")); export const roundToMilliseconds = (num: number): number => Math.round(num*1000)/1000;
export const isSupportedDuration = (str: string): Partial<Record<UnitTypeShort, string>> | undefined => { export const isSupportedDuration = (str: string): Partial<Record<UnitTypeShort, string>> | undefined => {
@ -59,7 +58,7 @@ export const getTimeperiodForDuration = (dur: string, date?: Date): TimeParams =
}, {}); }, {});
const delta = dayjs.duration(durObject).asSeconds(); const delta = dayjs.duration(durObject).asSeconds();
const step = roundTimeSeconds(delta / MAX_ITEMS_PER_CHART) || 0.001; const step = roundToMilliseconds(delta / MAX_ITEMS_PER_CHART) || 0.001;
return { return {
start: n - delta, start: n - delta,


@ -1,6 +1,6 @@
import {Axis, Series} from "uplot"; import {Axis, Series} from "uplot";
import {getMaxFromArray, getMinFromArray} from "../math"; import {getMaxFromArray, getMinFromArray} from "../math";
import {roundTimeSeconds} from "../time"; import {roundToMilliseconds} from "../time";
import {AxisRange} from "../../state/graph/reducer"; import {AxisRange} from "../../state/graph/reducer";
import {formatTicks} from "./helpers"; import {formatTicks} from "./helpers";
import {TimeParams} from "../../types"; import {TimeParams} from "../../types";
@ -12,19 +12,37 @@ export const getAxes = (series: Series[]): Axis[] => Array.from(new Set(series.m
return axis; return axis;
}); });
export const getTimeSeries = (times: number[], defaultStep: number, period: TimeParams): number[] => { export const getTimeSeries = (times: number[], step: number, period: TimeParams): number[] => {
const allTimes = Array.from(new Set(times)).sort((a, b) => a - b); const allTimes = Array.from(new Set(times)).sort((a, b) => a - b);
const length = Math.ceil((period.end - period.start)/defaultStep); let t = period.start;
const startTime = allTimes[0] || 0; const tEnd = roundToMilliseconds(period.end + step);
return new Array(length*2).fill(startTime).map((d, i) => roundTimeSeconds(d + (defaultStep * i))); let j = 0;
const results: number[] = [];
while (t <= tEnd) {
while (j < allTimes.length && allTimes[j] <= t) {
t = allTimes[j];
j++;
results.push(t);
}
t = roundToMilliseconds(t + step);
if (j >= allTimes.length || allTimes[j] > t) {
results.push(t);
}
}
while (results.length < 2) {
results.push(t);
t = roundToMilliseconds(t + step);
}
return results;
}; };
export const getMinMaxBuffer = (min: number, max: number): [number, number] => { export const getMinMaxBuffer = (min: number | null, max: number | null): [number, number] => {
const minCorrect = isNaN(min) ? -1 : min; if (min == null || max == null) {
const maxCorrect = isNaN(max) ? 1 : max; return [-1, 1];
const valueRange = Math.abs(maxCorrect - minCorrect) || Math.abs(minCorrect) || 1; }
const valueRange = Math.abs(max - min) || Math.abs(min) || 1;
const padding = 0.02*valueRange; const padding = 0.02*valueRange;
return [minCorrect - padding, maxCorrect + padding]; return [min - padding, max + padding];
}; };
export const getLimitsYAxis = (values: { [key: string]: number[] }): AxisRange => { export const getLimitsYAxis = (values: { [key: string]: number[] }): AxisRange => {


@ -1,5 +1,4 @@
import uPlot from "uplot"; import uPlot from "uplot";
import numeral from "numeral";
import {getColorFromString} from "../color"; import {getColorFromString} from "../color";
export const defaultOptions = { export const defaultOptions = {
@ -29,8 +28,14 @@ export const defaultOptions = {
}, },
}; };
export const formatTicks = (u: uPlot, ticks: number[]): (string | number)[] => { export const formatTicks = (u: uPlot, ticks: number[]): string[] => {
return ticks.map(n => n > 1000 ? numeral(n).format("0.0a") : n); return ticks.map(v => {
const n = Math.abs(v);
if (n > 1e-3 && n < 1e4) {
return v.toString();
}
return v.toExponential(1);
});
}; };
export const getColorLine = (scale: number, label: string): string => getColorFromString(`${scale}${label}`); export const getColorLine = (scale: number, label: string): string => getColorFromString(`${scale}${label}`);


@ -4,8 +4,8 @@ DOCKER_NAMESPACE := victoriametrics
ROOT_IMAGE ?= alpine:3.15.0 ROOT_IMAGE ?= alpine:3.15.0
CERTS_IMAGE := alpine:3.15.0 CERTS_IMAGE := alpine:3.15.0
GO_BUILDER_IMAGE := golang:1.17.6-alpine GO_BUILDER_IMAGE := golang:1.17.7-alpine
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __) BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __) BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
package-base: package-base:
@ -94,6 +94,22 @@ app-via-docker-goarch:
DOCKER_OPTS='--env CGO_ENABLED=$(CGO_ENABLED) --env GOOS=linux --env GOARCH=$(GOARCH)' \ DOCKER_OPTS='--env CGO_ENABLED=$(CGO_ENABLED) --env GOOS=linux --env GOARCH=$(GOARCH)' \
$(MAKE) app-via-docker $(MAKE) app-via-docker
app-via-docker-darwin-amd64:
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-via-docker-goos-goarch
app-via-docker-darwin-arm64:
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-via-docker-goos-goarch
app-via-docker-goos-goarch:
APP_SUFFIX='-$(GOOS)-$(GOARCH)' \
DOCKER_OPTS='--env CGO_ENABLED=$(CGO_ENABLED) --env GOOS=$(GOOS) --env GOARCH=$(GOARCH)' \
$(MAKE) app-via-docker
app-via-docker-goarch-arm64:
APP_SUFFIX='-arm64' \
DOCKER_OPTS='--env CGO_ENABLED=1 --env GOOS=linux --env GOARCH=arm64 --env CC=/opt/cross-builder/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc' \
$(MAKE) app-via-docker
app-via-docker-windows-goarch: app-via-docker-windows-goarch:
APP_SUFFIX='-$(GOARCH)' \ APP_SUFFIX='-$(GOARCH)' \
DOCKER_OPTS='--env CGO_ENABLED=0 --env GOOS=windows --env GOARCH=$(GOARCH)' \ DOCKER_OPTS='--env CGO_ENABLED=0 --env GOOS=windows --env GOARCH=$(GOARCH)' \
@ -115,7 +131,11 @@ app-via-docker-arm:
GOARCH=arm $(MAKE) app-via-docker-goarch-nocgo GOARCH=arm $(MAKE) app-via-docker-goarch-nocgo
app-via-docker-arm64: app-via-docker-arm64:
ifeq ($(APP_NAME),vmagent)
GOARCH=arm64 $(MAKE) app-via-docker-goarch-nocgo GOARCH=arm64 $(MAKE) app-via-docker-goarch-nocgo
else
$(MAKE) app-via-docker-goarch-arm64
endif
app-via-docker-ppc64le: app-via-docker-ppc64le:
GOARCH=ppc64le $(MAKE) app-via-docker-goarch-nocgo GOARCH=ppc64le $(MAKE) app-via-docker-goarch-nocgo
@ -131,6 +151,11 @@ package-via-docker-goarch:
DOCKER_OPTS='--env CGO_ENABLED=$(CGO_ENABLED) --env GOOS=linux --env GOARCH=$(GOARCH)' \ DOCKER_OPTS='--env CGO_ENABLED=$(CGO_ENABLED) --env GOOS=linux --env GOARCH=$(GOARCH)' \
$(MAKE) package-via-docker $(MAKE) package-via-docker
package-via-docker-goarch-arm64:
APP_SUFFIX='-arm64' \
DOCKER_OPTS='--env CGO_ENABLED=1 --env GOOS=linux --env GOARCH=arm64 --env CC=/opt/cross-builder/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc' \
$(MAKE) package-via-docker
package-via-docker-goarch-cgo: package-via-docker-goarch-cgo:
CGO_ENABLED=1 $(MAKE) package-via-docker-goarch CGO_ENABLED=1 $(MAKE) package-via-docker-goarch
@ -147,7 +172,12 @@ package-via-docker-arm:
GOARCH=arm $(MAKE) package-via-docker-goarch-nocgo GOARCH=arm $(MAKE) package-via-docker-goarch-nocgo
package-via-docker-arm64: package-via-docker-arm64:
ifeq ($(APP_NAME),vmagent)
GOARCH=arm64 $(MAKE) package-via-docker-goarch-nocgo GOARCH=arm64 $(MAKE) package-via-docker-goarch-nocgo
else
$(MAKE) package-via-docker-goarch-arm64
endif
package-via-docker-ppc64le: package-via-docker-ppc64le:
GOARCH=ppc64le $(MAKE) package-via-docker-goarch-nocgo GOARCH=ppc64le $(MAKE) package-via-docker-goarch-nocgo
@ -156,4 +186,4 @@ package-via-docker-386:
GOARCH=386 $(MAKE) package-via-docker-goarch-nocgo GOARCH=386 $(MAKE) package-via-docker-goarch-nocgo
remove-docker-images: remove-docker-images:
docker image ls --format '{{.Repository}}\t{{.ID}}' | grep $(DOCKER_NAMESPACE)/ | grep -v /builder | awk '{print $$2}' | xargs docker image rm -f docker image ls --format '{{.Repository}}\t{{.ID}}' | grep $(DOCKER_NAMESPACE)/ | awk '{print $$2}' | xargs docker image rm -f


@ -1,4 +1,9 @@
ARG go_builder_image ARG go_builder_image
FROM $go_builder_image FROM $go_builder_image
STOPSIGNAL SIGINT STOPSIGNAL SIGINT
RUN apk add gcc musl-dev make --no-cache RUN apk add gcc musl-dev make wget --no-cache && \
mkdir /opt/cross-builder && \
wget https://musl.cc/aarch64-linux-musl-cross.tgz -O /opt/cross-builder/aarch64-musl.tgz && \
cd /opt/cross-builder && \
tar zxf aarch64-musl.tgz -C ./ && \
rm /opt/cross-builder/aarch64-musl.tgz


@ -39,7 +39,7 @@ services:
restart: always restart: always
grafana: grafana:
container_name: grafana container_name: grafana
image: grafana/grafana:8.3.4 image: grafana/grafana:8.3.5
depends_on: depends_on:
- "victoriametrics" - "victoriametrics"
ports: ports:


@ -8,6 +8,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
## Third-party articles and slides about VictoriaMetrics ## Third-party articles and slides about VictoriaMetrics
* [Announcing Asserts](https://www.asserts.ai/blog/announcing-asserts/)
* [Choosing a Time Series Database for High Cardinality Aggregations](https://abiosgaming.com/press/high-cardinality-aggregations/) * [Choosing a Time Series Database for High Cardinality Aggregations](https://abiosgaming.com/press/high-cardinality-aggregations/)
* [Scaling to trillions of metric data points](https://engineering.razorpay.com/scaling-to-trillions-of-metric-data-points-f569a5b654f2) * [Scaling to trillions of metric data points](https://engineering.razorpay.com/scaling-to-trillions-of-metric-data-points-f569a5b654f2)
* [VictoriaMetrics vs. OpenTSDB](https://blg.robot-house.us/posts/tsdbs-grow/) * [VictoriaMetrics vs. OpenTSDB](https://blg.robot-house.us/posts/tsdbs-grow/)


@ -4,8 +4,18 @@ sort: 15
# CHANGELOG # CHANGELOG
The following tip changes can be tested by building VictoriaMetrics components from the latest commits according to the following docs:
* [How to build single-node VictoriaMetrics](https://docs.victoriametrics.com/#how-to-build-from-sources)
* [How to build cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#building-from-sources)
* [How to build vmagent](https://docs.victoriametrics.com/vmagent.html#how-to-build-from-sources)
* [How to build vmalert](https://docs.victoriametrics.com/vmalert.html#how-to-build-from-sources)
* [How to build vmauth](https://docs.victoriametrics.com/vmauth.html#how-to-build-from-sources)
* [How to build vmctl](https://docs.victoriametrics.com/vmctl.html#how-to-build)
## tip ## tip
* FEATURE: publish VictoriaMetrics binaries for macOS amd64 and macOS arm64 (aka MacBook M1) at the [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1896) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1851).
* FEATURE: reduce CPU and disk IO usage during `indexdb` rotation once per `-retentionPeriod`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401).
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): add `-dropSamplesOnOverload` command-line flag for `vminsert`. If this flag is set, then `vminsert` drops incoming data if the destination `vmstorage` is temporarily unavailable or cannot keep up with the ingestion rate. The number of dropped rows can be [monitored](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring) via `vm_rpc_rows_dropped_on_overload_total` metric at `vminsert`. * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): add `-dropSamplesOnOverload` command-line flag for `vminsert`. If this flag is set, then `vminsert` drops incoming data if the destination `vmstorage` is temporarily unavailable or cannot keep up with the ingestion rate. The number of dropped rows can be [monitored](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring) via `vm_rpc_rows_dropped_on_overload_total` metric at `vminsert`.
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve re-routing logic, so it re-routes incoming data more evenly if some of `vmstorage` nodes are temporarily unavailable and/or accept data at slower rate than other `vmstorage` nodes. Also significantly reduce possible re-routing storm when `vminsert` runs with `-disableRerouting=false` command-line flag. This should help the following issues: [one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1337), [two](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1165), [three](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1054), [four](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/791), [five](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1544). * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve re-routing logic, so it re-routes incoming data more evenly if some of `vmstorage` nodes are temporarily unavailable and/or accept data at slower rate than other `vmstorage` nodes. Also significantly reduce possible re-routing storm when `vminsert` runs with `-disableRerouting=false` command-line flag. This should help the following issues: [one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1337), [two](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1165), [three](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1054), [four](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/791), [five](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1544).
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): cover more cases with the [label filters' propagation optimization](https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization). This should improve the average performance for practical queries. The following cases are additionally covered: * FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): cover more cases with the [label filters' propagation optimization](https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization). This should improve the average performance for practical queries. The following cases are additionally covered:
@ -14,19 +24,30 @@ sort: 15
* Multi-level binary operations. For example, `foo{a="b"} + bar{x="y"} + baz{z="q"}` is now optimized to `foo{a="b",x="y",z="q"} + bar{a="b",x="y",z="q"} + baz{a="b",x="y",z="q"}` * Multi-level binary operations. For example, `foo{a="b"} + bar{x="y"} + baz{z="q"}` is now optimized to `foo{a="b",x="y",z="q"} + bar{a="b",x="y",z="q"} + baz{a="b",x="y",z="q"}`
* Aggregate functions. For example, `sum(foo{a="b"}) by (c) + bar{c="d"}` is now optimized to `sum(foo{a="b",c="d"}) by (c) + bar{c="d"}` * Aggregate functions. For example, `sum(foo{a="b"}) by (c) + bar{c="d"}` is now optimized to `sum(foo{a="b",c="d"}) by (c) + bar{c="d"}`
* FEATURE [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): optimize joining with `*_info` labels. For example: `kube_pod_created{namespace="prod"} * on (uid) group_left(node) kube_pod_info` now automatically adds the needed filters on `uid` label to `kube_pod_info` before selecting series for the right side of `*` operation. This may save CPU, RAM and disk IO resources. See [this article](https://www.robustperception.io/exposing-the-software-version-to-prometheus) for details on `*_info` labels. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1827). * FEATURE [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): optimize joining with `*_info` labels. For example: `kube_pod_created{namespace="prod"} * on (uid) group_left(node) kube_pod_info` now automatically adds the needed filters on `uid` label to `kube_pod_info` before selecting series for the right side of `*` operation. This may save CPU, RAM and disk IO resources. See [this article](https://www.robustperception.io/exposing-the-software-version-to-prometheus) for details on `*_info` labels. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1827).
* FEATURE: all: improve performance for arm64 builds of VictoriaMetrics components by up to 15%. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2102).
* FEATURE: all: expose `process_cpu_cores_available` metric, which shows the number of CPU cores available to the app. The number can be fractional if the corresponding cgroup limit is set to a fractional value. This metric is useful for alerting on CPU saturation. For example, the following query alerts when the app uses more than 90% of CPU during the last 5 minutes: `rate(process_cpu_seconds_total[5m]) / process_cpu_cores_available > 0.9` . See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2107). * FEATURE: all: expose `process_cpu_cores_available` metric, which shows the number of CPU cores available to the app. The number can be fractional if the corresponding cgroup limit is set to a fractional value. This metric is useful for alerting on CPU saturation. For example, the following query alerts when the app uses more than 90% of CPU during the last 5 minutes: `rate(process_cpu_seconds_total[5m]) / process_cpu_cores_available > 0.9` . See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2107).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add ability to configure notifiers (e.g. alertmanager) via a file in the way similar to Prometheus. See [these docs](https://docs.victoriametrics.com/vmalert.html#notifier-configuration-file), [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2127). * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add ability to configure notifiers (e.g. alertmanager) via a file in the way similar to Prometheus. See [these docs](https://docs.victoriametrics.com/vmalert.html#notifier-configuration-file), [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2127).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add support for Consul service discovery for notifiers. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1947). * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add support for Consul service discovery for notifiers. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1947).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add support for specifying Basic Auth password for notifiers via a file. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1567). * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add support for specifying Basic Auth password for notifiers via a file. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1567).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): provide the ability to fetch target responses on behalf of `vmagent` by clicking the `response` link for the needed target at `/targets` page. This feature may be useful for debugging responses from targets located in isolated environments. * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): provide the ability to fetch target responses on behalf of `vmagent` by clicking the `response` link for the needed target at `/targets` page. This feature may be useful for debugging responses from targets located in isolated environments.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): show the total number of scrapes and the total number of scrape errors per target at `/targets` page. This information may be useful when debugging unreliable scrape targets. * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): show the total number of scrapes and the total number of scrape errors per target at `/targets` page. This information may be useful when debugging unreliable scrape targets.
* FEATURE: vmagent and single-node VictoriaMetrics: disallow unknown fields in the `-promscrape.config` file. Previously unknown fields were allowed. This could lead to long-lived silent config errors. The previous behaviour can be restored by passing the `-promscrape.config.strictParse=false` command-line flag.
* FEATURE: vmagent: add `__meta_kubernetes_endpointslice_label*` and `__meta_kubernetes_endpointslice_annotation*` labels for `role: endpointslice` targets in [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config) to be consistent with other `role` values. See [this issue](https://github.com/prometheus/prometheus/issues/10284).
* FEATURE: vmagent: add `collapse all` and `expand all` buttons to `http://vmagent:8429/targets` page. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2021).
* FEATURE: vmagent: support Prometheus-like durations in `-promscrape.config`. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/817#issuecomment-1033384766).
* FEATURE: automatically re-read `-tlsCertFile` and `-tlsKeyFile` files, so their contents can be updated without the need to restart VictoriaMetrics apps. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2171).
* BUGFIX: calculate [absent_over_time()](https://docs.victoriametrics.com/MetricsQL.html#absent_over_time) in the same way as Prometheus does. Previously it could return multiple time series instead of at most one time series like Prometheus does. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2130).
* BUGFIX: return proper results from `highestMax()` function at [Graphite render API](https://docs.victoriametrics.com/#graphite-render-api-usage). Previously it was incorrectly returning timeseries with min peaks instead of max peaks. * BUGFIX: return proper results from `highestMax()` function at [Graphite render API](https://docs.victoriametrics.com/#graphite-render-api-usage). Previously it was incorrectly returning timeseries with min peaks instead of max peaks.
* BUGFIX: properly limit indexdb cache sizes. Previously they could exceed values set via `-memory.allowedPercent` and/or `-memory.allowedBytes` when `indexdb` contained many data parts. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2007). * BUGFIX: properly limit indexdb cache sizes. Previously they could exceed values set via `-memory.allowedPercent` and/or `-memory.allowedBytes` when `indexdb` contained many data parts. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2007).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix a bug, which could break time range picker when editing `From` or `To` input fields. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2080). * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix a bug, which could break time range picker when editing `From` or `To` input fields. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2080).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix a bug, which could break switching between `graph`, `json` and `table` views. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2084). * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix a bug, which could break switching between `graph`, `json` and `table` views. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2084).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix possible UI freeze after querying `node_uname_info` time series. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2115). * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix possible UI freeze after querying `node_uname_info` time series. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2115).
* BUGFIX: show the original location of the warning or error message when logging throttled messages. Previously the location inside `lib/logger/throttler.go` was shown. This could increase the complexity of debugging. * BUGFIX: show the original location of the warning or error message when logging throttled messages. Previously the location inside `lib/logger/throttler.go` was shown. This could increase the complexity of debugging.
* BUGFIX: vmalert: fix links at web UI. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2167).
* BUGFIX: vmagent: properly discover pods without exposed ports for the given service for `role: endpoints` and `role: endpointslice` in [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2134).
* BUGFIX: vmagent: properly display `zone` contents for `gce_sd_configs` section at `http://vmagent:8429/config` page. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2179). Thanks to @artifactori for the bugfix.
* BUGFIX: vmagent: properly handle `all_tenants: true` config option at `openstack_sd_config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2182).
## [v1.72.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.72.0) ## [v1.72.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.72.0)


@ -493,7 +493,7 @@ Below is the output for `/path/to/vminsert -help`:
-envflag.prefix string -envflag.prefix string
Prefix for environment variables if -envflag.enable is set Prefix for environment variables if -envflag.enable is set
-eula -eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/legal/eula/ By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
-fs.disableMmap -fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread() Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-graphiteListenAddr string -graphiteListenAddr string
@ -616,7 +616,7 @@ Below is the output for `/path/to/vmselect -help`:
-envflag.prefix string -envflag.prefix string
Prefix for environment variables if -envflag.enable is set Prefix for environment variables if -envflag.enable is set
-eula -eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/legal/eula/ By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
-fs.disableMmap -fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread() Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-graphiteTrimTimestamp duration -graphiteTrimTimestamp duration
@ -744,7 +744,7 @@ Below is the output for `/path/to/vmstorage -help`:
-envflag.prefix string -envflag.prefix string
Prefix for environment variables if -envflag.enable is set Prefix for environment variables if -envflag.enable is set
-eula -eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/legal/eula/ By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
-finalMergeDelay duration -finalMergeDelay duration
The delay before starting final merge for per-month partition after no new data is ingested into it. Final merge may require additional disk IO and CPU resources. Final merge may increase query speed and reduce disk space usage in some cases. Zero value disables final merge The delay before starting final merge for per-month partition after no new data is ingested into it. Final merge may require additional disk IO and CPU resources. Final merge may increase query speed and reduce disk space usage in some cases. Zero value disables final merge
-forceFlushAuthKey string -forceFlushAuthKey string


@ -1129,9 +1129,9 @@ with the enabled de-duplication. See [this section](#deduplication) for details.
## Deduplication ## Deduplication
VictoriaMetrics de-duplicates data points if `-dedup.minScrapeInterval` command-line flag VictoriaMetrics de-duplicates data points if `-dedup.minScrapeInterval` command-line flag is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would de-duplicate data points on the same time series if they fall within the same discrete 60s bucket. The earliest data point will be kept. In the case of equal timestamps, an arbitrary data point will be kept. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2112#issuecomment-1032587618) for more details on how downsampling works.
is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would de-duplicate data points
on the same time series if they fall within the same discrete 60s bucket. The earliest data point will be kept. In the case of equal timestamps, an arbitrary data point will be kept. The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. It is safe to use deduplication and downsampling simultaneously.
The recommended value for `-dedup.minScrapeInterval` must equal to `scrape_interval` config from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details. The recommended value for `-dedup.minScrapeInterval` must equal to `scrape_interval` config from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
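A minimal sketch of the documented rule (keep the earliest sample per discrete `-dedup.minScrapeInterval` bucket), assuming samples are sorted by timestamp in ascending order. This is an illustration only, not the storage engine's actual deduplication code:

package main

import "fmt"

// deduplicate keeps the earliest sample within every discrete interval bucket.
// Timestamps are in milliseconds and assumed to be sorted in ascending order.
func deduplicate(timestamps []int64, values []float64, intervalMs int64) ([]int64, []float64) {
	var dstTimestamps []int64
	var dstValues []float64
	var nextBucketStart int64
	for i, ts := range timestamps {
		if ts < nextBucketStart {
			// A sample from this bucket has already been kept.
			continue
		}
		dstTimestamps = append(dstTimestamps, ts)
		dstValues = append(dstValues, values[i])
		nextBucketStart = (ts/intervalMs + 1) * intervalMs
	}
	return dstTimestamps, dstValues
}

func main() {
	ts := []int64{1000, 20000, 45000, 61000, 90000}
	vs := []float64{1, 2, 3, 4, 5}
	dt, dv := deduplicate(ts, vs, 60000) // -dedup.minScrapeInterval=60s
	fmt.Println(dt, dv)                  // [1000 61000] [1 4]
}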
@ -1140,15 +1140,49 @@ write data to the same VictoriaMetrics instance. These vmagent or Prometheus ins
`external_labels` section in their configs, so they write data to the same time series. `external_labels` section in their configs, so they write data to the same time series.
## Storage
VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like
data structures. On insert, VictoriaMetrics accumulates up to 1s of data and dumps it on disk to
the `<-storageDataPath>/data/small/YYYY_MM/` subdirectory, forming a `part` with the following
name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`. Each part consists of two "columns":
values and timestamps. These are sorted and compressed raw time series values. Additionally, each part contains
index files for searching for specific series in the values and timestamps files.
`Parts` are periodically merged into bigger parts. The resulting `part` is constructed
under the `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` subdirectory. When the resulting `part` is complete, it is atomically moved from `tmp`
to its own subdirectory, while the source parts are atomically removed. The end result is that the source
parts are substituted by a single resulting bigger `part` in the `<-storageDataPath>/data/{small,big}/YYYY_MM/` directory.
Information about the merging process is available in the [single-node VictoriaMetrics](https://grafana.com/dashboards/10229)
and [clustered VictoriaMetrics](https://grafana.com/grafana/dashboards/11176) Grafana dashboards.
See more details in the [monitoring docs](#monitoring).
The `merge` process is usually named "compaction", because the resulting `part` size is usually smaller than
the sum of the source `parts`. The merge process provides the following benefits:
* it improves query performance, since a lower number of `parts` is inspected with each query;
* it reduces the number of data files, since each `part` contains a fixed number of files;
* it achieves a better compression rate for the resulting `part`.
Newly added `parts` either appear in the storage or fail to appear.
Storage never contains partially created parts. The same applies to the merge process: `parts` are either fully
merged into a new `part` or fail to merge. There are no partially merged `parts` in MergeTree.
`Part` contents in MergeTree never change. Parts are immutable. They may only be deleted after the merge
into a bigger `part` or when the `part` contents go outside the configured `-retentionPeriod`.
See [this article](https://valyala.medium.com/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) for more details.
See also [how to work with snapshots](#how-to-work-with-snapshots).
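A tiny Go sketch of the general idea behind background merging (an illustration of the LSM/MergeTree concept, not VictoriaMetrics' actual merge code): two timestamp-sorted parts are combined into a single bigger sorted part, so later queries inspect fewer parts:

package main

import "fmt"

// mergeParts combines two timestamp-sorted "parts" into one bigger sorted part.
func mergeParts(a, b []int64) []int64 {
	merged := make([]int64, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		if a[i] <= b[j] {
			merged = append(merged, a[i])
			i++
		} else {
			merged = append(merged, b[j])
			j++
		}
	}
	merged = append(merged, a[i:]...)
	return append(merged, b[j:]...)
}

func main() {
	small1 := []int64{100, 300, 500}
	small2 := []int64{200, 400, 600}
	fmt.Println(mergeParts(small1, small2)) // [100 200 300 400 500 600]
}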
## Retention ## Retention
Retention is configured with `-retentionPeriod` command-line flag. For instance, `-retentionPeriod=3` means Retention is configured with `-retentionPeriod` command-line flag. For instance, `-retentionPeriod=3` means
that the data will be stored for 3 months and then deleted. that the data will be stored for 3 months and then deleted.
Data is split in per-month partitions inside `<-storageDataPath>/data/small` and `<-storageDataPath>/data/big` folders. Data is split in per-month partitions inside `<-storageDataPath>/data/{small,big}` folders.
Data partitions outside the configured retention are deleted on the first day of new month. Data partitions outside the configured retention are deleted on the first day of new month.
Each partition consists of one or more data parts with the following name pattern `rowsCount_blocksCount_minTimestamp_maxTimestamp`. Each partition consists of one or more data parts with the following name pattern `rowsCount_blocksCount_minTimestamp_maxTimestamp`.
Data parts outside of the configured retention are eventually deleted during [background merge](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282). Data parts outside of the configured retention are eventually deleted during
[background merge](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
In order to keep data according to `-retentionPeriod` max disk space usage is going to be `-retentionPeriod` + 1 month. In order to keep data according to `-retentionPeriod` max disk space usage is going to be `-retentionPeriod` + 1 month.
For example if `-retentionPeriod` is set to 1, data for January is deleted on March 1st. For example if `-retentionPeriod` is set to 1, data for January is deleted on March 1st.
@ -1596,7 +1630,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs to leave a single sample per 10 minutes for samples older than 30 days. See https://docs.victoriametrics.com/#downsampling for details Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs to leave a single sample per 10 minutes for samples older than 30 days. See https://docs.victoriametrics.com/#downsampling for details
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-dryRun -dryRun
Whether to check only -promscrape.config and then exit. Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse Whether to check only -promscrape.config and then exit. Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag
-enableTCP6 -enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
-envflag.enable -envflag.enable
@ -1604,7 +1638,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-envflag.prefix string
Prefix for environment variables if -envflag.enable is set
-eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
-finalMergeDelay duration
The delay before starting final merge for per-month partition after no new data is ingested into it. Final merge may require additional disk IO and CPU resources. Final merge may increase query speed and reduce disk space usage in some cases. Zero value disables final merge
-forceFlushAuthKey string
@ -1714,7 +1748,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.config.dryRun
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
-promscrape.config.strictParse
Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields (default true)
-promscrape.configCheckInterval duration
Interval for checking for changes in '-promscrape.config' file. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
-promscrape.consul.waitTime duration
@ -1133,9 +1133,9 @@ with the enabled de-duplication. See [this section](#deduplication) for details.
## Deduplication
VictoriaMetrics de-duplicates data points if the `-dedup.minScrapeInterval` command-line flag is set to a positive duration. For example, `-dedup.minScrapeInterval=60s` would de-duplicate data points on the same time series if they fall within the same discrete 60s bucket. The earliest data point will be kept. In the case of equal timestamps, an arbitrary data point will be kept. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2112#issuecomment-1032587618) for more details on how downsampling works.
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. It is safe to use deduplication and downsampling simultaneously.
The recommended value for `-dedup.minScrapeInterval` must equal the `scrape_interval` config from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
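For illustration only (this is not code from VictoriaMetrics; the names are made up), the bucketing rule described above can be sketched as keeping the earliest sample per discrete `-dedup.minScrapeInterval` bucket of a single time series:

```go
package main

import (
	"fmt"
	"time"
)

// dedupEarliest keeps the earliest sample per discrete interval bucket.
// Timestamps are in milliseconds, belong to a single time series and are sorted,
// mirroring the de-duplication rule described above.
func dedupEarliest(timestamps []int64, interval time.Duration) []int64 {
	d := interval.Milliseconds()
	var kept []int64
	lastBucket := int64(-1)
	for _, ts := range timestamps {
		bucket := ts / d
		if bucket != lastBucket {
			kept = append(kept, ts) // the earliest sample in this bucket wins
			lastBucket = bucket
		}
	}
	return kept
}

func main() {
	// Three samples inside the same 60s bucket plus one sample in the next bucket.
	ts := []int64{10_000, 30_000, 55_000, 70_000}
	fmt.Println(dedupEarliest(ts, 60*time.Second)) // [10000 70000]
}
```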
@ -1144,15 +1144,49 @@ write data to the same VictoriaMetrics instance. These vmagent or Prometheus ins
`external_labels` section in their configs, so they write data to the same time series.
## Storage
VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like
data structures. On insert, VictoriaMetrics accumulates up to 1s of data and dumps it on disk to
`<-storageDataPath>/data/small/YYYY_MM/` subdirectory forming a `part` with the following
name pattern `rowsCount_blocksCount_minTimestamp_maxTimestamp`. Each part consists of two "columns":
values and timestamps. These are sorted and compressed raw time series values. Additionally, part contains
index files for searching for specific series in the values and timestamps files.
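As an aside, the part name pattern above encodes four fields; the following sketch (illustrative Go, not taken from the VictoriaMetrics sources; the struct and field names are assumptions) shows how such a name could be decomposed:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// partMeta mirrors the four fields encoded in a part name
// rowsCount_blocksCount_minTimestamp_maxTimestamp (timestamps are assumed
// to be milliseconds since the Unix epoch).
type partMeta struct {
	RowsCount    uint64
	BlocksCount  uint64
	MinTimestamp int64
	MaxTimestamp int64
}

func parsePartName(name string) (partMeta, error) {
	var pm partMeta
	fields := strings.Split(name, "_")
	if len(fields) != 4 {
		return pm, fmt.Errorf("unexpected part name %q: want 4 fields", name)
	}
	var err error
	if pm.RowsCount, err = strconv.ParseUint(fields[0], 10, 64); err != nil {
		return pm, err
	}
	if pm.BlocksCount, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
		return pm, err
	}
	if pm.MinTimestamp, err = strconv.ParseInt(fields[2], 10, 64); err != nil {
		return pm, err
	}
	if pm.MaxTimestamp, err = strconv.ParseInt(fields[3], 10, 64); err != nil {
		return pm, err
	}
	return pm, nil
}

func main() {
	pm, err := parsePartName("1234_5_1643673600000_1643677200000")
	fmt.Println(pm, err)
}
```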
`Parts` are periodically merged into bigger parts. The resulting `part` is constructed
under the `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` subdirectory. When the resulting `part` is complete, it is atomically moved from the `tmp`
to its own subdirectory, while the source parts are atomically removed. The end result is that the source
parts are substituted by a single resulting bigger `part` in the `<-storageDataPath>/data/{small,big}/YYYY_MM/` directory.
Information about merging process is available in [single-node VictoriaMetrics](https://grafana.com/dashboards/10229)
and [clustered VictoriaMetrics](https://grafana.com/grafana/dashboards/11176) Grafana dashboards.
See more details in [monitoring docs](#monitoring).
The `merge` process is usually named "compaction", because the resulting `part` size is usually smaller than
the sum of the source `parts`. The merge process provides the following benefits:
* it improves query performance, since fewer `parts` are inspected with each query;
* it reduces the number of data files, since each `part` contains a fixed number of files;
* it provides a better compression rate for the resulting `part`.
Newly added `parts` either appear in the storage or fail to appear.
Storage never contains partially created parts. The same applies to the merge process: `parts` are either fully
merged into a new `part` or fail to merge. There are no partially merged `parts` in MergeTree.
`Part` contents in MergeTree never change. Parts are immutable. They may only be deleted after the merge
to a bigger `part` or when the `part` contents go outside the configured `-retentionPeriod`.
See [this article](https://valyala.medium.com/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) for more details.
See also [how to work with snapshots](#how-to-work-with-snapshots).
## Retention
Retention is configured with the `-retentionPeriod` command-line flag. For instance, `-retentionPeriod=3` means
that the data will be stored for 3 months and then deleted.
Data is split in per-month partitions inside `<-storageDataPath>/data/{small,big}` folders.
Data partitions outside the configured retention are deleted on the first day of the new month.
Each partition consists of one or more data parts with the following name pattern `rowsCount_blocksCount_minTimestamp_maxTimestamp`.
Data parts outside of the configured retention are eventually deleted during
[background merge](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
In order to keep data according to `-retentionPeriod`, max disk space usage is going to be `-retentionPeriod` + 1 month.
For example, if `-retentionPeriod` is set to 1, data for January is deleted on March 1st.
@ -1600,7 +1634,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs to leave a single sample per 10 minutes for samples older than 30 days. See https://docs.victoriametrics.com/#downsampling for details
Supports an array of values separated by comma or specified via multiple flags.
-dryRun
Whether to check only -promscrape.config and then exit. Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag
-enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
-envflag.enable
@ -1608,7 +1642,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-envflag.prefix string
Prefix for environment variables if -envflag.enable is set
-eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
-finalMergeDelay duration
The delay before starting final merge for per-month partition after no new data is ingested into it. Final merge may require additional disk IO and CPU resources. Final merge may increase query speed and reduce disk space usage in some cases. Zero value disables final merge
-forceFlushAuthKey string
@ -1718,7 +1752,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.config.dryRun
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
-promscrape.config.strictParse
Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields (default true)
-promscrape.configCheckInterval duration
Interval for checking for changes in '-promscrape.config' file. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
-promscrape.consul.waitTime duration
@ -16,7 +16,7 @@ or any other Prometheus-compatible storage systems that support the `remote_writ
While VictoriaMetrics provides an efficient solution to store and observe metrics, our users needed something fast
and RAM friendly to scrape metrics from Prometheus-compatible exporters into VictoriaMetrics.
Also, we found that our users' infrastructures are like snowflakes in that no two are alike. Therefore we decided to add more flexibility
to `vmagent`, such as the ability to push metrics in addition to pulling them. We did our best and will continue to improve `vmagent`.
## Features
@ -50,7 +50,7 @@ to `vmagent` such as the ability to push metrics instead of pulling them. We did
Please download the `vmutils-*` archive from the [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
and pass the following flags to the `vmagent` binary in order to start scraping Prometheus targets:
* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`). The path can point either to a local file or to an http url. `vmagent` doesn't support some sections of Prometheus config file, so you may need either to delete these sections or to run `vmagent` with the additional `-promscrape.config.strictParse=false` command-line flag, so `vmagent` will ignore unsupported sections. See [the list of unsupported sections](#unsupported-prometheus-config-sections).
* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics. The `-remoteWrite.url` argument can be specified multiple times to replicate data concurrently to an arbitrary number of remote storage systems.
Example command line:
@ -241,6 +241,19 @@ Every referred file can contain arbitrary number of [supported scrape configs](#
`vmagent` dynamically reloads these files on `SIGHUP` signal or on the request to `http://vmagent:8429/-/reload`.
## Unsupported Prometheus config sections
`vmagent` doesn't support the following sections in Prometheus config file passed to `-promscrape.config` command-line flag:
* [remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). This section is substituted with various `-remoteWrite*` command-line flags. See [the full list of flags](#advanced-usage). The `remote_write` section isn't supported in order to reduce possible confusion when `vmagent` is used for accepting incoming metrics via push protocols such as InfluxDB, Graphite, OpenTSDB, DataDog, etc. In this case the `-promscrape.config` file isn't needed. See [these docs](#features) for details.
* `remote_read`. This section isn't supported at all.
* `rule_files` and `alerting`. These sections are supported by [vmalert](https://docs.victoriametrics.com/vmalert.html).
The list of supported service discovery types is available [here](#how-to-collect-metrics-in-prometheus-format).
Additionally, `vmagent` doesn't support the `refresh_interval` option in service discovery sections. This option is substituted with `-promscrape.*CheckInterval` command-line options, which are specific to each service discovery type. See [the full list of command-line flags for vmagent](#advanced-usage).
## Adding labels to metrics
Labels can be added to metrics by the following mechanisms:
@ -712,13 +725,15 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
The maximum size in bytes of a single DataDog POST request to /api/v1/series
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 67108864)
-dryRun
Whether to check only config files without running vmagent. The following files are checked: -promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . Unknown config entries aren't allowed in -promscrape.config by default. This can be changed by passing -promscrape.config.strictParse=false command-line flag
-enableTCP6 -enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
-envflag.enable -envflag.enable
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string -envflag.prefix string
Prefix for environment variables if -envflag.enable is set Prefix for environment variables if -envflag.enable is set
-eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
-fs.disableMmap -fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread() Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-graphiteListenAddr string -graphiteListenAddr string
@ -764,6 +779,32 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms) Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-insert.maxQueueDuration duration -insert.maxQueueDuration duration
The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s) The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
-kafka.consumer.topic array
Kafka topic names for data consumption.
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.basicAuth.password array
Optional basic auth password for -kafka.consumer.topic. Must be used in conjunction with any supported auth methods for kafka client, specified by flag -kafka.consumer.topic.options='security.protocol=SASL_SSL;sasl.mechanisms=PLAIN'
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.basicAuth.username array
Optional basic auth username for -kafka.consumer.topic. Must be used in conjunction with any supported auth methods for kafka client, specified by flag -kafka.consumer.topic.options='security.protocol=SASL_SSL;sasl.mechanisms=PLAIN'
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.brokers array
List of brokers to connect for given topic, e.g. -kafka.consumer.topic.broker=host-1:9092;host-2:9092
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.defaultFormat string
Expected data format in the topic if -kafka.consumer.topic.format is skipped. (default "promremotewrite")
-kafka.consumer.topic.format array
data format for corresponding kafka topic. Valid formats: influx, prometheus, promremotewrite, graphite, jsonline
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.groupID array
Defines group.id for topic
Supports an array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.isGzipped array
Enables gzip setting for topic messages payload. Only prometheus, jsonline and influx formats accept gzipped messages.
Supports array of values separated by comma or specified via multiple flags.
-kafka.consumer.topic.options array
Optional key=value;key1=value2 settings for topic consumer. See full configuration options at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md.
Supports an array of values separated by comma or specified via multiple flags.
-loggerDisableTimestamps -loggerDisableTimestamps
Whether to disable writing timestamps in logs Whether to disable writing timestamps in logs
-loggerErrorsPerSecondLimit int -loggerErrorsPerSecondLimit int
@ -814,7 +855,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-promscrape.config.dryRun
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
-promscrape.config.strictParse
Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields (default true)
-promscrape.configCheckInterval duration
Interval for checking for changes in '-promscrape.config' file. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
-promscrape.consul.waitTime duration
@ -498,6 +498,10 @@ command-line flags with their descriptions.
The shortlist of configuration flags is the following:
```
-clusterMode
If clusterMode is enabled, then vmalert automatically adds the tenant specified in config groups to -datasource.url, -remoteWrite.url and -remoteRead.url. See https://docs.victoriametrics.com/vmalert.html#multitenancy
-configCheckInterval duration
Interval for checking for changes in '-rule' or '-notifier.config' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes.
-datasource.appendTypePrefix -datasource.appendTypePrefix
Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL. Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.
-datasource.basicAuth.password string -datasource.basicAuth.password string
@ -530,8 +534,12 @@ The shortlist of configuration flags is the following:
Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
-datasource.url string -datasource.url string
VictoriaMetrics or vmselect url. Required parameter. E.g. http://127.0.0.1:8428 VictoriaMetrics or vmselect url. Required parameter. E.g. http://127.0.0.1:8428
-defaultTenant.graphite string
Default tenant for Graphite alerting groups. See https://docs.victoriametrics.com/vmalert.html#multitenancy
-defaultTenant.prometheus string
Default tenant for Prometheus alerting groups. See https://docs.victoriametrics.com/vmalert.html#multitenancy
-disableAlertgroupLabel
Whether to disable adding group's Name as label to generated alerts and time series.
-dryRun -rule
Whether to check only config files without running vmalert. The rules file are validated. The -rule flag must be specified.
-enableTCP6 -enableTCP6
@ -540,13 +548,15 @@ The shortlist of configuration flags is the following:
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string -envflag.prefix string
Prefix for environment variables if -envflag.enable is set Prefix for environment variables if -envflag.enable is set
-eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
-evaluationInterval duration -evaluationInterval duration
How often to evaluate the rules (default 1m0s) How often to evaluate the rules (default 1m0s)
-external.alert.source string -external.alert.source string
External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service.
eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used
-external.label array
Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-external.url string -external.url string
External URL is used as alert's source for sent alerts to the notifier External URL is used as alert's source for sent alerts to the notifier
@ -595,11 +605,15 @@ The shortlist of configuration flags is the following:
Optional basic auth password for -notifier.url Optional basic auth password for -notifier.url
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-notifier.basicAuth.passwordFile array -notifier.basicAuth.passwordFile array
Optional path to basic auth password file for -notifier.url Optional path to basic auth password file for -notifier.url
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-notifier.basicAuth.username array -notifier.basicAuth.username array
Optional basic auth username for -notifier.url Optional basic auth username for -notifier.url
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-notifier.config string
Path to configuration file for notifiers
-notifier.suppressDuplicateTargetErrors
Whether to suppress 'duplicate target' errors during discovery
-notifier.tlsCAFile array -notifier.tlsCAFile array
Optional path to TLS CA file to use for verifying connections to -notifier.url. By default system CA is used Optional path to TLS CA file to use for verifying connections to -notifier.url. By default system CA is used
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
@ -620,6 +634,14 @@ The shortlist of configuration flags is the following:
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-pprofAuthKey string -pprofAuthKey string
Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
-promscrape.consul.waitTime duration
Wait time used by Consul service discovery. Default value is used if not set
-promscrape.consulSDCheckInterval duration
Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s)
-promscrape.discovery.concurrency int
The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100)
-promscrape.discovery.concurrentWaitTime duration
The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s)
-remoteRead.basicAuth.password string -remoteRead.basicAuth.password string
Optional basic auth password for -remoteRead.url Optional basic auth password for -remoteRead.url
-remoteRead.basicAuth.passwordFile string -remoteRead.basicAuth.passwordFile string
@ -699,8 +721,8 @@ The shortlist of configuration flags is the following:
absolute path to all .yaml files in root. absolute path to all .yaml files in root.
Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars. Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.
Supports an array of values separated by comma or specified via multiple flags. Supports an array of values separated by comma or specified via multiple flags.
-rule.configCheckInterval duration
Interval for checking for changes in '-rule' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead
-rule.maxResolveDuration duration -rule.maxResolveDuration duration
Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group. Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group.
-rule.validateExpressions -rule.validateExpressions
@ -713,14 +735,6 @@ The shortlist of configuration flags is the following:
Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower
-tlsKeyFile string -tlsKeyFile string
Path to file with TLS key. Used only if -tls is set Path to file with TLS key. Used only if -tls is set
-promscrape.consul.waitTime duration
Wait time used by Consul service discovery. Default value is used if not set
-promscrape.consulSDCheckInterval duration
Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s)
-promscrape.discovery.concurrency int
The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100)
-promscrape.discovery.concurrentWaitTime duration
The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s)
-version
Show VictoriaMetrics version
```
@ -227,6 +227,8 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string -envflag.prefix string
Prefix for environment variables if -envflag.enable is set Prefix for environment variables if -envflag.enable is set
-eula
By specifying this flag, you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
-fs.disableMmap -fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread() Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-http.connTimeout duration -http.connTimeout duration
go.mod
@ -4,14 +4,14 @@ go 1.17
require ( require (
cloud.google.com/go/storage v1.20.0 cloud.google.com/go/storage v1.20.0
github.com/VictoriaMetrics/fastcache v1.8.0 github.com/VictoriaMetrics/fastcache v1.9.0
// Do not use the original github.com/valyala/fasthttp because of issues // Do not use the original github.com/valyala/fasthttp because of issues
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b // like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
github.com/VictoriaMetrics/fasthttp v1.1.0 github.com/VictoriaMetrics/fasthttp v1.1.0
github.com/VictoriaMetrics/metrics v1.18.1 github.com/VictoriaMetrics/metrics v1.18.1
github.com/VictoriaMetrics/metricsql v0.40.0 github.com/VictoriaMetrics/metricsql v0.40.0
github.com/aws/aws-sdk-go v1.42.47 github.com/aws/aws-sdk-go v1.42.52
github.com/cespare/xxhash/v2 v2.1.2 github.com/cespare/xxhash/v2 v2.1.2
github.com/cheggaaa/pb/v3 v3.0.8 github.com/cheggaaa/pb/v3 v3.0.8
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
@ -33,8 +33,8 @@ require (
github.com/valyala/quicktemplate v1.7.0 github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a golang.org/x/sys v0.0.0-20220209214540-3681064d5158
google.golang.org/api v0.67.0 google.golang.org/api v0.68.0
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
) )
@ -68,7 +68,7 @@ require (
golang.org/x/text v0.3.7 // indirect golang.org/x/text v0.3.7 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e // indirect google.golang.org/genproto v0.0.0-20220211171837-173942840c17 // indirect
google.golang.org/grpc v1.44.0 // indirect google.golang.org/grpc v1.44.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
go.sum
@ -110,8 +110,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VictoriaMetrics/fastcache v1.8.0 h1:ybZqS7kRy8YVzYsI09GLzQhs7iqS6cOEH2avtknD1SU= github.com/VictoriaMetrics/fastcache v1.9.0 h1:oMwsS6c8abz98B7ytAewQ7M1ZN/Im/iwKoE1euaFvhs=
github.com/VictoriaMetrics/fastcache v1.8.0/go.mod h1:n7Sl+ioh/HlWeYHLSIBIE8TcZFHg/+xgvomWSS5xuEE= github.com/VictoriaMetrics/fastcache v1.9.0/go.mod h1:otoTS3xu+6IzF/qByjqzjp3rTuzM3Qf0ScU1UTj97iU=
github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a/ueoLdLL0= github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a/ueoLdLL0=
github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ= github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ=
github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0= github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
@ -162,8 +162,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.42.47 h1:Faabrbp+bOBiZjHje7Hbhvni212aQYQIXZMruwkgmmA= github.com/aws/aws-sdk-go v1.42.52 h1:/+TZ46+0qu9Ph/UwjVrU3SG8OBi87uJLrLiYRNZKbHQ=
github.com/aws/aws-sdk-go v1.42.47/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= github.com/aws/aws-sdk-go v1.42.52/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
@ -1299,16 +1299,15 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a h1:ppl5mZgokTT8uPkmYOyEUmPTr3ypaKkg5eFOGrAmxxE=
golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1460,8 +1459,8 @@ google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3h
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM=
google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M=
google.golang.org/api v0.67.0 h1:lYaaLa+x3VVUhtosaK9xihwQ9H9KRa557REHwwZ2orM= google.golang.org/api v0.68.0 h1:9eJiHhwJKIYX6sX2fUZxQLi7pDRA/MYu8c12q6WbJik=
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= google.golang.org/api v0.68.0/go.mod h1:sOM8pTpwgflXRhz+oC8H2Dr+UcbMqkPPWNJo88Q7TH8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1544,10 +1543,10 @@ google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ6
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e h1:hXl9hnyOkeznztYpYxVPAVZfPzcbO6Q0C+nLXodza8k=
google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220211171837-173942840c17 h1:2X+CNIheCutWRyKRte8szGxrE5ggtV4U+NKAbh/oLhg=
google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -4,14 +4,165 @@ import (
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"unsafe"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
xxhash "github.com/cespare/xxhash/v2"
) )
// Cache caches Block entries.
//
// Call NewCache() for creating new Cache.
type Cache struct {
shards []*cache
cleanerMustStopCh chan struct{}
cleanerStoppedCh chan struct{}
}
// NewCache creates new cache.
//
// Cache size in bytes is limited by the value returned by getMaxSizeBytes() callback.
// Call MustStop() in order to free up resources occupied by Cache.
func NewCache(getMaxSizeBytes func() int) *Cache {
cpusCount := cgroup.AvailableCPUs()
shardsCount := cgroup.AvailableCPUs()
// Increase the number of shards with the increased number of available CPU cores.
// This should reduce contention on per-shard mutexes.
multiplier := cpusCount
if multiplier > 16 {
multiplier = 16
}
shardsCount *= multiplier
shards := make([]*cache, shardsCount)
getMaxShardBytes := func() int {
n := getMaxSizeBytes()
return n / shardsCount
}
for i := range shards {
shards[i] = newCache(getMaxShardBytes)
}
c := &Cache{
shards: shards,
cleanerMustStopCh: make(chan struct{}),
cleanerStoppedCh: make(chan struct{}),
}
go c.cleaner()
return c
}
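// Usage sketch (illustrative only, not part of the original file): the cache size is
// supplied via a callback rather than a fixed number, so the limit may change at runtime;
// somePart and someBlock below are hypothetical placeholders.
//
//	c := NewCache(func() int { return 64 * 1024 * 1024 }) // limit the cache to ~64MiB
//	defer c.MustStop()
//	c.PutBlock(Key{Part: somePart, Offset: 0}, someBlock) // someBlock implements the Block interface
//	blk := c.GetBlock(Key{Part: somePart, Offset: 0})     // returns nil on a cache miss
//	_ = blk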
// MustStop frees up resources occupied by c.
func (c *Cache) MustStop() {
close(c.cleanerMustStopCh)
<-c.cleanerStoppedCh
}
// RemoveBlocksForPart removes all the blocks for the given part from the cache.
func (c *Cache) RemoveBlocksForPart(p interface{}) {
for _, shard := range c.shards {
shard.RemoveBlocksForPart(p)
}
}
// GetBlock returns a block for the given key k from c.
func (c *Cache) GetBlock(k Key) Block {
idx := uint64(0)
if len(c.shards) > 1 {
h := k.hashUint64()
idx = h % uint64(len(c.shards))
}
shard := c.shards[idx]
return shard.GetBlock(k)
}
// PutBlock puts the given block b under the given key k into c.
func (c *Cache) PutBlock(k Key, b Block) {
idx := uint64(0)
if len(c.shards) > 1 {
h := k.hashUint64()
idx = h % uint64(len(c.shards))
}
shard := c.shards[idx]
shard.PutBlock(k, b)
}
// Len returns the number of blocks in the cache c.
func (c *Cache) Len() int {
n := 0
for _, shard := range c.shards {
n += shard.Len()
}
return n
}
// SizeBytes returns an approximate size in bytes of all the blocks stored in the cache c.
func (c *Cache) SizeBytes() int {
n := 0
for _, shard := range c.shards {
n += shard.SizeBytes()
}
return n
}
// SizeMaxBytes returns the max allowed size in bytes for c.
func (c *Cache) SizeMaxBytes() int {
n := 0
for _, shard := range c.shards {
n += shard.SizeMaxBytes()
}
return n
}
// Requests returns the number of requests served by c.
func (c *Cache) Requests() uint64 {
n := uint64(0)
for _, shard := range c.shards {
n += shard.Requests()
}
return n
}
// Misses returns the number of cache misses for c.
func (c *Cache) Misses() uint64 {
n := uint64(0)
for _, shard := range c.shards {
n += shard.Misses()
}
return n
}
func (c *Cache) cleaner() {
ticker := time.NewTicker(57 * time.Second)
defer ticker.Stop()
perKeyMissesTicker := time.NewTicker(7 * time.Minute)
defer perKeyMissesTicker.Stop()
for {
select {
case <-c.cleanerMustStopCh:
close(c.cleanerStoppedCh)
return
case <-ticker.C:
c.cleanByTimeout()
case <-perKeyMissesTicker.C:
c.cleanPerKeyMisses()
}
}
}
func (c *Cache) cleanByTimeout() {
for _, shard := range c.shards {
shard.cleanByTimeout()
}
}
func (c *Cache) cleanPerKeyMisses() {
for _, shard := range c.shards {
shard.cleanPerKeyMisses()
}
}
type cache struct {
// Atomically updated fields must go first in the struct, so they are properly
// aligned to 8 bytes on 32-bit architectures.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212
@ -45,6 +196,11 @@ type Key struct {
Offset uint64
}
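// hashUint64 hashes the raw bytes of the Key struct (the Part pointer words and the Offset).
// The sharded Cache above uses this hash for picking the shard that owns the given Key.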
func (k *Key) hashUint64() uint64 {
buf := (*[unsafe.Sizeof(*k)]byte)(unsafe.Pointer(k))
return xxhash.Sum64(buf[:])
}
// Block is an item, which may be cached in the Cache.
type Block interface {
// SizeBytes must return the approximate size of the given block in bytes
@ -61,72 +217,55 @@ type cacheEntry struct {
block Block
}
func newCache(getMaxSizeBytes func() int) *cache {
var c cache
c.getMaxSizeBytes = getMaxSizeBytes
c.m = make(map[interface{}]map[uint64]*cacheEntry)
c.perKeyMisses = make(map[Key]int)
return &c
}
func (c *cache) RemoveBlocksForPart(p interface{}) {
c.mu.Lock()
sizeBytes := 0
for _, e := range c.m[p] {
sizeBytes += e.block.SizeBytes()
// do not delete the entry from c.perKeyMisses, since it is removed by cache.cleaner later.
}
c.updateSizeBytes(-sizeBytes)
delete(c.m, p)
c.mu.Unlock()
}
func (c *cache) updateSizeBytes(n int) {
atomic.AddInt64(&c.sizeBytes, int64(n))
}
// cleaner periodically cleans least recently used entries in c. func (c *cache) cleanPerKeyMisses() {
func (c *Cache) cleaner() {
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
perKeyMissesTicker := time.NewTicker(2 * time.Minute)
defer perKeyMissesTicker.Stop()
for {
select {
case <-ticker.C:
c.cleanByTimeout()
case <-perKeyMissesTicker.C:
c.mu.Lock()
c.perKeyMisses = make(map[Key]int, len(c.perKeyMisses))
c.mu.Unlock()
}
}
}
func (c *Cache) cleanByTimeout() {
currentTime := fasttime.UnixTimestamp()
	c.mu.Lock()
for _, pes := range c.m { c.perKeyMisses = make(map[Key]int, len(c.perKeyMisses))
for offset, e := range pes {
// Delete items accessed more than two minutes ago.
// This time should be enough for repeated queries.
if currentTime-atomic.LoadUint64(&e.lastAccessTime) > 2*60 {
c.updateSizeBytes(-e.block.SizeBytes())
delete(pes, offset)
// do not delete the entry from c.perKeyMisses, since it is removed by Cache.cleaner later.
}
}
}
	c.mu.Unlock()
}
// GetBlock returns a block for the given key k from c. func (c *cache) cleanByTimeout() {
func (c *Cache) GetBlock(k Key) Block { // Delete items accessed more than five minutes ago.
// This time should be enough for repeated queries.
lastAccessTime := fasttime.UnixTimestamp() - 5*60
c.mu.Lock()
defer c.mu.Unlock()
for _, pes := range c.m {
for offset, e := range pes {
if lastAccessTime > atomic.LoadUint64(&e.lastAccessTime) {
c.updateSizeBytes(-e.block.SizeBytes())
delete(pes, offset)
// do not delete the entry from c.perKeyMisses, since it is removed by cache.cleaner later.
}
}
}
}
func (c *cache) GetBlock(k Key) Block {
	atomic.AddUint64(&c.requests, 1)
	var e *cacheEntry
	c.mu.RLock()
@ -151,8 +290,7 @@ func (c *Cache) GetBlock(k Key) Block {
	return nil
}
// PutBlock puts the given block b under the given key k into c. func (c *cache) PutBlock(k Key, b Block) {
func (c *Cache) PutBlock(k Key, b Block) {
	c.mu.RLock()
	// If the entry wasn't accessed yet (e.g. c.perKeyMisses[k] == 0), then cache it, since it is likely it will be accessed soon.
	// Do not cache the entry only if there was only a single unsuccessful attempt to access it.
@ -167,14 +305,19 @@ func (c *Cache) PutBlock(k Key, b Block) {
	// Store b in the cache.
	c.mu.Lock()
e := &cacheEntry{ defer c.mu.Unlock()
lastAccessTime: fasttime.UnixTimestamp(),
block: b,
}
	pes := c.m[k.Part]
	if pes == nil {
		pes = make(map[uint64]*cacheEntry)
		c.m[k.Part] = pes
} else if pes[k.Offset] != nil {
// The block has been already registered by concurrent goroutine.
return
}
e := &cacheEntry{
lastAccessTime: fasttime.UnixTimestamp(),
block: b,
	}
	pes[k.Offset] = e
	c.updateSizeBytes(e.block.SizeBytes())
@ -185,41 +328,37 @@ func (c *Cache) PutBlock(k Key, b Block) {
		for offset, e := range pes {
			c.updateSizeBytes(-e.block.SizeBytes())
			delete(pes, offset)
			// do not delete the entry from c.perKeyMisses, since it is removed by cache.cleaner later.
			if c.SizeBytes() < maxSizeBytes {
goto end return
				}
			}
		}
	}
end:
c.mu.Unlock()
}
// Len returns the number of blocks in the cache c. func (c *cache) Len() int {
func (c *Cache) Len() int {
	c.mu.RLock()
n := len(c.m) n := 0
for _, m := range c.m {
n += len(m)
}
	c.mu.RUnlock()
	return n
}
// SizeBytes returns an approximate size in bytes of all the blocks stored in the cache c. func (c *cache) SizeBytes() int {
func (c *Cache) SizeBytes() int {
	return int(atomic.LoadInt64(&c.sizeBytes))
}
// SizeMaxBytes returns the max allowed size in bytes for c. func (c *cache) SizeMaxBytes() int {
func (c *Cache) SizeMaxBytes() int {
	return c.getMaxSizeBytes()
}
// Requests returns the number of requests served by c. func (c *cache) Requests() uint64 {
func (c *Cache) Requests() uint64 {
	return atomic.LoadUint64(&c.requests)
}
// Misses returns the number of cache misses for c. func (c *cache) Misses() uint64 {
func (c *Cache) Misses() uint64 {
	return atomic.LoadUint64(&c.misses)
}


@ -0,0 +1,159 @@
package blockcache
import (
"fmt"
"sync"
"testing"
)
func TestCache(t *testing.T) {
const sizeMaxBytes = 1024 * 1024
getMaxSize := func() int {
return sizeMaxBytes
}
c := NewCache(getMaxSize)
defer c.MustStop()
if n := c.SizeBytes(); n != 0 {
t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, 0)
}
if n := c.SizeMaxBytes(); n != sizeMaxBytes {
t.Fatalf("unexpected SizeMaxBytes(); got %d; want %d", n, sizeMaxBytes)
}
offset := uint64(1234)
part := (interface{})("foobar")
k := Key{
Offset: offset,
Part: part,
}
var b testBlock
blockSize := b.SizeBytes()
// Put a single entry into cache
c.PutBlock(k, &b)
if n := c.Len(); n != 1 {
t.Fatalf("unexpected number of items in the cache; got %d; want %d", n, 1)
}
if n := c.SizeBytes(); n != blockSize {
t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, blockSize)
}
if n := c.Requests(); n != 0 {
t.Fatalf("unexpected number of requests; got %d; want %d", n, 0)
}
if n := c.Misses(); n != 0 {
t.Fatalf("unexpected number of misses; got %d; want %d", n, 0)
}
// Obtain this entry from the cache
if b1 := c.GetBlock(k); b1 != &b {
t.Fatalf("unexpected block obtained; got %v; want %v", b1, &b)
}
if n := c.Requests(); n != 1 {
t.Fatalf("unexpected number of requests; got %d; want %d", n, 1)
}
if n := c.Misses(); n != 0 {
t.Fatalf("unexpected number of misses; got %d; want %d", n, 0)
}
// Obtain non-existing entry from the cache
if b1 := c.GetBlock(Key{Offset: offset + 1}); b1 != nil {
t.Fatalf("unexpected non-nil block obtained for non-existing key: %v", b1)
}
if n := c.Requests(); n != 2 {
t.Fatalf("unexpected number of requests; got %d; want %d", n, 2)
}
if n := c.Misses(); n != 1 {
t.Fatalf("unexpected number of misses; got %d; want %d", n, 1)
}
// Remove entries for the given part from the cache
c.RemoveBlocksForPart(part)
if n := c.SizeBytes(); n != 0 {
t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, 0)
}
// Verify that the entry has been removed from the cache
if b1 := c.GetBlock(k); b1 != nil {
t.Fatalf("unexpected non-nil block obtained after removing all the blocks for the part; got %v", b1)
}
if n := c.Requests(); n != 3 {
t.Fatalf("unexpected number of requests; got %d; want %d", n, 3)
}
if n := c.Misses(); n != 2 {
t.Fatalf("unexpected number of misses; got %d; want %d", n, 2)
}
// Store the missed entry to the cache. It shouldn't be stored because of the previous cache miss
c.PutBlock(k, &b)
if n := c.SizeBytes(); n != 0 {
t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, 0)
}
// Verify that the entry wasn't stored to the cache.
if b1 := c.GetBlock(k); b1 != nil {
t.Fatalf("unexpected non-nil block obtained after removing all the blocks for the part; got %v", b1)
}
if n := c.Requests(); n != 4 {
t.Fatalf("unexpected number of requests; got %d; want %d", n, 4)
}
if n := c.Misses(); n != 3 {
t.Fatalf("unexpected number of misses; got %d; want %d", n, 3)
}
// Store the entry again. Now it must be stored because of the second cache miss.
c.PutBlock(k, &b)
if n := c.SizeBytes(); n != blockSize {
t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, blockSize)
}
if b1 := c.GetBlock(k); b1 != &b {
t.Fatalf("unexpected block obtained; got %v; want %v", b1, &b)
}
if n := c.Requests(); n != 5 {
t.Fatalf("unexpected number of requests; got %d; want %d", n, 5)
}
if n := c.Misses(); n != 3 {
t.Fatalf("unexpected number of misses; got %d; want %d", n, 3)
}
// Manually clean the cache. The entry shouldn't be deleted because it was recently accessed.
c.cleanPerKeyMisses()
c.cleanByTimeout()
if n := c.SizeBytes(); n != blockSize {
t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, blockSize)
}
}
func TestCacheConcurrentAccess(t *testing.T) {
const sizeMaxBytes = 16 * 1024 * 1024
getMaxSize := func() int {
return sizeMaxBytes
}
c := NewCache(getMaxSize)
defer c.MustStop()
workers := 5
var wg sync.WaitGroup
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
defer wg.Done()
testCacheSetGet(c)
}()
}
wg.Wait()
}
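// testCacheSetGet stores blocks in c and immediately reads them back; it is safe to call concurrently from multiple goroutines.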
func testCacheSetGet(c *Cache) {
for i := 0; i < 1000; i++ {
part := (interface{})(i)
b := testBlock{}
k := Key{
Offset: uint64(i),
Part: part,
}
c.PutBlock(k, &b)
if b1 := c.GetBlock(k); b1 != &b {
panic(fmt.Errorf("unexpected block obtained; got %v; want %v", b1, &b))
}
if b1 := c.GetBlock(Key{}); b1 != nil {
panic(fmt.Errorf("unexpected non-nil block obtained: %v", b1))
}
}
}
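// testBlock is a minimal Block implementation for tests; it reports a fixed fake size.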
type testBlock struct{}
func (tb *testBlock) SizeBytes() int {
return 42
}


@ -0,0 +1,50 @@
package blockcache
import (
"fmt"
"sync/atomic"
"testing"
)
func BenchmarkKeyHashUint64(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
var hSum uint64
var k Key
for pb.Next() {
k.Offset++
h := k.hashUint64()
hSum += h
}
atomic.AddUint64(&BenchSink, hSum)
})
}
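// BenchSink prevents the compiler from optimizing away the benchmark results.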
var BenchSink uint64
func BenchmarkCacheGet(b *testing.B) {
c := NewCache(func() int {
return 1024 * 1024 * 16
})
defer c.MustStop()
const blocksCount = 10000
blocks := make([]*testBlock, blocksCount)
for i := 0; i < blocksCount; i++ {
blocks[i] = &testBlock{}
c.PutBlock(Key{Offset: uint64(i)}, blocks[i])
}
b.ReportAllocs()
b.SetBytes(int64(len(blocks)))
b.RunParallel(func(pb *testing.PB) {
var k Key
for pb.Next() {
for i := 0; i < blocksCount; i++ {
k.Offset = uint64(i)
b := c.GetBlock(k)
if b != blocks[i] {
panic(fmt.Errorf("unexpected block obtained from the cache; got %v; want %v", b, blocks[i]))
}
}
}
})
}


@ -31,8 +31,8 @@ import (
var (
	tlsEnable = flag.Bool("tls", false, "Whether to enable TLS (aka HTTPS) for incoming requests. -tlsCertFile and -tlsKeyFile must be set if -tls is set")
tlsCertFile = flag.String("tlsCertFile", "", "Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower") tlsCertFile = flag.String("tlsCertFile", "", "Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated")
tlsKeyFile = flag.String("tlsKeyFile", "", "Path to file with TLS key. Used only if -tls is set") tlsKeyFile = flag.String("tlsKeyFile", "", "Path to file with TLS key. Used only if -tls is set. The provided key file is automatically re-read every second, so it can be dynamically updated")
	pathPrefix = flag.String("http.pathPrefix", "", "An optional prefix to add to all the paths handled by http server. For example, if '-http.pathPrefix=/foo/bar' is set, "+
		"then all the http requests will be handled on '/foo/bar/*' paths. This may be useful for proxied requests. "+
@ -97,14 +97,30 @@ func Serve(addr string, rh RequestHandler) {
	ln := net.Listener(lnTmp)
	if *tlsEnable {
cert, err := tls.LoadX509KeyPair(*tlsCertFile, *tlsKeyFile) var certLock sync.Mutex
var certDeadline uint64
var cert *tls.Certificate
c, err := tls.LoadX509KeyPair(*tlsCertFile, *tlsKeyFile)
		if err != nil {
			logger.Fatalf("cannot load TLS cert from tlsCertFile=%q, tlsKeyFile=%q: %s", *tlsCertFile, *tlsKeyFile, err)
		}
cert = &c
		cfg := &tls.Config{
Certificates: []tls.Certificate{cert},
			MinVersion: tls.VersionTLS12,
			PreferServerCipherSuites: true,
GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
certLock.Lock()
defer certLock.Unlock()
if fasttime.UnixTimestamp() > certDeadline {
c, err = tls.LoadX509KeyPair(*tlsCertFile, *tlsKeyFile)
if err != nil {
return nil, fmt.Errorf("cannot load TLS cert from tlsCertFile=%q, tlsKeyFile=%q: %w", *tlsCertFile, *tlsKeyFile, err)
}
certDeadline = fasttime.UnixTimestamp() + 1
cert = &c
}
return cert, nil
},
		}
		ln = tls.NewListener(ln, cfg)
	}


@ -17,7 +17,7 @@ var ibCache = blockcache.NewCache(getMaxInmemoryBlocksCacheSize)
func getMaxIndexBlocksCacheSize() int {
	maxIndexBlockCacheSizeOnce.Do(func() {
maxIndexBlockCacheSize = int(0.2 * float64(memory.Allowed())) maxIndexBlockCacheSize = int(0.15 * float64(memory.Allowed()))
	})
	return maxIndexBlockCacheSize
}
@ -29,7 +29,7 @@ var (
func getMaxInmemoryBlocksCacheSize() int {
	maxInmemoryBlockCacheSizeOnce.Do(func() {
maxInmemoryBlockCacheSize = int(0.3 * float64(memory.Allowed())) maxInmemoryBlockCacheSize = int(0.4 * float64(memory.Allowed()))
	})
	return maxInmemoryBlockCacheSize
}


@ -30,6 +30,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/http" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/http"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/kubernetes" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/kubernetes"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/openstack" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/openstack"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy" "github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
"github.com/VictoriaMetrics/metrics" "github.com/VictoriaMetrics/metrics"
xxhash "github.com/cespare/xxhash/v2" xxhash "github.com/cespare/xxhash/v2"
@ -37,9 +38,8 @@ import (
)
var (
strictParse = flag.Bool("promscrape.config.strictParse", false, "Whether to allow only supported fields in -promscrape.config . "+ strictParse = flag.Bool("promscrape.config.strictParse", true, "Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields")
"By default unsupported fields are silently skipped") dryRun = flag.Bool("promscrape.config.dryRun", false, "Checks -promscrape.config file for errors and unsupported fields and then exits. "+
dryRun = flag.Bool("promscrape.config.dryRun", false, "Checks -promscrape.config file for errors and unsupported fields and then exits. "+
"Returns non-zero exit code on parsing errors and emits these errors to stderr. "+ "Returns non-zero exit code on parsing errors and emits these errors to stderr. "+
"See also -promscrape.config.strictParse command-line flag. "+ "See also -promscrape.config.strictParse command-line flag. "+
"Pass -loggerLevel=ERROR if you don't need to see info messages in the output.") "Pass -loggerLevel=ERROR if you don't need to see info messages in the output.")
@ -106,9 +106,9 @@ func (cfg *Config) getJobNames() []string {
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/
type GlobalConfig struct {
ScrapeInterval time.Duration `yaml:"scrape_interval,omitempty"` ScrapeInterval promutils.Duration `yaml:"scrape_interval,omitempty"`
ScrapeTimeout time.Duration `yaml:"scrape_timeout,omitempty"` ScrapeTimeout promutils.Duration `yaml:"scrape_timeout,omitempty"`
ExternalLabels map[string]string `yaml:"external_labels,omitempty"` ExternalLabels map[string]string `yaml:"external_labels,omitempty"`
}
// ScrapeConfig represents essential parts for `scrape_config` section of Prometheus config.
@ -116,8 +116,8 @@ type GlobalConfig struct {
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
type ScrapeConfig struct {
	JobName string `yaml:"job_name"`
ScrapeInterval time.Duration `yaml:"scrape_interval,omitempty"` ScrapeInterval promutils.Duration `yaml:"scrape_interval,omitempty"`
ScrapeTimeout time.Duration `yaml:"scrape_timeout,omitempty"` ScrapeTimeout promutils.Duration `yaml:"scrape_timeout,omitempty"`
	MetricsPath string `yaml:"metrics_path,omitempty"`
	HonorLabels bool `yaml:"honor_labels,omitempty"`
	HonorTimestamps *bool `yaml:"honor_timestamps,omitempty"`
@ -150,8 +150,8 @@ type ScrapeConfig struct {
	DisableCompression bool `yaml:"disable_compression,omitempty"`
	DisableKeepAlive bool `yaml:"disable_keepalive,omitempty"`
	StreamParse bool `yaml:"stream_parse,omitempty"`
ScrapeAlignInterval time.Duration `yaml:"scrape_align_interval,omitempty"` ScrapeAlignInterval promutils.Duration `yaml:"scrape_align_interval,omitempty"`
ScrapeOffset time.Duration `yaml:"scrape_offset,omitempty"` ScrapeOffset promutils.Duration `yaml:"scrape_offset,omitempty"`
	SeriesLimit int `yaml:"series_limit,omitempty"`
	ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"`
@ -335,7 +335,9 @@ func unmarshalMaybeStrict(data []byte, dst interface{}) error {
	data = envtemplate.Replace(data)
	var err error
	if *strictParse {
err = yaml.UnmarshalStrict(data, dst) if err = yaml.UnmarshalStrict(data, dst); err != nil {
err = fmt.Errorf("%w; pass -promscrape.config.strictParse=false command-line flag for ignoring unknown fields in yaml config", err)
}
	} else {
		err = yaml.Unmarshal(data, dst)
	}
@ -704,16 +706,16 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
if jobName == "" { if jobName == "" {
return nil, fmt.Errorf("missing `job_name` field in `scrape_config`") return nil, fmt.Errorf("missing `job_name` field in `scrape_config`")
} }
scrapeInterval := sc.ScrapeInterval scrapeInterval := sc.ScrapeInterval.Duration()
	if scrapeInterval <= 0 {
scrapeInterval = globalCfg.ScrapeInterval scrapeInterval = globalCfg.ScrapeInterval.Duration()
		if scrapeInterval <= 0 {
			scrapeInterval = defaultScrapeInterval
		}
	}
scrapeTimeout := sc.ScrapeTimeout scrapeTimeout := sc.ScrapeTimeout.Duration()
	if scrapeTimeout <= 0 {
scrapeTimeout = globalCfg.ScrapeTimeout scrapeTimeout = globalCfg.ScrapeTimeout.Duration()
		if scrapeTimeout <= 0 {
			scrapeTimeout = defaultScrapeTimeout
		}
@ -787,8 +789,8 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
		disableCompression: sc.DisableCompression,
		disableKeepAlive: sc.DisableKeepAlive,
		streamParse: sc.StreamParse,
scrapeAlignInterval: sc.ScrapeAlignInterval, scrapeAlignInterval: sc.ScrapeAlignInterval.Duration(),
scrapeOffset: sc.ScrapeOffset, scrapeOffset: sc.ScrapeOffset.Duration(),
		seriesLimit: sc.SeriesLimit,
	}
	return swc, nil
@ -1056,7 +1058,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
	// Read __scrape_interval__ and __scrape_timeout__ from labels.
	scrapeInterval := swc.scrapeInterval
	if s := promrelabel.GetLabelValueByName(labels, "__scrape_interval__"); len(s) > 0 {
d, err := time.ParseDuration(s) d, err := promutils.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __scrape_interval__=%q: %w", s, err)
		}
@ -1064,7 +1066,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
	}
	scrapeTimeout := swc.scrapeTimeout
	if s := promrelabel.GetLabelValueByName(labels, "__scrape_timeout__"); len(s) > 0 {
d, err := time.ParseDuration(s) d, err := promutils.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __scrape_timeout__=%q: %w", s, err)
		}


@ -1648,6 +1648,59 @@ scrape_configs:
			ProxyAuthConfig: &promauth.Config{},
		},
	})
f(`
global:
scrape_timeout: 1d
scrape_configs:
- job_name: foo
scrape_interval: 1w
scrape_align_interval: 1d
scrape_offset: 2d
static_configs:
- targets: ["foo.bar:1234"]
`, []*ScrapeWork{
{
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: time.Hour * 24 * 7,
ScrapeTimeout: time.Hour * 24,
ScrapeAlignInterval: time.Hour * 24,
ScrapeOffset: time.Hour * 24 * 2,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
Value: "foo.bar:1234",
},
{
Name: "__metrics_path__",
Value: "/metrics",
},
{
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "168h0m0s",
},
{
Name: "__scrape_timeout__",
Value: "24h0m0s",
},
{
Name: "instance",
Value: "foo.bar:1234",
},
{
Name: "job",
Value: "foo",
},
},
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
},
})
}
func equalStaticConfigForScrapeWorks(a, b []*ScrapeWork) bool {


@ -54,6 +54,11 @@ func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error {
	return nil
}
// MarshalYAML implements yaml.Marshaler
func (z ZoneYAML) MarshalYAML() (interface{}, error) {
return z.zones, nil
}
// GetLabels returns gce labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
	cfg, err := getAPIConfig(sdc)


@ -0,0 +1,27 @@
package gce
import (
"testing"
"gopkg.in/yaml.v2"
)
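// TestMarshallingSDConfigWithZoneYAML verifies that ZoneYAML marshals back into a plain list of zones.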
func TestMarshallingSDConfigWithZoneYAML(t *testing.T) {
sdConfig := SDConfig{
Project: "test-project",
Zone: ZoneYAML{
zones: []string{"zone-a", "zone-b"},
},
}
data, err := yaml.Marshal(sdConfig)
if err != nil {
t.Fatalf("unexpected non-nil error")
}
strData := string(data)
expected := "project: test-project\nzone:\n- zone-a\n- zone-b\n"
if strData != expected {
t.Fatalf("unexpected marshal:\ngot \n%vwant\n%v", strData, expected)
}
}


@ -164,16 +164,22 @@ func getEndpointLabelsForAddressAndPort(podPortsSeen map[*Pod][]int, eps *Endpoi
	if svc != nil {
		svc.appendCommonLabels(m)
	}
// See https://github.com/prometheus/prometheus/issues/10284
	eps.Metadata.registerLabelsAndAnnotations("__meta_kubernetes_endpoints", m)
	if ea.TargetRef.Kind != "Pod" || p == nil {
		return m
	}
	p.appendCommonLabels(m)
// always add pod targetRef, even if epp port doesn't match container port
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2134
if _, ok := podPortsSeen[p]; !ok {
podPortsSeen[p] = []int{}
}
	for _, c := range p.Spec.Containers {
		for _, cp := range c.Ports {
			if cp.ContainerPort == epp.Port {
p.appendContainerLabels(m, c, &cp)
				podPortsSeen[p] = append(podPortsSeen[p], cp.ContainerPort)
p.appendContainerLabels(m, c, &cp)
				break
			}
		}


@ -113,3 +113,203 @@ func TestParseEndpointsListSuccess(t *testing.T) {
t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, expectedLabelss) t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, expectedLabelss)
} }
} }
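// TestGetEndpointLabels verifies the labels generated for endpoints backed by pods, including the case when the endpoint port doesn't match any container port.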
func TestGetEndpointLabels(t *testing.T) {
type testArgs struct {
containerPorts map[string][]ContainerPort
endpointPorts []EndpointPort
}
f := func(name string, args testArgs, wantLabels [][]prompbmarshal.Label) {
t.Run(name, func(t *testing.T) {
eps := Endpoints{
Metadata: ObjectMeta{
Name: "test-eps",
Namespace: "default",
},
Subsets: []EndpointSubset{
{
Ports: args.endpointPorts,
Addresses: []EndpointAddress{
{
IP: "10.13.15.15",
TargetRef: ObjectReference{
Kind: "Pod",
Namespace: "default",
Name: "test-pod",
},
},
},
},
},
}
svc := Service{
Metadata: ObjectMeta{
Name: "test-eps",
Namespace: "default",
},
Spec: ServiceSpec{
Ports: []ServicePort{
{
Name: "test-port",
Port: 8081,
},
},
},
}
pod := Pod{
Metadata: ObjectMeta{
Name: "test-pod",
Namespace: "default",
},
Status: PodStatus{PodIP: "192.168.15.1"},
}
for cn, ports := range args.containerPorts {
pod.Spec.Containers = append(pod.Spec.Containers, Container{Name: cn, Ports: ports})
}
var gw groupWatcher
gw.m = map[string]*urlWatcher{
"pod": {
role: "pod",
objectsByKey: map[string]object{
"default/test-pod": &pod,
},
},
"service": {
role: "service",
objectsByKey: map[string]object{
"default/test-eps": &svc,
},
},
}
var sortedLabelss [][]prompbmarshal.Label
gotLabels := eps.getTargetLabels(&gw)
for _, lbs := range gotLabels {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(lbs))
}
if !areEqualLabelss(sortedLabelss, wantLabels) {
t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, wantLabels)
}
})
}
f("1 port from endpoint", testArgs{
endpointPorts: []EndpointPort{
{
Name: "web",
Port: 8081,
},
},
}, [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
"__address__": "10.13.15.15:8081",
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
"__meta_kubernetes_endpoint_address_target_name": "test-pod",
"__meta_kubernetes_endpoint_port_name": "web",
"__meta_kubernetes_endpoint_port_protocol": "",
"__meta_kubernetes_endpoint_ready": "true",
"__meta_kubernetes_endpoints_name": "test-eps",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_pod_host_ip": "",
"__meta_kubernetes_pod_ip": "192.168.15.1",
"__meta_kubernetes_pod_name": "test-pod",
"__meta_kubernetes_pod_node_name": "",
"__meta_kubernetes_pod_phase": "",
"__meta_kubernetes_pod_ready": "unknown",
"__meta_kubernetes_pod_uid": "",
"__meta_kubernetes_service_cluster_ip": "",
"__meta_kubernetes_service_name": "test-eps",
"__meta_kubernetes_service_type": "",
}),
})
f("1 port from endpoint and 1 from pod", testArgs{
containerPorts: map[string][]ContainerPort{"metrics": {{
Name: "http-metrics",
ContainerPort: 8428,
}}},
endpointPorts: []EndpointPort{
{
Name: "web",
Port: 8081,
},
},
}, [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
"__address__": "10.13.15.15:8081",
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
"__meta_kubernetes_endpoint_address_target_name": "test-pod",
"__meta_kubernetes_endpoint_port_name": "web",
"__meta_kubernetes_endpoint_port_protocol": "",
"__meta_kubernetes_endpoint_ready": "true",
"__meta_kubernetes_endpoints_name": "test-eps",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_pod_host_ip": "",
"__meta_kubernetes_pod_ip": "192.168.15.1",
"__meta_kubernetes_pod_name": "test-pod",
"__meta_kubernetes_pod_node_name": "",
"__meta_kubernetes_pod_phase": "",
"__meta_kubernetes_pod_ready": "unknown",
"__meta_kubernetes_pod_uid": "",
"__meta_kubernetes_service_cluster_ip": "",
"__meta_kubernetes_service_name": "test-eps",
"__meta_kubernetes_service_type": "",
}),
discoveryutils.GetSortedLabels(map[string]string{
"__address__": "192.168.15.1:8428",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_pod_container_name": "metrics",
"__meta_kubernetes_pod_container_port_name": "http-metrics",
"__meta_kubernetes_pod_container_port_number": "8428",
"__meta_kubernetes_pod_container_port_protocol": "",
"__meta_kubernetes_pod_host_ip": "",
"__meta_kubernetes_pod_ip": "192.168.15.1",
"__meta_kubernetes_pod_name": "test-pod",
"__meta_kubernetes_pod_node_name": "",
"__meta_kubernetes_pod_phase": "",
"__meta_kubernetes_pod_ready": "unknown",
"__meta_kubernetes_pod_uid": "",
"__meta_kubernetes_service_cluster_ip": "",
"__meta_kubernetes_service_name": "test-eps",
"__meta_kubernetes_service_type": "",
}),
})
f("1 port from endpoint", testArgs{
containerPorts: map[string][]ContainerPort{"metrics": {{
Name: "web",
ContainerPort: 8428,
}}},
endpointPorts: []EndpointPort{
{
Name: "web",
Port: 8428,
},
},
}, [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
"__address__": "10.13.15.15:8428",
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
"__meta_kubernetes_endpoint_address_target_name": "test-pod",
"__meta_kubernetes_endpoint_port_name": "web",
"__meta_kubernetes_endpoint_port_protocol": "",
"__meta_kubernetes_endpoint_ready": "true",
"__meta_kubernetes_endpoints_name": "test-eps",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_pod_container_name": "metrics",
"__meta_kubernetes_pod_container_port_name": "web",
"__meta_kubernetes_pod_container_port_number": "8428",
"__meta_kubernetes_pod_container_port_protocol": "",
"__meta_kubernetes_pod_host_ip": "",
"__meta_kubernetes_pod_ip": "192.168.15.1",
"__meta_kubernetes_pod_name": "test-pod",
"__meta_kubernetes_pod_node_name": "",
"__meta_kubernetes_pod_phase": "",
"__meta_kubernetes_pod_ready": "unknown",
"__meta_kubernetes_pod_uid": "",
"__meta_kubernetes_service_cluster_ip": "",
"__meta_kubernetes_service_name": "test-eps",
"__meta_kubernetes_service_type": "",
}),
})
}


@ -98,15 +98,22 @@ func getEndpointSliceLabelsForAddressAndPort(podPortsSeen map[*Pod][]int, addr s
	if svc != nil {
		svc.appendCommonLabels(m)
	}
// See https://github.com/prometheus/prometheus/issues/10284
eps.Metadata.registerLabelsAndAnnotations("__meta_kubernetes_endpointslice", m)
if ea.TargetRef.Kind != "Pod" || p == nil { if ea.TargetRef.Kind != "Pod" || p == nil {
return m return m
} }
// always add pod targetRef, even if epp port doesn't match container port.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2134
if _, ok := podPortsSeen[p]; !ok {
podPortsSeen[p] = []int{}
}
	p.appendCommonLabels(m)
	for _, c := range p.Spec.Containers {
		for _, cp := range c.Ports {
			if cp.ContainerPort == epp.Port {
p.appendContainerLabels(m, c, &cp)
				podPortsSeen[p] = append(podPortsSeen[p], cp.ContainerPort)
p.appendContainerLabels(m, c, &cp)
				break
			}
		}
@ -117,7 +124,6 @@ func getEndpointSliceLabelsForAddressAndPort(podPortsSeen map[*Pod][]int, addr s
// getEndpointSliceLabels builds labels for the given EndpointSlice.
func getEndpointSliceLabels(eps *EndpointSlice, addr string, ea Endpoint, epp EndpointPort) map[string]string {
	addr = discoveryutils.JoinHostPort(addr, epp.Port)
	m := map[string]string{
		"__address__": addr,


@ -137,32 +137,9 @@ func TestParseEndpointSliceListSuccess(t *testing.T) {
"topology": { "topology": {
"kubernetes.io/hostname": "kind-control-plane" "kubernetes.io/hostname": "kind-control-plane"
} }
},
{
"addresses": [
"10.244.0.4"
],
"conditions": {
"ready": true
},
"targetRef": {
"kind": "Pod",
"namespace": "kube-system",
"name": "coredns-66bff467f8-kpbhk",
"uid": "db38d8b4-847a-4e82-874c-fe444fba2718",
"resourceVersion": "576"
},
"topology": {
"kubernetes.io/hostname": "kind-control-plane"
}
			}
		],
		"ports": [
{
"name": "dns-tcp",
"protocol": "TCP",
"port": 53
},
			{
				"name": "metrics",
				"protocol": "TCP",
@ -189,99 +166,57 @@ func TestParseEndpointSliceListSuccess(t *testing.T) {
	}
	sortedLabelss := getSortedLabelss(objectsByKey)
	expectedLabelss := [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
"__address__": "172.18.0.2:6443",
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_name": "kubernetes",
"__meta_kubernetes_endpointslice_port": "6443",
"__meta_kubernetes_endpointslice_port_name": "https",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_namespace": "default",
}),
discoveryutils.GetSortedLabels(map[string]string{ discoveryutils.GetSortedLabels(map[string]string{
"__address__": "10.244.0.3:53", "__address__": "10.244.0.3:53",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod", "__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "coredns-66bff467f8-z8czk", "__meta_kubernetes_endpointslice_address_target_name": "coredns-66bff467f8-z8czk",
"__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_annotation_endpoints_kubernetes_io_last_change_trigger_time": "2020-09-07T14:28:35Z",
"__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname": "kind-control-plane", "__meta_kubernetes_endpointslice_annotationpresent_endpoints_kubernetes_io_last_change_trigger_time": "true",
"__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_name": "kube-dns-22mvb", "__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname": "kind-control-plane",
"__meta_kubernetes_endpointslice_port": "53", "__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname": "true",
"__meta_kubernetes_endpointslice_port_name": "dns-tcp", "__meta_kubernetes_endpointslice_label_endpointslice_kubernetes_io_managed_by": "endpointslice-controller.k8s.io",
"__meta_kubernetes_endpointslice_port_protocol": "TCP", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "kube-dns",
"__meta_kubernetes_namespace": "kube-system", "__meta_kubernetes_endpointslice_labelpresent_endpointslice_kubernetes_io_managed_by": "true",
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
"__meta_kubernetes_endpointslice_name": "kube-dns-22mvb",
"__meta_kubernetes_endpointslice_port": "53",
"__meta_kubernetes_endpointslice_port_name": "dns",
"__meta_kubernetes_endpointslice_port_protocol": "UDP",
"__meta_kubernetes_namespace": "kube-system",
}), }),
discoveryutils.GetSortedLabels(map[string]string{ discoveryutils.GetSortedLabels(map[string]string{
"__address__": "10.244.0.3:9153", "__address__": "10.244.0.3:9153",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod", "__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "coredns-66bff467f8-z8czk", "__meta_kubernetes_endpointslice_address_target_name": "coredns-66bff467f8-z8czk",
"__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_annotation_endpoints_kubernetes_io_last_change_trigger_time": "2020-09-07T14:28:35Z",
"__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname": "kind-control-plane", "__meta_kubernetes_endpointslice_annotationpresent_endpoints_kubernetes_io_last_change_trigger_time": "true",
"__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_name": "kube-dns-22mvb", "__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname": "kind-control-plane",
"__meta_kubernetes_endpointslice_port": "9153", "__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname": "true",
"__meta_kubernetes_endpointslice_port_name": "metrics", "__meta_kubernetes_endpointslice_label_endpointslice_kubernetes_io_managed_by": "endpointslice-controller.k8s.io",
"__meta_kubernetes_endpointslice_port_protocol": "TCP", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "kube-dns",
"__meta_kubernetes_namespace": "kube-system", "__meta_kubernetes_endpointslice_labelpresent_endpointslice_kubernetes_io_managed_by": "true",
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
"__meta_kubernetes_endpointslice_name": "kube-dns-22mvb",
"__meta_kubernetes_endpointslice_port": "9153",
"__meta_kubernetes_endpointslice_port_name": "metrics",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_namespace": "kube-system",
}), }),
discoveryutils.GetSortedLabels(map[string]string{ discoveryutils.GetSortedLabels(map[string]string{
"__address__": "10.244.0.3:53", "__address__": "172.18.0.2:6443",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod", "__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_address_target_name": "coredns-66bff467f8-z8czk", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "kubernetes",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
"__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname": "kind-control-plane", "__meta_kubernetes_endpointslice_name": "kubernetes",
"__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname": "true", "__meta_kubernetes_endpointslice_port": "6443",
"__meta_kubernetes_endpointslice_name": "kube-dns-22mvb", "__meta_kubernetes_endpointslice_port_name": "https",
"__meta_kubernetes_endpointslice_port": "53", "__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_endpointslice_port_name": "dns", "__meta_kubernetes_namespace": "default",
"__meta_kubernetes_endpointslice_port_protocol": "UDP",
"__meta_kubernetes_namespace": "kube-system",
}),
discoveryutils.GetSortedLabels(map[string]string{
"__address__": "10.244.0.4:53",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "coredns-66bff467f8-kpbhk",
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname": "kind-control-plane",
"__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname": "true",
"__meta_kubernetes_endpointslice_name": "kube-dns-22mvb",
"__meta_kubernetes_endpointslice_port": "53",
"__meta_kubernetes_endpointslice_port_name": "dns-tcp",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_namespace": "kube-system",
}),
discoveryutils.GetSortedLabels(map[string]string{
"__address__": "10.244.0.4:9153",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "coredns-66bff467f8-kpbhk",
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname": "kind-control-plane",
"__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname": "true",
"__meta_kubernetes_endpointslice_name": "kube-dns-22mvb",
"__meta_kubernetes_endpointslice_port": "9153",
"__meta_kubernetes_endpointslice_port_name": "metrics",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_namespace": "kube-system",
}),
discoveryutils.GetSortedLabels(map[string]string{
"__address__": "10.244.0.4:53",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "coredns-66bff467f8-kpbhk",
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname": "kind-control-plane",
"__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname": "true",
"__meta_kubernetes_endpointslice_name": "kube-dns-22mvb",
"__meta_kubernetes_endpointslice_port": "53",
"__meta_kubernetes_endpointslice_port_name": "dns",
"__meta_kubernetes_endpointslice_port_protocol": "UDP",
"__meta_kubernetes_namespace": "kube-system",
}), }),
	}
	if !areEqualLabelss(sortedLabelss, expectedLabelss) {


@ -33,7 +33,7 @@ type apiConfig struct {
	// tokenLock guards creds refresh
	tokenLock sync.Mutex
	creds *apiCredentials
// authTokenReq contins request body for apiCredentials // authTokenReq contains request body for apiCredentials
	authTokenReq []byte
	// keystone endpoint
	endpoint *url.URL

View file

@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"path" "path"
"sort" "sort"
"strconv"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
) )
@ -110,12 +111,9 @@ func (cfg *apiConfig) getServers() ([]server, error) {
	}
	computeURL := *creds.computeURL
	computeURL.Path = path.Join(computeURL.Path, "servers", "detail")
// by default, query fetches data from all tenants q := computeURL.Query()
if !cfg.allTenants { q.Set("all_tenants", strconv.FormatBool(cfg.allTenants))
q := computeURL.Query() computeURL.RawQuery = q.Encode()
q.Set("all_tenants", "false")
computeURL.RawQuery = q.Encode()
}
	nextLink := computeURL.String()
	var servers []server
	for {


@ -1,466 +0,0 @@
// Code generated by qtc from "targets_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.
//line lib/promscrape/targets_response.qtpl:1
package promscrape
//line lib/promscrape/targets_response.qtpl:1
import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"time"
)
//line lib/promscrape/targets_response.qtpl:9
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line lib/promscrape/targets_response.qtpl:9
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line lib/promscrape/targets_response.qtpl:9
func StreamTargetsResponsePlain(qw422016 *qt422016.Writer, jts []jobTargetsStatuses, emptyJobs []string, showOriginLabels bool) {
//line lib/promscrape/targets_response.qtpl:11
for _, js := range jts {
//line lib/promscrape/targets_response.qtpl:11
qw422016.N().S(`job=`)
//line lib/promscrape/targets_response.qtpl:12
qw422016.N().Q(js.job)
//line lib/promscrape/targets_response.qtpl:12
qw422016.N().S(`(`)
//line lib/promscrape/targets_response.qtpl:12
qw422016.N().D(js.upCount)
//line lib/promscrape/targets_response.qtpl:12
qw422016.N().S(`/`)
//line lib/promscrape/targets_response.qtpl:12
qw422016.N().D(js.targetsTotal)
//line lib/promscrape/targets_response.qtpl:12
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:12
qw422016.N().S(`up)`)
//line lib/promscrape/targets_response.qtpl:13
qw422016.N().S(`
`)
//line lib/promscrape/targets_response.qtpl:14
for _, ts := range js.targetsStatus {
//line lib/promscrape/targets_response.qtpl:15
qw422016.N().S("\t")
//line lib/promscrape/targets_response.qtpl:15
qw422016.N().S(`state=`)
//line lib/promscrape/targets_response.qtpl:16
if ts.up {
//line lib/promscrape/targets_response.qtpl:16
qw422016.N().S(`up`)
//line lib/promscrape/targets_response.qtpl:16
} else {
//line lib/promscrape/targets_response.qtpl:16
qw422016.N().S(`down`)
//line lib/promscrape/targets_response.qtpl:16
}
//line lib/promscrape/targets_response.qtpl:16
qw422016.N().S(`,`)
//line lib/promscrape/targets_response.qtpl:16
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:16
qw422016.N().S(`endpoint=`)
//line lib/promscrape/targets_response.qtpl:17
qw422016.N().S(ts.sw.Config.ScrapeURL)
//line lib/promscrape/targets_response.qtpl:17
qw422016.N().S(`,`)
//line lib/promscrape/targets_response.qtpl:17
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:17
qw422016.N().S(`labels=`)
//line lib/promscrape/targets_response.qtpl:18
qw422016.N().S(promLabelsString(promrelabel.FinalizeLabels(nil, ts.sw.Config.Labels)))
//line lib/promscrape/targets_response.qtpl:18
qw422016.N().S(`,`)
//line lib/promscrape/targets_response.qtpl:18
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:19
if showOriginLabels {
//line lib/promscrape/targets_response.qtpl:19
qw422016.N().S(`originalLabels=`)
//line lib/promscrape/targets_response.qtpl:19
qw422016.N().S(promLabelsString(ts.sw.Config.OriginalLabels))
//line lib/promscrape/targets_response.qtpl:19
qw422016.N().S(`,`)
//line lib/promscrape/targets_response.qtpl:19
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:19
}
//line lib/promscrape/targets_response.qtpl:19
qw422016.N().S(`scrapes_total=`)
//line lib/promscrape/targets_response.qtpl:20
qw422016.N().D(ts.scrapesTotal)
//line lib/promscrape/targets_response.qtpl:20
qw422016.N().S(`,`)
//line lib/promscrape/targets_response.qtpl:20
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:20
qw422016.N().S(`scrapes_failed=`)
//line lib/promscrape/targets_response.qtpl:21
qw422016.N().D(ts.scrapesFailed)
//line lib/promscrape/targets_response.qtpl:21
qw422016.N().S(`,`)
//line lib/promscrape/targets_response.qtpl:21
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:21
qw422016.N().S(`last_scrape=`)
//line lib/promscrape/targets_response.qtpl:22
qw422016.N().FPrec(ts.getDurationFromLastScrape().Seconds(), 3)
//line lib/promscrape/targets_response.qtpl:22
qw422016.N().S(`s ago,`)
//line lib/promscrape/targets_response.qtpl:22
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:22
qw422016.N().S(`scrape_duration=`)
//line lib/promscrape/targets_response.qtpl:23
qw422016.N().D(int(ts.scrapeDuration))
//line lib/promscrape/targets_response.qtpl:23
qw422016.N().S(`ms,`)
//line lib/promscrape/targets_response.qtpl:23
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:23
qw422016.N().S(`samples_scraped=`)
//line lib/promscrape/targets_response.qtpl:24
qw422016.N().D(ts.samplesScraped)
//line lib/promscrape/targets_response.qtpl:24
qw422016.N().S(`,`)
//line lib/promscrape/targets_response.qtpl:24
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:24
qw422016.N().S(`error=`)
//line lib/promscrape/targets_response.qtpl:25
if ts.err != nil {
//line lib/promscrape/targets_response.qtpl:25
qw422016.N().S(ts.err.Error())
//line lib/promscrape/targets_response.qtpl:25
}
//line lib/promscrape/targets_response.qtpl:26
qw422016.N().S(`
`)
//line lib/promscrape/targets_response.qtpl:27
}
//line lib/promscrape/targets_response.qtpl:28
}
//line lib/promscrape/targets_response.qtpl:30
for _, jobName := range emptyJobs {
//line lib/promscrape/targets_response.qtpl:30
qw422016.N().S(`job=`)
//line lib/promscrape/targets_response.qtpl:31
qw422016.N().Q(jobName)
//line lib/promscrape/targets_response.qtpl:31
qw422016.N().S(`(0/0 up)`)
//line lib/promscrape/targets_response.qtpl:32
qw422016.N().S(`
`)
//line lib/promscrape/targets_response.qtpl:33
}
//line lib/promscrape/targets_response.qtpl:35
}
//line lib/promscrape/targets_response.qtpl:35
func WriteTargetsResponsePlain(qq422016 qtio422016.Writer, jts []jobTargetsStatuses, emptyJobs []string, showOriginLabels bool) {
//line lib/promscrape/targets_response.qtpl:35
qw422016 := qt422016.AcquireWriter(qq422016)
//line lib/promscrape/targets_response.qtpl:35
StreamTargetsResponsePlain(qw422016, jts, emptyJobs, showOriginLabels)
//line lib/promscrape/targets_response.qtpl:35
qt422016.ReleaseWriter(qw422016)
//line lib/promscrape/targets_response.qtpl:35
}
//line lib/promscrape/targets_response.qtpl:35
func TargetsResponsePlain(jts []jobTargetsStatuses, emptyJobs []string, showOriginLabels bool) string {
//line lib/promscrape/targets_response.qtpl:35
qb422016 := qt422016.AcquireByteBuffer()
//line lib/promscrape/targets_response.qtpl:35
WriteTargetsResponsePlain(qb422016, jts, emptyJobs, showOriginLabels)
//line lib/promscrape/targets_response.qtpl:35
qs422016 := string(qb422016.B)
//line lib/promscrape/targets_response.qtpl:35
qt422016.ReleaseByteBuffer(qb422016)
//line lib/promscrape/targets_response.qtpl:35
return qs422016
//line lib/promscrape/targets_response.qtpl:35
}
//line lib/promscrape/targets_response.qtpl:37
func StreamTargetsResponseHTML(qw422016 *qt422016.Writer, jts []jobTargetsStatuses, emptyJobs []string, onlyUnhealthy bool) {
//line lib/promscrape/targets_response.qtpl:37
qw422016.N().S(`<!DOCTYPE html><html lang="en"><head><meta charset="utf-8"><meta name="viewport" content="width=device-width, initial-scale=1"><link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossorigin="anonymous"><title>Scrape targets</title></head><body class="m-3"><h1>Scrape targets</h1><div><button type="button" class="btn`)
//line lib/promscrape/targets_response.qtpl:49
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:49
if !onlyUnhealthy {
//line lib/promscrape/targets_response.qtpl:49
qw422016.N().S(`btn-primary`)
//line lib/promscrape/targets_response.qtpl:49
} else {
//line lib/promscrape/targets_response.qtpl:49
qw422016.N().S(`btn-secondary`)
//line lib/promscrape/targets_response.qtpl:49
}
//line lib/promscrape/targets_response.qtpl:49
qw422016.N().S(`" onclick="location.href='targets'">All</button><button type="button" class="btn`)
//line lib/promscrape/targets_response.qtpl:52
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:52
if onlyUnhealthy {
//line lib/promscrape/targets_response.qtpl:52
qw422016.N().S(`btn-primary`)
//line lib/promscrape/targets_response.qtpl:52
} else {
//line lib/promscrape/targets_response.qtpl:52
qw422016.N().S(`btn-secondary`)
//line lib/promscrape/targets_response.qtpl:52
}
//line lib/promscrape/targets_response.qtpl:52
qw422016.N().S(`" onclick="location.href='targets?show_only_unhealthy=true'">Unhealthy</button></div>`)
//line lib/promscrape/targets_response.qtpl:56
for i, js := range jts {
//line lib/promscrape/targets_response.qtpl:57
if onlyUnhealthy && js.upCount == js.targetsTotal {
//line lib/promscrape/targets_response.qtpl:57
continue
//line lib/promscrape/targets_response.qtpl:57
}
//line lib/promscrape/targets_response.qtpl:57
qw422016.N().S(`<div><h4>`)
//line lib/promscrape/targets_response.qtpl:60
qw422016.E().S(js.job)
//line lib/promscrape/targets_response.qtpl:60
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:60
qw422016.N().S(`(`)
//line lib/promscrape/targets_response.qtpl:60
qw422016.N().D(js.upCount)
//line lib/promscrape/targets_response.qtpl:60
qw422016.N().S(`/`)
//line lib/promscrape/targets_response.qtpl:60
qw422016.N().D(js.targetsTotal)
//line lib/promscrape/targets_response.qtpl:60
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:60
qw422016.N().S(`up)<button type="button" class="btn btn-primary" onclick="document.getElementById('table-`)
//line lib/promscrape/targets_response.qtpl:61
qw422016.N().D(i)
//line lib/promscrape/targets_response.qtpl:61
qw422016.N().S(`').style.display='none'">collapse</button><button type="button" class="btn btn-secondary" onclick="document.getElementById('table-`)
//line lib/promscrape/targets_response.qtpl:62
qw422016.N().D(i)
//line lib/promscrape/targets_response.qtpl:62
qw422016.N().S(`').style.display='block'">expand</button></h4><div id="table-`)
//line lib/promscrape/targets_response.qtpl:64
qw422016.N().D(i)
//line lib/promscrape/targets_response.qtpl:64
qw422016.N().S(`"><table class="table table-striped table-hover table-bordered table-sm"><thead><tr><th scope="col">Endpoint</th><th scope="col">State</th><th scope="col" title="scrape target labels">Labels</th><th scope="col" title="total scrapes">Scrapes</th><th scope="col" title="total scrape errors">Errors</th><th scope="col" title="the time of the last scrape">Last Scrape</th><th scope="col" title="the duration of the last scrape">Duration</th><th scope="col" title="the number of metrics scraped during the last scrape">Samples</th><th scope="col" title="error from the last scrape (if any)">Last error</th></tr></thead><tbody>`)
//line lib/promscrape/targets_response.qtpl:80
for _, ts := range js.targetsStatus {
//line lib/promscrape/targets_response.qtpl:82
endpoint := ts.sw.Config.ScrapeURL
targetID := getTargetID(ts.sw)
lastScrapeTime := ts.getDurationFromLastScrape()
//line lib/promscrape/targets_response.qtpl:86
if onlyUnhealthy && ts.up {
//line lib/promscrape/targets_response.qtpl:86
continue
//line lib/promscrape/targets_response.qtpl:86
}
//line lib/promscrape/targets_response.qtpl:86
qw422016.N().S(`<tr`)
//line lib/promscrape/targets_response.qtpl:87
if !ts.up {
//line lib/promscrape/targets_response.qtpl:87
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:87
qw422016.N().S(`class="alert alert-danger" role="alert"`)
//line lib/promscrape/targets_response.qtpl:87
}
//line lib/promscrape/targets_response.qtpl:87
qw422016.N().S(`><td><a href="`)
//line lib/promscrape/targets_response.qtpl:88
qw422016.E().S(endpoint)
//line lib/promscrape/targets_response.qtpl:88
qw422016.N().S(`" target="_blank">`)
//line lib/promscrape/targets_response.qtpl:88
qw422016.E().S(endpoint)
//line lib/promscrape/targets_response.qtpl:88
qw422016.N().S(`</a> (<a href="target_response?id=`)
//line lib/promscrape/targets_response.qtpl:89
qw422016.E().S(targetID)
//line lib/promscrape/targets_response.qtpl:89
qw422016.N().S(`" target="_blank" title="click to fetch target response on behalf of the scraper">response</a>)</td><td>`)
//line lib/promscrape/targets_response.qtpl:91
if ts.up {
//line lib/promscrape/targets_response.qtpl:91
qw422016.N().S(`UP`)
//line lib/promscrape/targets_response.qtpl:91
} else {
//line lib/promscrape/targets_response.qtpl:91
qw422016.N().S(`DOWN`)
//line lib/promscrape/targets_response.qtpl:91
}
//line lib/promscrape/targets_response.qtpl:91
qw422016.N().S(`</td><td><div title="click to show original labels" onclick="document.getElementById('original_labels_`)
//line lib/promscrape/targets_response.qtpl:93
qw422016.E().S(targetID)
//line lib/promscrape/targets_response.qtpl:93
qw422016.N().S(`').style.display='block'">`)
//line lib/promscrape/targets_response.qtpl:94
streamformatLabel(qw422016, promrelabel.FinalizeLabels(nil, ts.sw.Config.Labels))
//line lib/promscrape/targets_response.qtpl:94
qw422016.N().S(`</div><div style="display:none" id="original_labels_`)
//line lib/promscrape/targets_response.qtpl:96
qw422016.E().S(targetID)
//line lib/promscrape/targets_response.qtpl:96
qw422016.N().S(`">`)
//line lib/promscrape/targets_response.qtpl:97
streamformatLabel(qw422016, ts.sw.Config.OriginalLabels)
//line lib/promscrape/targets_response.qtpl:97
qw422016.N().S(`</div></td><td>`)
//line lib/promscrape/targets_response.qtpl:100
qw422016.N().D(ts.scrapesTotal)
//line lib/promscrape/targets_response.qtpl:100
qw422016.N().S(`</td><td>`)
//line lib/promscrape/targets_response.qtpl:101
qw422016.N().D(ts.scrapesFailed)
//line lib/promscrape/targets_response.qtpl:101
qw422016.N().S(`</td><td>`)
//line lib/promscrape/targets_response.qtpl:103
if lastScrapeTime < 365*24*time.Hour {
//line lib/promscrape/targets_response.qtpl:104
qw422016.N().FPrec(lastScrapeTime.Seconds(), 3)
//line lib/promscrape/targets_response.qtpl:104
qw422016.N().S(`s ago`)
//line lib/promscrape/targets_response.qtpl:105
} else {
//line lib/promscrape/targets_response.qtpl:105
qw422016.N().S(`none`)
//line lib/promscrape/targets_response.qtpl:107
}
//line lib/promscrape/targets_response.qtpl:107
qw422016.N().S(`<td>`)
//line lib/promscrape/targets_response.qtpl:108
qw422016.N().D(int(ts.scrapeDuration))
//line lib/promscrape/targets_response.qtpl:108
qw422016.N().S(`ms</td><td>`)
//line lib/promscrape/targets_response.qtpl:109
qw422016.N().D(ts.samplesScraped)
//line lib/promscrape/targets_response.qtpl:109
qw422016.N().S(`</td><td>`)
//line lib/promscrape/targets_response.qtpl:110
if ts.err != nil {
//line lib/promscrape/targets_response.qtpl:110
qw422016.E().S(ts.err.Error())
//line lib/promscrape/targets_response.qtpl:110
}
//line lib/promscrape/targets_response.qtpl:110
qw422016.N().S(`</td></tr>`)
//line lib/promscrape/targets_response.qtpl:112
}
//line lib/promscrape/targets_response.qtpl:112
qw422016.N().S(`</tbody></table></div></div>`)
//line lib/promscrape/targets_response.qtpl:117
}
//line lib/promscrape/targets_response.qtpl:119
for _, jobName := range emptyJobs {
//line lib/promscrape/targets_response.qtpl:119
qw422016.N().S(`<div><h4><a>`)
//line lib/promscrape/targets_response.qtpl:122
qw422016.E().S(jobName)
//line lib/promscrape/targets_response.qtpl:122
qw422016.N().S(`(0/0 up)</a></h4><table class="table table-striped table-hover table-bordered table-sm"><thead><tr><th scope="col">Endpoint</th><th scope="col">State</th><th scope="col">Labels</th><th scope="col">Last Scrape</th><th scope="col">Scrape Duration</th><th scope="col">Samples Scraped</th><th scope="col">Error</th></tr></thead></table></div>`)
//line lib/promscrape/targets_response.qtpl:138
}
//line lib/promscrape/targets_response.qtpl:138
qw422016.N().S(`</body></html>`)
//line lib/promscrape/targets_response.qtpl:141
}
//line lib/promscrape/targets_response.qtpl:141
func WriteTargetsResponseHTML(qq422016 qtio422016.Writer, jts []jobTargetsStatuses, emptyJobs []string, onlyUnhealthy bool) {
//line lib/promscrape/targets_response.qtpl:141
qw422016 := qt422016.AcquireWriter(qq422016)
//line lib/promscrape/targets_response.qtpl:141
StreamTargetsResponseHTML(qw422016, jts, emptyJobs, onlyUnhealthy)
//line lib/promscrape/targets_response.qtpl:141
qt422016.ReleaseWriter(qw422016)
//line lib/promscrape/targets_response.qtpl:141
}
//line lib/promscrape/targets_response.qtpl:141
func TargetsResponseHTML(jts []jobTargetsStatuses, emptyJobs []string, onlyUnhealthy bool) string {
//line lib/promscrape/targets_response.qtpl:141
qb422016 := qt422016.AcquireByteBuffer()
//line lib/promscrape/targets_response.qtpl:141
WriteTargetsResponseHTML(qb422016, jts, emptyJobs, onlyUnhealthy)
//line lib/promscrape/targets_response.qtpl:141
qs422016 := string(qb422016.B)
//line lib/promscrape/targets_response.qtpl:141
qt422016.ReleaseByteBuffer(qb422016)
//line lib/promscrape/targets_response.qtpl:141
return qs422016
//line lib/promscrape/targets_response.qtpl:141
}
//line lib/promscrape/targets_response.qtpl:143
func streamformatLabel(qw422016 *qt422016.Writer, labels []prompbmarshal.Label) {
//line lib/promscrape/targets_response.qtpl:143
qw422016.N().S(`{`)
//line lib/promscrape/targets_response.qtpl:145
for i, label := range labels {
//line lib/promscrape/targets_response.qtpl:146
qw422016.E().S(label.Name)
//line lib/promscrape/targets_response.qtpl:146
qw422016.N().S(`=`)
//line lib/promscrape/targets_response.qtpl:146
qw422016.E().Q(label.Value)
//line lib/promscrape/targets_response.qtpl:147
if i+1 < len(labels) {
//line lib/promscrape/targets_response.qtpl:147
qw422016.N().S(`,`)
//line lib/promscrape/targets_response.qtpl:147
qw422016.N().S(` `)
//line lib/promscrape/targets_response.qtpl:147
}
//line lib/promscrape/targets_response.qtpl:148
}
//line lib/promscrape/targets_response.qtpl:148
qw422016.N().S(`}`)
//line lib/promscrape/targets_response.qtpl:150
}
//line lib/promscrape/targets_response.qtpl:150
func writeformatLabel(qq422016 qtio422016.Writer, labels []prompbmarshal.Label) {
//line lib/promscrape/targets_response.qtpl:150
qw422016 := qt422016.AcquireWriter(qq422016)
//line lib/promscrape/targets_response.qtpl:150
streamformatLabel(qw422016, labels)
//line lib/promscrape/targets_response.qtpl:150
qt422016.ReleaseWriter(qw422016)
//line lib/promscrape/targets_response.qtpl:150
}
//line lib/promscrape/targets_response.qtpl:150
func formatLabel(labels []prompbmarshal.Label) string {
//line lib/promscrape/targets_response.qtpl:150
qb422016 := qt422016.AcquireByteBuffer()
//line lib/promscrape/targets_response.qtpl:150
writeformatLabel(qb422016, labels)
//line lib/promscrape/targets_response.qtpl:150
qs422016 := string(qb422016.B)
//line lib/promscrape/targets_response.qtpl:150
qt422016.ReleaseByteBuffer(qb422016)
//line lib/promscrape/targets_response.qtpl:150
return qs422016
//line lib/promscrape/targets_response.qtpl:150
}

View file

@ -42,16 +42,36 @@ job={%q= jobName %} (0/0 up)
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossorigin="anonymous"> <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossorigin="anonymous">
<title>Scrape targets</title> <title>Scrape targets</title>
<script>
function collapse_all() {
for (var i = 0; i < {%d len(jts) %}; i++) {
var id = "table-" + i;
document.getElementById(id).style.display = 'none';
}
}
function expand_all() {
for (var i = 0; i < {%d len(jts) %}; i++) {
var id = "table-" + i;
document.getElementById(id).style.display = 'block';
}
}
</script>
</head> </head>
<body class="m-3"> <body class="m-3">
<h1>Scrape targets</h1> <h1>Scrape targets</h1>
<div> <div style="padding: 3px">
<button type="button" class="btn{% space %}{% if !onlyUnhealthy %}btn-primary{% else %}btn-secondary{% endif %}" onclick="location.href='targets'"> <button type="button" class="btn{% space %}{% if !onlyUnhealthy %}btn-primary{% else %}btn-secondary{% endif %}" onclick="location.href='targets'">
All All
</button> </button>
<button type="button" class="btn{% space %}{% if onlyUnhealthy %}btn-primary{% else %}btn-secondary{% endif %}" onclick="location.href='targets?show_only_unhealthy=true'"> <button type="button" class="btn{% space %}{% if onlyUnhealthy %}btn-primary{% else %}btn-secondary{% endif %}" onclick="location.href='targets?show_only_unhealthy=true'">
Unhealthy Unhealthy
</button> </button>
<button type="button" class="btn btn-primary" onclick="collapse_all()">
Collapse all
</button>
<button type="button" class="btn btn-secondary" onclick="expand_all()">
Expand all
</button>
</div> </div>
{% for i, js := range jts %} {% for i, js := range jts %}
{% if onlyUnhealthy && js.upCount == js.targetsTotal %}{% continue %}{% endif %} {% if onlyUnhealthy && js.upCount == js.targetsTotal %}{% continue %}{% endif %}

View file

@ -0,0 +1,474 @@
// Code generated by qtc from "targetstatus.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.
//line lib/promscrape/targetstatus.qtpl:1
package promscrape
//line lib/promscrape/targetstatus.qtpl:1
import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"time"
)
//line lib/promscrape/targetstatus.qtpl:9
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line lib/promscrape/targetstatus.qtpl:9
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line lib/promscrape/targetstatus.qtpl:9
func StreamTargetsResponsePlain(qw422016 *qt422016.Writer, jts []jobTargetsStatuses, emptyJobs []string, showOriginLabels bool) {
//line lib/promscrape/targetstatus.qtpl:11
for _, js := range jts {
//line lib/promscrape/targetstatus.qtpl:11
qw422016.N().S(`job=`)
//line lib/promscrape/targetstatus.qtpl:12
qw422016.N().Q(js.job)
//line lib/promscrape/targetstatus.qtpl:12
qw422016.N().S(`(`)
//line lib/promscrape/targetstatus.qtpl:12
qw422016.N().D(js.upCount)
//line lib/promscrape/targetstatus.qtpl:12
qw422016.N().S(`/`)
//line lib/promscrape/targetstatus.qtpl:12
qw422016.N().D(js.targetsTotal)
//line lib/promscrape/targetstatus.qtpl:12
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:12
qw422016.N().S(`up)`)
//line lib/promscrape/targetstatus.qtpl:13
qw422016.N().S(`
`)
//line lib/promscrape/targetstatus.qtpl:14
for _, ts := range js.targetsStatus {
//line lib/promscrape/targetstatus.qtpl:15
qw422016.N().S("\t")
//line lib/promscrape/targetstatus.qtpl:15
qw422016.N().S(`state=`)
//line lib/promscrape/targetstatus.qtpl:16
if ts.up {
//line lib/promscrape/targetstatus.qtpl:16
qw422016.N().S(`up`)
//line lib/promscrape/targetstatus.qtpl:16
} else {
//line lib/promscrape/targetstatus.qtpl:16
qw422016.N().S(`down`)
//line lib/promscrape/targetstatus.qtpl:16
}
//line lib/promscrape/targetstatus.qtpl:16
qw422016.N().S(`,`)
//line lib/promscrape/targetstatus.qtpl:16
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:16
qw422016.N().S(`endpoint=`)
//line lib/promscrape/targetstatus.qtpl:17
qw422016.N().S(ts.sw.Config.ScrapeURL)
//line lib/promscrape/targetstatus.qtpl:17
qw422016.N().S(`,`)
//line lib/promscrape/targetstatus.qtpl:17
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:17
qw422016.N().S(`labels=`)
//line lib/promscrape/targetstatus.qtpl:18
qw422016.N().S(promLabelsString(promrelabel.FinalizeLabels(nil, ts.sw.Config.Labels)))
//line lib/promscrape/targetstatus.qtpl:18
qw422016.N().S(`,`)
//line lib/promscrape/targetstatus.qtpl:18
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:19
if showOriginLabels {
//line lib/promscrape/targetstatus.qtpl:19
qw422016.N().S(`originalLabels=`)
//line lib/promscrape/targetstatus.qtpl:19
qw422016.N().S(promLabelsString(ts.sw.Config.OriginalLabels))
//line lib/promscrape/targetstatus.qtpl:19
qw422016.N().S(`,`)
//line lib/promscrape/targetstatus.qtpl:19
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:19
}
//line lib/promscrape/targetstatus.qtpl:19
qw422016.N().S(`scrapes_total=`)
//line lib/promscrape/targetstatus.qtpl:20
qw422016.N().D(ts.scrapesTotal)
//line lib/promscrape/targetstatus.qtpl:20
qw422016.N().S(`,`)
//line lib/promscrape/targetstatus.qtpl:20
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:20
qw422016.N().S(`scrapes_failed=`)
//line lib/promscrape/targetstatus.qtpl:21
qw422016.N().D(ts.scrapesFailed)
//line lib/promscrape/targetstatus.qtpl:21
qw422016.N().S(`,`)
//line lib/promscrape/targetstatus.qtpl:21
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:21
qw422016.N().S(`last_scrape=`)
//line lib/promscrape/targetstatus.qtpl:22
qw422016.N().FPrec(ts.getDurationFromLastScrape().Seconds(), 3)
//line lib/promscrape/targetstatus.qtpl:22
qw422016.N().S(`s ago,`)
//line lib/promscrape/targetstatus.qtpl:22
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:22
qw422016.N().S(`scrape_duration=`)
//line lib/promscrape/targetstatus.qtpl:23
qw422016.N().D(int(ts.scrapeDuration))
//line lib/promscrape/targetstatus.qtpl:23
qw422016.N().S(`ms,`)
//line lib/promscrape/targetstatus.qtpl:23
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:23
qw422016.N().S(`samples_scraped=`)
//line lib/promscrape/targetstatus.qtpl:24
qw422016.N().D(ts.samplesScraped)
//line lib/promscrape/targetstatus.qtpl:24
qw422016.N().S(`,`)
//line lib/promscrape/targetstatus.qtpl:24
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:24
qw422016.N().S(`error=`)
//line lib/promscrape/targetstatus.qtpl:25
if ts.err != nil {
//line lib/promscrape/targetstatus.qtpl:25
qw422016.N().S(ts.err.Error())
//line lib/promscrape/targetstatus.qtpl:25
}
//line lib/promscrape/targetstatus.qtpl:26
qw422016.N().S(`
`)
//line lib/promscrape/targetstatus.qtpl:27
}
//line lib/promscrape/targetstatus.qtpl:28
}
//line lib/promscrape/targetstatus.qtpl:30
for _, jobName := range emptyJobs {
//line lib/promscrape/targetstatus.qtpl:30
qw422016.N().S(`job=`)
//line lib/promscrape/targetstatus.qtpl:31
qw422016.N().Q(jobName)
//line lib/promscrape/targetstatus.qtpl:31
qw422016.N().S(`(0/0 up)`)
//line lib/promscrape/targetstatus.qtpl:32
qw422016.N().S(`
`)
//line lib/promscrape/targetstatus.qtpl:33
}
//line lib/promscrape/targetstatus.qtpl:35
}
//line lib/promscrape/targetstatus.qtpl:35
func WriteTargetsResponsePlain(qq422016 qtio422016.Writer, jts []jobTargetsStatuses, emptyJobs []string, showOriginLabels bool) {
//line lib/promscrape/targetstatus.qtpl:35
qw422016 := qt422016.AcquireWriter(qq422016)
//line lib/promscrape/targetstatus.qtpl:35
StreamTargetsResponsePlain(qw422016, jts, emptyJobs, showOriginLabels)
//line lib/promscrape/targetstatus.qtpl:35
qt422016.ReleaseWriter(qw422016)
//line lib/promscrape/targetstatus.qtpl:35
}
//line lib/promscrape/targetstatus.qtpl:35
func TargetsResponsePlain(jts []jobTargetsStatuses, emptyJobs []string, showOriginLabels bool) string {
//line lib/promscrape/targetstatus.qtpl:35
qb422016 := qt422016.AcquireByteBuffer()
//line lib/promscrape/targetstatus.qtpl:35
WriteTargetsResponsePlain(qb422016, jts, emptyJobs, showOriginLabels)
//line lib/promscrape/targetstatus.qtpl:35
qs422016 := string(qb422016.B)
//line lib/promscrape/targetstatus.qtpl:35
qt422016.ReleaseByteBuffer(qb422016)
//line lib/promscrape/targetstatus.qtpl:35
return qs422016
//line lib/promscrape/targetstatus.qtpl:35
}
//line lib/promscrape/targetstatus.qtpl:37
func StreamTargetsResponseHTML(qw422016 *qt422016.Writer, jts []jobTargetsStatuses, emptyJobs []string, onlyUnhealthy bool) {
//line lib/promscrape/targetstatus.qtpl:37
qw422016.N().S(`<!DOCTYPE html><html lang="en"><head><meta charset="utf-8"><meta name="viewport" content="width=device-width, initial-scale=1"><link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossorigin="anonymous"><title>Scrape targets</title><script>function collapse_all() {for (var i = 0; i <`)
//line lib/promscrape/targetstatus.qtpl:47
qw422016.N().D(len(jts))
//line lib/promscrape/targetstatus.qtpl:47
qw422016.N().S(`; i++) {var id = "table-" + i;document.getElementById(id).style.display = 'none';}}function expand_all() {for (var i = 0; i <`)
//line lib/promscrape/targetstatus.qtpl:53
qw422016.N().D(len(jts))
//line lib/promscrape/targetstatus.qtpl:53
qw422016.N().S(`; i++) {var id = "table-" + i;document.getElementById(id).style.display = 'block';}}</script></head><body class="m-3"><h1>Scrape targets</h1><div style="padding: 3px"><button type="button" class="btn`)
//line lib/promscrape/targetstatus.qtpl:63
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:63
if !onlyUnhealthy {
//line lib/promscrape/targetstatus.qtpl:63
qw422016.N().S(`btn-primary`)
//line lib/promscrape/targetstatus.qtpl:63
} else {
//line lib/promscrape/targetstatus.qtpl:63
qw422016.N().S(`btn-secondary`)
//line lib/promscrape/targetstatus.qtpl:63
}
//line lib/promscrape/targetstatus.qtpl:63
qw422016.N().S(`" onclick="location.href='targets'">All</button><button type="button" class="btn`)
//line lib/promscrape/targetstatus.qtpl:66
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:66
if onlyUnhealthy {
//line lib/promscrape/targetstatus.qtpl:66
qw422016.N().S(`btn-primary`)
//line lib/promscrape/targetstatus.qtpl:66
} else {
//line lib/promscrape/targetstatus.qtpl:66
qw422016.N().S(`btn-secondary`)
//line lib/promscrape/targetstatus.qtpl:66
}
//line lib/promscrape/targetstatus.qtpl:66
qw422016.N().S(`" onclick="location.href='targets?show_only_unhealthy=true'">Unhealthy</button><button type="button" class="btn btn-primary" onclick="collapse_all()">Collapse all</button><button type="button" class="btn btn-secondary" onclick="expand_all()">Expand all</button></div>`)
//line lib/promscrape/targetstatus.qtpl:76
for i, js := range jts {
//line lib/promscrape/targetstatus.qtpl:77
if onlyUnhealthy && js.upCount == js.targetsTotal {
//line lib/promscrape/targetstatus.qtpl:77
continue
//line lib/promscrape/targetstatus.qtpl:77
}
//line lib/promscrape/targetstatus.qtpl:77
qw422016.N().S(`<div><h4>`)
//line lib/promscrape/targetstatus.qtpl:80
qw422016.E().S(js.job)
//line lib/promscrape/targetstatus.qtpl:80
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:80
qw422016.N().S(`(`)
//line lib/promscrape/targetstatus.qtpl:80
qw422016.N().D(js.upCount)
//line lib/promscrape/targetstatus.qtpl:80
qw422016.N().S(`/`)
//line lib/promscrape/targetstatus.qtpl:80
qw422016.N().D(js.targetsTotal)
//line lib/promscrape/targetstatus.qtpl:80
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:80
qw422016.N().S(`up)<button type="button" class="btn btn-primary" onclick="document.getElementById('table-`)
//line lib/promscrape/targetstatus.qtpl:81
qw422016.N().D(i)
//line lib/promscrape/targetstatus.qtpl:81
qw422016.N().S(`').style.display='none'">collapse</button><button type="button" class="btn btn-secondary" onclick="document.getElementById('table-`)
//line lib/promscrape/targetstatus.qtpl:82
qw422016.N().D(i)
//line lib/promscrape/targetstatus.qtpl:82
qw422016.N().S(`').style.display='block'">expand</button></h4><div id="table-`)
//line lib/promscrape/targetstatus.qtpl:84
qw422016.N().D(i)
//line lib/promscrape/targetstatus.qtpl:84
qw422016.N().S(`"><table class="table table-striped table-hover table-bordered table-sm"><thead><tr><th scope="col">Endpoint</th><th scope="col">State</th><th scope="col" title="scrape target labels">Labels</th><th scope="col" title="total scrapes">Scrapes</th><th scope="col" title="total scrape errors">Errors</th><th scope="col" title="the time of the last scrape">Last Scrape</th><th scope="col" title="the duration of the last scrape">Duration</th><th scope="col" title="the number of metrics scraped during the last scrape">Samples</th><th scope="col" title="error from the last scrape (if any)">Last error</th></tr></thead><tbody>`)
//line lib/promscrape/targetstatus.qtpl:100
for _, ts := range js.targetsStatus {
//line lib/promscrape/targetstatus.qtpl:102
endpoint := ts.sw.Config.ScrapeURL
targetID := getTargetID(ts.sw)
lastScrapeTime := ts.getDurationFromLastScrape()
//line lib/promscrape/targetstatus.qtpl:106
if onlyUnhealthy && ts.up {
//line lib/promscrape/targetstatus.qtpl:106
continue
//line lib/promscrape/targetstatus.qtpl:106
}
//line lib/promscrape/targetstatus.qtpl:106
qw422016.N().S(`<tr`)
//line lib/promscrape/targetstatus.qtpl:107
if !ts.up {
//line lib/promscrape/targetstatus.qtpl:107
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:107
qw422016.N().S(`class="alert alert-danger" role="alert"`)
//line lib/promscrape/targetstatus.qtpl:107
}
//line lib/promscrape/targetstatus.qtpl:107
qw422016.N().S(`><td><a href="`)
//line lib/promscrape/targetstatus.qtpl:108
qw422016.E().S(endpoint)
//line lib/promscrape/targetstatus.qtpl:108
qw422016.N().S(`" target="_blank">`)
//line lib/promscrape/targetstatus.qtpl:108
qw422016.E().S(endpoint)
//line lib/promscrape/targetstatus.qtpl:108
qw422016.N().S(`</a> (<a href="target_response?id=`)
//line lib/promscrape/targetstatus.qtpl:109
qw422016.E().S(targetID)
//line lib/promscrape/targetstatus.qtpl:109
qw422016.N().S(`" target="_blank" title="click to fetch target response on behalf of the scraper">response</a>)</td><td>`)
//line lib/promscrape/targetstatus.qtpl:111
if ts.up {
//line lib/promscrape/targetstatus.qtpl:111
qw422016.N().S(`UP`)
//line lib/promscrape/targetstatus.qtpl:111
} else {
//line lib/promscrape/targetstatus.qtpl:111
qw422016.N().S(`DOWN`)
//line lib/promscrape/targetstatus.qtpl:111
}
//line lib/promscrape/targetstatus.qtpl:111
qw422016.N().S(`</td><td><div title="click to show original labels" onclick="document.getElementById('original_labels_`)
//line lib/promscrape/targetstatus.qtpl:113
qw422016.E().S(targetID)
//line lib/promscrape/targetstatus.qtpl:113
qw422016.N().S(`').style.display='block'">`)
//line lib/promscrape/targetstatus.qtpl:114
streamformatLabel(qw422016, promrelabel.FinalizeLabels(nil, ts.sw.Config.Labels))
//line lib/promscrape/targetstatus.qtpl:114
qw422016.N().S(`</div><div style="display:none" id="original_labels_`)
//line lib/promscrape/targetstatus.qtpl:116
qw422016.E().S(targetID)
//line lib/promscrape/targetstatus.qtpl:116
qw422016.N().S(`">`)
//line lib/promscrape/targetstatus.qtpl:117
streamformatLabel(qw422016, ts.sw.Config.OriginalLabels)
//line lib/promscrape/targetstatus.qtpl:117
qw422016.N().S(`</div></td><td>`)
//line lib/promscrape/targetstatus.qtpl:120
qw422016.N().D(ts.scrapesTotal)
//line lib/promscrape/targetstatus.qtpl:120
qw422016.N().S(`</td><td>`)
//line lib/promscrape/targetstatus.qtpl:121
qw422016.N().D(ts.scrapesFailed)
//line lib/promscrape/targetstatus.qtpl:121
qw422016.N().S(`</td><td>`)
//line lib/promscrape/targetstatus.qtpl:123
if lastScrapeTime < 365*24*time.Hour {
//line lib/promscrape/targetstatus.qtpl:124
qw422016.N().FPrec(lastScrapeTime.Seconds(), 3)
//line lib/promscrape/targetstatus.qtpl:124
qw422016.N().S(`s ago`)
//line lib/promscrape/targetstatus.qtpl:125
} else {
//line lib/promscrape/targetstatus.qtpl:125
qw422016.N().S(`none`)
//line lib/promscrape/targetstatus.qtpl:127
}
//line lib/promscrape/targetstatus.qtpl:127
qw422016.N().S(`<td>`)
//line lib/promscrape/targetstatus.qtpl:128
qw422016.N().D(int(ts.scrapeDuration))
//line lib/promscrape/targetstatus.qtpl:128
qw422016.N().S(`ms</td><td>`)
//line lib/promscrape/targetstatus.qtpl:129
qw422016.N().D(ts.samplesScraped)
//line lib/promscrape/targetstatus.qtpl:129
qw422016.N().S(`</td><td>`)
//line lib/promscrape/targetstatus.qtpl:130
if ts.err != nil {
//line lib/promscrape/targetstatus.qtpl:130
qw422016.E().S(ts.err.Error())
//line lib/promscrape/targetstatus.qtpl:130
}
//line lib/promscrape/targetstatus.qtpl:130
qw422016.N().S(`</td></tr>`)
//line lib/promscrape/targetstatus.qtpl:132
}
//line lib/promscrape/targetstatus.qtpl:132
qw422016.N().S(`</tbody></table></div></div>`)
//line lib/promscrape/targetstatus.qtpl:137
}
//line lib/promscrape/targetstatus.qtpl:139
for _, jobName := range emptyJobs {
//line lib/promscrape/targetstatus.qtpl:139
qw422016.N().S(`<div><h4><a>`)
//line lib/promscrape/targetstatus.qtpl:142
qw422016.E().S(jobName)
//line lib/promscrape/targetstatus.qtpl:142
qw422016.N().S(`(0/0 up)</a></h4><table class="table table-striped table-hover table-bordered table-sm"><thead><tr><th scope="col">Endpoint</th><th scope="col">State</th><th scope="col">Labels</th><th scope="col">Last Scrape</th><th scope="col">Scrape Duration</th><th scope="col">Samples Scraped</th><th scope="col">Error</th></tr></thead></table></div>`)
//line lib/promscrape/targetstatus.qtpl:158
}
//line lib/promscrape/targetstatus.qtpl:158
qw422016.N().S(`</body></html>`)
//line lib/promscrape/targetstatus.qtpl:161
}
//line lib/promscrape/targetstatus.qtpl:161
func WriteTargetsResponseHTML(qq422016 qtio422016.Writer, jts []jobTargetsStatuses, emptyJobs []string, onlyUnhealthy bool) {
//line lib/promscrape/targetstatus.qtpl:161
qw422016 := qt422016.AcquireWriter(qq422016)
//line lib/promscrape/targetstatus.qtpl:161
StreamTargetsResponseHTML(qw422016, jts, emptyJobs, onlyUnhealthy)
//line lib/promscrape/targetstatus.qtpl:161
qt422016.ReleaseWriter(qw422016)
//line lib/promscrape/targetstatus.qtpl:161
}
//line lib/promscrape/targetstatus.qtpl:161
func TargetsResponseHTML(jts []jobTargetsStatuses, emptyJobs []string, onlyUnhealthy bool) string {
//line lib/promscrape/targetstatus.qtpl:161
qb422016 := qt422016.AcquireByteBuffer()
//line lib/promscrape/targetstatus.qtpl:161
WriteTargetsResponseHTML(qb422016, jts, emptyJobs, onlyUnhealthy)
//line lib/promscrape/targetstatus.qtpl:161
qs422016 := string(qb422016.B)
//line lib/promscrape/targetstatus.qtpl:161
qt422016.ReleaseByteBuffer(qb422016)
//line lib/promscrape/targetstatus.qtpl:161
return qs422016
//line lib/promscrape/targetstatus.qtpl:161
}
//line lib/promscrape/targetstatus.qtpl:163
func streamformatLabel(qw422016 *qt422016.Writer, labels []prompbmarshal.Label) {
//line lib/promscrape/targetstatus.qtpl:163
qw422016.N().S(`{`)
//line lib/promscrape/targetstatus.qtpl:165
for i, label := range labels {
//line lib/promscrape/targetstatus.qtpl:166
qw422016.E().S(label.Name)
//line lib/promscrape/targetstatus.qtpl:166
qw422016.N().S(`=`)
//line lib/promscrape/targetstatus.qtpl:166
qw422016.E().Q(label.Value)
//line lib/promscrape/targetstatus.qtpl:167
if i+1 < len(labels) {
//line lib/promscrape/targetstatus.qtpl:167
qw422016.N().S(`,`)
//line lib/promscrape/targetstatus.qtpl:167
qw422016.N().S(` `)
//line lib/promscrape/targetstatus.qtpl:167
}
//line lib/promscrape/targetstatus.qtpl:168
}
//line lib/promscrape/targetstatus.qtpl:168
qw422016.N().S(`}`)
//line lib/promscrape/targetstatus.qtpl:170
}
//line lib/promscrape/targetstatus.qtpl:170
func writeformatLabel(qq422016 qtio422016.Writer, labels []prompbmarshal.Label) {
//line lib/promscrape/targetstatus.qtpl:170
qw422016 := qt422016.AcquireWriter(qq422016)
//line lib/promscrape/targetstatus.qtpl:170
streamformatLabel(qw422016, labels)
//line lib/promscrape/targetstatus.qtpl:170
qt422016.ReleaseWriter(qw422016)
//line lib/promscrape/targetstatus.qtpl:170
}
//line lib/promscrape/targetstatus.qtpl:170
func formatLabel(labels []prompbmarshal.Label) string {
//line lib/promscrape/targetstatus.qtpl:170
qb422016 := qt422016.AcquireByteBuffer()
//line lib/promscrape/targetstatus.qtpl:170
writeformatLabel(qb422016, labels)
//line lib/promscrape/targetstatus.qtpl:170
qs422016 := string(qb422016.B)
//line lib/promscrape/targetstatus.qtpl:170
qt422016.ReleaseByteBuffer(qb422016)
//line lib/promscrape/targetstatus.qtpl:170
return qs422016
//line lib/promscrape/targetstatus.qtpl:170
}

View file

@ -2,5 +2,4 @@ scrape_configs:
- job_name: foo - job_name: foo
file_sd_configs: file_sd_configs:
- files: ["file_sd_*.yml"] - files: ["file_sd_*.yml"]
refresh_interval: 10s - files: ["file_sd.json"]
- fules: ["file_sd.json"]

lib/promutils/duration.go (new file)
View file

@ -0,0 +1,52 @@
package promutils
import (
"time"
"github.com/VictoriaMetrics/metricsql"
)
// Duration is a duration that must be used in Prometheus-compatible yaml configs.
type Duration struct {
d time.Duration
}
// NewDuration returns Duration for given d.
func NewDuration(d time.Duration) Duration {
return Duration{
d: d,
}
}
// MarshalYAML implements yaml.Marshaler interface.
func (pd Duration) MarshalYAML() (interface{}, error) {
return pd.d.String(), nil
}
// UnmarshalYAML implements yaml.Unmarshaler interface.
func (pd *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
}
ms, err := metricsql.DurationValue(s, 0)
if err != nil {
return err
}
pd.d = time.Duration(ms) * time.Millisecond
return nil
}
// Duration returns duration for pd.
func (pd Duration) Duration() time.Duration {
return pd.d
}
// ParseDuration parses duration string in Prometheus format
func ParseDuration(s string) (time.Duration, error) {
ms, err := metricsql.DurationValue(s, 0)
if err != nil {
return 0, err
}
return time.Duration(ms) * time.Millisecond, nil
}
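
For illustration, here is a minimal sketch of how this Duration type could be used when decoding a Prometheus-style config with gopkg.in/yaml.v2; the scrapeCfg struct and the literal values are hypothetical, only promutils.Duration and promutils.ParseDuration come from the file above.

package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
	"gopkg.in/yaml.v2"
)

// scrapeCfg is a hypothetical config struct used only for this illustration.
type scrapeCfg struct {
	ScrapeInterval promutils.Duration `yaml:"scrape_interval"`
}

func main() {
	// "1w" is a Prometheus-style duration that time.ParseDuration would reject.
	var cfg scrapeCfg
	if err := yaml.Unmarshal([]byte("scrape_interval: 1w"), &cfg); err != nil {
		log.Fatalf("cannot parse config: %s", err)
	}
	fmt.Println(cfg.ScrapeInterval.Duration()) // 168h0m0s

	// ParseDuration can also be called directly, e.g. for CLI flags.
	d, err := promutils.ParseDuration("30s")
	if err != nil {
		log.Fatalf("unexpected error: %s", err)
	}
	fmt.Println(d) // 30s
}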

View file

@ -0,0 +1,42 @@
package promutils
import (
"testing"
"time"
)
func TestDuration(t *testing.T) {
if _, err := ParseDuration("foobar"); err == nil {
t.Fatalf("expecting error for invalid duration")
}
dNative, err := ParseDuration("1w")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if dNative != 7*24*time.Hour {
t.Fatalf("unexpected duration; got %s; want %s", dNative, 7*24*time.Hour)
}
d := NewDuration(dNative)
if d.Duration() != dNative {
t.Fatalf("unexpected duration; got %s; want %s", d.Duration(), dNative)
}
v, err := d.MarshalYAML()
if err != nil {
t.Fatalf("unexpected error in MarshalYAML(): %s", err)
}
sExpected := "168h0m0s"
if s := v.(string); s != sExpected {
t.Fatalf("unexpected value from MarshalYAML(); got %q; want %q", s, sExpected)
}
if err := d.UnmarshalYAML(func(v interface{}) error {
sp := v.(*string)
s := "1w3d5h"
*sp = s
return nil
}); err != nil {
t.Fatalf("unexpected error in UnmarshalYAML(): %s", err)
}
if dNative := d.Duration(); dNative != (10*24+5)*time.Hour {
t.Fatalf("unexpected value; got %s; want %s", dNative, (10*24+5)*time.Hour)
}
}

View file

@ -8,6 +8,7 @@ import (
"io" "io"
"path/filepath" "path/filepath"
"sort" "sort"
"strconv"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -59,6 +60,9 @@ type indexDB struct {
// The counter for newly created time series. It can be used for determining time series churn rate. // The counter for newly created time series. It can be used for determining time series churn rate.
newTimeseriesCreated uint64 newTimeseriesCreated uint64
// The counter for time series which were re-populated from previous indexDB after the rotation.
timeseriesRepopulated uint64
// The number of missing MetricID -> TSID entries. // The number of missing MetricID -> TSID entries.
// High rate for this value means corrupted indexDB. // High rate for this value means corrupted indexDB.
missingTSIDsForMetricID uint64 missingTSIDsForMetricID uint64
@ -79,6 +83,13 @@ type indexDB struct {
mustDrop uint64 mustDrop uint64
// generation identifies the index generation ID
// and is used for syncing items from different indexDBs
generation uint64
// The unix timestamp in seconds for the indexDB rotation.
rotationTimestamp uint64
name string name string
tb *mergeset.Table tb *mergeset.Table
@ -98,26 +109,38 @@ type indexDB struct {
indexSearchPool sync.Pool indexSearchPool sync.Pool
} }
// openIndexDB opens index db from the given path with the given caches. // openIndexDB opens index db from the given path.
func openIndexDB(path string, s *Storage) (*indexDB, error) { //
// The last segment of the path should contain a unique hex value, which
// will then be used as indexDB.generation.
//
// The rotationTimestamp must be set to the current unix timestamp when openIndexDB
// is called while creating a new indexdb during indexdb rotation.
func openIndexDB(path string, s *Storage, rotationTimestamp uint64) (*indexDB, error) {
if s == nil { if s == nil {
logger.Panicf("BUG: Storage must be nin-nil") logger.Panicf("BUG: Storage must be nin-nil")
} }
name := filepath.Base(path)
gen, err := strconv.ParseUint(name, 16, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse indexdb path %q: %w", path, err)
}
tb, err := mergeset.OpenTable(path, invalidateTagFiltersCache, mergeTagToMetricIDsRows) tb, err := mergeset.OpenTable(path, invalidateTagFiltersCache, mergeTagToMetricIDsRows)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot open indexDB %q: %w", path, err) return nil, fmt.Errorf("cannot open indexDB %q: %w", path, err)
} }
name := filepath.Base(path)
// Do not persist tagFiltersCache in files, since it is very volatile. // Do not persist tagFiltersCache in files, since it is very volatile.
mem := memory.Allowed() mem := memory.Allowed()
db := &indexDB{ db := &indexDB{
refCount: 1, refCount: 1,
tb: tb, generation: gen,
name: name, rotationTimestamp: rotationTimestamp,
tb: tb,
name: name,
tagFiltersCache: workingsetcache.New(mem/32, time.Hour), tagFiltersCache: workingsetcache.New(mem/32, time.Hour),
s: s, s: s,
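
As a side note, the generation is recovered purely from the directory name: the last path segment is parsed as a hex number. Below is a self-contained sketch of that parsing, mirroring the strconv.ParseUint(name, 16, 64) call above; the sample path is illustrative, and in the real code the table names come from nextIndexDBTableName.

package main

import (
	"fmt"
	"path/filepath"
	"strconv"
)

// generationFromPath extracts the indexDB generation from the last path segment,
// which is expected to be a hex-encoded number.
func generationFromPath(path string) (uint64, error) {
	name := filepath.Base(path)
	gen, err := strconv.ParseUint(name, 16, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse indexdb path %q: %w", path, err)
	}
	return gen, nil
}

func main() {
	gen, err := generationFromPath("/var/lib/victoria-metrics/indexdb/16FF5A2B3C4D5E6F")
	if err != nil {
		panic(err)
	}
	fmt.Printf("generation: %d\n", gen)
}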
@ -141,6 +164,7 @@ type IndexDBMetrics struct {
IndexDBRefCount uint64 IndexDBRefCount uint64
NewTimeseriesCreated uint64 NewTimeseriesCreated uint64
TimeseriesRepopulated uint64
MissingTSIDsForMetricID uint64 MissingTSIDsForMetricID uint64
RecentHourMetricIDsSearchCalls uint64 RecentHourMetricIDsSearchCalls uint64
@ -182,6 +206,7 @@ func (db *indexDB) UpdateMetrics(m *IndexDBMetrics) {
m.IndexDBRefCount += atomic.LoadUint64(&db.refCount) m.IndexDBRefCount += atomic.LoadUint64(&db.refCount)
m.NewTimeseriesCreated += atomic.LoadUint64(&db.newTimeseriesCreated) m.NewTimeseriesCreated += atomic.LoadUint64(&db.newTimeseriesCreated)
m.TimeseriesRepopulated += atomic.LoadUint64(&db.timeseriesRepopulated)
m.MissingTSIDsForMetricID += atomic.LoadUint64(&db.missingTSIDsForMetricID) m.MissingTSIDsForMetricID += atomic.LoadUint64(&db.missingTSIDsForMetricID)
m.DateRangeSearchCalls += atomic.LoadUint64(&db.dateRangeSearchCalls) m.DateRangeSearchCalls += atomic.LoadUint64(&db.dateRangeSearchCalls)
@ -339,6 +364,34 @@ func (db *indexDB) putMetricNameToCache(metricID uint64, metricName []byte) {
db.s.metricNameCache.Set(key[:], metricName) db.s.metricNameCache.Set(key[:], metricName)
} }
// maybeCreateIndexes probabilistically creates indexes for the given (tsid, metricNameRaw) at db.
//
// The probability increases from 0 to 100% during the first hour since db rotation.
//
// It returns true if a new index entry was created, and false if it was skipped.
func (db *indexDB) maybeCreateIndexes(tsid *TSID, metricNameRaw []byte) (bool, error) {
pMin := float64(fasttime.UnixTimestamp()-db.rotationTimestamp) / 3600
if pMin < 1 {
p := float64(uint32(fastHashUint64(tsid.MetricID))) / (1 << 32)
if p > pMin {
// Fast path: there is no need to create indexes for metricNameRaw yet.
return false, nil
}
}
// Slow path: create indexes for (tsid, metricNameRaw) at db.
mn := GetMetricName()
if err := mn.UnmarshalRaw(metricNameRaw); err != nil {
return false, fmt.Errorf("cannot unmarshal metricNameRaw %q: %w", metricNameRaw, err)
}
mn.sortTags()
if err := db.createIndexes(tsid, mn); err != nil {
return false, err
}
PutMetricName(mn)
atomic.AddUint64(&db.timeseriesRepopulated, 1)
return true, nil
}
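
The gate in maybeCreateIndexes is a linear ramp: pMin grows from 0 to 1 over the first hour after rotation, while each MetricID is hashed to a stable point p in [0, 1), so a series is re-indexed once p falls at or below the ramp. A standalone sketch of just that decision is shown below (fastHashUint64 is copied from the storage.go changes later in this commit; the timestamps are illustrative).

package main

import (
	"fmt"
	"time"
)

// fastHashUint64 mirrors the xorshift-style hash added in storage.go in this commit.
func fastHashUint64(x uint64) uint64 {
	x ^= x >> 12 // a
	x ^= x << 25 // b
	x ^= x >> 27 // c
	return x * 2685821657736338717
}

// shouldReindex reports whether indexes for the given metricID should be
// re-created now, given the unix timestamp of the last indexdb rotation.
func shouldReindex(metricID, rotationTimestamp, nowUnix uint64) bool {
	pMin := float64(nowUnix-rotationTimestamp) / 3600 // grows from 0 to 1 during the first hour
	if pMin >= 1 {
		return true // more than an hour after rotation: always re-create
	}
	p := float64(uint32(fastHashUint64(metricID))) / (1 << 32) // stable value in [0, 1)
	return p <= pMin
}

func main() {
	now := uint64(time.Now().Unix())
	rotation := now - 1800 // pretend the rotation happened 30 minutes ago
	for _, id := range []uint64{1, 2, 3, 4, 5} {
		fmt.Printf("metricID=%d reindex=%v\n", id, shouldReindex(id, rotation, now))
	}
}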
func marshalTagFiltersKey(dst []byte, tfss []*TagFilters, tr TimeRange, versioned bool) []byte { func marshalTagFiltersKey(dst []byte, tfss []*TagFilters, tr TimeRange, versioned bool) []byte {
prefix := ^uint64(0) prefix := ^uint64(0)
if versioned { if versioned {
@ -499,7 +552,8 @@ func (db *indexDB) createTSIDByName(dst *TSID, metricName []byte) error {
return fmt.Errorf("cannot unmarshal metricName %q: %w", metricName, err) return fmt.Errorf("cannot unmarshal metricName %q: %w", metricName, err)
} }
if err := db.generateTSID(dst, metricName, mn); err != nil { created, err := db.getOrCreateTSID(dst, metricName, mn)
if err != nil {
return fmt.Errorf("cannot generate TSID: %w", err) return fmt.Errorf("cannot generate TSID: %w", err)
} }
if err := db.createIndexes(dst, mn); err != nil { if err := db.createIndexes(dst, mn); err != nil {
@ -508,9 +562,13 @@ func (db *indexDB) createTSIDByName(dst *TSID, metricName []byte) error {
// There is no need in invalidating tag cache, since it is invalidated // There is no need in invalidating tag cache, since it is invalidated
// on db.tb flush via invalidateTagFiltersCache flushCallback passed to OpenTable. // on db.tb flush via invalidateTagFiltersCache flushCallback passed to OpenTable.
atomic.AddUint64(&db.newTimeseriesCreated, 1)
if logNewSeries { if created {
logger.Infof("new series created: %s", mn.String()) // Increase the newTimeseriesCreated counter only if tsid wasn't found in indexDB
atomic.AddUint64(&db.newTimeseriesCreated, 1)
if logNewSeries {
logger.Infof("new series created: %s", mn.String())
}
} }
return nil return nil
} }
@ -524,7 +582,10 @@ func SetLogNewSeries(ok bool) {
var logNewSeries = false var logNewSeries = false
func (db *indexDB) generateTSID(dst *TSID, metricName []byte, mn *MetricName) error { // getOrCreateTSID looks for existing TSID for the given metricName in db.extDB or creates a new TSID if nothing was found.
//
// Returns true if TSID was created or false if TSID was in extDB
func (db *indexDB) getOrCreateTSID(dst *TSID, metricName []byte, mn *MetricName) (bool, error) {
// Search the TSID in the external storage. // Search the TSID in the external storage.
// This is usually the db from the previous period. // This is usually the db from the previous period.
var err error var err error
@ -533,15 +594,19 @@ func (db *indexDB) generateTSID(dst *TSID, metricName []byte, mn *MetricName) er
}) { }) {
if err == nil { if err == nil {
// The TSID has been found in the external storage. // The TSID has been found in the external storage.
return nil return false, nil
} }
if err != io.EOF { if err != io.EOF {
return fmt.Errorf("external search failed: %w", err) return false, fmt.Errorf("external search failed: %w", err)
} }
} }
// The TSID wasn't found in the external storage. // The TSID wasn't found in the external storage.
// Generate it locally. // Generate it locally.
generateTSID(dst, mn)
return true, nil
}
func generateTSID(dst *TSID, mn *MetricName) {
dst.MetricGroupID = xxhash.Sum64(mn.MetricGroup) dst.MetricGroupID = xxhash.Sum64(mn.MetricGroup)
if len(mn.Tags) > 0 { if len(mn.Tags) > 0 {
dst.JobID = uint32(xxhash.Sum64(mn.Tags[0].Value)) dst.JobID = uint32(xxhash.Sum64(mn.Tags[0].Value))
@ -550,7 +615,6 @@ func (db *indexDB) generateTSID(dst *TSID, metricName []byte, mn *MetricName) er
dst.InstanceID = uint32(xxhash.Sum64(mn.Tags[1].Value)) dst.InstanceID = uint32(xxhash.Sum64(mn.Tags[1].Value))
} }
dst.MetricID = generateUniqueMetricID() dst.MetricID = generateUniqueMetricID()
return nil
} }
func (db *indexDB) createIndexes(tsid *TSID, mn *MetricName) error { func (db *indexDB) createIndexes(tsid *TSID, mn *MetricName) error {
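
The locally generated TSID shown above is essentially a bundle of hashes over the metric name. Below is a simplified, self-contained sketch of that hashing, with stand-in types instead of the real TSID/MetricName and a plain counter instead of generateUniqueMetricID; only the xxhash calls mirror the diff.

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// tsid and metricName are simplified stand-ins for the real TSID/MetricName types.
type tsid struct {
	MetricGroupID uint64
	JobID         uint32
	InstanceID    uint32
	MetricID      uint64
}

type tag struct{ Key, Value []byte }

type metricName struct {
	MetricGroup []byte
	Tags        []tag
}

var metricIDCounter uint64

func generateTSID(dst *tsid, mn *metricName) {
	dst.MetricGroupID = xxhash.Sum64(mn.MetricGroup)
	// The first two tags feed JobID and InstanceID, exactly as in the diff above,
	// so series from the same target tend to share these fields.
	if len(mn.Tags) > 0 {
		dst.JobID = uint32(xxhash.Sum64(mn.Tags[0].Value))
	}
	if len(mn.Tags) > 1 {
		dst.InstanceID = uint32(xxhash.Sum64(mn.Tags[1].Value))
	}
	metricIDCounter++ // the real code uses a globally unique, time-seeded counter
	dst.MetricID = metricIDCounter
}

func main() {
	mn := &metricName{
		MetricGroup: []byte("http_requests_total"),
		Tags: []tag{
			{[]byte("job"), []byte("webservice")},
			{[]byte("instance"), []byte("1.2.3.4")},
		},
	}
	var t tsid
	generateTSID(&t, mn)
	fmt.Printf("%+v\n", t)
}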

View file

@ -9,6 +9,7 @@ import (
"reflect" "reflect"
"regexp" "regexp"
"sort" "sort"
"sync/atomic"
"testing" "testing"
"time" "time"
@ -18,6 +19,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset" "github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set" "github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache" "github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
"github.com/VictoriaMetrics/fastcache"
) )
func TestReverseBytes(t *testing.T) { func TestReverseBytes(t *testing.T) {
@ -457,15 +459,15 @@ func TestMarshalUnmarshalTSIDs(t *testing.T) {
func TestIndexDBOpenClose(t *testing.T) { func TestIndexDBOpenClose(t *testing.T) {
s := newTestStorage() s := newTestStorage()
defer stopTestStorage(s) defer stopTestStorage(s)
tableName := nextIndexDBTableName()
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
db, err := openIndexDB("test-index-db", s) db, err := openIndexDB(tableName, s, 0)
if err != nil { if err != nil {
t.Fatalf("cannot open indexDB: %s", err) t.Fatalf("cannot open indexDB: %s", err)
} }
db.MustClose() db.MustClose()
} }
if err := os.RemoveAll("test-index-db"); err != nil { if err := os.RemoveAll(tableName); err != nil {
t.Fatalf("cannot remove indexDB: %s", err) t.Fatalf("cannot remove indexDB: %s", err)
} }
} }
@ -477,8 +479,8 @@ func TestIndexDB(t *testing.T) {
s := newTestStorage() s := newTestStorage()
defer stopTestStorage(s) defer stopTestStorage(s)
dbName := "test-index-db-serial" dbName := nextIndexDBTableName()
db, err := openIndexDB(dbName, s) db, err := openIndexDB(dbName, s, 0)
if err != nil { if err != nil {
t.Fatalf("cannot open indexDB: %s", err) t.Fatalf("cannot open indexDB: %s", err)
} }
@ -508,7 +510,7 @@ func TestIndexDB(t *testing.T) {
// Re-open the db and verify it works as expected. // Re-open the db and verify it works as expected.
db.MustClose() db.MustClose()
db, err = openIndexDB(dbName, s) db, err = openIndexDB(dbName, s, 0)
if err != nil { if err != nil {
t.Fatalf("cannot open indexDB: %s", err) t.Fatalf("cannot open indexDB: %s", err)
} }
@ -527,8 +529,8 @@ func TestIndexDB(t *testing.T) {
s := newTestStorage() s := newTestStorage()
defer stopTestStorage(s) defer stopTestStorage(s)
dbName := "test-index-db-concurrent" dbName := nextIndexDBTableName()
db, err := openIndexDB(dbName, s) db, err := openIndexDB(dbName, s, 0)
if err != nil { if err != nil {
t.Fatalf("cannot open indexDB: %s", err) t.Fatalf("cannot open indexDB: %s", err)
} }
@ -1485,12 +1487,113 @@ func TestMatchTagFilters(t *testing.T) {
} }
} }
func TestIndexDBRepopulateAfterRotation(t *testing.T) {
path := "TestIndexRepopulateAfterRotation"
s, err := OpenStorage(path, 0, 1e5, 1e5)
if err != nil {
t.Fatalf("cannot open storage: %s", err)
}
s.retentionMsecs = msecsPerMonth
defer func() {
s.MustClose()
if err := os.RemoveAll(path); err != nil {
t.Fatalf("cannot remove %q: %s", path, err)
}
}()
db := s.idb()
if db.generation == 0 {
t.Fatalf("expected indexDB generation to be not 0")
}
const metricRowsN = 1000
// use min-max timestamps of a 1-month range to create a smaller number of partitions
timeMin, timeMax := time.Now().Add(-730*time.Hour), time.Now()
mrs := testGenerateMetricRows(metricRowsN, timeMin.UnixMilli(), timeMax.UnixMilli())
if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
t.Fatalf("unexpected error when adding mrs: %s", err)
}
s.DebugFlush()
// verify the storage contains rows.
var m Metrics
s.UpdateMetrics(&m)
if m.TableMetrics.SmallRowsCount < uint64(metricRowsN) {
t.Fatalf("expecting at least %d rows in the table; got %d", metricRowsN, m.TableMetrics.SmallRowsCount)
}
// check new series were registered in indexDB
added := atomic.LoadUint64(&db.newTimeseriesCreated)
if added != metricRowsN {
t.Fatalf("expected indexDB to contain %d rows; got %d", metricRowsN, added)
}
// check new series were added to cache
var cs fastcache.Stats
s.tsidCache.UpdateStats(&cs)
if cs.EntriesCount != metricRowsN {
t.Fatalf("expected tsidCache to contain %d rows; got %d", metricRowsN, cs.EntriesCount)
}
// check if cache entries do belong to current indexDB generation
var genTSID generationTSID
for _, mr := range mrs {
s.getTSIDFromCache(&genTSID, mr.MetricNameRaw)
if genTSID.generation != db.generation {
t.Fatalf("expected all entries in tsidCache to have the same indexDB generation: %d;"+
"got %d", db.generation, genTSID.generation)
}
}
prevGeneration := db.generation
// force index rotation
s.mustRotateIndexDB()
// check tsidCache wasn't reset after the rotation
var cs2 fastcache.Stats
s.tsidCache.UpdateStats(&cs2)
if cs2.EntriesCount != metricRowsN {
t.Fatalf("expected tsidCache after rotation to contain %d rows; got %d", metricRowsN, cs2.EntriesCount)
}
dbNew := s.idb()
if dbNew.generation == 0 {
t.Fatalf("expected new indexDB generation to be not 0")
}
if dbNew.generation == prevGeneration {
t.Fatalf("expected new indexDB generation %d to be different from prev indexDB", dbNew.generation)
}
// Re-insert the same rows and verify that entries belong to prevGeneration and dbNew.generation,
// while the majority of entries remain at prevGeneration.
if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
t.Fatalf("unexpected error when adding mrs: %s", err)
}
s.DebugFlush()
entriesByGeneration := make(map[uint64]int)
for _, mr := range mrs {
s.getTSIDFromCache(&genTSID, mr.MetricNameRaw)
entriesByGeneration[genTSID.generation]++
}
if len(entriesByGeneration) > 2 {
t.Fatalf("expecting two generations; got %d", entriesByGeneration)
}
prevEntries := entriesByGeneration[prevGeneration]
currEntries := entriesByGeneration[dbNew.generation]
totalEntries := prevEntries + currEntries
if totalEntries != metricRowsN {
t.Fatalf("unexpected number of entries in tsid cache; got %d; want %d", totalEntries, metricRowsN)
}
if float64(currEntries)/float64(totalEntries) > 0.1 {
t.Fatalf("too big share of entries in the new generation; currEntries=%d, prevEntries=%d", currEntries, prevEntries)
}
}
func TestSearchTSIDWithTimeRange(t *testing.T) { func TestSearchTSIDWithTimeRange(t *testing.T) {
s := newTestStorage() s := newTestStorage()
defer stopTestStorage(s) defer stopTestStorage(s)
dbName := "test-index-db-ts-range" dbName := nextIndexDBTableName()
db, err := openIndexDB(dbName, s) db, err := openIndexDB(dbName, s, 0)
if err != nil { if err != nil {
t.Fatalf("cannot open indexDB: %s", err) t.Fatalf("cannot open indexDB: %s", err)
} }

View file

@ -43,8 +43,8 @@ func BenchmarkIndexDBAddTSIDs(b *testing.B) {
s := newTestStorage() s := newTestStorage()
defer stopTestStorage(s) defer stopTestStorage(s)
const dbName = "bench-index-db-add-tsids" dbName := nextIndexDBTableName()
db, err := openIndexDB(dbName, s) db, err := openIndexDB(dbName, s, 0)
if err != nil { if err != nil {
b.Fatalf("cannot open indexDB: %s", err) b.Fatalf("cannot open indexDB: %s", err)
} }
@ -104,8 +104,8 @@ func BenchmarkHeadPostingForMatchers(b *testing.B) {
s := newTestStorage() s := newTestStorage()
defer stopTestStorage(s) defer stopTestStorage(s)
const dbName = "bench-head-posting-for-matchers" dbName := nextIndexDBTableName()
db, err := openIndexDB(dbName, s) db, err := openIndexDB(dbName, s, 0)
if err != nil { if err != nil {
b.Fatalf("cannot open indexDB: %s", err) b.Fatalf("cannot open indexDB: %s", err)
} }
@ -198,7 +198,7 @@ func BenchmarkHeadPostingForMatchers(b *testing.B) {
b.Run(`i=~".*"`, func(b *testing.B) { b.Run(`i=~".*"`, func(b *testing.B) {
tfs := NewTagFilters() tfs := NewTagFilters()
addTagFilter(tfs, "i", ".*", false, true) addTagFilter(tfs, "i", ".*", false, true)
benchSearch(b, tfs, 5e6) benchSearch(b, tfs, 0)
}) })
b.Run(`i=~".+"`, func(b *testing.B) { b.Run(`i=~".+"`, func(b *testing.B) {
tfs := NewTagFilters() tfs := NewTagFilters()
@ -279,8 +279,8 @@ func BenchmarkIndexDBGetTSIDs(b *testing.B) {
s := newTestStorage() s := newTestStorage()
defer stopTestStorage(s) defer stopTestStorage(s)
const dbName = "bench-index-db-get-tsids" dbName := nextIndexDBTableName()
db, err := openIndexDB(dbName, s) db, err := openIndexDB(dbName, s, 0)
if err != nil { if err != nil {
b.Fatalf("cannot open indexDB: %s", err) b.Fatalf("cannot open indexDB: %s", err)
} }

View file

@ -11,7 +11,7 @@ import (
"time" "time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/metricsql" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
) )
// partHeader represents part header. // partHeader represents part header.
@ -140,11 +140,11 @@ func (ph *partHeader) readMinDedupInterval(partPath string) error {
} }
return fmt.Errorf("cannot read %q: %w", filePath, err) return fmt.Errorf("cannot read %q: %w", filePath, err)
} }
dedupInterval, err := metricsql.DurationValue(string(data), 0) dedupInterval, err := promutils.ParseDuration(string(data))
if err != nil { if err != nil {
return fmt.Errorf("cannot parse minimum dedup interval %q at %q: %w", data, filePath, err) return fmt.Errorf("cannot parse minimum dedup interval %q at %q: %w", data, filePath, err)
} }
ph.MinDedupInterval = dedupInterval ph.MinDedupInterval = dedupInterval.Milliseconds()
return nil return nil
} }

View file

@ -155,8 +155,7 @@ func OpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDailySer
path: path, path: path,
cachePath: path + "/cache", cachePath: path + "/cache",
retentionMsecs: retentionMsecs, retentionMsecs: retentionMsecs,
stop: make(chan struct{}),
stop: make(chan struct{}),
} }
if err := fs.MkdirAllIfNotExist(path); err != nil { if err := fs.MkdirAllIfNotExist(path); err != nil {
return nil, fmt.Errorf("cannot create a directory for the storage at %q: %w", path, err) return nil, fmt.Errorf("cannot create a directory for the storage at %q: %w", path, err)
@ -692,7 +691,8 @@ func (s *Storage) mustRotateIndexDB() {
// Create new indexdb table. // Create new indexdb table.
newTableName := nextIndexDBTableName() newTableName := nextIndexDBTableName()
idbNewPath := s.path + "/indexdb/" + newTableName idbNewPath := s.path + "/indexdb/" + newTableName
idbNew, err := openIndexDB(idbNewPath, s) rotationTimestamp := fasttime.UnixTimestamp()
idbNew, err := openIndexDB(idbNewPath, s, rotationTimestamp)
if err != nil { if err != nil {
logger.Panicf("FATAL: cannot create new indexDB at %q: %s", idbNewPath, err) logger.Panicf("FATAL: cannot create new indexDB at %q: %s", idbNewPath, err)
} }
@ -711,8 +711,9 @@ func (s *Storage) mustRotateIndexDB() {
// Persist changes on the file system. // Persist changes on the file system.
fs.MustSyncPath(s.path) fs.MustSyncPath(s.path)
// Flush tsidCache, so idbNew can be populated with fresh data. // Do not flush tsidCache to avoid read/write path slowdown
s.resetAndSaveTSIDCache() // and slowly re-populate new idb with entries from the cache via maybeCreateIndexes().
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401
// Flush dateMetricIDCache, so idbNew can be populated with fresh data. // Flush dateMetricIDCache, so idbNew can be populated with fresh data.
s.dateMetricIDCache.Reset() s.dateMetricIDCache.Reset()
@ -1627,17 +1628,32 @@ var (
// The MetricRow.Value field is ignored. // The MetricRow.Value field is ignored.
func (s *Storage) RegisterMetricNames(mrs []MetricRow) error { func (s *Storage) RegisterMetricNames(mrs []MetricRow) error {
var ( var (
tsid TSID
metricName []byte metricName []byte
) )
var genTSID generationTSID
mn := GetMetricName() mn := GetMetricName()
defer PutMetricName(mn) defer PutMetricName(mn)
idb := s.idb() idb := s.idb()
is := idb.getIndexSearch(noDeadline) is := idb.getIndexSearch(noDeadline)
defer idb.putIndexSearch(is) defer idb.putIndexSearch(is)
for i := range mrs { for i := range mrs {
mr := &mrs[i] mr := &mrs[i]
if s.getTSIDFromCache(&tsid, mr.MetricNameRaw) { if s.getTSIDFromCache(&genTSID, mr.MetricNameRaw) {
if genTSID.generation != idb.generation {
// The found entry is from the previous cache generation
// so attempt to re-populate the current generation with this entry.
// This is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401
created, err := idb.maybeCreateIndexes(&genTSID.TSID, mr.MetricNameRaw)
if err != nil {
return fmt.Errorf("cannot create indexes in the current indexdb: %w", err)
}
if created {
genTSID.generation = idb.generation
s.putTSIDToCache(&genTSID, mr.MetricNameRaw)
}
}
// Fast path - mr.MetricNameRaw has been already registered. // Fast path - mr.MetricNameRaw has been already registered.
continue continue
} }
@ -1648,14 +1664,14 @@ func (s *Storage) RegisterMetricNames(mrs []MetricRow) error {
} }
mn.sortTags() mn.sortTags()
metricName = mn.Marshal(metricName[:0]) metricName = mn.Marshal(metricName[:0])
if err := is.GetOrCreateTSIDByName(&tsid, metricName); err != nil { if err := is.GetOrCreateTSIDByName(&genTSID.TSID, metricName); err != nil {
return fmt.Errorf("cannot register the metric because cannot create TSID for metricName %q: %w", metricName, err) return fmt.Errorf("cannot register the metric because cannot create TSID for metricName %q: %w", metricName, err)
} }
s.putTSIDToCache(&tsid, mr.MetricNameRaw) s.putTSIDToCache(&genTSID, mr.MetricNameRaw)
// Register the metric in per-day inverted index. // Register the metric in per-day inverted index.
date := uint64(mr.Timestamp) / msecPerDay date := uint64(mr.Timestamp) / msecPerDay
metricID := tsid.MetricID metricID := genTSID.TSID.MetricID
if s.dateMetricIDCache.Has(date, metricID) { if s.dateMetricIDCache.Has(date, metricID) {
// Fast path: the metric has been already registered in per-day inverted index // Fast path: the metric has been already registered in per-day inverted index
continue continue
@ -1690,6 +1706,9 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
) )
var pmrs *pendingMetricRows var pmrs *pendingMetricRows
minTimestamp, maxTimestamp := s.tb.getMinMaxTimestamps() minTimestamp, maxTimestamp := s.tb.getMinMaxTimestamps()
var genTSID generationTSID
// Return only the first error, since there is no sense in returning all errors. // Return only the first error, since there is no sense in returning all errors.
var firstWarn error var firstWarn error
for i := range mrs { for i := range mrs {
@ -1734,7 +1753,8 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
r.TSID = prevTSID r.TSID = prevTSID
continue continue
} }
if s.getTSIDFromCache(&r.TSID, mr.MetricNameRaw) { if s.getTSIDFromCache(&genTSID, mr.MetricNameRaw) {
r.TSID = genTSID.TSID
if s.isSeriesCardinalityExceeded(r.TSID.MetricID, mr.MetricNameRaw) { if s.isSeriesCardinalityExceeded(r.TSID.MetricID, mr.MetricNameRaw) {
// Skip the row, since the limit on the number of unique series has been exceeded. // Skip the row, since the limit on the number of unique series has been exceeded.
j-- j--
@ -1746,6 +1766,20 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
// See Storage.DeleteMetrics code for details. // See Storage.DeleteMetrics code for details.
prevTSID = r.TSID prevTSID = r.TSID
prevMetricNameRaw = mr.MetricNameRaw prevMetricNameRaw = mr.MetricNameRaw
if genTSID.generation != idb.generation {
// The found entry is from the previous cache generation
// so attempt to re-populate the current generation with this entry.
// This is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401
created, err := idb.maybeCreateIndexes(&genTSID.TSID, mr.MetricNameRaw)
if err != nil {
return fmt.Errorf("cannot create indexes in the current indexdb: %w", err)
}
if created {
genTSID.generation = idb.generation
s.putTSIDToCache(&genTSID, mr.MetricNameRaw)
}
}
continue continue
} }
@ -1805,7 +1839,9 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
j-- j--
continue continue
} }
s.putTSIDToCache(&r.TSID, mr.MetricNameRaw) genTSID.generation = idb.generation
genTSID.TSID = r.TSID
s.putTSIDToCache(&genTSID, mr.MetricNameRaw)
prevTSID = r.TSID prevTSID = r.TSID
prevMetricNameRaw = mr.MetricNameRaw prevMetricNameRaw = mr.MetricNameRaw
if s.isSeriesCardinalityExceeded(r.TSID.MetricID, mr.MetricNameRaw) { if s.isSeriesCardinalityExceeded(r.TSID.MetricID, mr.MetricNameRaw) {
@ -1947,7 +1983,10 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
hmPrev := s.prevHourMetricIDs.Load().(*hourMetricIDs) hmPrev := s.prevHourMetricIDs.Load().(*hourMetricIDs)
hmPrevDate := hmPrev.hour / 24 hmPrevDate := hmPrev.hour / 24
nextDayMetricIDs := &s.nextDayMetricIDs.Load().(*byDateMetricIDEntry).v nextDayMetricIDs := &s.nextDayMetricIDs.Load().(*byDateMetricIDEntry).v
todayShare16bit := uint64((float64(fasttime.UnixTimestamp()%(3600*24)) / (3600 * 24)) * (1 << 16)) ts := fasttime.UnixTimestamp()
// Start pre-populating the next per-day inverted index during the last hour of the current day.
// pMin linearly increases from 0 to 1 during the last hour of the day.
pMin := (float64(ts%(3600*24)) / 3600) - 23
type pendingDateMetricID struct { type pendingDateMetricID struct {
date uint64 date uint64
metricID uint64 metricID uint64
@ -1976,18 +2015,20 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
// Fast path: the metricID is in the current hour cache. // Fast path: the metricID is in the current hour cache.
// This means the metricID has been already added to per-day inverted index. // This means the metricID has been already added to per-day inverted index.
// Gradually pre-populate per-day inverted index for the next day // Gradually pre-populate per-day inverted index for the next day during the last hour of the current day.
// during the current day.
// This should reduce CPU usage spike and slowdown at the beginning of the next day // This should reduce CPU usage spike and slowdown at the beginning of the next day
// when entries for all the active time series must be added to the index. // when entries for all the active time series must be added to the index.
// This should address https://github.com/VictoriaMetrics/VictoriaMetrics/issues/430 . // This should address https://github.com/VictoriaMetrics/VictoriaMetrics/issues/430 .
if todayShare16bit > (metricID&(1<<16-1)) && !nextDayMetricIDs.Has(metricID) { if pMin > 0 {
pendingDateMetricIDs = append(pendingDateMetricIDs, pendingDateMetricID{ p := float64(uint32(fastHashUint64(metricID))) / (1 << 32)
date: date + 1, if p < pMin && !nextDayMetricIDs.Has(metricID) {
metricID: metricID, pendingDateMetricIDs = append(pendingDateMetricIDs, pendingDateMetricID{
mr: mrs[i], date: date + 1,
}) metricID: metricID,
pendingNextDayMetricIDs = append(pendingNextDayMetricIDs, metricID) mr: mrs[i],
})
pendingNextDayMetricIDs = append(pendingNextDayMetricIDs, metricID)
}
} }
continue continue
} }
@ -2081,6 +2122,13 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
return firstError return firstError
} }
func fastHashUint64(x uint64) uint64 {
x ^= x >> 12 // a
x ^= x << 25 // b
x ^= x >> 27 // c
return x * 2685821657736338717
}
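
The logic above spreads next-day index pre-population across the last hour of the day: pMin rises linearly from 0 to 1 between 23:00 and 24:00, and fastHashUint64 maps each metricID to a pseudo-random point in [0, 1), so a metricID becomes eligible once its point drops below pMin. A standalone sketch of that sampling decision (the timestamps and metricID below are made up for illustration):

package main

import "fmt"

// fastHashUint64 is the xorshift-multiply hash from the hunk above.
func fastHashUint64(x uint64) uint64 {
	x ^= x >> 12
	x ^= x << 25
	x ^= x >> 27
	return x * 2685821657736338717
}

// shouldPrepopulate reports whether the given metricID should be added
// to the next day's index at the given unix timestamp (in seconds).
func shouldPrepopulate(ts, metricID uint64) bool {
	// pMin goes from 0 at 23:00 to 1 at 24:00; it is negative earlier in the day.
	pMin := float64(ts%(3600*24))/3600 - 23
	if pMin <= 0 {
		return false
	}
	p := float64(uint32(fastHashUint64(metricID))) / (1 << 32)
	return p < pMin
}

func main() {
	const metricID = 123456789
	for _, ts := range []uint64{12 * 3600, 23*3600 + 600, 23*3600 + 3000} {
		fmt.Printf("ts=%d prepopulate=%v\n", ts, shouldPrepopulate(ts, metricID))
	}
}
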
// dateMetricIDCache is fast cache for holding (date, metricID) entries. // dateMetricIDCache is fast cache for holding (date, metricID) entries.
// //
// It should be faster than map[date]*uint64set.Set on multicore systems. // It should be faster than map[date]*uint64set.Set on multicore systems.
@ -2342,13 +2390,20 @@ type hourMetricIDs struct {
isFull bool isFull bool
} }
func (s *Storage) getTSIDFromCache(dst *TSID, metricName []byte) bool { type generationTSID struct {
TSID TSID
// generation stores the indexdb.generation value in order to identify the indexdb this TSID belongs to
generation uint64
}
func (s *Storage) getTSIDFromCache(dst *generationTSID, metricName []byte) bool {
buf := (*[unsafe.Sizeof(*dst)]byte)(unsafe.Pointer(dst))[:] buf := (*[unsafe.Sizeof(*dst)]byte)(unsafe.Pointer(dst))[:]
buf = s.tsidCache.Get(buf[:0], metricName) buf = s.tsidCache.Get(buf[:0], metricName)
return uintptr(len(buf)) == unsafe.Sizeof(*dst) return uintptr(len(buf)) == unsafe.Sizeof(*dst)
} }
func (s *Storage) putTSIDToCache(tsid *TSID, metricName []byte) { func (s *Storage) putTSIDToCache(tsid *generationTSID, metricName []byte) {
buf := (*[unsafe.Sizeof(*tsid)]byte)(unsafe.Pointer(tsid))[:] buf := (*[unsafe.Sizeof(*tsid)]byte)(unsafe.Pointer(tsid))[:]
s.tsidCache.Set(metricName, buf) s.tsidCache.Set(metricName, buf)
} }
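
getTSIDFromCache and putTSIDToCache store the generationTSID struct by reinterpreting it as raw bytes with unsafe, and a lookup only counts as a hit when the returned buffer length equals the struct size. A simplified sketch of the same trick with a plain map standing in for the tsidCache (the entry type is illustrative, not the real TSID layout):

package main

import (
	"fmt"
	"unsafe"
)

// entry is an illustrative fixed-size, pointer-free struct (like generationTSID).
type entry struct {
	MetricID   uint64
	Generation uint64
}

var cache = map[string][]byte{}

func putToCache(e *entry, key []byte) {
	buf := (*[unsafe.Sizeof(*e)]byte)(unsafe.Pointer(e))[:]
	// Copy, since the cache must not alias the caller's struct memory.
	cache[string(key)] = append([]byte(nil), buf...)
}

func getFromCache(dst *entry, key []byte) bool {
	buf := (*[unsafe.Sizeof(*dst)]byte)(unsafe.Pointer(dst))[:]
	v, ok := cache[string(key)]
	if !ok || uintptr(len(v)) != unsafe.Sizeof(*dst) {
		return false
	}
	copy(buf, v) // overwrite dst in place with the cached bytes
	return true
}

func main() {
	putToCache(&entry{MetricID: 42, Generation: 3}, []byte(`metric{job="web"}`))
	var e entry
	fmt.Println(getFromCache(&e, []byte(`metric{job="web"}`)), e)
}
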
@ -2412,12 +2467,12 @@ func (s *Storage) openIndexDBTables(path string) (curr, prev *indexDB, err error
// Open the last two tables. // Open the last two tables.
currPath := path + "/" + tableNames[len(tableNames)-1] currPath := path + "/" + tableNames[len(tableNames)-1]
curr, err = openIndexDB(currPath, s) curr, err = openIndexDB(currPath, s, 0)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("cannot open curr indexdb table at %q: %w", currPath, err) return nil, nil, fmt.Errorf("cannot open curr indexdb table at %q: %w", currPath, err)
} }
prevPath := path + "/" + tableNames[len(tableNames)-2] prevPath := path + "/" + tableNames[len(tableNames)-2]
prev, err = openIndexDB(prevPath, s) prev, err = openIndexDB(prevPath, s, 0)
if err != nil { if err != nil {
curr.MustClose() curr.MustClose()
return nil, nil, fmt.Errorf("cannot open prev indexdb table at %q: %w", prevPath, err) return nil, nil, fmt.Errorf("cannot open prev indexdb table at %q: %w", prevPath, err)

View file

@ -918,37 +918,42 @@ func TestStorageAddRowsConcurrent(t *testing.T) {
} }
} }
func testGenerateMetricRows(rows uint64, timestampMin, timestampMax int64) []MetricRow {
var mrs []MetricRow
var mn MetricName
mn.Tags = []Tag{
{[]byte("job"), []byte("webservice")},
{[]byte("instance"), []byte("1.2.3.4")},
}
for i := 0; i < int(rows); i++ {
mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", i))
metricNameRaw := mn.marshalRaw(nil)
timestamp := rand.Int63n(timestampMax-timestampMin) + timestampMin
value := rand.NormFloat64() * 1e6
mr := MetricRow{
MetricNameRaw: metricNameRaw,
Timestamp: timestamp,
Value: value,
}
mrs = append(mrs, mr)
}
return mrs
}
func testStorageAddRows(s *Storage) error { func testStorageAddRows(s *Storage) error {
const rowsPerAdd = 1e3 const rowsPerAdd = 1e3
const addsCount = 10 const addsCount = 10
for i := 0; i < addsCount; i++ { for i := 0; i < addsCount; i++ {
var mrs []MetricRow mrs := testGenerateMetricRows(rowsPerAdd, 0, 1e10)
var mn MetricName
mn.Tags = []Tag{
{[]byte("job"), []byte("webservice")},
{[]byte("instance"), []byte("1.2.3.4")},
}
for j := 0; j < rowsPerAdd; j++ {
mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", rand.Intn(100)))
metricNameRaw := mn.marshalRaw(nil)
timestamp := rand.Int63n(1e10)
value := rand.NormFloat64() * 1e6
mr := MetricRow{
MetricNameRaw: metricNameRaw,
Timestamp: timestamp,
Value: value,
}
mrs = append(mrs, mr)
}
if err := s.AddRows(mrs, defaultPrecisionBits); err != nil { if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
return fmt.Errorf("unexpected error when adding mrs: %w", err) return fmt.Errorf("unexpected error when adding mrs: %w", err)
} }
} }
// Verify the storage contains rows. // Verify the storage contains rows.
minRowsExpected := uint64(rowsPerAdd) * addsCount minRowsExpected := uint64(rowsPerAdd * addsCount)
var m Metrics var m Metrics
s.UpdateMetrics(&m) s.UpdateMetrics(&m)
if m.TableMetrics.SmallRowsCount < minRowsExpected { if m.TableMetrics.SmallRowsCount < minRowsExpected {

View file

@ -162,7 +162,7 @@ func (tfs *TagFilters) Add(key, value []byte, isNegative, isRegexp bool) error {
} }
if isRegexp && string(value) == ".*" { if isRegexp && string(value) == ".*" {
if !isNegative { if !isNegative {
// Skip tag filter matching anything, since it equal to no filter. // Skip tag filter matching anything, since it is equivalent to no filter.
return nil return nil
} }

View file

@ -36,6 +36,9 @@ type Cache struct {
// After the process of switching, this flag will be set to whole. // After the process of switching, this flag will be set to whole.
mode uint32 mode uint32
// The maxBytes value passed to New() or to Load().
maxBytes int
// mu serializes access to curr, prev and mode // mu serializes access to curr, prev and mode
// in expirationWatcher and cacheSizeWatcher. // in expirationWatcher and cacheSizeWatcher.
mu sync.Mutex mu sync.Mutex
@ -57,9 +60,9 @@ func Load(filePath string, maxBytes int, expireDuration time.Duration) *Cache {
// The cache couldn't be loaded with maxBytes size. // The cache couldn't be loaded with maxBytes size.
// This may mean that the cache is split into curr and prev caches. // This may mean that the cache is split into curr and prev caches.
// Try loading it again with maxBytes / 2 size. // Try loading it again with maxBytes / 2 size.
curr := fastcache.New(maxBytes / 2) curr := fastcache.LoadFromFileOrNew(filePath, maxBytes/2)
prev := fastcache.LoadFromFileOrNew(filePath, maxBytes/2) prev := fastcache.New(maxBytes / 2)
c := newCacheInternal(curr, prev, split) c := newCacheInternal(curr, prev, split, maxBytes)
c.runWatchers(expireDuration) c.runWatchers(expireDuration)
return c return c
} }
@ -68,7 +71,7 @@ func Load(filePath string, maxBytes int, expireDuration time.Duration) *Cache {
// Set its mode to `whole`. // Set its mode to `whole`.
// There is no need for a runWatchers call. // There is no need for a runWatchers call.
prev := fastcache.New(1024) prev := fastcache.New(1024)
return newCacheInternal(curr, prev, whole) return newCacheInternal(curr, prev, whole, maxBytes)
} }
// New creates new cache with the given maxBytes capacity and the given expireDuration // New creates new cache with the given maxBytes capacity and the given expireDuration
@ -78,13 +81,14 @@ func Load(filePath string, maxBytes int, expireDuration time.Duration) *Cache {
func New(maxBytes int, expireDuration time.Duration) *Cache { func New(maxBytes int, expireDuration time.Duration) *Cache {
curr := fastcache.New(maxBytes / 2) curr := fastcache.New(maxBytes / 2)
prev := fastcache.New(1024) prev := fastcache.New(1024)
c := newCacheInternal(curr, prev, split) c := newCacheInternal(curr, prev, split, maxBytes)
c.runWatchers(expireDuration) c.runWatchers(expireDuration)
return c return c
} }
func newCacheInternal(curr, prev *fastcache.Cache, mode int) *Cache { func newCacheInternal(curr, prev *fastcache.Cache, mode, maxBytes int) *Cache {
var c Cache var c Cache
c.maxBytes = maxBytes
c.curr.Store(curr) c.curr.Store(curr)
c.prev.Store(prev) c.prev.Store(prev)
c.stopCh = make(chan struct{}) c.stopCh = make(chan struct{})
@ -129,7 +133,10 @@ func (c *Cache) expirationWatcher(expireDuration time.Duration) {
var cs fastcache.Stats var cs fastcache.Stats
curr.UpdateStats(&cs) curr.UpdateStats(&cs)
c.prev.Store(curr) c.prev.Store(curr)
curr = fastcache.New(int(cs.MaxBytesSize)) // Use c.maxBytes/2 instead of cs.MaxBytesSize for creating new cache,
// since cs.MaxBytesSize may not match c.maxBytes/2, so the created cache
// couldn't be loaded from file with c.maxBytes/2 limit after saving with cs.MaxBytesSize size.
curr = fastcache.New(c.maxBytes / 2)
c.curr.Store(curr) c.curr.Store(curr)
c.mu.Unlock() c.mu.Unlock()
} }
@ -173,7 +180,9 @@ func (c *Cache) cacheSizeWatcher() {
prev.Reset() prev.Reset()
curr := c.curr.Load().(*fastcache.Cache) curr := c.curr.Load().(*fastcache.Cache)
c.prev.Store(curr) c.prev.Store(curr)
c.curr.Store(fastcache.New(int(maxBytesSize * 2))) // use c.maxBytes instead of maxBytesSize*2 for creating new cache, since otherwise the created cache
// couldn't be loaded from file with c.maxBytes limit after saving with maxBytesSize*2 limit.
c.curr.Store(fastcache.New(c.maxBytes))
c.mu.Unlock() c.mu.Unlock()
for { for {
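
The workingsetcache changes above remember the original maxBytes so that every rotated or rebuilt cache is created with maxBytes/2 (split mode) or maxBytes (whole mode), keeping the saved files loadable with the same limits later. A rough sketch of the curr/prev rotation idea on top of fastcache, assuming github.com/VictoriaMetrics/fastcache is available (this simplification omits the watchers, modes and locking of the real package):

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/fastcache"
)

// workingSet keeps recently used entries in curr and the previous epoch in prev.
type workingSet struct {
	maxBytes int
	curr     *fastcache.Cache
	prev     *fastcache.Cache
}

func newWorkingSet(maxBytes int) *workingSet {
	return &workingSet{
		maxBytes: maxBytes,
		curr:     fastcache.New(maxBytes / 2),
		prev:     fastcache.New(1024),
	}
}

// rotate moves curr to prev and starts a fresh curr sized from the stored maxBytes,
// so the capacity stays maxBytes/2 regardless of what fastcache reports in its stats.
func (ws *workingSet) rotate() {
	ws.prev.Reset()
	ws.prev = ws.curr
	ws.curr = fastcache.New(ws.maxBytes / 2)
}

func (ws *workingSet) get(dst, key []byte) []byte {
	result := ws.curr.Get(dst, key)
	if len(result) > len(dst) {
		return result
	}
	// Miss in curr: fall back to prev and promote the entry if found.
	result = ws.prev.Get(dst, key)
	if len(result) > len(dst) {
		ws.curr.Set(key, result[len(dst):])
	}
	return result
}

func (ws *workingSet) set(key, value []byte) {
	ws.curr.Set(key, value)
}

func main() {
	ws := newWorkingSet(32 * 1024 * 1024)
	ws.set([]byte("k"), []byte("v"))
	ws.rotate()
	fmt.Printf("%s\n", ws.get(nil, []byte("k")))
}
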

View file

@ -271,20 +271,18 @@ func (b *bucket) Reset() {
b.mu.Unlock() b.mu.Unlock()
} }
func (b *bucket) Clean() { func (b *bucket) cleanLocked() {
b.mu.Lock()
bGen := b.gen & ((1 << genSizeBits) - 1) bGen := b.gen & ((1 << genSizeBits) - 1)
bIdx := b.idx bIdx := b.idx
bm := b.m bm := b.m
for k, v := range bm { for k, v := range bm {
gen := v >> bucketSizeBits gen := v >> bucketSizeBits
idx := v & ((1 << bucketSizeBits) - 1) idx := v & ((1 << bucketSizeBits) - 1)
if gen == bGen && idx < bIdx || gen+1 == bGen && idx >= bIdx || gen == maxGen && bGen == 1 && idx >= bIdx { if (gen+1 == bGen || gen == maxGen && bGen == 1) && idx >= bIdx || gen == bGen && idx < bIdx {
continue continue
} }
delete(bm, k) delete(bm, k)
} }
b.mu.Unlock()
} }
func (b *bucket) UpdateStats(s *Stats) { func (b *bucket) UpdateStats(s *Stats) {
@ -296,19 +294,17 @@ func (b *bucket) UpdateStats(s *Stats) {
b.mu.RLock() b.mu.RLock()
s.EntriesCount += uint64(len(b.m)) s.EntriesCount += uint64(len(b.m))
bytesSize := uint64(0)
for _, chunk := range b.chunks { for _, chunk := range b.chunks {
s.BytesSize += uint64(cap(chunk)) bytesSize += uint64(cap(chunk))
} }
s.MaxBytesSize += uint64(len(b.chunks))*chunkSize s.BytesSize += bytesSize
s.MaxBytesSize += uint64(len(b.chunks)) * chunkSize
b.mu.RUnlock() b.mu.RUnlock()
} }
func (b *bucket) Set(k, v []byte, h uint64) { func (b *bucket) Set(k, v []byte, h uint64) {
setCalls := atomic.AddUint64(&b.setCalls, 1) atomic.AddUint64(&b.setCalls, 1)
if setCalls%(1<<14) == 0 {
b.Clean()
}
if len(k) >= (1<<16) || len(v) >= (1<<16) { if len(k) >= (1<<16) || len(v) >= (1<<16) {
// Too big key or value - its length cannot be encoded // Too big key or value - its length cannot be encoded
// with 2 bytes (see below). Skip the entry. // with 2 bytes (see below). Skip the entry.
@ -326,13 +322,15 @@ func (b *bucket) Set(k, v []byte, h uint64) {
return return
} }
chunks := b.chunks
needClean := false
b.mu.Lock() b.mu.Lock()
idx := b.idx idx := b.idx
idxNew := idx + kvLen idxNew := idx + kvLen
chunkIdx := idx / chunkSize chunkIdx := idx / chunkSize
chunkIdxNew := idxNew / chunkSize chunkIdxNew := idxNew / chunkSize
if chunkIdxNew > chunkIdx { if chunkIdxNew > chunkIdx {
if chunkIdxNew >= uint64(len(b.chunks)) { if chunkIdxNew >= uint64(len(chunks)) {
idx = 0 idx = 0
idxNew = kvLen idxNew = kvLen
chunkIdx = 0 chunkIdx = 0
@ -340,14 +338,15 @@ func (b *bucket) Set(k, v []byte, h uint64) {
if b.gen&((1<<genSizeBits)-1) == 0 { if b.gen&((1<<genSizeBits)-1) == 0 {
b.gen++ b.gen++
} }
needClean = true
} else { } else {
idx = chunkIdxNew * chunkSize idx = chunkIdxNew * chunkSize
idxNew = idx + kvLen idxNew = idx + kvLen
chunkIdx = chunkIdxNew chunkIdx = chunkIdxNew
} }
b.chunks[chunkIdx] = b.chunks[chunkIdx][:0] chunks[chunkIdx] = chunks[chunkIdx][:0]
} }
chunk := b.chunks[chunkIdx] chunk := chunks[chunkIdx]
if chunk == nil { if chunk == nil {
chunk = getChunk() chunk = getChunk()
chunk = chunk[:0] chunk = chunk[:0]
@ -355,15 +354,19 @@ func (b *bucket) Set(k, v []byte, h uint64) {
chunk = append(chunk, kvLenBuf[:]...) chunk = append(chunk, kvLenBuf[:]...)
chunk = append(chunk, k...) chunk = append(chunk, k...)
chunk = append(chunk, v...) chunk = append(chunk, v...)
b.chunks[chunkIdx] = chunk chunks[chunkIdx] = chunk
b.m[h] = idx | (b.gen << bucketSizeBits) b.m[h] = idx | (b.gen << bucketSizeBits)
b.idx = idxNew b.idx = idxNew
if needClean {
b.cleanLocked()
}
b.mu.Unlock() b.mu.Unlock()
} }
func (b *bucket) Get(dst, k []byte, h uint64, returnDst bool) ([]byte, bool) { func (b *bucket) Get(dst, k []byte, h uint64, returnDst bool) ([]byte, bool) {
atomic.AddUint64(&b.getCalls, 1) atomic.AddUint64(&b.getCalls, 1)
found := false found := false
chunks := b.chunks
b.mu.RLock() b.mu.RLock()
v := b.m[h] v := b.m[h]
bGen := b.gen & ((1 << genSizeBits) - 1) bGen := b.gen & ((1 << genSizeBits) - 1)
@ -372,12 +375,12 @@ func (b *bucket) Get(dst, k []byte, h uint64, returnDst bool) ([]byte, bool) {
idx := v & ((1 << bucketSizeBits) - 1) idx := v & ((1 << bucketSizeBits) - 1)
if gen == bGen && idx < b.idx || gen+1 == bGen && idx >= b.idx || gen == maxGen && bGen == 1 && idx >= b.idx { if gen == bGen && idx < b.idx || gen+1 == bGen && idx >= b.idx || gen == maxGen && bGen == 1 && idx >= b.idx {
chunkIdx := idx / chunkSize chunkIdx := idx / chunkSize
if chunkIdx >= uint64(len(b.chunks)) { if chunkIdx >= uint64(len(chunks)) {
// Corrupted data during the load from file. Just skip it. // Corrupted data during the load from file. Just skip it.
atomic.AddUint64(&b.corruptions, 1) atomic.AddUint64(&b.corruptions, 1)
goto end goto end
} }
chunk := b.chunks[chunkIdx] chunk := chunks[chunkIdx]
idx %= chunkSize idx %= chunkSize
if idx+4 >= chunkSize { if idx+4 >= chunkSize {
// Corrupted data during the load from file. Just skip it. // Corrupted data during the load from file. Just skip it.
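
Both cleanLocked and Get rely on the same liveness rule for the ring buffer: an entry written at (gen, idx) is still readable if it belongs to the current generation and sits before the write cursor, or to the immediately previous generation (including the maxGen to 1 wrap) at or after the cursor. A small standalone sketch of that check, with constants chosen for illustration:

package main

import "fmt"

const (
	genSizeBits = 24                 // illustrative; matches the fastcache layout described above
	maxGen      = 1<<genSizeBits - 1 // generations wrap from maxGen back to 1
)

// isAlive reports whether an entry written at (gen, idx) is still readable
// when the bucket is at generation bGen with write cursor bIdx.
func isAlive(gen, idx, bGen, bIdx uint64) bool {
	// Same generation: everything strictly before the cursor is still intact.
	if gen == bGen && idx < bIdx {
		return true
	}
	// Previous generation (with wrap-around): only the tail at or after the
	// cursor has not been overwritten yet.
	if (gen+1 == bGen || gen == maxGen && bGen == 1) && idx >= bIdx {
		return true
	}
	return false
}

func main() {
	fmt.Println(isAlive(7, 100, 7, 200)) // true: current generation, before cursor
	fmt.Println(isAlive(6, 300, 7, 200)) // true: previous generation, after cursor
	fmt.Println(isAlive(6, 100, 7, 200)) // false: previous generation, already overwritten
}
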

View file

@ -272,7 +272,9 @@ func loadBuckets(buckets []bucket, dataPath string, maxChunks uint64) error {
} }
func (b *bucket) Save(w io.Writer) error { func (b *bucket) Save(w io.Writer) error {
b.Clean() b.mu.Lock()
b.cleanLocked()
b.mu.Unlock()
b.mu.RLock() b.mu.RLock()
defer b.mu.RUnlock() defer b.mu.RUnlock()

View file

@ -1,3 +1,4 @@
//go:build appengine || windows
// +build appengine windows // +build appengine windows
package fastcache package fastcache

View file

@ -1,3 +1,4 @@
//go:build !appengine && !windows
// +build !appengine,!windows // +build !appengine,!windows
package fastcache package fastcache

View file

@ -20669,6 +20669,9 @@ var awsPartition = partition{
endpointKey{ endpointKey{
Region: "ap-northeast-2", Region: "ap-northeast-2",
}: endpoint{}, }: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{ endpointKey{
Region: "ap-south-1", Region: "ap-south-1",
}: endpoint{}, }: endpoint{},

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go" const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK // SDKVersion is the version of this SDK
const SDKVersion = "1.42.47" const SDKVersion = "1.42.52"

View file

@ -597,6 +597,7 @@ ccflags="$@"
$2 ~ /^DEVLINK_/ || $2 ~ /^DEVLINK_/ ||
$2 ~ /^ETHTOOL_/ || $2 ~ /^ETHTOOL_/ ||
$2 ~ /^LWTUNNEL_IP/ || $2 ~ /^LWTUNNEL_IP/ ||
$2 ~ /^ITIMER_/ ||
$2 !~ "WMESGLEN" && $2 !~ "WMESGLEN" &&
$2 ~ /^W[A-Z0-9]+$/ || $2 ~ /^W[A-Z0-9]+$/ ||
$2 ~/^PPPIOC/ || $2 ~/^PPPIOC/ ||

View file

@ -14,6 +14,7 @@ package unix
import ( import (
"encoding/binary" "encoding/binary"
"syscall" "syscall"
"time"
"unsafe" "unsafe"
) )
@ -2314,11 +2315,56 @@ type RemoteIovec struct {
//sys shmdt(addr uintptr) (err error) //sys shmdt(addr uintptr) (err error)
//sys shmget(key int, size int, flag int) (id int, err error) //sys shmget(key int, size int, flag int) (id int, err error)
//sys getitimer(which int, currValue *Itimerval) (err error)
//sys setitimer(which int, newValue *Itimerval, oldValue *Itimerval) (err error)
// MakeItimerval creates an Itimerval from interval and value durations.
func MakeItimerval(interval, value time.Duration) Itimerval {
return Itimerval{
Interval: NsecToTimeval(interval.Nanoseconds()),
Value: NsecToTimeval(value.Nanoseconds()),
}
}
// A value which may be passed to the which parameter for Getitimer and
// Setitimer.
type ItimerWhich int
// Possible which values for Getitimer and Setitimer.
const (
ItimerReal ItimerWhich = ITIMER_REAL
ItimerVirtual ItimerWhich = ITIMER_VIRTUAL
ItimerProf ItimerWhich = ITIMER_PROF
)
// Getitimer wraps getitimer(2) to return the current value of the timer
// specified by which.
func Getitimer(which ItimerWhich) (Itimerval, error) {
var it Itimerval
if err := getitimer(int(which), &it); err != nil {
return Itimerval{}, err
}
return it, nil
}
// Setitimer wraps setitimer(2) to arm or disarm the timer specified by which.
// It returns the previous value of the timer.
//
// If the Itimerval argument is the zero value, the timer will be disarmed.
func Setitimer(which ItimerWhich, it Itimerval) (Itimerval, error) {
var prev Itimerval
if err := setitimer(int(which), &it, &prev); err != nil {
return Itimerval{}, err
}
return prev, nil
}
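
The new Getitimer/Setitimer wrappers expose the classic POSIX interval timers; arming ITIMER_REAL makes the kernel deliver SIGALRM when the timer expires. A hedged usage sketch (Linux only, assuming a golang.org/x/sys/unix version that already contains these wrappers):

//go:build linux

package main

import (
	"fmt"
	"os"
	"os/signal"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, unix.SIGALRM)

	// Fire once after 500ms, then every 2s until disarmed.
	it := unix.MakeItimerval(2*time.Second, 500*time.Millisecond)
	prev, err := unix.Setitimer(unix.ItimerReal, it)
	if err != nil {
		fmt.Fprintln(os.Stderr, "setitimer:", err)
		os.Exit(1)
	}
	fmt.Printf("previous timer: %+v\n", prev)

	<-sigCh // wait for the first SIGALRM

	// Passing the zero value disarms the timer.
	if _, err := unix.Setitimer(unix.ItimerReal, unix.Itimerval{}); err != nil {
		fmt.Fprintln(os.Stderr, "disarm:", err)
	}
	cur, _ := unix.Getitimer(unix.ItimerReal)
	fmt.Printf("after disarm: %+v\n", cur)
}
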
/* /*
* Unimplemented * Unimplemented
*/ */
// AfsSyscall // AfsSyscall
// Alarm
// ArchPrctl // ArchPrctl
// Brk // Brk
// ClockNanosleep // ClockNanosleep
@ -2334,7 +2380,6 @@ type RemoteIovec struct {
// GetMempolicy // GetMempolicy
// GetRobustList // GetRobustList
// GetThreadArea // GetThreadArea
// Getitimer
// Getpmsg // Getpmsg
// IoCancel // IoCancel
// IoDestroy // IoDestroy

vendor/golang.org/x/sys/unix/syscall_linux_alarm.go (new file, 14 lines, generated, vendored)
View file

@ -0,0 +1,14 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && (386 || amd64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || ppc || s390x || sparc64)
// +build linux
// +build 386 amd64 mips mipsle mips64 mips64le ppc64 ppc64le ppc s390x sparc64
package unix
// SYS_ALARM is not defined on arm or riscv, but is available for other GOARCH
// values.
//sys Alarm(seconds uint) (remaining uint, err error)
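
On the architectures listed in the build tags this exposes alarm(2) directly; the returned value is the number of seconds that remained on any previously scheduled alarm. A tiny hedged example, again assuming a golang.org/x/sys/unix version with this function and a supported GOARCH:

//go:build linux && amd64

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Schedule SIGALRM in 30 seconds, then cancel it; the second call
	// reports how many seconds remained on the first alarm.
	if _, err := unix.Alarm(30); err != nil {
		panic(err)
	}
	remaining, err := unix.Alarm(0) // 0 cancels any pending alarm
	if err != nil {
		panic(err)
	}
	fmt.Println("seconds remaining on the cancelled alarm:", remaining)
}
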

View file

@ -1268,6 +1268,9 @@ const (
IP_XFRM_POLICY = 0x11 IP_XFRM_POLICY = 0x11
ISOFS_SUPER_MAGIC = 0x9660 ISOFS_SUPER_MAGIC = 0x9660
ISTRIP = 0x20 ISTRIP = 0x20
ITIMER_PROF = 0x2
ITIMER_REAL = 0x0
ITIMER_VIRTUAL = 0x1
IUTF8 = 0x4000 IUTF8 = 0x4000
IXANY = 0x800 IXANY = 0x800
JFFS2_SUPER_MAGIC = 0x72b6 JFFS2_SUPER_MAGIC = 0x72b6

View file

@ -2032,3 +2032,23 @@ func shmget(key int, size int, flag int) (id int, err error) {
} }
return return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getitimer(which int, currValue *Itimerval) (err error) {
_, _, e1 := Syscall(SYS_GETITIMER, uintptr(which), uintptr(unsafe.Pointer(currValue)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setitimer(which int, newValue *Itimerval, oldValue *Itimerval) (err error) {
_, _, e1 := Syscall(SYS_SETITIMER, uintptr(which), uintptr(unsafe.Pointer(newValue)), uintptr(unsafe.Pointer(oldValue)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View file

@ -1,4 +1,4 @@
// go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go // go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go syscall_linux_alarm.go
// Code generated by the command above; see README.md. DO NOT EDIT. // Code generated by the command above; see README.md. DO NOT EDIT.
//go:build linux && 386 //go:build linux && 386
@ -524,3 +524,14 @@ func utimes(path string, times *[2]Timeval) (err error) {
} }
return return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Alarm(seconds uint) (remaining uint, err error) {
r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0)
remaining = uint(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View file

@ -1,4 +1,4 @@
// go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go // go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go syscall_linux_alarm.go
// Code generated by the command above; see README.md. DO NOT EDIT. // Code generated by the command above; see README.md. DO NOT EDIT.
//go:build linux && amd64 //go:build linux && amd64
@ -691,3 +691,14 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f
} }
return return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Alarm(seconds uint) (remaining uint, err error) {
r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0)
remaining = uint(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View file

@ -1,4 +1,4 @@
// go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go // go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go syscall_linux_alarm.go
// Code generated by the command above; see README.md. DO NOT EDIT. // Code generated by the command above; see README.md. DO NOT EDIT.
//go:build linux && mips //go:build linux && mips
@ -702,3 +702,14 @@ func setrlimit(resource int, rlim *rlimit32) (err error) {
} }
return return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Alarm(seconds uint) (remaining uint, err error) {
r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0)
remaining = uint(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View file

@ -1,4 +1,4 @@
// go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go // go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go syscall_linux_alarm.go
// Code generated by the command above; see README.md. DO NOT EDIT. // Code generated by the command above; see README.md. DO NOT EDIT.
//go:build linux && mips64 //go:build linux && mips64
@ -696,3 +696,14 @@ func stat(path string, st *stat_t) (err error) {
} }
return return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Alarm(seconds uint) (remaining uint, err error) {
r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0)
remaining = uint(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View file

@ -1,4 +1,4 @@
// go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go // go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go syscall_linux_alarm.go
// Code generated by the command above; see README.md. DO NOT EDIT. // Code generated by the command above; see README.md. DO NOT EDIT.
//go:build linux && mipsle //go:build linux && mipsle
@ -702,3 +702,14 @@ func setrlimit(resource int, rlim *rlimit32) (err error) {
} }
return return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Alarm(seconds uint) (remaining uint, err error) {
r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0)
remaining = uint(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View file

@ -1,4 +1,4 @@
// go run mksyscall.go -b32 -tags linux,ppc syscall_linux.go syscall_linux_ppc.go // go run mksyscall.go -b32 -tags linux,ppc syscall_linux.go syscall_linux_ppc.go syscall_linux_alarm.go
// Code generated by the command above; see README.md. DO NOT EDIT. // Code generated by the command above; see README.md. DO NOT EDIT.
//go:build linux && ppc //go:build linux && ppc
@ -707,3 +707,14 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f
} }
return return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Alarm(seconds uint) (remaining uint, err error) {
r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0)
remaining = uint(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

Some files were not shown because too many files have changed in this diff.