Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2025-03-11 15:34:56 +00:00.

Commit 163f2a46fd: Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

176 changed files with 4655 additions and 1512 deletions.
.github/workflows/main.yml (vendored, 2 changes)
```yaml
      GOOS=darwin go build -mod=vendor ./app/vmctl
      CGO_ENABLED=0 GOOS=windows go build -mod=vendor ./app/vmagent
    - name: Publish coverage
      uses: codecov/codecov-action@v2.1.0
      with:
        file: ./coverage.txt
```
README.md (289 changes)
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.

VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), in [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), in [Snap packages](https://snapcraft.io/victoriametrics) and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and follow [these instructions](#how-to-start-victoriametrics). Then read the [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.

The cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).

[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in the enterprise package](https://victoriametrics.com/enterprise.html). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
## Prominent features

VictoriaMetrics has the following prominent features:

* It can be used as long-term storage for Prometheus. See [these docs](#prometheus-setup) for details.
* It can be used as a drop-in replacement for Prometheus in Grafana, because it supports the [Prometheus querying API](#prometheus-querying-api-usage).
* It can be used as a drop-in replacement for Graphite in Grafana, because it supports the [Graphite API](#graphite-api-usage).
* It features easy setup and operation:
  * VictoriaMetrics consists of a single [small executable](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d) without external dependencies.
  * All the configuration is done via explicit command-line flags with reasonable defaults.
  * All the data is stored in a single directory pointed to by the `-storageDataPath` command-line flag.
  * Easy and fast backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) to S3 or GCS can be done with the [vmbackup](https://docs.victoriametrics.com/vmbackup.html) / [vmrestore](https://docs.victoriametrics.com/vmrestore.html) tools. See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
* It implements a PromQL-based query language - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which provides improved functionality on top of PromQL.
* It provides a global query view. Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics. Later this data may be queried via a single query.
* It provides high performance and good vertical and horizontal scalability for both [data ingestion](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and [data querying](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4). It [outperforms InfluxDB and TimescaleDB by up to 20x](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
* It [uses 10x less RAM than InfluxDB](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) and [up to 7x less RAM than Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f) when dealing with millions of unique time series (aka [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality)).
* It is optimized for time series with a [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
* It provides high data compression, so [up to 70x more data points](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4) may be crammed into limited storage compared to TimescaleDB, and [up to 7x less storage space is required compared to Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f).
* It is optimized for storage with high-latency IO and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc). See [disk IO graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
* A single-node VictoriaMetrics may substitute moderately sized clusters built with competing solutions such as Thanos, M3DB, Cortex, InfluxDB or TimescaleDB. See [vertical scalability benchmarks](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae), [comparing Thanos to VictoriaMetrics cluster](https://medium.com/@valyala/comparing-thanos-to-victoriametrics-cluster-b193bea1683) and the [Remote Write Storage Wars](https://promcon.io/2019-munich/talks/remote-write-storage-wars/) talk from [PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
* It protects the storage from data corruption on unclean shutdown (i.e. OOM, hardware reset or `kill -9`) thanks to [the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
* It supports metrics scraping, ingestion and [backfilling](#backfilling) via the following protocols:
  * [Metrics scraping from Prometheus exporters](#how-to-scrape-prometheus-exporters-such-as-node-exporter).
  * [Prometheus remote write API](#prometheus-setup).
  * [Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format).
  * [InfluxDB line protocol](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) over HTTP, TCP and UDP.
  * [Graphite plaintext protocol](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
  * [OpenTSDB put message](#sending-data-via-telnet-put-protocol).
  * [HTTP OpenTSDB /api/put requests](#sending-opentsdb-data-via-http-apiput-requests).
  * [JSON line format](#how-to-import-data-in-json-line-format).
  * [Arbitrary CSV data](#how-to-import-csv-data).
  * [Native binary format](#how-to-import-data-in-native-format).
* It supports metrics relabeling. See [these docs](#relabeling) for details.
* It can deal with [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via the [series limiter](#cardinality-limiter).
* It works well with large amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/enterprise.html).
* It has an open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

See also [various articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
## Operation

### Table of contents

* [How to start VictoriaMetrics](#how-to-start-victoriametrics)
  * [Environment variables](#environment-variables)
  * [Configuration with snap package](#configuration-with-snap-package)
* [Prometheus setup](#prometheus-setup)
* [Grafana setup](#grafana-setup)
* [How to upgrade VictoriaMetrics](#how-to-upgrade-victoriametrics)
* [How to apply new config to VictoriaMetrics](#how-to-apply-new-config-to-victoriametrics)
* [How to scrape Prometheus exporters such as node_exporter](#how-to-scrape-prometheus-exporters-such-as-node-exporter)
* [How to send data from InfluxDB-compatible agents such as Telegraf](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
* [How to send data from Graphite-compatible agents such as StatsD](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd)
* [Querying Graphite data](#querying-graphite-data)
* [How to send data from OpenTSDB-compatible agents](#how-to-send-data-from-opentsdb-compatible-agents)
* [Prometheus querying API usage](#prometheus-querying-api-usage)
  * [Prometheus querying API enhancements](#prometheus-querying-api-enhancements)
* [Graphite API usage](#graphite-api-usage)
  * [Graphite Render API usage](#graphite-render-api-usage)
  * [Graphite Metrics API usage](#graphite-metrics-api-usage)
  * [Graphite Tags API usage](#graphite-tags-api-usage)
* [How to build from sources](#how-to-build-from-sources)
  * [Development build](#development-build)
  * [Production build](#production-build)
  * [ARM build](#arm-build)
  * [Pure Go build (CGO_ENABLED=0)](#pure-go-build-cgo_enabled0)
  * [Building docker images](#building-docker-images)
* [Start with docker-compose](#start-with-docker-compose)
* [Setting up service](#setting-up-service)
* [How to work with snapshots](#how-to-work-with-snapshots)
* [How to delete time series](#how-to-delete-time-series)
* [Forced merge](#forced-merge)
* [How to export time series](#how-to-export-time-series)
  * [How to export data in native format](#how-to-export-data-in-native-format)
  * [How to export data in JSON line format](#how-to-export-data-in-json-line-format)
  * [How to export CSV data](#how-to-export-csv-data)
* [How to import time series data](#how-to-import-time-series-data)
  * [How to import data in native format](#how-to-import-data-in-native-format)
  * [How to import data in JSON line format](#how-to-import-data-in-json-line-format)
  * [How to import CSV data](#how-to-import-csv-data)
  * [How to import data in Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format)
* [Relabeling](#relabeling)
* [Federation](#federation)
* [Capacity planning](#capacity-planning)
* [High availability](#high-availability)
* [Deduplication](#deduplication)
* [Retention](#retention)
* [Multiple retentions](#multiple-retentions)
* [Downsampling](#downsampling)
* [Multi-tenancy](#multi-tenancy)
* [Scalability and cluster version](#scalability-and-cluster-version)
* [Alerting](#alerting)
* [Security](#security)
* [Tuning](#tuning)
* [Monitoring](#monitoring)
* [TSDB stats](#tsdb-stats)
* [Cardinality limiter](#cardinality-limiter)
* [Troubleshooting](#troubleshooting)
* [Data migration](#data-migration)
* [Backfilling](#backfilling)
* [Data updates](#data-updates)
* [Replication](#replication)
* [Backups](#backups)
* [Profiling](#profiling)
* [Integrations](#integrations)
* [Third-party contributions](#third-party-contributions)
* [Contacts](#contacts)
* [Community and contributions](#community-and-contributions)
* [Reporting bugs](#reporting-bugs)
* [VictoriaMetrics Logo](#victoria-metrics-logo)
  * [Logo Usage Guidelines](#logo-usage-guidelines)
    * [Font used](#font-used)
    * [Color Palette](#color-palette)
  * [We kindly ask](#we-kindly-ask)
* [List of command-line flags](#list-of-command-line-flags)
## How to start VictoriaMetrics

Just download the [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or [Docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and start it with the desired command-line flags.

The following command-line flags are used the most:

* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. The default path is `victoria-metrics-data` in the current working directory.
* `-retentionPeriod` - retention for stored data. Older data is automatically deleted. The default retention is 1 month. See [these docs](#retention) for more details.

Other flags have good enough default values, so set them only if you really need to. Pass `-help` to see [all the available flags with their descriptions and default values](#list-of-command-line-flags).

See how to [ingest data to VictoriaMetrics](#how-to-import-time-series-data), how to [query VictoriaMetrics via Grafana](#grafana-setup), how to [query VictoriaMetrics via Graphite API](#graphite-api-usage) and how to [handle alerts](#alerting).

VictoriaMetrics accepts [Prometheus querying API requests](#prometheus-querying-api-usage) on port `8428` by default.

It is recommended to set up [monitoring](#monitoring) for VictoriaMetrics.
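Putting the two flags above together, a minimal start command might look like the following sketch; the binary path, data directory and retention value are illustrative, not defaults:

```bash
# Store data under /var/lib/victoria-metrics and keep it for 12 months.
# The path and values below are illustrative.
/path/to/victoria-metrics-prod \
  -storageDataPath=/var/lib/victoria-metrics \
  -retentionPeriod=12
```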
### Environment variables

Each flag value can be set via environment variables according to the following rules:

* The `-envflag.enable` flag must be set.
* Each `.` char in a flag name must be substituted with `_` (for example, `-insert.maxQueueDuration <duration>` translates to `insert_maxQueueDuration=<duration>`).
* For repeating flags an alternative syntax can be used by joining the different values into one with a `,` char as separator (for example, `-storageNode <nodeA> -storageNode <nodeB>` translates to `storageNode=<nodeA>,<nodeB>`).
* An environment variable prefix can be set via the `-envflag.prefix` flag. For instance, if `-envflag.prefix=VM_`, then env vars must be prepended with `VM_`.
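A sketch of the earlier example expressed via environment variables under these rules, assuming the `VM_` prefix from the last bullet:

```bash
# -envflag.enable turns on env-var parsing; VM_ is the chosen prefix.
export VM_storageDataPath=/var/lib/victoria-metrics
export VM_retentionPeriod=12
/path/to/victoria-metrics-prod -envflag.enable -envflag.prefix=VM_
```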
### Configuration with snap package

A Snap package for VictoriaMetrics is available [here](https://snapcraft.io/victoriametrics).

Command-line flags for the Snap package can be set with the following command:

```text
echo 'FLAGS="-selfScrapeInterval=10s -search.logSlowQueryDuration=20s"' > $SNAP_DATA/var/snap/victoriametrics/current/extra_flags
snap restart victoriametrics
```

Do not change the value of the `-storageDataPath` flag, because the snap package has limited access to the host filesystem.

Changing the scrape configuration is possible with a text editor:

```text
vi $SNAP_DATA/var/snap/victoriametrics/current/etc/victoriametrics-scrape-config.yaml
```

After changes are made, trigger a config re-read with the command `curl 127.0.0.1:8248/-/reload`.
## Prometheus setup

Add the following lines to the Prometheus config file (it is usually located at `/etc/prometheus/prometheus.yml`) in order to send data to VictoriaMetrics:

```yml
remote_write:
  - url: http://<victoriametrics-addr>:8428/api/v1/write
```
Prometheus writes incoming data to local storage and replicates it to remote storage. This means that data remains available in local storage for the `--storage.tsdb.retention.time` duration even if remote storage is unavailable.

If you plan to send data to VictoriaMetrics from multiple Prometheus instances, then add the following lines to the `global` section of the [Prometheus config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file):
```yml
global:
  external_labels:
    datacenter: dc-123
```

This instructs Prometheus to add the `datacenter=dc-123` label to each sample before sending it to remote storage. The label name can be arbitrary - `datacenter` is just an example. The label value must be unique across Prometheus instances, so time series can be filtered and grouped by this label.

For highly loaded Prometheus instances (200k+ samples per second) the following tuning may be applied:
```yaml
remote_write:
  - url: http://<victoriametrics-addr>:8428/api/v1/write
    queue_config:
      max_samples_per_send: 10000
      capacity: 20000
      max_shards: 30
```

Using remote write increases memory usage for Prometheus by up to ~25%. If you are experiencing issues with too high memory consumption of Prometheus, then try to lower the `max_samples_per_send` and `capacity` params. Keep in mind that these two params are tightly connected. Read more about tuning remote write for Prometheus [here](https://prometheus.io/docs/practices/remote_write).

It is recommended to upgrade Prometheus to [v2.12.0](https://github.com/prometheus/prometheus/releases) or newer, since previous versions may have issues with `remote_write`.

Take a look also at [vmagent](https://docs.victoriametrics.com/vmagent.html) and [vmalert](https://docs.victoriametrics.com/vmalert.html), which can be used as a faster and less resource-hungry alternative to Prometheus.
## Grafana setup

Create a Prometheus datasource in Grafana with the following URL:

```url
http://<victoriametrics-addr>:8428
```

Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.

Then build graphs and dashboards for the created datasource using [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) or [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
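Before wiring up Grafana, the datasource URL can be sanity-checked by querying the Prometheus-compatible API directly; the query below is illustrative:

```bash
# An instant query against the Prometheus querying API exposed on port 8428.
curl 'http://<victoriametrics-addr>:8428/api/v1/query?query=up'
```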
## How to upgrade VictoriaMetrics

It is safe to upgrade VictoriaMetrics to new versions unless the [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is safe to skip multiple versions during the upgrade unless the release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features.

It is also safe to downgrade to older versions unless the [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise.

The following steps must be performed during the upgrade / downgrade procedure:

* Send a `SIGINT` signal to the VictoriaMetrics process in order to gracefully stop it.
* Wait until the process stops. This can take a few seconds.
* Start the upgraded VictoriaMetrics.

Prometheus doesn't drop data during a VictoriaMetrics restart. See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details. The same applies to [vmagent](https://docs.victoriametrics.com/vmagent.html).
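A sketch of these steps as shell commands, assuming VictoriaMetrics runs as a plain process; the process name and binary path are assumptions, and a service manager should be used instead when present:

```bash
pkill -INT -f victoria-metrics-prod                                 # graceful stop via SIGINT
while pgrep -f victoria-metrics-prod > /dev/null; do sleep 1; done  # wait until the process stops
/path/to/new/victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics &
```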
## How to apply new config to VictoriaMetrics

VictoriaMetrics is configured via command-line flags, so it must be restarted when new command-line flags should be applied:

* Send a `SIGINT` signal to the VictoriaMetrics process in order to gracefully stop it.
* Wait until the process stops. This can take a few seconds.
* Start VictoriaMetrics with the new command-line flags.

Prometheus doesn't drop data during a VictoriaMetrics restart. See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details. The same applies to [vmagent](https://docs.victoriametrics.com/vmagent.html).
## How to scrape Prometheus exporters such as [node-exporter](https://github.com/prometheus/node_exporter)

VictoriaMetrics can be used as a drop-in replacement for Prometheus for scraping targets configured in a `prometheus.yml` config file according to [the specification](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file). Just set the `-promscrape.config` command-line flag to the path of the `prometheus.yml` config - and VictoriaMetrics will start scraping the configured targets. Currently the following [scrape_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) types are supported:

* [static_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config)
* [file_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config)
* [http_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config)

File a [feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you need support for other `*_sd_config` types.

The file pointed to by `-promscrape.config` may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
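A minimal sketch of such a setup; the job name, target address and file paths are illustrative:

```bash
# Write a minimal scrape config with a %{ENV_VAR} placeholder, then point
# VictoriaMetrics at it. All names and paths here are made up for the example.
cat > /tmp/prometheus.yml <<'EOF'
scrape_configs:
  - job_name: node-exporter
    static_configs:
      - targets: ['%{NODE_EXPORTER_ADDR}']
EOF
NODE_EXPORTER_ADDR=localhost:9100 /path/to/victoria-metrics-prod \
  -promscrape.config=/tmp/prometheus.yml
```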
## How to send data from InfluxDB-compatible agents such as Telegraf

Use `http://<victoriametrics-addr>:8428` instead of the InfluxDB URL in the agents' configs. For instance, put the following lines into the `Telegraf` config, so it sends data to VictoriaMetrics instead of InfluxDB:

```
[[outputs.influxdb]]
  urls = ["http://<victoriametrics-addr>:8428"]
```

Another option is to enable the TCP and UDP receiver for the InfluxDB line protocol via the `-influxListenAddr` command-line flag and stream plain InfluxDB line protocol data to the configured TCP and/or UDP addresses.
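A sketch of the TCP variant; the port matches the usual `:8189` suggestion, while the metric line is made up:

```bash
# Enable the TCP/UDP InfluxDB line protocol listener, then push one line over TCP.
/path/to/victoria-metrics-prod -influxListenAddr=:8189 &
echo 'foo,tag1=value1 field1=12' | nc localhost 8189
```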
VictoriaMetrics performs the following transformations on the ingested InfluxDB data:

* The [`db` query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into the `db` label value unless a `db` tag exists in the InfluxDB line.
* Field names are mapped to time series names prefixed with the `{measurement}{separator}` value, where `{separator}` equals `_` by default. It can be changed with the `-influxMeasurementFieldSeparator` command-line flag. See also the `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if the `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names.
* Field values are mapped to time series values.
* Tags are mapped to Prometheus labels as-is.

For example, the following InfluxDB line:
```raw
foo,tag1=value1,tag2=value2 field1=12,field2=40
```

is converted to the following Prometheus data points:

```raw
foo_field1{tag1="value1", tag2="value2"} 12
foo_field2{tag1="value1", tag2="value2"} 40
```

Example for writing data with the [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/) to local VictoriaMetrics using `curl`:
```bash
curl -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://localhost:8428/write'
```

After that the written data can be read back via the `/api/v1/export` endpoint, which should return the following response:

```jsonl
{"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1560272508147]}
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[1.23],"timestamps":[1560272508147]}
```

Note that the InfluxDB line protocol expects [timestamps in *nanoseconds* by default](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/#timestamp), while VictoriaMetrics stores them with *millisecond* precision.

Extra labels may be added to all the written time series by passing `extra_label=name=value` query args.
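For instance, a sketch of the `extra_label` query arg; the label name and value are illustrative:

```bash
# Attach {datacenter="dc1"} to every time series written by this request.
curl -d 'measurement,tag1=value1 field1=12' -X POST 'http://localhost:8428/write?extra_label=datacenter=dc1'
```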
The exported CSV data can be imported to VictoriaMetrics via [/api/v1/import/csv](#how-to-import-csv-data).

## How to import time series data

Time series data can be imported via any supported ingestion protocol:

* [Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). See [these docs](#prometheus-setup) for details.
* InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
* Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
* OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
* OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
## Relabeling

VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if the `-relabelConfig` command-line flag points to a file containing a list of [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) entries.

Example contents for the `-relabelConfig` file:

```yml
# Drop the metric (or scrape target) with the `{__meta_kubernetes_pod_container_init="true"}` label.
- action: drop
  source_labels: [__meta_kubernetes_pod_container_init]
  regex: true
```

VictoriaMetrics provides the following extra actions for relabeling rules:

* `replace_all`: replaces all the occurrences of `regex` in the values of `source_labels` with the `replacement` and stores the result in the `target_label`.
* `labelmap_all`: replaces all the occurrences of `regex` in all the label names with the `replacement`.
* `keep_if_equal`: keeps the entry if all the label values from `source_labels` are equal.
* `drop_if_equal`: drops the entry if all the label values from `source_labels` are equal.

See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details about relabeling in VictoriaMetrics.
## Federation

## Multi-tenancy

Single-node VictoriaMetrics doesn't support multi-tenancy. Use the [cluster version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy) instead.

## Scalability and cluster version
## Cardinality limiter

The exceeded limits can be [monitored](#monitoring). These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).

See also the more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).

## Troubleshooting
## List of command-line flags

Pass `-help` to VictoriaMetrics in order to see the list of supported command-line flags with their descriptions:

```text
  -influx.databaseNames array
     Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb
     Supports an array of values separated by comma or specified via multiple flags.
  -influx.maxLineSize size
     The maximum size in bytes for a single InfluxDB line during parsing
     Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144)
  -influxListenAddr string
     TCP and UDP address to listen for InfluxDB line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://<victoriametrics>:8428/write
  -influxMeasurementFieldSeparator string
     Separator for '{measurement}{separator}{field_name}' metric name when inserted via InfluxDB line protocol (default "_")
  -influxSkipMeasurement
     Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'
  -influxSkipSingleField
     Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field
  -influxTrimTimestamp duration
     Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
  -insert.maxQueueDuration duration
     The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
  -logNewSeries
```
# vmagent

See [Quick Start](#quick-start) for details.

* Can add, remove and modify labels (aka tags) via Prometheus relabeling. Can filter data before sending it to remote storage. See [these docs](#relabeling) for details.
* Accepts data via all ingestion protocols supported by VictoriaMetrics:
  * InfluxDB line protocol via `http://<vmagent>:8429/write`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf).
  * Graphite plaintext protocol if the `-graphiteListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd).
  * OpenTSDB telnet and http protocols if the `-opentsdbListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents).
  * Prometheus remote write protocol via `http://<vmagent>:8429/api/v1/write`.
  * JSON lines import protocol via `http://<vmagent>:8429/api/v1/import`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format).
  * Native data import protocol via `http://<vmagent>:8429/api/v1/import/native`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-native-format).
  * Prometheus exposition format via `http://<vmagent>:8429/api/v1/import/prometheus`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-prometheus-exposition-format) for details.
  * Arbitrary CSV data via `http://<vmagent>:8429/api/v1/import/csv`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-csv-data).
* Can replicate collected metrics simultaneously to multiple remote storage systems.
* Works smoothly in environments with unstable connections to remote storage. If the remote storage is unavailable, the collected metrics are buffered at `-remoteWrite.tmpDataPath` and sent to the remote storage as soon as the connection is restored.
Example command line:

```
/path/to/vmagent -promscrape.config=/path/to/prometheus.yml -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
```

If you only need to collect InfluxDB data, then the following command is sufficient:

```
/path/to/vmagent -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
```

Then send InfluxDB data to `http://vmagent-host:8429`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for more details.

`vmagent` is also available in [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags).
## Relabeling

`vmagent` and VictoriaMetrics support Prometheus-compatible relabeling. They provide the following additional actions on top of the actions from [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config):

* `replace_all`: replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`.
* `labelmap_all`: replaces all of the occurrences of `regex` in all the label names with the `replacement`.
* `keep_if_equal`: keeps the entry if all the label values from `source_labels` are equal.
* `drop_if_equal`: drops the entry if all the label values from `source_labels` are equal.
* `keep_metrics`: keeps all the metrics with names matching the given `regex`.
* `drop_metrics`: drops all the metrics with names matching the given `regex`.

The `regex` value can be split into multiple lines for improved readability and maintainability. These lines are automatically joined with the `|` char when parsed. For example, the following configs are equivalent:

```yaml
- action: keep_metrics
  regex: "metric_a|metric_b|foo_.+"
```

```yaml
- action: keep_metrics
  regex:
  - "metric_a"
  - "metric_b"
  - "foo_.+"
```
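As an illustration of the extra actions above, a relabeling config sketch; the metric and label names are made up, and the file would be passed to `vmagent` via a relabeling flag such as `-remoteWrite.relabelConfig`:

```bash
# Illustrative relabeling rules using the extra actions described above.
cat > /tmp/relabel.yml <<'EOF'
# Keep a sample only when its `instance` and `host` label values are equal.
- action: keep_if_equal
  source_labels: [instance, host]
# Drop metrics whose names match the regex.
- action: drop_metrics
  regex: "debug_.+"
EOF
```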
## Prometheus staleness markers

`vmagent` sends [Prometheus staleness markers](https://www.robustperception.io/staleness-and-promql) to `-remoteWrite.url` in the following cases:

* If they are passed to `vmagent` via the [Prometheus remote_write protocol](#prometheus-remote_write-proxy).
* If the metric disappears from the list of scraped metrics, then a stale marker is sent for this particular metric.
* If the scrape target becomes temporarily unavailable, then stale markers are sent for all the metrics scraped from this target.
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.
* Stale markers are sent for all the scraped metrics on graceful shutdown of `vmagent`.

Prometheus staleness markers aren't sent in [stream parsing mode](#stream-parsing-mode) or if the `-promscrape.noStaleMarkers` command-line flag is set.
## Stream parsing mode

By default `vmagent` reads the full response from a scrape target into memory, then parses it, applies [relabeling](#relabeling) and then pushes the resulting metrics to the configured `-remoteWrite.url`. This mode works well for the majority of cases when the scrape target exposes a small number of metrics (e.g. less than 10 thousand). But this mode may take big amounts of memory when the scrape target exposes a big number of metrics. In this case it is recommended to enable stream parsing mode. When this mode is enabled, `vmagent` reads the response from the scrape target in chunks, immediately processes every chunk and pushes the processed metrics to remote storage. This allows saving memory when scraping targets that expose millions of metrics. Stream parsing mode may be enabled in the following places:

- Via the `-promscrape.streamParse` command-line flag. In this case all the scrape targets defined in the file pointed to by `-promscrape.config` are scraped in stream parsing mode.
- Via the `stream_parse: true` option at the `scrape_configs` section. In this case all the scrape targets defined in this section are scraped in stream parsing mode.
- Via the `__stream_parse__=true` label, which can be set via [relabeling](#relabeling) at the `relabel_configs` section. In this case stream parsing mode is enabled for the corresponding scrape targets. Typical use case: to set the label via [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) for targets exposing a big number of metrics.

Examples:

```yml
scrape_configs:
- job_name: 'big-federate'
  stream_parse: true
  static_configs:
  - targets:
    - big-prometheus1
    - big-prometheus2
  honor_labels: true
  metrics_path: /federate
  params:
    'match[]': ['{__name__!=""}']
```

Note that the `sample_limit` option doesn't prevent data from being pushed to remote storage if stream parsing is enabled, because the parsed data is pushed to remote storage as soon as it is parsed.
## Cardinality limiter

By default `vmagent` doesn't limit the number of time series each scrape target can expose. The limit can be enforced in the following places:

- Via the `-promscrape.seriesLimitPerTarget` command-line option. This limit is applied individually to all the scrape targets defined in the file pointed to by `-promscrape.config`.
- Via the `series_limit` config option at the `scrape_config` section. This limit is applied individually to all the scrape targets defined in the given `scrape_config` (a config sketch is shown after this list).
- Via the `__series_limit__` label, which can be set with [relabeling](#relabeling) at the `relabel_configs` section. This limit is applied to the corresponding scrape targets. Typical use case: to set the limit via [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) for targets which may expose too high a number of time series.

All the scraped metrics are dropped for time series exceeding the given limit. The exceeded limit can be [monitored](#monitoring) via the `promscrape_series_limit_rows_dropped_total` metric.

See also the `sample_limit` option at the [scrape_config section](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
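A sketch of the `series_limit` option from the list above; the job name, target and limit value are illustrative:

```bash
# Scrape config enforcing a per-target series limit of 1000; names are made up.
cat > /tmp/scrape.yml <<'EOF'
scrape_configs:
  - job_name: node-exporter
    series_limit: 1000
    static_configs:
      - targets: ['localhost:9100']
EOF
/path/to/vmagent -promscrape.config=/tmp/scrape.yml \
  -remoteWrite.url=http://victoria-metrics-host:8428/api/v1/write
```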
## Troubleshooting

* It may be useful to perform `vmagent` rolling updates without any scrape loss.

* `vmagent` drops data blocks if remote storage replies with `400 Bad Request` and `409 Conflict` HTTP responses. The number of dropped blocks can be monitored via the `vmagent_remotewrite_packets_dropped_total` metric exported at the [/metrics page](#monitoring).

* Use `-remoteWrite.queues=1` when `-remoteWrite.url` points to remote storage which doesn't accept out-of-order samples (aka data backfilling). Such storage systems include Prometheus, Cortex and Thanos, which typically emit `out of order sample` errors. The best solution is to use remote storage with [backfilling support](https://docs.victoriametrics.com/#backfilling). A sketch of this setting follows this list.

* `vmagent` buffers scraped data in the `-remoteWrite.tmpDataPath` directory until it is sent to `-remoteWrite.url`. The directory can grow large when remote storage is unavailable for extended periods of time and if `-remoteWrite.maxDiskUsagePerURL` isn't set.
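A sketch of the `-remoteWrite.queues=1` advice from the list above; the remote storage URL is a placeholder:

```bash
# Single remote-write queue for storages that reject out-of-order samples.
/path/to/vmagent -promscrape.config=/path/to/prometheus.yml \
  -remoteWrite.url=http://<remote-storage>/api/v1/write \
  -remoteWrite.queues=1
```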
@ -604,18 +641,18 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-influx.maxLineSize size
|
||||
The maximum size in bytes for a single Influx line during parsing
|
||||
The maximum size in bytes for a single InfluxDB line during parsing
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144)
|
||||
-influxListenAddr string
|
||||
TCP and UDP address to listen for Influx line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://<vmagent>:8429/write
|
||||
TCP and UDP address to listen for InfluxDB line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://<vmagent>:8429/write
|
||||
-influxMeasurementFieldSeparator string
|
||||
Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol (default "_")
|
||||
Separator for '{measurement}{separator}{field_name}' metric name when inserted via InfluxDB line protocol (default "_")
|
||||
-influxSkipMeasurement
|
||||
Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'
|
||||
-influxSkipSingleField
|
||||
Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if Influx line contains only a single field
|
||||
Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field
|
||||
-influxTrimTimestamp duration
|
||||
Trim timestamps for Influx line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
-insert.maxQueueDuration duration
|
||||
The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
|
||||
-loggerDisableTimestamps
|
||||
|
|
|
@ -21,8 +21,8 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol")
|
||||
skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if Influx line contains only a single field")
|
||||
measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for '{measurement}{separator}{field_name}' metric name when inserted via InfluxDB line protocol")
|
||||
skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field")
|
||||
skipMeasurement = flag.Bool("influxSkipMeasurement", false, "Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'")
|
||||
)
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ var (
|
|||
httpListenAddr = flag.String("httpListenAddr", ":8429", "TCP address to listen for http connections. "+
|
||||
"Set this flag to empty value in order to disable listening on any port. This mode may be useful for running multiple vmagent instances on the same server. "+
|
||||
"Note that /targets and /metrics pages aren't available if -httpListenAddr=''")
|
||||
influxListenAddr = flag.String("influxListenAddr", "", "TCP and UDP address to listen for Influx line protocol data. Usually :8189 must be set. Doesn't work if empty. "+
|
||||
influxListenAddr = flag.String("influxListenAddr", "", "TCP and UDP address to listen for InfluxDB line protocol data. Usually :8189 must be set. Doesn't work if empty. "+
|
||||
"This flag isn't needed when ingesting data over HTTP - just send it to http://<vmagent>:8429/write")
|
||||
graphiteListenAddr = flag.String("graphiteListenAddr", "", "TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty")
|
||||
opentsdbListenAddr = flag.String("opentsdbListenAddr", "", "TCP and UDP address to listen for OpenTSDB metrics. "+
|
||||
|
|
|
@ -24,7 +24,6 @@ may fail;
|
|||
* by default, rules execution is sequential within one group, but persisting of execution results to remote
|
||||
storage is asynchronous. Hence, user shouldn't rely on recording rules chaining when result of previous
|
||||
recording rule is reused in next one;
|
||||
* `vmalert` has no UI, just an API for getting groups and rules statuses.
|
||||
|
||||
## QuickStart
|
||||
|
||||
|
@ -233,7 +232,7 @@ groups:
|
|||
|
||||
If `-clusterMode` is enabled, then `-datasource.url`, `-remoteRead.url` and `-remoteWrite.url` must
|
||||
contain only the hostname without tenant id. For example: `-datasource.url=http://vmselect:8481`.
|
||||
`vmselect` automatically adds the specified tenant to urls per each recording rule in this case.
|
||||
`vmalert` automatically adds the specified tenant to urls per each recording rule in this case.
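
For illustration, a hedged Go sketch of the tenant prefixing described above (`buildTenantURL` is a hypothetical helper, and the `/select/<tenant>/prometheus` path layout is an assumption based on the cluster docs rather than vmalert's code):

```go
package main

import "fmt"

// buildTenantURL combines the bare -datasource.url from -clusterMode
// with the tenant id taken from a recording rule.
func buildTenantURL(base, tenant string) string {
	return fmt.Sprintf("%s/select/%s/prometheus", base, tenant)
}

func main() {
	fmt.Println(buildTenantURL("http://vmselect:8481", "42"))
	// prints: http://vmselect:8481/select/42/prometheus
}
```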
|
||||
|
||||
The enterprise version of vmalert is available in `vmutils-*-enterprise.tar.gz` files
|
||||
at [release page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) and in `*-enterprise`
|
||||
|
@ -243,6 +242,7 @@ tags at [Docker Hub](https://hub.docker.com/r/victoriametrics/vmalert/tags).
|
|||
### WEB
|
||||
|
||||
`vmalert` runs a web-server (`-httpListenAddr`) for serving metrics and alerts endpoints:
|
||||
* `http://<vmalert-addr>` - UI;
|
||||
* `http://<vmalert-addr>/api/v1/groups` - list of all loaded groups and rules;
|
||||
* `http://<vmalert-addr>/api/v1/alerts` - list of all active alerts;
|
||||
* `http://<vmalert-addr>/api/v1/<groupID>/<alertID>/status" ` - get alert status by ID.
|
||||
|
@ -371,8 +371,14 @@ The shortlist of configuration flags is the following:
|
|||
Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.
|
||||
-datasource.basicAuth.password string
|
||||
Optional basic auth password for -datasource.url
|
||||
-datasource.basicAuth.passwordFile string
|
||||
Optional path to basic auth password to use for -datasource.url
|
||||
-datasource.basicAuth.username string
|
||||
Optional basic auth username for -datasource.url
|
||||
-datasource.bearerToken string
|
||||
Optional bearer auth token to use for -datasource.url.
|
||||
-datasource.bearerTokenFile string
|
||||
Optional path to bearer token file to use for -datasource.url.
|
||||
-datasource.lookback duration
|
||||
Lookback defines how far into the past to look when evaluating queries. For example, if the datasource.lookback=5m then param "time" with value now()-5m will be added to every query.
|
||||
-datasource.maxIdleConnections int
|
||||
|
@ -482,8 +488,14 @@ The shortlist of configuration flags is the following:
|
|||
Auth key for /debug/pprof. It overrides httpAuth settings
|
||||
-remoteRead.basicAuth.password string
|
||||
Optional basic auth password for -remoteRead.url
|
||||
-remoteRead.basicAuth.passwordFile string
|
||||
Optional path to basic auth password to use for -remoteRead.url
|
||||
-remoteRead.basicAuth.username string
|
||||
Optional basic auth username for -remoteRead.url
|
||||
-remoteRead.bearerToken string
|
||||
Optional bearer auth token to use for -remoteRead.url.
|
||||
-remoteRead.bearerTokenFile string
|
||||
Optional path to bearer token file to use for -remoteRead.url.
|
||||
-remoteRead.ignoreRestoreErrors
|
||||
Whether to ignore errors from remote storage when restoring alerts state on startup. (default true)
|
||||
-remoteRead.lookback duration
|
||||
|
@ -502,8 +514,14 @@ The shortlist of configuration flags is the following:
|
|||
Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has successfully persisted its state. E.g. http://127.0.0.1:8428
|
||||
-remoteWrite.basicAuth.password string
|
||||
Optional basic auth password for -remoteWrite.url
|
||||
-remoteWrite.basicAuth.passwordFile string
|
||||
Optional path to basic auth password to use for -remoteWrite.url
|
||||
-remoteWrite.basicAuth.username string
|
||||
Optional basic auth username for -remoteWrite.url
|
||||
-remoteWrite.bearerToken string
|
||||
Optional bearer auth token to use for -remoteWrite.url.
|
||||
-remoteWrite.bearerTokenFile string
|
||||
Optional path to bearer token file to use for -remoteWrite.url.
|
||||
-remoteWrite.concurrency int
|
||||
Defines number of writers for concurrent writing to the remote-write endpoint (default 1)
|
||||
-remoteWrite.disablePathAppend
|
||||
|
@ -547,6 +565,8 @@ The shortlist of configuration flags is the following:
|
|||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-rule.configCheckInterval duration
|
||||
Interval for checking for changes in '-rule' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
|
||||
-rule.maxResolveDuration duration
|
||||
Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group.
|
||||
-rule.validateExpressions
|
||||
Whether to validate rules expressions via MetricsQL engine (default true)
|
||||
-rule.validateTemplates
|
||||
|
|
|
@ -419,6 +419,7 @@ func (ar *AlertingRule) newAlertAPI(a notifier.Alert) *APIAlert {
|
|||
// encode as strings to avoid rounding
|
||||
ID: fmt.Sprintf("%d", a.ID),
|
||||
GroupID: fmt.Sprintf("%d", a.GroupID),
|
||||
RuleID: fmt.Sprintf("%d", ar.RuleID),
|
||||
|
||||
Name: a.Name,
|
||||
Expression: ar.Expr,
|
||||
|
@ -426,7 +427,7 @@ func (ar *AlertingRule) newAlertAPI(a notifier.Alert) *APIAlert {
|
|||
Annotations: a.Annotations,
|
||||
State: a.State.String(),
|
||||
ActiveAt: a.Start,
|
||||
Value: strconv.FormatFloat(a.Value, 'e', -1, 64),
|
||||
Value: strconv.FormatFloat(a.Value, 'f', -1, 32),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -12,9 +12,12 @@ import (
|
|||
var (
|
||||
addr = flag.String("datasource.url", "", "VictoriaMetrics or vmselect url. Required parameter. "+
|
||||
"E.g. http://127.0.0.1:8428")
|
||||
appendTypePrefix = flag.Bool("datasource.appendTypePrefix", false, "Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.")
|
||||
basicAuthUsername = flag.String("datasource.basicAuth.username", "", "Optional basic auth username for -datasource.url")
|
||||
basicAuthPassword = flag.String("datasource.basicAuth.password", "", "Optional basic auth password for -datasource.url")
|
||||
appendTypePrefix = flag.Bool("datasource.appendTypePrefix", false, "Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.")
|
||||
basicAuthUsername = flag.String("datasource.basicAuth.username", "", "Optional basic auth username for -datasource.url")
|
||||
basicAuthPassword = flag.String("datasource.basicAuth.password", "", "Optional basic auth password for -datasource.url")
|
||||
basicAuthPasswordFile = flag.String("datasource.basicAuth.passwordFile", "", "Optional path to basic auth password to use for -datasource.url")
|
||||
bearerToken = flag.String("datasource.bearerToken", "", "Optional bearer auth token to use for -datasource.url.")
|
||||
bearerTokenFile = flag.String("datasource.bearerTokenFile", "", "Optional path to bearer token file to use for -datasource.url.")
|
||||
|
||||
tlsInsecureSkipVerify = flag.Bool("datasource.tlsInsecureSkipVerify", false, "Whether to skip tls verification when connecting to -datasource.url")
|
||||
tlsCertFile = flag.String("datasource.tlsCertFile", "", "Optional path to client-side TLS certificate file to use when connecting to -datasource.url")
|
||||
|
@ -57,10 +60,14 @@ func Init(extraParams []Param) (QuerierBuilder, error) {
|
|||
})
|
||||
}
|
||||
|
||||
authCfg, err := utils.AuthConfig(*basicAuthUsername, *basicAuthPassword, *basicAuthPasswordFile, *bearerToken, *bearerTokenFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure auth: %w", err)
|
||||
}
|
||||
|
||||
return &VMStorage{
|
||||
c: &http.Client{Transport: tr},
|
||||
basicAuthUser: *basicAuthUsername,
|
||||
basicAuthPass: *basicAuthPassword,
|
||||
authCfg: authCfg,
|
||||
datasourceURL: strings.TrimSuffix(*addr, "/"),
|
||||
appendTypePrefix: *appendTypePrefix,
|
||||
lookBack: *lookBack,
|
||||
|
|
|
@ -7,14 +7,15 @@ import (
|
|||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||
)
|
||||
|
||||
// VMStorage represents vmstorage entity with ability to read and write metrics
|
||||
type VMStorage struct {
|
||||
c *http.Client
|
||||
authCfg *promauth.Config
|
||||
datasourceURL string
|
||||
basicAuthUser string
|
||||
basicAuthPass string
|
||||
appendTypePrefix bool
|
||||
lookBack time.Duration
|
||||
queryStep time.Duration
|
||||
|
@ -29,9 +30,8 @@ type VMStorage struct {
|
|||
func (s *VMStorage) Clone() *VMStorage {
|
||||
return &VMStorage{
|
||||
c: s.c,
|
||||
authCfg: s.authCfg,
|
||||
datasourceURL: s.datasourceURL,
|
||||
basicAuthUser: s.basicAuthUser,
|
||||
basicAuthPass: s.basicAuthPass,
|
||||
lookBack: s.lookBack,
|
||||
queryStep: s.queryStep,
|
||||
appendTypePrefix: s.appendTypePrefix,
|
||||
|
@ -57,11 +57,10 @@ func (s *VMStorage) BuildWithParams(params QuerierParams) Querier {
|
|||
}
|
||||
|
||||
// NewVMStorage is a constructor for VMStorage
|
||||
func NewVMStorage(baseURL, basicAuthUser, basicAuthPass string, lookBack time.Duration, queryStep time.Duration, appendTypePrefix bool, c *http.Client) *VMStorage {
|
||||
func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Duration, queryStep time.Duration, appendTypePrefix bool, c *http.Client) *VMStorage {
|
||||
return &VMStorage{
|
||||
c: c,
|
||||
basicAuthUser: basicAuthUser,
|
||||
basicAuthPass: basicAuthPass,
|
||||
authCfg: authCfg,
|
||||
datasourceURL: strings.TrimSuffix(baseURL, "/"),
|
||||
appendTypePrefix: appendTypePrefix,
|
||||
lookBack: lookBack,
|
||||
|
@ -149,8 +148,10 @@ func (s *VMStorage) newRequestPOST() (*http.Request, error) {
|
|||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json; charset=utf-8")
|
||||
if s.basicAuthPass != "" {
|
||||
req.SetBasicAuth(s.basicAuthUser, s.basicAuthPass)
|
||||
if s.authCfg != nil {
|
||||
if auth := s.authCfg.GetAuthHeader(); auth != "" {
|
||||
req.Header.Set("Authorization", auth)
|
||||
}
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
|
|
@ -10,14 +10,20 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||
)
|
||||
|
||||
var (
|
||||
ctx = context.Background()
|
||||
basicAuthName = "foo"
|
||||
basicAuthPass = "bar"
|
||||
query = "vm_rows"
|
||||
queryRender = "constantLine(10)"
|
||||
baCfg = &promauth.BasicAuthConfig{
|
||||
Username: basicAuthName,
|
||||
Password: basicAuthPass,
|
||||
}
|
||||
query = "vm_rows"
|
||||
queryRender = "constantLine(10)"
|
||||
)
|
||||
|
||||
func TestVMInstantQuery(t *testing.T) {
|
||||
|
@ -73,7 +79,11 @@ func TestVMInstantQuery(t *testing.T) {
|
|||
srv := httptest.NewServer(mux)
|
||||
defer srv.Close()
|
||||
|
||||
s := NewVMStorage(srv.URL, basicAuthName, basicAuthPass, time.Minute, 0, false, srv.Client())
|
||||
authCfg, err := promauth.NewConfig(".", nil, baCfg, "", "", nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected: %s", err)
|
||||
}
|
||||
s := NewVMStorage(srv.URL, authCfg, time.Minute, 0, false, srv.Client())
|
||||
|
||||
p := NewPrometheusType()
|
||||
pq := s.BuildWithParams(QuerierParams{DataSourceType: &p, EvaluationInterval: 15 * time.Second})
|
||||
|
@ -179,12 +189,16 @@ func TestVMRangeQuery(t *testing.T) {
|
|||
srv := httptest.NewServer(mux)
|
||||
defer srv.Close()
|
||||
|
||||
s := NewVMStorage(srv.URL, basicAuthName, basicAuthPass, time.Minute, 0, false, srv.Client())
|
||||
authCfg, err := promauth.NewConfig(".", nil, baCfg, "", "", nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected: %s", err)
|
||||
}
|
||||
s := NewVMStorage(srv.URL, authCfg, time.Minute, 0, false, srv.Client())
|
||||
|
||||
p := NewPrometheusType()
|
||||
pq := s.BuildWithParams(QuerierParams{DataSourceType: &p, EvaluationInterval: 15 * time.Second})
|
||||
|
||||
_, err := pq.QueryRange(ctx, query, time.Now(), time.Time{})
|
||||
_, err = pq.QueryRange(ctx, query, time.Now(), time.Time{})
|
||||
expectError(t, err, "is missing")
|
||||
|
||||
_, err = pq.QueryRange(ctx, query, time.Time{}, time.Now())
|
||||
|
@ -216,6 +230,10 @@ func TestVMRangeQuery(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRequestParams(t *testing.T) {
|
||||
authCfg, err := promauth.NewConfig(".", nil, baCfg, "", "", nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected: %s", err)
|
||||
}
|
||||
query := "up"
|
||||
timestamp := time.Date(2001, 2, 3, 4, 5, 6, 0, time.UTC)
|
||||
testCases := []struct {
|
||||
|
@ -308,10 +326,7 @@ func TestRequestParams(t *testing.T) {
|
|||
{
|
||||
"basic auth",
|
||||
false,
|
||||
&VMStorage{
|
||||
basicAuthUser: "foo",
|
||||
basicAuthPass: "bar",
|
||||
},
|
||||
&VMStorage{authCfg: authCfg},
|
||||
func(t *testing.T, r *http.Request) {
|
||||
u, p, _ := r.BasicAuth()
|
||||
checkEqualString(t, "foo", u)
|
||||
|
@ -321,10 +336,7 @@ func TestRequestParams(t *testing.T) {
|
|||
{
|
||||
"basic auth range",
|
||||
true,
|
||||
&VMStorage{
|
||||
basicAuthUser: "foo",
|
||||
basicAuthPass: "bar",
|
||||
},
|
||||
&VMStorage{authCfg: authCfg},
|
||||
func(t *testing.T, r *http.Request) {
|
||||
u, p, _ := r.BasicAuth()
|
||||
checkEqualString(t, "foo", u)
|
||||
|
|
|
@ -277,8 +277,8 @@ func (g *Group) start(ctx context.Context, nts []notifier.Notifier, rw *remotewr
|
|||
case <-t.C:
|
||||
g.metrics.iterationTotal.Inc()
|
||||
iterationStart := time.Now()
|
||||
|
||||
errs := e.execConcurrently(ctx, g.Rules, g.Concurrency, g.Interval)
|
||||
resolveDuration := getResolveDuration(g.Interval)
|
||||
errs := e.execConcurrently(ctx, g.Rules, g.Concurrency, resolveDuration)
|
||||
for err := range errs {
|
||||
if err != nil {
|
||||
logger.Errorf("group %q: %s", g.Name, err)
|
||||
|
@ -290,6 +290,17 @@ func (g *Group) start(ctx context.Context, nts []notifier.Notifier, rw *remotewr
|
|||
}
|
||||
}
|
||||
|
||||
// resolveDuration for alerts is equal to 3 evaluation intervals,
|
||||
// so that the notifier can resolve the alert automatically
|
||||
// in case vmalert stops sending updates for some reason.
|
||||
func getResolveDuration(groupInterval time.Duration) time.Duration {
|
||||
resolveInterval := groupInterval * 3
|
||||
if *maxResolveDuration > 0 && (resolveInterval > *maxResolveDuration) {
|
||||
return *maxResolveDuration
|
||||
}
|
||||
return resolveInterval
|
||||
}
|
||||
|
||||
type executor struct {
|
||||
notifiers []eNotifier
|
||||
rw *remotewrite.Client
|
||||
|
@ -301,12 +312,12 @@ type eNotifier struct {
|
|||
alertsSendErrors *counter
|
||||
}
|
||||
|
||||
func (e *executor) execConcurrently(ctx context.Context, rules []Rule, concurrency int, interval time.Duration) chan error {
|
||||
func (e *executor) execConcurrently(ctx context.Context, rules []Rule, concurrency int, resolveDuration time.Duration) chan error {
|
||||
res := make(chan error, len(rules))
|
||||
if concurrency == 1 {
|
||||
// fast path
|
||||
for _, rule := range rules {
|
||||
res <- e.exec(ctx, rule, interval)
|
||||
res <- e.exec(ctx, rule, resolveDuration)
|
||||
}
|
||||
close(res)
|
||||
return res
|
||||
|
@ -319,7 +330,7 @@ func (e *executor) execConcurrently(ctx context.Context, rules []Rule, concurren
|
|||
sem <- struct{}{}
|
||||
wg.Add(1)
|
||||
go func(r Rule) {
|
||||
res <- e.exec(ctx, r, interval)
|
||||
res <- e.exec(ctx, r, resolveDuration)
|
||||
<-sem
|
||||
wg.Done()
|
||||
}(rule)
|
||||
|
@ -339,7 +350,7 @@ var (
|
|||
remoteWriteErrors = metrics.NewCounter(`vmalert_remotewrite_errors_total`)
|
||||
)
|
||||
|
||||
func (e *executor) exec(ctx context.Context, rule Rule, interval time.Duration) error {
|
||||
func (e *executor) exec(ctx context.Context, rule Rule, resolveDuration time.Duration) error {
|
||||
execTotal.Inc()
|
||||
|
||||
tss, err := rule.Exec(ctx)
|
||||
|
@ -365,10 +376,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, interval time.Duration)
|
|||
for _, a := range ar.alerts {
|
||||
switch a.State {
|
||||
case notifier.StateFiring:
|
||||
// set End to execStart + 3 intervals
|
||||
// so notifier can resolve it automatically if `vmalert`
|
||||
// won't be able to send resolve for some reason
|
||||
a.End = time.Now().Add(3 * interval)
|
||||
a.End = time.Now().Add(resolveDuration)
|
||||
alerts = append(alerts, *a)
|
||||
case notifier.StateInactive:
|
||||
// set End to execStart to notify
|
||||
|
|
|
@ -2,6 +2,7 @@ package main
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -235,3 +236,27 @@ func TestGroupStart(t *testing.T) {
|
|||
g.close()
|
||||
<-finished
|
||||
}
|
||||
|
||||
func TestResolveDuration(t *testing.T) {
|
||||
testCases := []struct {
|
||||
groupInterval time.Duration
|
||||
maxDuration time.Duration
|
||||
expected time.Duration
|
||||
}{
|
||||
{time.Minute, 0, 3 * time.Minute},
|
||||
{3 * time.Minute, 0, 9 * time.Minute},
|
||||
{time.Minute, 2 * time.Minute, 2 * time.Minute},
|
||||
{0, 0, 0},
|
||||
}
|
||||
defaultResolveDuration := *maxResolveDuration
|
||||
defer func() { *maxResolveDuration = defaultResolveDuration }()
|
||||
for _, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%v-%v-%v", tc.groupInterval, tc.expected, tc.maxDuration), func(t *testing.T) {
|
||||
*maxResolveDuration = tc.maxDuration
|
||||
got := getResolveDuration(tc.groupInterval)
|
||||
if got != tc.expected {
|
||||
t.Errorf("expected to have %v; got %v", tc.expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -42,6 +42,8 @@ Rule files may contain %{ENV_VAR} placeholders, which are substituted by the cor
|
|||
|
||||
validateTemplates = flag.Bool("rule.validateTemplates", true, "Whether to validate annotation and label templates")
|
||||
validateExpressions = flag.Bool("rule.validateExpressions", true, "Whether to validate rules expressions via MetricsQL engine")
|
||||
maxResolveDuration = flag.Duration("rule.maxResolveDuration", 0, "Limits the maximum duration for automatic alert expiration, "+
|
||||
"which is by default equal to 3 evaluation intervals of the parent group.")
|
||||
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier")
|
||||
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service.
|
||||
eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used`)
|
||||
|
|
|
@ -94,14 +94,18 @@ groups:
|
|||
*rulesCheckInterval = 200 * time.Millisecond
|
||||
*rulePath = []string{f.Name()}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
m := &manager{
|
||||
querierBuilder: &fakeQuerier{},
|
||||
groups: make(map[uint64]*Group),
|
||||
labels: map[string]string{},
|
||||
}
|
||||
go configReload(ctx, m, nil)
|
||||
|
||||
syncCh := make(chan struct{})
|
||||
go func() {
|
||||
configReload(ctx, m, nil)
|
||||
close(syncCh)
|
||||
}()
|
||||
|
||||
lenLocked := func(m *manager) int {
|
||||
m.groupsMu.RLock()
|
||||
|
@ -138,6 +142,9 @@ groups:
|
|||
if groupsLen != 1 { // should remain unchanged
|
||||
t.Fatalf("expected to have exactly 1 group loaded; got %d", groupsLen)
|
||||
}
|
||||
|
||||
cancel()
|
||||
<-syncCh
|
||||
}
|
||||
|
||||
func writeToFile(t *testing.T, file, b string) {
|
||||
|
|
|
@ -14,17 +14,26 @@ import (
|
|||
// Alert the triggered alert
|
||||
// TODO: Looks like alert name isn't unique
|
||||
type Alert struct {
|
||||
GroupID uint64
|
||||
Name string
|
||||
Labels map[string]string
|
||||
// GroupID contains the ID of the parent rules group
|
||||
GroupID uint64
|
||||
// Name represents Alert name
|
||||
Name string
|
||||
// Labels is the list of label-value pairs attached to the Alert
|
||||
Labels map[string]string
|
||||
// Annotations is the list of annotations generated on Alert evaluation
|
||||
Annotations map[string]string
|
||||
State AlertState
|
||||
|
||||
Expr string
|
||||
// State represents the current state of the Alert
|
||||
State AlertState
|
||||
// Expr contains expression that was executed to generate the Alert
|
||||
Expr string
|
||||
// Start defines the moment of time when Alert has triggered
|
||||
Start time.Time
|
||||
End time.Time
|
||||
// End defines the moment of time when Alert supposed to expire
|
||||
End time.Time
|
||||
// Value stores the value returned from evaluating expression from Expr field
|
||||
Value float64
|
||||
ID uint64
|
||||
// ID is the unique identifier for the Alert
|
||||
ID uint64
|
||||
}
|
||||
|
||||
// AlertState type indicates the Alert state
|
||||
|
|
|
@ -15,6 +15,10 @@ var (
|
|||
"E.g. http://127.0.0.1:8428")
|
||||
basicAuthUsername = flag.String("remoteRead.basicAuth.username", "", "Optional basic auth username for -remoteRead.url")
|
||||
basicAuthPassword = flag.String("remoteRead.basicAuth.password", "", "Optional basic auth password for -remoteRead.url")
|
||||
basicAuthPasswordFile = flag.String("remoteRead.basicAuth.passwordFile", "", "Optional path to basic auth password to use for -remoteRead.url")
|
||||
bearerToken = flag.String("remoteRead.bearerToken", "", "Optional bearer auth token to use for -remoteRead.url.")
|
||||
bearerTokenFile = flag.String("remoteRead.bearerTokenFile", "", "Optional path to bearer token file to use for -remoteRead.url.")
|
||||
|
||||
tlsInsecureSkipVerify = flag.Bool("remoteRead.tlsInsecureSkipVerify", false, "Whether to skip tls verification when connecting to -remoteRead.url")
|
||||
tlsCertFile = flag.String("remoteRead.tlsCertFile", "", "Optional path to client-side TLS certificate file to use when connecting to -remoteRead.url")
|
||||
tlsKeyFile = flag.String("remoteRead.tlsKeyFile", "", "Optional path to client-side TLS certificate key to use when connecting to -remoteRead.url")
|
||||
|
@ -34,6 +38,10 @@ func Init() (datasource.QuerierBuilder, error) {
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create transport: %w", err)
|
||||
}
|
||||
authCfg, err := utils.AuthConfig(*basicAuthUsername, *basicAuthPassword, *basicAuthPasswordFile, *bearerToken, *bearerTokenFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure auth: %w", err)
|
||||
}
|
||||
c := &http.Client{Transport: tr}
|
||||
return datasource.NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, 0, 0, false, c), nil
|
||||
return datasource.NewVMStorage(*addr, authCfg, 0, 0, false, c), nil
|
||||
}
|
||||
|
|
|
@ -13,8 +13,11 @@ var (
|
|||
addr = flag.String("remoteWrite.url", "", "Optional URL to VictoriaMetrics or vminsert where to persist alerts state "+
|
||||
"and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, "+
|
||||
"then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend")
|
||||
basicAuthUsername = flag.String("remoteWrite.basicAuth.username", "", "Optional basic auth username for -remoteWrite.url")
|
||||
basicAuthPassword = flag.String("remoteWrite.basicAuth.password", "", "Optional basic auth password for -remoteWrite.url")
|
||||
basicAuthUsername = flag.String("remoteWrite.basicAuth.username", "", "Optional basic auth username for -remoteWrite.url")
|
||||
basicAuthPassword = flag.String("remoteWrite.basicAuth.password", "", "Optional basic auth password for -remoteWrite.url")
|
||||
basicAuthPasswordFile = flag.String("remoteWrite.basicAuth.passwordFile", "", "Optional path to basic auth password to use for -remoteWrite.url")
|
||||
bearerToken = flag.String("remoteWrite.bearerToken", "", "Optional bearer auth token to use for -remoteWrite.url.")
|
||||
bearerTokenFile = flag.String("remoteWrite.bearerTokenFile", "", "Optional path to bearer token file to use for -remoteWrite.url.")
|
||||
|
||||
maxQueueSize = flag.Int("remoteWrite.maxQueueSize", 1e5, "Defines the max number of pending datapoints to remote write endpoint")
|
||||
maxBatchSize = flag.Int("remoteWrite.maxBatchSize", 1e3, "Defines the max number of timeseries to be flushed at once")
|
||||
|
@ -43,14 +46,18 @@ func Init(ctx context.Context) (*Client, error) {
|
|||
return nil, fmt.Errorf("failed to create transport: %w", err)
|
||||
}
|
||||
|
||||
authCfg, err := utils.AuthConfig(*basicAuthUsername, *basicAuthPassword, *basicAuthPasswordFile, *bearerToken, *bearerTokenFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure auth: %w", err)
|
||||
}
|
||||
|
||||
return NewClient(ctx, Config{
|
||||
Addr: *addr,
|
||||
AuthCfg: authCfg,
|
||||
Concurrency: *concurrency,
|
||||
MaxQueueSize: *maxQueueSize,
|
||||
MaxBatchSize: *maxBatchSize,
|
||||
FlushInterval: *flushInterval,
|
||||
BasicAuthUser: *basicAuthUsername,
|
||||
BasicAuthPass: *basicAuthPassword,
|
||||
DisablePathAppend: *disablePathAppend,
|
||||
Transport: t,
|
||||
})
|
||||
|
|
|
@ -10,10 +10,12 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/golang/snappy"
|
||||
)
|
||||
|
||||
// Client is an asynchronous HTTP client for writing
|
||||
|
@ -21,8 +23,8 @@ import (
|
|||
type Client struct {
|
||||
addr string
|
||||
c *http.Client
|
||||
authCfg *promauth.Config
|
||||
input chan prompbmarshal.TimeSeries
|
||||
baUser, baPass string
|
||||
flushInterval time.Duration
|
||||
maxBatchSize int
|
||||
maxQueueSize int
|
||||
|
@ -35,10 +37,8 @@ type Client struct {
|
|||
// Config is config for remote write.
|
||||
type Config struct {
|
||||
// Addr of remote storage
|
||||
Addr string
|
||||
|
||||
BasicAuthUser string
|
||||
BasicAuthPass string
|
||||
Addr string
|
||||
AuthCfg *promauth.Config
|
||||
|
||||
// Concurrency defines number of readers that
|
||||
// concurrently read from the queue and flush data
|
||||
|
@ -98,8 +98,7 @@ func NewClient(ctx context.Context, cfg Config) (*Client, error) {
|
|||
Transport: cfg.Transport,
|
||||
},
|
||||
addr: strings.TrimSuffix(cfg.Addr, "/"),
|
||||
baUser: cfg.BasicAuthUser,
|
||||
baPass: cfg.BasicAuthPass,
|
||||
authCfg: cfg.AuthCfg,
|
||||
flushInterval: cfg.FlushInterval,
|
||||
maxBatchSize: cfg.MaxBatchSize,
|
||||
maxQueueSize: cfg.MaxQueueSize,
|
||||
|
@ -232,8 +231,10 @@ func (c *Client) send(ctx context.Context, data []byte) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("failed to create new HTTP request: %w", err)
|
||||
}
|
||||
if c.baPass != "" {
|
||||
req.SetBasicAuth(c.baUser, c.baPass)
|
||||
if c.authCfg != nil {
|
||||
if auth := c.authCfg.GetAuthHeader(); auth != "" {
|
||||
req.Header.Set("Authorization", auth)
|
||||
}
|
||||
}
|
||||
if !c.disablePathAppend {
|
||||
req.URL.Path += writePath
|
||||
|
|
36
app/vmalert/tpl/footer.qtpl
Normal file
|
@ -0,0 +1,36 @@
|
|||
{% func Footer() %}
|
||||
</main>
|
||||
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/js/bootstrap.bundle.min.js" integrity="sha384-MrcW6ZMFYlzcLA8Nl+NtUVF0sA7MsXsP1UyJoMp4YLEuNSfAP+JcXn/tWtIaxVXM" crossorigin="anonymous"></script>
|
||||
<script src="https://code.jquery.com/jquery-3.3.1.min.js"></script>
|
||||
<script type="text/javascript">
|
||||
function expandAll() {
|
||||
$('.collapse').addClass('show');
|
||||
}
|
||||
function collapseAll() {
|
||||
$('.collapse').removeClass('show');
|
||||
}
|
||||
|
||||
$(document).ready(function() {
|
||||
// prevent collapse logic on link click
|
||||
$(".group-heading a").click(function(e) {
|
||||
e.stopPropagation();
|
||||
});
|
||||
|
||||
$(".group-heading").click(function(e) {
|
||||
let target = $(this).attr('data-bs-target');
|
||||
let el = $('#'+target);
|
||||
new bootstrap.Collapse(el, {
|
||||
toggle: true
|
||||
});
|
||||
});
|
||||
|
||||
var hash = window.location.hash.substr(1);
|
||||
let group = $('#'+hash);
|
||||
if (group.length > 0) {
|
||||
group.click();
|
||||
}
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
{% endfunc %}
|
86
app/vmalert/tpl/footer.qtpl.go
Normal file
|
@ -0,0 +1,86 @@
|
|||
// Code generated by qtc from "footer.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmalert/tpl/footer.qtpl:1
|
||||
package tpl
|
||||
|
||||
//line app/vmalert/tpl/footer.qtpl:1
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmalert/tpl/footer.qtpl:1
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmalert/tpl/footer.qtpl:1
|
||||
func StreamFooter(qw422016 *qt422016.Writer) {
|
||||
//line app/vmalert/tpl/footer.qtpl:1
|
||||
qw422016.N().S(`
|
||||
</main>
|
||||
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/js/bootstrap.bundle.min.js" integrity="sha384-MrcW6ZMFYlzcLA8Nl+NtUVF0sA7MsXsP1UyJoMp4YLEuNSfAP+JcXn/tWtIaxVXM" crossorigin="anonymous"></script>
|
||||
<script src="https://code.jquery.com/jquery-3.3.1.min.js"></script>
|
||||
<script type="text/javascript">
|
||||
function expandAll() {
|
||||
$('.collapse').addClass('show');
|
||||
}
|
||||
function collapseAll() {
|
||||
$('.collapse').removeClass('show');
|
||||
}
|
||||
|
||||
$(document).ready(function() {
|
||||
// prevent collapse logic on link click
|
||||
$(".group-heading a").click(function(e) {
|
||||
e.stopPropagation();
|
||||
});
|
||||
|
||||
$(".group-heading").click(function(e) {
|
||||
let target = $(this).attr('data-bs-target');
|
||||
let el = $('#'+target);
|
||||
new bootstrap.Collapse(el, {
|
||||
toggle: true
|
||||
});
|
||||
});
|
||||
|
||||
var hash = window.location.hash.substr(1);
|
||||
let group = $('#'+hash);
|
||||
if (group.length > 0) {
|
||||
group.click();
|
||||
}
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
`)
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
func WriteFooter(qq422016 qtio422016.Writer) {
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
StreamFooter(qw422016)
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
func Footer() string {
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
WriteFooter(qb422016)
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
return qs422016
|
||||
//line app/vmalert/tpl/footer.qtpl:36
|
||||
}
|
43
app/vmalert/tpl/header.qtpl
Normal file
|
@ -0,0 +1,43 @@
|
|||
{% func Header(title string, pages []NavItem) %}
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<title>vmalert{% if title != "" %} - {%s title %}{% endif %}</title>
|
||||
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossorigin="anonymous">
|
||||
<style>
|
||||
body{
|
||||
min-height: 75rem;
|
||||
padding-top: 4.5rem;
|
||||
}
|
||||
pre {
|
||||
overflow: scroll;
|
||||
max-width: 600px;
|
||||
min-height: 30px;
|
||||
}
|
||||
.group-heading {
|
||||
cursor: pointer;
|
||||
padding: 5px;
|
||||
margin-top: 5px;
|
||||
position: relative;
|
||||
}
|
||||
.group-heading .anchor {
|
||||
position:absolute;
|
||||
top:-60px;
|
||||
}
|
||||
.group-heading span {
|
||||
float: right;
|
||||
margin-left: 5px;
|
||||
margin-right: 5px;
|
||||
}
|
||||
.group-heading:hover {
|
||||
background-color: #f8f9fa!important;
|
||||
}
|
||||
.table .error-cell{
|
||||
word-break: break-word;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
{%= PrintNavItems(title, pages) %}
|
||||
<main class="px-2">
|
||||
{% endfunc %}
|
107
app/vmalert/tpl/header.qtpl.go
Normal file
|
@ -0,0 +1,107 @@
|
|||
// Code generated by qtc from "header.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:1
|
||||
package tpl
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:1
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:1
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:1
|
||||
func StreamHeader(qw422016 *qt422016.Writer, title string, pages []NavItem) {
|
||||
//line app/vmalert/tpl/header.qtpl:1
|
||||
qw422016.N().S(`
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<title>vmalert`)
|
||||
//line app/vmalert/tpl/header.qtpl:5
|
||||
if title != "" {
|
||||
//line app/vmalert/tpl/header.qtpl:5
|
||||
qw422016.N().S(` - `)
|
||||
//line app/vmalert/tpl/header.qtpl:5
|
||||
qw422016.E().S(title)
|
||||
//line app/vmalert/tpl/header.qtpl:5
|
||||
}
|
||||
//line app/vmalert/tpl/header.qtpl:5
|
||||
qw422016.N().S(`</title>
|
||||
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossorigin="anonymous">
|
||||
<style>
|
||||
body{
|
||||
min-height: 75rem;
|
||||
padding-top: 4.5rem;
|
||||
}
|
||||
pre {
|
||||
overflow: scroll;
|
||||
max-width: 600px;
|
||||
min-height: 30px;
|
||||
}
|
||||
.group-heading {
|
||||
cursor: pointer;
|
||||
padding: 5px;
|
||||
margin-top: 5px;
|
||||
position: relative;
|
||||
}
|
||||
.group-heading .anchor {
|
||||
position:absolute;
|
||||
top:-60px;
|
||||
}
|
||||
.group-heading span {
|
||||
float: right;
|
||||
margin-left: 5px;
|
||||
margin-right: 5px;
|
||||
}
|
||||
.group-heading:hover {
|
||||
background-color: #f8f9fa!important;
|
||||
}
|
||||
.table .error-cell{
|
||||
word-break: break-word;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
`)
|
||||
//line app/vmalert/tpl/header.qtpl:41
|
||||
StreamPrintNavItems(qw422016, title, pages)
|
||||
//line app/vmalert/tpl/header.qtpl:41
|
||||
qw422016.N().S(`
|
||||
<main class="px-2">
|
||||
`)
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
func WriteHeader(qq422016 qtio422016.Writer, title string, pages []NavItem) {
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
StreamHeader(qw422016, title, pages)
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
func Header(title string, pages []NavItem) string {
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
WriteHeader(qb422016, title, pages)
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
return qs422016
|
||||
//line app/vmalert/tpl/header.qtpl:43
|
||||
}
|
25
app/vmalert/tpl/nav.qtpl
Normal file
|
@ -0,0 +1,25 @@
|
|||
{% code
|
||||
type NavItem struct {
|
||||
Name string
|
||||
Url string
|
||||
}
|
||||
%}
|
||||
|
||||
{% func PrintNavItems(current string, items []NavItem) %}
|
||||
<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
|
||||
<div class="container-fluid">
|
||||
<div class="collapse navbar-collapse" id="navbarCollapse">
|
||||
<ul class="navbar-nav me-auto mb-2 mb-md-0">
|
||||
{% for _, item := range items %}
|
||||
<li class="nav-item">
|
||||
<a class="nav-link{% if current == item.Name %} active{% endif %}" href="{%s item.Url %}">
|
||||
{%s item.Name %}
|
||||
</a>
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</div>
|
||||
</nav>
|
||||
{% endfunc %}
|
||||
|
||||
|
96
app/vmalert/tpl/nav.qtpl.go
Normal file
|
@ -0,0 +1,96 @@
|
|||
// Code generated by qtc from "nav.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmalert/tpl/nav.qtpl:1
|
||||
package tpl
|
||||
|
||||
//line app/vmalert/tpl/nav.qtpl:1
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmalert/tpl/nav.qtpl:1
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmalert/tpl/nav.qtpl:2
|
||||
type NavItem struct {
|
||||
Name string
|
||||
Url string
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/nav.qtpl:8
|
||||
func StreamPrintNavItems(qw422016 *qt422016.Writer, current string, items []NavItem) {
|
||||
//line app/vmalert/tpl/nav.qtpl:8
|
||||
qw422016.N().S(`
|
||||
<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
|
||||
<div class="container-fluid">
|
||||
<div class="collapse navbar-collapse" id="navbarCollapse">
|
||||
<ul class="navbar-nav me-auto mb-2 mb-md-0">
|
||||
`)
|
||||
//line app/vmalert/tpl/nav.qtpl:13
|
||||
for _, item := range items {
|
||||
//line app/vmalert/tpl/nav.qtpl:13
|
||||
qw422016.N().S(`
|
||||
<li class="nav-item">
|
||||
<a class="nav-link`)
|
||||
//line app/vmalert/tpl/nav.qtpl:15
|
||||
if current == item.Name {
|
||||
//line app/vmalert/tpl/nav.qtpl:15
|
||||
qw422016.N().S(` active`)
|
||||
//line app/vmalert/tpl/nav.qtpl:15
|
||||
}
|
||||
//line app/vmalert/tpl/nav.qtpl:15
|
||||
qw422016.N().S(`" href="`)
|
||||
//line app/vmalert/tpl/nav.qtpl:15
|
||||
qw422016.E().S(item.Url)
|
||||
//line app/vmalert/tpl/nav.qtpl:15
|
||||
qw422016.N().S(`">
|
||||
`)
|
||||
//line app/vmalert/tpl/nav.qtpl:16
|
||||
qw422016.E().S(item.Name)
|
||||
//line app/vmalert/tpl/nav.qtpl:16
|
||||
qw422016.N().S(`
|
||||
</a>
|
||||
</li>
|
||||
`)
|
||||
//line app/vmalert/tpl/nav.qtpl:19
|
||||
}
|
||||
//line app/vmalert/tpl/nav.qtpl:19
|
||||
qw422016.N().S(`
|
||||
</ul>
|
||||
</div>
|
||||
</nav>
|
||||
`)
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
func WritePrintNavItems(qq422016 qtio422016.Writer, current string, items []NavItem) {
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
StreamPrintNavItems(qw422016, current, items)
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
func PrintNavItems(current string, items []NavItem) string {
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
WritePrintNavItems(qb422016, current, items)
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
return qs422016
|
||||
//line app/vmalert/tpl/nav.qtpl:23
|
||||
}
|
18
app/vmalert/utils/auth.go
Normal file
|
@ -0,0 +1,18 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||
)
|
||||
|
||||
// AuthConfig returns promauth.Config based on the given params
|
||||
func AuthConfig(baUser, baPass, baFile, bearerToken, bearerTokenFile string) (*promauth.Config, error) {
|
||||
var baCfg *promauth.BasicAuthConfig
|
||||
if baUser != "" || baPass != "" || baFile != "" {
|
||||
baCfg = &promauth.BasicAuthConfig{
|
||||
Username: baUser,
|
||||
Password: baPass,
|
||||
PasswordFile: baFile,
|
||||
}
|
||||
}
|
||||
return promauth.NewConfig(".", nil, baCfg, bearerToken, bearerTokenFile, nil, nil)
|
||||
}
|
|
@ -23,7 +23,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
|
|||
if r.Method != "GET" {
|
||||
return false
|
||||
}
|
||||
httpserver.WriteAPIHelp(w, [][2]string{
|
||||
WriteWelcome(w, [][2]string{
|
||||
{"/api/v1/groups", "list all loaded groups and rules"},
|
||||
{"/api/v1/alerts", "list all active alerts"},
|
||||
{"/api/v1/groupID/alertID/status", "get alert status by ID"},
|
||||
|
@ -31,6 +31,12 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
|
|||
{"/-/reload", "reload configuration"},
|
||||
})
|
||||
return true
|
||||
case "/alerts":
|
||||
WriteListAlerts(w, rh.groupAlerts())
|
||||
return true
|
||||
case "/groups":
|
||||
WriteListGroups(w, rh.groups())
|
||||
return true
|
||||
case "/api/v1/groups":
|
||||
data, err := rh.listGroups()
|
||||
if err != nil {
|
||||
|
@ -58,14 +64,26 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
|
|||
if !strings.HasSuffix(r.URL.Path, "/status") {
|
||||
return false
|
||||
}
|
||||
// /api/v1/<groupName>/<alertID>/status
|
||||
data, err := rh.alert(r.URL.Path)
|
||||
alert, err := rh.alertByPath(strings.TrimPrefix(r.URL.Path, "/api/v1/"))
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Write(data)
|
||||
|
||||
// /api/v1/<groupID>/<alertID>/status
|
||||
if strings.HasPrefix(r.URL.Path, "/api/v1/") {
|
||||
data, err := json.Marshal(alert)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "failed to marshal alert: %s", err)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Write(data)
|
||||
return true
|
||||
}
|
||||
|
||||
// <groupID>/<alertID>/status
|
||||
WriteAlert(w, alert)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
@ -77,20 +95,25 @@ type listGroupsResponse struct {
|
|||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
func (rh *requestHandler) listGroups() ([]byte, error) {
|
||||
func (rh *requestHandler) groups() []APIGroup {
|
||||
rh.m.groupsMu.RLock()
|
||||
defer rh.m.groupsMu.RUnlock()
|
||||
|
||||
lr := listGroupsResponse{Status: "success"}
|
||||
var groups []APIGroup
|
||||
for _, g := range rh.m.groups {
|
||||
lr.Data.Groups = append(lr.Data.Groups, g.toAPI())
|
||||
groups = append(groups, g.toAPI())
|
||||
}
|
||||
|
||||
// sort list of alerts for deterministic output
|
||||
sort.Slice(lr.Data.Groups, func(i, j int) bool {
|
||||
return lr.Data.Groups[i].Name < lr.Data.Groups[j].Name
|
||||
sort.Slice(groups, func(i, j int) bool {
|
||||
return groups[i].Name < groups[j].Name
|
||||
})
|
||||
|
||||
return groups
|
||||
}
|
||||
func (rh *requestHandler) listGroups() ([]byte, error) {
|
||||
lr := listGroupsResponse{Status: "success"}
|
||||
lr.Data.Groups = rh.groups()
|
||||
b, err := json.Marshal(lr)
|
||||
if err != nil {
|
||||
return nil, &httpserver.ErrorWithStatusCode{
|
||||
|
@ -108,6 +131,30 @@ type listAlertsResponse struct {
|
|||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
func (rh *requestHandler) groupAlerts() []GroupAlerts {
|
||||
rh.m.groupsMu.RLock()
|
||||
defer rh.m.groupsMu.RUnlock()
|
||||
|
||||
var groupAlerts []GroupAlerts
|
||||
for _, g := range rh.m.groups {
|
||||
var alerts []*APIAlert
|
||||
for _, r := range g.Rules {
|
||||
a, ok := r.(*AlertingRule)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
alerts = append(alerts, a.AlertsAPI()...)
|
||||
}
|
||||
if len(alerts) > 0 {
|
||||
groupAlerts = append(groupAlerts, GroupAlerts{
|
||||
Group: g.toAPI(),
|
||||
Alerts: alerts,
|
||||
})
|
||||
}
|
||||
}
|
||||
return groupAlerts
|
||||
}
|
||||
|
||||
func (rh *requestHandler) listAlerts() ([]byte, error) {
|
||||
rh.m.groupsMu.RLock()
|
||||
defer rh.m.groupsMu.RUnlock()
|
||||
|
@ -138,18 +185,17 @@ func (rh *requestHandler) listAlerts() ([]byte, error) {
|
|||
return b, nil
|
||||
}
|
||||
|
||||
func (rh *requestHandler) alert(path string) ([]byte, error) {
|
||||
func (rh *requestHandler) alertByPath(path string) (*APIAlert, error) {
|
||||
rh.m.groupsMu.RLock()
|
||||
defer rh.m.groupsMu.RUnlock()
|
||||
|
||||
parts := strings.SplitN(strings.TrimPrefix(path, "/api/v1/"), "/", 3)
|
||||
parts := strings.SplitN(strings.TrimLeft(path, "/"), "/", 3)
|
||||
if len(parts) != 3 {
|
||||
return nil, &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf(`path %q cointains /status suffix but doesn't match pattern "/group/alert/status"`, path),
|
||||
Err: fmt.Errorf(`path %q contains /status suffix but doesn't match pattern "/groupID/alertID/status"`, path),
|
||||
StatusCode: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
groupID, err := uint64FromPath(parts[0])
|
||||
if err != nil {
|
||||
return nil, badRequest(fmt.Errorf(`cannot parse groupID: %w`, err))
|
||||
|
@ -162,7 +208,7 @@ func (rh *requestHandler) alert(path string) ([]byte, error) {
|
|||
if err != nil {
|
||||
return nil, errResponse(err, http.StatusNotFound)
|
||||
}
|
||||
return json.Marshal(resp)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func uint64FromPath(path string) (uint64, error) {
|
||||
|
|
285
app/vmalert/web.qtpl
Normal file
|
@ -0,0 +1,285 @@
|
|||
{% package main %}
|
||||
|
||||
{% import (
|
||||
"time"
|
||||
"sort"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/tpl"
|
||||
) %}
|
||||
|
||||
|
||||
{% code
|
||||
var navItems = []tpl.NavItem{
|
||||
{Name: "vmalert", Url: "/"},
|
||||
{Name: "Groups", Url: "/groups"},
|
||||
{Name: "Alerts", Url: "/alerts"},
|
||||
{Name: "Docs", Url: "https://docs.victoriametrics.com/vmalert.html"},
|
||||
}
|
||||
%}
|
||||
|
||||
{% func Welcome(pathList [][2]string) %}
|
||||
{%= tpl.Header("vmalert", navItems) %}
|
||||
<p>
|
||||
API:<br>
|
||||
{% for _, p := range pathList %}
|
||||
{%code
|
||||
p, doc := p[0], p[1]
|
||||
%}
|
||||
<a href="{%s p %}">{%s p %}</a> - {%s doc %}<br/>
|
||||
{% endfor %}
|
||||
</p>
|
||||
{%= tpl.Footer() %}
|
||||
{% endfunc %}
|
||||
|
||||
{% func ListGroups(groups []APIGroup) %}
|
||||
{%= tpl.Header("Groups", navItems) %}
|
||||
{% if len(groups) > 0 %}
|
||||
{%code
|
||||
rOk := make(map[string]int)
|
||||
rNotOk := make(map[string]int)
|
||||
for _, g := range groups {
|
||||
for _, r := range g.AlertingRules{
|
||||
if r.LastError != "" {
|
||||
rNotOk[g.Name]++
|
||||
} else {
|
||||
rOk[g.Name]++
|
||||
}
|
||||
}
|
||||
for _, r := range g.RecordingRules{
|
||||
if r.LastError != "" {
|
||||
rNotOk[g.Name]++
|
||||
} else {
|
||||
rOk[g.Name]++
|
||||
}
|
||||
}
|
||||
}
|
||||
%}
|
||||
<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
|
||||
<a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
|
||||
{% for _, g := range groups %}
|
||||
<div class="group-heading{% if rNotOk[g.Name] > 0 %} alert-danger{% endif %}" data-bs-target="rules-{%s g.ID %}">
|
||||
<span class="anchor" id="group-{%s g.ID %}"></span>
|
||||
<a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %} (every {%s g.Interval %})</a>
|
||||
{% if rNotOk[g.Name] > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d rNotOk[g.Name] %}</span> {% endif %}
|
||||
<span class="badge bg-success" title="Number of rules with status Ok">{%d rOk[g.Name] %}</span>
|
||||
<p class="fs-6 fw-lighter">{%s g.File %}</p>
|
||||
{% if len(g.ExtraFilterLabels) > 0 %}
|
||||
<div class="fs-6 fw-lighter">Extra filter labels
|
||||
{% for k, v := range g.ExtraFilterLabels %}
|
||||
<span class="float-left badge bg-primary">{%s k %}={%s v %}</span>
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="collapse" id="rules-{%s g.ID %}">
|
||||
<table class="table table-striped table-hover table-sm">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">Rule</th>
|
||||
<th scope="col" title="Shows if rule's execution ended with error">Error</th>
|
||||
<th scope="col" title="How many samples were produced by the rule">Samples</th>
|
||||
<th scope="col" title="How many seconds ago rule was executed">Updated</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for _, ar := range g.AlertingRules %}
|
||||
<tr{% if ar.LastError != "" %} class="alert-danger"{% endif %}>
|
||||
<td>
|
||||
<b>alert:</b> {%s ar.Name %} (for: {%v ar.For %})<br>
|
||||
<code><pre>{%s ar.Expression %}</pre></code><br>
|
||||
{% if len(ar.Labels) > 0 %} <b>Labels:</b>{% endif %}
|
||||
{% for k, v := range ar.Labels %}
|
||||
<span class="ms-1 badge bg-primary">{%s k %}={%s v %}</span>
|
||||
{% endfor %}
|
||||
</td>
|
||||
<td><div class="error-cell">{%s ar.LastError %}</div></td>
|
||||
<td>{%d ar.LastSamples %}</td>
|
||||
<td>{%f.3 time.Since(ar.LastExec).Seconds() %}s ago</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
{% for _, rr := range g.RecordingRules %}
|
||||
<tr>
|
||||
<td>
|
||||
<b>record:</b> {%s rr.Name %}<br>
|
||||
<code><pre>{%s rr.Expression %}</pre></code>
|
||||
{% if len(rr.Labels) > 0 %} <b>Labels:</b>{% endif %}
|
||||
{% for k, v := range rr.Labels %}
|
||||
<span class="ms-1 badge bg-primary">{%s k %}={%s v %}</span>
|
||||
{% endfor %}
|
||||
</td>
|
||||
<td><div class="error-cell">{%s rr.LastError %}</div></td>
|
||||
<td>{%d rr.LastSamples %}</td>
|
||||
<td>{%f.3 time.Since(rr.LastExec).Seconds() %}s ago</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{% endfor %}
|
||||
|
||||
{% else %}
|
||||
<div>
|
||||
<p>No items...</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
{%= tpl.Footer() %}
|
||||
|
||||
{% endfunc %}
|
||||
|
||||
|
||||
{% func ListAlerts(groupAlerts []GroupAlerts) %}
|
||||
{%= tpl.Header("Alerts", navItems) %}
|
||||
{% if len(groupAlerts) > 0 %}
|
||||
<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
|
||||
<a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
|
||||
{% for _, ga := range groupAlerts %}
|
||||
{%code g := ga.Group %}
|
||||
<div class="group-heading alert-danger" data-bs-target="rules-{%s g.ID %}">
|
||||
<span class="anchor" id="group-{%s g.ID %}"></span>
|
||||
<a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %}</a>
|
||||
<span class="badge bg-danger" title="Number of active alerts">{%d len(ga.Alerts) %}</span>
|
||||
<br>
|
||||
<p class="fs-6 fw-lighter">{%s g.File %}</p>
|
||||
</div>
|
||||
{%code
|
||||
var keys []string
|
||||
alertsByRule := make(map[string][]*APIAlert)
|
||||
for _, alert := range ga.Alerts {
|
||||
if len(alertsByRule[alert.RuleID]) < 1 {
|
||||
keys = append(keys, alert.RuleID)
|
||||
}
|
||||
alertsByRule[alert.RuleID] = append(alertsByRule[alert.RuleID], alert)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
%}
|
||||
        <div class="collapse" id="rules-{%s g.ID %}">
            {% for _, ruleID := range keys %}
                {%code
                    defaultAR := alertsByRule[ruleID][0]
                    var labelKeys []string
                    for k := range defaultAR.Labels {
                        labelKeys = append(labelKeys, k)
                    }
                    sort.Strings(labelKeys)
                %}
                <br>
                <b>alert:</b> {%s defaultAR.Name %} ({%d len(alertsByRule[ruleID]) %})<br>
                <b>expr:</b><code><pre>{%s defaultAR.Expression %}</pre></code>
                <table class="table table-striped table-hover table-sm">
                    <thead>
                        <tr>
                            <th scope="col">Labels</th>
                            <th scope="col">State</th>
                            <th scope="col">Active at</th>
                            <th scope="col">Value</th>
                            <th scope="col">Link</th>
                        </tr>
                    </thead>
                    <tbody>
                    {% for _, ar := range alertsByRule[ruleID] %}
                        <tr>
                            <td>
                                {% for _, k := range labelKeys %}
                                    <span class="ms-1 badge bg-primary">{%s k %}={%s ar.Labels[k] %}</span>
                                {% endfor %}
                            </td>
                            <td><span class="badge {% if ar.State=="firing" %}bg-danger{% else %} bg-warning text-dark{% endif %}">{%s ar.State %}</span></td>
                            <td>{%s ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00") %}</td>
                            <td>{%s ar.Value %}</td>
                            <td>
                                <a href="/{%s g.ID %}/{%s ar.ID %}/status">Details</a>
                            </td>
                        </tr>
                    {% endfor %}
                    </tbody>
                </table>
            {% endfor %}
        </div>
        <br>
    {% endfor %}

{% else %}
    <div>
        <p>No items...</p>
    </div>
{% endif %}

{%= tpl.Footer() %}

{% endfunc %}


{% func Alert(alert *APIAlert) %}
{%= tpl.Header("", navItems) %}
{%code
    var labelKeys []string
    for k := range alert.Labels {
        labelKeys = append(labelKeys, k)
    }
    sort.Strings(labelKeys)

    var annotationKeys []string
    for k := range alert.Annotations {
        annotationKeys = append(annotationKeys, k)
    }
    sort.Strings(annotationKeys)
%}
<div class="display-6 pb-3 mb-3">{%s alert.Name %}<span class="ms-2 badge {% if alert.State=="firing" %}bg-danger{% else %} bg-warning text-dark{% endif %}">{%s alert.State %}</span></div>
<div class="container border-bottom p-2">
    <div class="row">
        <div class="col-2">
            Active at
        </div>
        <div class="col">
            {%s alert.ActiveAt.Format("2006-01-02T15:04:05Z07:00") %}
        </div>
    </div>
</div>
<div class="container border-bottom p-2">
    <div class="row">
        <div class="col-2">
            Expr
        </div>
        <div class="col">
            <code><pre>{%s alert.Expression %}</pre></code>
        </div>
    </div>
</div>
<div class="container border-bottom p-2">
    <div class="row">
        <div class="col-2">
            Labels
        </div>
        <div class="col">
            {% for _, k := range labelKeys %}
                <span class="m-1 badge bg-primary">{%s k %}={%s alert.Labels[k] %}</span>
            {% endfor %}
        </div>
    </div>
</div>
<div class="container border-bottom p-2">
    <div class="row">
        <div class="col-2">
            Annotations
        </div>
        <div class="col">
            {% for _, k := range annotationKeys %}
                <b>{%s k %}:</b><br>
                <p>{%s alert.Annotations[k] %}</p>
            {% endfor %}
        </div>
    </div>
</div>
<div class="container border-bottom p-2">
    <div class="row">
        <div class="col-2">
            Group
        </div>
        <div class="col">
            <a target="_blank" href="/groups#group-{%s alert.GroupID %}">{%s alert.GroupID %}</a>
        </div>
    </div>
</div>
{%= tpl.Footer() %}

{% endfunc %}
920
app/vmalert/web.qtpl.go
Normal file
|
@ -0,0 +1,920 @@
|
|||
// Code generated by qtc from "web.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmalert/web.qtpl:1
|
||||
package main
|
||||
|
||||
//line app/vmalert/web.qtpl:3
|
||||
import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/tpl"
|
||||
)
|
||||
|
||||
//line app/vmalert/web.qtpl:11
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmalert/web.qtpl:11
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmalert/web.qtpl:12
|
||||
var navItems = []tpl.NavItem{
|
||||
{Name: "vmalert", Url: "/"},
|
||||
{Name: "Groups", Url: "/groups"},
|
||||
{Name: "Alerts", Url: "/alerts"},
|
||||
{Name: "Docs", Url: "https://docs.victoriametrics.com/vmalert.html"},
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:20
|
||||
func StreamWelcome(qw422016 *qt422016.Writer, pathList [][2]string) {
|
||||
//line app/vmalert/web.qtpl:20
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:21
|
||||
tpl.StreamHeader(qw422016, "vmalert", navItems)
|
||||
//line app/vmalert/web.qtpl:21
|
||||
qw422016.N().S(`
|
||||
<p>
|
||||
API:<br>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:24
|
||||
for _, p := range pathList {
|
||||
//line app/vmalert/web.qtpl:24
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:26
|
||||
p, doc := p[0], p[1]
|
||||
|
||||
//line app/vmalert/web.qtpl:27
|
||||
qw422016.N().S(`
|
||||
<a href="`)
|
||||
//line app/vmalert/web.qtpl:28
|
||||
qw422016.E().S(p)
|
||||
//line app/vmalert/web.qtpl:28
|
||||
qw422016.N().S(`">`)
|
||||
//line app/vmalert/web.qtpl:28
|
||||
qw422016.E().S(p)
|
||||
//line app/vmalert/web.qtpl:28
|
||||
qw422016.N().S(`</a> - `)
|
||||
//line app/vmalert/web.qtpl:28
|
||||
qw422016.E().S(doc)
|
||||
//line app/vmalert/web.qtpl:28
|
||||
qw422016.N().S(`<br/>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:29
|
||||
}
|
||||
//line app/vmalert/web.qtpl:29
|
||||
qw422016.N().S(`
|
||||
</p>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:31
|
||||
tpl.StreamFooter(qw422016)
|
||||
//line app/vmalert/web.qtpl:31
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:32
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:32
|
||||
func WriteWelcome(qq422016 qtio422016.Writer, pathList [][2]string) {
|
||||
//line app/vmalert/web.qtpl:32
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/web.qtpl:32
|
||||
StreamWelcome(qw422016, pathList)
|
||||
//line app/vmalert/web.qtpl:32
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/web.qtpl:32
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:32
|
||||
func Welcome(pathList [][2]string) string {
|
||||
//line app/vmalert/web.qtpl:32
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/web.qtpl:32
|
||||
WriteWelcome(qb422016, pathList)
|
||||
//line app/vmalert/web.qtpl:32
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/web.qtpl:32
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/web.qtpl:32
|
||||
return qs422016
|
||||
//line app/vmalert/web.qtpl:32
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:34
|
||||
func StreamListGroups(qw422016 *qt422016.Writer, groups []APIGroup) {
|
||||
//line app/vmalert/web.qtpl:34
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:35
|
||||
tpl.StreamHeader(qw422016, "Groups", navItems)
|
||||
//line app/vmalert/web.qtpl:35
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:36
|
||||
if len(groups) > 0 {
|
||||
//line app/vmalert/web.qtpl:36
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:38
|
||||
rOk := make(map[string]int)
|
||||
rNotOk := make(map[string]int)
|
||||
for _, g := range groups {
|
||||
for _, r := range g.AlertingRules {
|
||||
if r.LastError != "" {
|
||||
rNotOk[g.Name]++
|
||||
} else {
|
||||
rOk[g.Name]++
|
||||
}
|
||||
}
|
||||
for _, r := range g.RecordingRules {
|
||||
if r.LastError != "" {
|
||||
rNotOk[g.Name]++
|
||||
} else {
|
||||
rOk[g.Name]++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:56
|
||||
qw422016.N().S(`
|
||||
<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
|
||||
<a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:59
|
||||
for _, g := range groups {
|
||||
//line app/vmalert/web.qtpl:59
|
||||
qw422016.N().S(`
|
||||
<div class="group-heading`)
|
||||
//line app/vmalert/web.qtpl:60
|
||||
if rNotOk[g.Name] > 0 {
|
||||
//line app/vmalert/web.qtpl:60
|
||||
qw422016.N().S(` alert-danger`)
|
||||
//line app/vmalert/web.qtpl:60
|
||||
}
|
||||
//line app/vmalert/web.qtpl:60
|
||||
qw422016.N().S(`" data-bs-target="rules-`)
|
||||
//line app/vmalert/web.qtpl:60
|
||||
qw422016.E().S(g.ID)
|
||||
//line app/vmalert/web.qtpl:60
|
||||
qw422016.N().S(`">
|
||||
<span class="anchor" id="group-`)
|
||||
//line app/vmalert/web.qtpl:61
|
||||
qw422016.E().S(g.ID)
|
||||
//line app/vmalert/web.qtpl:61
|
||||
qw422016.N().S(`"></span>
|
||||
<a href="#group-`)
|
||||
//line app/vmalert/web.qtpl:62
|
||||
qw422016.E().S(g.ID)
|
||||
//line app/vmalert/web.qtpl:62
|
||||
qw422016.N().S(`">`)
|
||||
//line app/vmalert/web.qtpl:62
|
||||
qw422016.E().S(g.Name)
|
||||
//line app/vmalert/web.qtpl:62
|
||||
if g.Type != "prometheus" {
|
||||
//line app/vmalert/web.qtpl:62
|
||||
qw422016.N().S(` (`)
|
||||
//line app/vmalert/web.qtpl:62
|
||||
qw422016.E().S(g.Type)
|
||||
//line app/vmalert/web.qtpl:62
|
||||
qw422016.N().S(`)`)
|
||||
//line app/vmalert/web.qtpl:62
|
||||
}
|
||||
//line app/vmalert/web.qtpl:62
|
||||
qw422016.N().S(` (every `)
|
||||
//line app/vmalert/web.qtpl:62
|
||||
qw422016.E().S(g.Interval)
|
||||
//line app/vmalert/web.qtpl:62
|
||||
qw422016.N().S(`)</a>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:63
|
||||
if rNotOk[g.Name] > 0 {
|
||||
//line app/vmalert/web.qtpl:63
|
||||
qw422016.N().S(`<span class="badge bg-danger" title="Number of rules with status Error">`)
|
||||
//line app/vmalert/web.qtpl:63
|
||||
qw422016.N().D(rNotOk[g.Name])
|
||||
//line app/vmalert/web.qtpl:63
|
||||
qw422016.N().S(`</span> `)
|
||||
//line app/vmalert/web.qtpl:63
|
||||
}
|
||||
//line app/vmalert/web.qtpl:63
|
||||
qw422016.N().S(`
|
||||
<span class="badge bg-success" title="Number of rules with status Ok">`)
|
||||
//line app/vmalert/web.qtpl:64
|
||||
qw422016.N().D(rOk[g.Name])
|
||||
//line app/vmalert/web.qtpl:64
|
||||
qw422016.N().S(`</span>
|
||||
<p class="fs-6 fw-lighter">`)
|
||||
//line app/vmalert/web.qtpl:65
|
||||
qw422016.E().S(g.File)
|
||||
//line app/vmalert/web.qtpl:65
|
||||
qw422016.N().S(`</p>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:66
|
||||
if len(g.ExtraFilterLabels) > 0 {
|
||||
//line app/vmalert/web.qtpl:66
|
||||
qw422016.N().S(`
|
||||
<div class="fs-6 fw-lighter">Extra filter labels
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:68
|
||||
for k, v := range g.ExtraFilterLabels {
|
||||
//line app/vmalert/web.qtpl:68
|
||||
qw422016.N().S(`
|
||||
<span class="float-left badge bg-primary">`)
|
||||
//line app/vmalert/web.qtpl:69
|
||||
qw422016.E().S(k)
|
||||
//line app/vmalert/web.qtpl:69
|
||||
qw422016.N().S(`=`)
|
||||
//line app/vmalert/web.qtpl:69
|
||||
qw422016.E().S(v)
|
||||
//line app/vmalert/web.qtpl:69
|
||||
qw422016.N().S(`</span>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:70
|
||||
}
|
||||
//line app/vmalert/web.qtpl:70
|
||||
qw422016.N().S(`
|
||||
</div>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:72
|
||||
}
|
||||
//line app/vmalert/web.qtpl:72
|
||||
qw422016.N().S(`
|
||||
</div>
|
||||
<div class="collapse" id="rules-`)
|
||||
//line app/vmalert/web.qtpl:74
|
||||
qw422016.E().S(g.ID)
|
||||
//line app/vmalert/web.qtpl:74
|
||||
qw422016.N().S(`">
|
||||
<table class="table table-striped table-hover table-sm">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">Rule</th>
|
||||
<th scope="col" title="Shows if rule's execution ended with error">Error</th>
|
||||
<th scope="col" title="How many samples were produced by the rule">Samples</th>
|
||||
<th scope="col" title="How many seconds ago rule was executed">Updated</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:85
|
||||
for _, ar := range g.AlertingRules {
|
||||
//line app/vmalert/web.qtpl:85
|
||||
qw422016.N().S(`
|
||||
<tr`)
|
||||
//line app/vmalert/web.qtpl:86
|
||||
if ar.LastError != "" {
|
||||
//line app/vmalert/web.qtpl:86
|
||||
qw422016.N().S(` class="alert-danger"`)
|
||||
//line app/vmalert/web.qtpl:86
|
||||
}
|
||||
//line app/vmalert/web.qtpl:86
|
||||
qw422016.N().S(`>
|
||||
<td>
|
||||
<b>alert:</b> `)
|
||||
//line app/vmalert/web.qtpl:88
|
||||
qw422016.E().S(ar.Name)
|
||||
//line app/vmalert/web.qtpl:88
|
||||
qw422016.N().S(` (for: `)
|
||||
//line app/vmalert/web.qtpl:88
|
||||
qw422016.E().V(ar.For)
|
||||
//line app/vmalert/web.qtpl:88
|
||||
qw422016.N().S(`)<br>
|
||||
<code><pre>`)
|
||||
//line app/vmalert/web.qtpl:89
|
||||
qw422016.E().S(ar.Expression)
|
||||
//line app/vmalert/web.qtpl:89
|
||||
qw422016.N().S(`</pre></code><br>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:90
|
||||
if len(ar.Labels) > 0 {
|
||||
//line app/vmalert/web.qtpl:90
|
||||
qw422016.N().S(` <b>Labels:</b>`)
|
||||
//line app/vmalert/web.qtpl:90
|
||||
}
|
||||
//line app/vmalert/web.qtpl:90
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:91
|
||||
for k, v := range ar.Labels {
|
||||
//line app/vmalert/web.qtpl:91
|
||||
qw422016.N().S(`
|
||||
<span class="ms-1 badge bg-primary">`)
|
||||
//line app/vmalert/web.qtpl:92
|
||||
qw422016.E().S(k)
|
||||
//line app/vmalert/web.qtpl:92
|
||||
qw422016.N().S(`=`)
|
||||
//line app/vmalert/web.qtpl:92
|
||||
qw422016.E().S(v)
|
||||
//line app/vmalert/web.qtpl:92
|
||||
qw422016.N().S(`</span>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:93
|
||||
}
|
||||
//line app/vmalert/web.qtpl:93
|
||||
qw422016.N().S(`
|
||||
</td>
|
||||
<td><div class="error-cell">`)
|
||||
//line app/vmalert/web.qtpl:95
|
||||
qw422016.E().S(ar.LastError)
|
||||
//line app/vmalert/web.qtpl:95
|
||||
qw422016.N().S(`</div></td>
|
||||
<td>`)
|
||||
//line app/vmalert/web.qtpl:96
|
||||
qw422016.N().D(ar.LastSamples)
|
||||
//line app/vmalert/web.qtpl:96
|
||||
qw422016.N().S(`</td>
|
||||
<td>`)
|
||||
//line app/vmalert/web.qtpl:97
|
||||
qw422016.N().FPrec(time.Since(ar.LastExec).Seconds(), 3)
|
||||
//line app/vmalert/web.qtpl:97
|
||||
qw422016.N().S(`s ago</td>
|
||||
</tr>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:99
|
||||
}
|
||||
//line app/vmalert/web.qtpl:99
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:100
|
||||
for _, rr := range g.RecordingRules {
|
||||
//line app/vmalert/web.qtpl:100
|
||||
qw422016.N().S(`
|
||||
<tr>
|
||||
<td>
|
||||
<b>record:</b> `)
|
||||
//line app/vmalert/web.qtpl:103
|
||||
qw422016.E().S(rr.Name)
|
||||
//line app/vmalert/web.qtpl:103
|
||||
qw422016.N().S(`<br>
|
||||
<code><pre>`)
|
||||
//line app/vmalert/web.qtpl:104
|
||||
qw422016.E().S(rr.Expression)
|
||||
//line app/vmalert/web.qtpl:104
|
||||
qw422016.N().S(`</pre></code>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:105
|
||||
if len(rr.Labels) > 0 {
|
||||
//line app/vmalert/web.qtpl:105
|
||||
qw422016.N().S(` <b>Labels:</b>`)
|
||||
//line app/vmalert/web.qtpl:105
|
||||
}
|
||||
//line app/vmalert/web.qtpl:105
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:106
|
||||
for k, v := range rr.Labels {
|
||||
//line app/vmalert/web.qtpl:106
|
||||
qw422016.N().S(`
|
||||
<span class="ms-1 badge bg-primary">`)
|
||||
//line app/vmalert/web.qtpl:107
|
||||
qw422016.E().S(k)
|
||||
//line app/vmalert/web.qtpl:107
|
||||
qw422016.N().S(`=`)
|
||||
//line app/vmalert/web.qtpl:107
|
||||
qw422016.E().S(v)
|
||||
//line app/vmalert/web.qtpl:107
|
||||
qw422016.N().S(`</span>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:108
|
||||
}
|
||||
//line app/vmalert/web.qtpl:108
|
||||
qw422016.N().S(`
|
||||
</td>
|
||||
<td><div class="error-cell">`)
|
||||
//line app/vmalert/web.qtpl:110
|
||||
qw422016.E().S(rr.LastError)
|
||||
//line app/vmalert/web.qtpl:110
|
||||
qw422016.N().S(`</div></td>
|
||||
<td>`)
|
||||
//line app/vmalert/web.qtpl:111
|
||||
qw422016.N().D(rr.LastSamples)
|
||||
//line app/vmalert/web.qtpl:111
|
||||
qw422016.N().S(`</td>
|
||||
<td>`)
|
||||
//line app/vmalert/web.qtpl:112
|
||||
qw422016.N().FPrec(time.Since(rr.LastExec).Seconds(), 3)
|
||||
//line app/vmalert/web.qtpl:112
|
||||
qw422016.N().S(`s ago</td>
|
||||
</tr>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:114
|
||||
}
|
||||
//line app/vmalert/web.qtpl:114
|
||||
qw422016.N().S(`
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:118
|
||||
}
|
||||
//line app/vmalert/web.qtpl:118
|
||||
qw422016.N().S(`
|
||||
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:120
|
||||
} else {
|
||||
//line app/vmalert/web.qtpl:120
|
||||
qw422016.N().S(`
|
||||
<div>
|
||||
<p>No items...</p>
|
||||
</div>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:124
|
||||
}
|
||||
//line app/vmalert/web.qtpl:124
|
||||
qw422016.N().S(`
|
||||
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:126
|
||||
tpl.StreamFooter(qw422016)
|
||||
//line app/vmalert/web.qtpl:126
|
||||
qw422016.N().S(`
|
||||
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:128
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:128
|
||||
func WriteListGroups(qq422016 qtio422016.Writer, groups []APIGroup) {
|
||||
//line app/vmalert/web.qtpl:128
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/web.qtpl:128
|
||||
StreamListGroups(qw422016, groups)
|
||||
//line app/vmalert/web.qtpl:128
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/web.qtpl:128
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:128
|
||||
func ListGroups(groups []APIGroup) string {
|
||||
//line app/vmalert/web.qtpl:128
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/web.qtpl:128
|
||||
WriteListGroups(qb422016, groups)
|
||||
//line app/vmalert/web.qtpl:128
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/web.qtpl:128
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/web.qtpl:128
|
||||
return qs422016
|
||||
//line app/vmalert/web.qtpl:128
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:131
|
||||
func StreamListAlerts(qw422016 *qt422016.Writer, groupAlerts []GroupAlerts) {
|
||||
//line app/vmalert/web.qtpl:131
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:132
|
||||
tpl.StreamHeader(qw422016, "Alerts", navItems)
|
||||
//line app/vmalert/web.qtpl:132
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:133
|
||||
if len(groupAlerts) > 0 {
|
||||
//line app/vmalert/web.qtpl:133
|
||||
qw422016.N().S(`
|
||||
<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
|
||||
<a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:136
|
||||
for _, ga := range groupAlerts {
|
||||
//line app/vmalert/web.qtpl:136
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:137
|
||||
g := ga.Group
|
||||
|
||||
//line app/vmalert/web.qtpl:137
|
||||
qw422016.N().S(`
|
||||
<div class="group-heading alert-danger" data-bs-target="rules-`)
|
||||
//line app/vmalert/web.qtpl:138
|
||||
qw422016.E().S(g.ID)
|
||||
//line app/vmalert/web.qtpl:138
|
||||
qw422016.N().S(`">
|
||||
<span class="anchor" id="group-`)
|
||||
//line app/vmalert/web.qtpl:139
|
||||
qw422016.E().S(g.ID)
|
||||
//line app/vmalert/web.qtpl:139
|
||||
qw422016.N().S(`"></span>
|
||||
<a href="#group-`)
|
||||
//line app/vmalert/web.qtpl:140
|
||||
qw422016.E().S(g.ID)
|
||||
//line app/vmalert/web.qtpl:140
|
||||
qw422016.N().S(`">`)
|
||||
//line app/vmalert/web.qtpl:140
|
||||
qw422016.E().S(g.Name)
|
||||
//line app/vmalert/web.qtpl:140
|
||||
if g.Type != "prometheus" {
|
||||
//line app/vmalert/web.qtpl:140
|
||||
qw422016.N().S(` (`)
|
||||
//line app/vmalert/web.qtpl:140
|
||||
qw422016.E().S(g.Type)
|
||||
//line app/vmalert/web.qtpl:140
|
||||
qw422016.N().S(`)`)
|
||||
//line app/vmalert/web.qtpl:140
|
||||
}
|
||||
//line app/vmalert/web.qtpl:140
|
||||
qw422016.N().S(`</a>
|
||||
<span class="badge bg-danger" title="Number of active alerts">`)
|
||||
//line app/vmalert/web.qtpl:141
|
||||
qw422016.N().D(len(ga.Alerts))
|
||||
//line app/vmalert/web.qtpl:141
|
||||
qw422016.N().S(`</span>
|
||||
<br>
|
||||
<p class="fs-6 fw-lighter">`)
|
||||
//line app/vmalert/web.qtpl:143
|
||||
qw422016.E().S(g.File)
|
||||
//line app/vmalert/web.qtpl:143
|
||||
qw422016.N().S(`</p>
|
||||
</div>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:146
|
||||
var keys []string
|
||||
alertsByRule := make(map[string][]*APIAlert)
|
||||
for _, alert := range ga.Alerts {
|
||||
if len(alertsByRule[alert.RuleID]) < 1 {
|
||||
keys = append(keys, alert.RuleID)
|
||||
}
|
||||
alertsByRule[alert.RuleID] = append(alertsByRule[alert.RuleID], alert)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
//line app/vmalert/web.qtpl:155
|
||||
qw422016.N().S(`
|
||||
<div class="collapse" id="rules-`)
|
||||
//line app/vmalert/web.qtpl:156
|
||||
qw422016.E().S(g.ID)
|
||||
//line app/vmalert/web.qtpl:156
|
||||
qw422016.N().S(`">
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:157
|
||||
for _, ruleID := range keys {
|
||||
//line app/vmalert/web.qtpl:157
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:159
|
||||
defaultAR := alertsByRule[ruleID][0]
|
||||
var labelKeys []string
|
||||
for k := range defaultAR.Labels {
|
||||
labelKeys = append(labelKeys, k)
|
||||
}
|
||||
sort.Strings(labelKeys)
|
||||
|
||||
//line app/vmalert/web.qtpl:165
|
||||
qw422016.N().S(`
|
||||
<br>
|
||||
<b>alert:</b> `)
|
||||
//line app/vmalert/web.qtpl:167
|
||||
qw422016.E().S(defaultAR.Name)
|
||||
//line app/vmalert/web.qtpl:167
|
||||
qw422016.N().S(` (`)
|
||||
//line app/vmalert/web.qtpl:167
|
||||
qw422016.N().D(len(alertsByRule[ruleID]))
|
||||
//line app/vmalert/web.qtpl:167
|
||||
qw422016.N().S(`)<br>
|
||||
<b>expr:</b><code><pre>`)
|
||||
//line app/vmalert/web.qtpl:168
|
||||
qw422016.E().S(defaultAR.Expression)
|
||||
//line app/vmalert/web.qtpl:168
|
||||
qw422016.N().S(`</pre></code>
|
||||
<table class="table table-striped table-hover table-sm">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">Labels</th>
|
||||
<th scope="col">State</th>
|
||||
<th scope="col">Active at</th>
|
||||
<th scope="col">Value</th>
|
||||
<th scope="col">Link</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:180
|
||||
for _, ar := range alertsByRule[ruleID] {
|
||||
//line app/vmalert/web.qtpl:180
|
||||
qw422016.N().S(`
|
||||
<tr>
|
||||
<td>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:183
|
||||
for _, k := range labelKeys {
|
||||
//line app/vmalert/web.qtpl:183
|
||||
qw422016.N().S(`
|
||||
<span class="ms-1 badge bg-primary">`)
|
||||
//line app/vmalert/web.qtpl:184
|
||||
qw422016.E().S(k)
|
||||
//line app/vmalert/web.qtpl:184
|
||||
qw422016.N().S(`=`)
|
||||
//line app/vmalert/web.qtpl:184
|
||||
qw422016.E().S(ar.Labels[k])
|
||||
//line app/vmalert/web.qtpl:184
|
||||
qw422016.N().S(`</span>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:185
|
||||
}
|
||||
//line app/vmalert/web.qtpl:185
|
||||
qw422016.N().S(`
|
||||
</td>
|
||||
<td><span class="badge `)
|
||||
//line app/vmalert/web.qtpl:187
|
||||
if ar.State == "firing" {
|
||||
//line app/vmalert/web.qtpl:187
|
||||
qw422016.N().S(`bg-danger`)
|
||||
//line app/vmalert/web.qtpl:187
|
||||
} else {
|
||||
//line app/vmalert/web.qtpl:187
|
||||
qw422016.N().S(` bg-warning text-dark`)
|
||||
//line app/vmalert/web.qtpl:187
|
||||
}
|
||||
//line app/vmalert/web.qtpl:187
|
||||
qw422016.N().S(`">`)
|
||||
//line app/vmalert/web.qtpl:187
|
||||
qw422016.E().S(ar.State)
|
||||
//line app/vmalert/web.qtpl:187
|
||||
qw422016.N().S(`</span></td>
|
||||
<td>`)
|
||||
//line app/vmalert/web.qtpl:188
|
||||
qw422016.E().S(ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00"))
|
||||
//line app/vmalert/web.qtpl:188
|
||||
qw422016.N().S(`</td>
|
||||
<td>`)
|
||||
//line app/vmalert/web.qtpl:189
|
||||
qw422016.E().S(ar.Value)
|
||||
//line app/vmalert/web.qtpl:189
|
||||
qw422016.N().S(`</td>
|
||||
<td>
|
||||
<a href="/`)
|
||||
//line app/vmalert/web.qtpl:191
|
||||
qw422016.E().S(g.ID)
|
||||
//line app/vmalert/web.qtpl:191
|
||||
qw422016.N().S(`/`)
|
||||
//line app/vmalert/web.qtpl:191
|
||||
qw422016.E().S(ar.ID)
|
||||
//line app/vmalert/web.qtpl:191
|
||||
qw422016.N().S(`/status">Details</a>
|
||||
</td>
|
||||
</tr>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:194
|
||||
}
|
||||
//line app/vmalert/web.qtpl:194
|
||||
qw422016.N().S(`
|
||||
</tbody>
|
||||
</table>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:197
|
||||
}
|
||||
//line app/vmalert/web.qtpl:197
|
||||
qw422016.N().S(`
|
||||
</div>
|
||||
<br>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:200
|
||||
}
|
||||
//line app/vmalert/web.qtpl:200
|
||||
qw422016.N().S(`
|
||||
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:202
|
||||
} else {
|
||||
//line app/vmalert/web.qtpl:202
|
||||
qw422016.N().S(`
|
||||
<div>
|
||||
<p>No items...</p>
|
||||
</div>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:206
|
||||
}
|
||||
//line app/vmalert/web.qtpl:206
|
||||
qw422016.N().S(`
|
||||
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:208
|
||||
tpl.StreamFooter(qw422016)
|
||||
//line app/vmalert/web.qtpl:208
|
||||
qw422016.N().S(`
|
||||
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:210
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:210
|
||||
func WriteListAlerts(qq422016 qtio422016.Writer, groupAlerts []GroupAlerts) {
|
||||
//line app/vmalert/web.qtpl:210
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/web.qtpl:210
|
||||
StreamListAlerts(qw422016, groupAlerts)
|
||||
//line app/vmalert/web.qtpl:210
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/web.qtpl:210
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:210
|
||||
func ListAlerts(groupAlerts []GroupAlerts) string {
|
||||
//line app/vmalert/web.qtpl:210
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/web.qtpl:210
|
||||
WriteListAlerts(qb422016, groupAlerts)
|
||||
//line app/vmalert/web.qtpl:210
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/web.qtpl:210
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/web.qtpl:210
|
||||
return qs422016
|
||||
//line app/vmalert/web.qtpl:210
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:212
|
||||
func StreamAlert(qw422016 *qt422016.Writer, alert *APIAlert) {
|
||||
//line app/vmalert/web.qtpl:212
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:213
|
||||
tpl.StreamHeader(qw422016, "", navItems)
|
||||
//line app/vmalert/web.qtpl:213
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:215
|
||||
var labelKeys []string
|
||||
for k := range alert.Labels {
|
||||
labelKeys = append(labelKeys, k)
|
||||
}
|
||||
sort.Strings(labelKeys)
|
||||
|
||||
var annotationKeys []string
|
||||
for k := range alert.Annotations {
|
||||
annotationKeys = append(annotationKeys, k)
|
||||
}
|
||||
sort.Strings(annotationKeys)
|
||||
|
||||
//line app/vmalert/web.qtpl:226
|
||||
qw422016.N().S(`
|
||||
<div class="display-6 pb-3 mb-3">`)
|
||||
//line app/vmalert/web.qtpl:227
|
||||
qw422016.E().S(alert.Name)
|
||||
//line app/vmalert/web.qtpl:227
|
||||
qw422016.N().S(`<span class="ms-2 badge `)
|
||||
//line app/vmalert/web.qtpl:227
|
||||
if alert.State == "firing" {
|
||||
//line app/vmalert/web.qtpl:227
|
||||
qw422016.N().S(`bg-danger`)
|
||||
//line app/vmalert/web.qtpl:227
|
||||
} else {
|
||||
//line app/vmalert/web.qtpl:227
|
||||
qw422016.N().S(` bg-warning text-dark`)
|
||||
//line app/vmalert/web.qtpl:227
|
||||
}
|
||||
//line app/vmalert/web.qtpl:227
|
||||
qw422016.N().S(`">`)
|
||||
//line app/vmalert/web.qtpl:227
|
||||
qw422016.E().S(alert.State)
|
||||
//line app/vmalert/web.qtpl:227
|
||||
qw422016.N().S(`</span></div>
|
||||
<div class="container border-bottom p-2">
|
||||
<div class="row">
|
||||
<div class="col-2">
|
||||
Active at
|
||||
</div>
|
||||
<div class="col">
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:234
|
||||
qw422016.E().S(alert.ActiveAt.Format("2006-01-02T15:04:05Z07:00"))
|
||||
//line app/vmalert/web.qtpl:234
|
||||
qw422016.N().S(`
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="container border-bottom p-2">
|
||||
<div class="row">
|
||||
<div class="col-2">
|
||||
Expr
|
||||
</div>
|
||||
<div class="col">
|
||||
<code><pre>`)
|
||||
//line app/vmalert/web.qtpl:244
|
||||
qw422016.E().S(alert.Expression)
|
||||
//line app/vmalert/web.qtpl:244
|
||||
qw422016.N().S(`</pre></code>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="container border-bottom p-2">
|
||||
<div class="row">
|
||||
<div class="col-2">
|
||||
Labels
|
||||
</div>
|
||||
<div class="col">
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:254
|
||||
for _, k := range labelKeys {
|
||||
//line app/vmalert/web.qtpl:254
|
||||
qw422016.N().S(`
|
||||
<span class="m-1 badge bg-primary">`)
|
||||
//line app/vmalert/web.qtpl:255
|
||||
qw422016.E().S(k)
|
||||
//line app/vmalert/web.qtpl:255
|
||||
qw422016.N().S(`=`)
|
||||
//line app/vmalert/web.qtpl:255
|
||||
qw422016.E().S(alert.Labels[k])
|
||||
//line app/vmalert/web.qtpl:255
|
||||
qw422016.N().S(`</span>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:256
|
||||
}
|
||||
//line app/vmalert/web.qtpl:256
|
||||
qw422016.N().S(`
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="container border-bottom p-2">
|
||||
<div class="row">
|
||||
<div class="col-2">
|
||||
Annotations
|
||||
</div>
|
||||
<div class="col">
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:266
|
||||
for _, k := range annotationKeys {
|
||||
//line app/vmalert/web.qtpl:266
|
||||
qw422016.N().S(`
|
||||
<b>`)
|
||||
//line app/vmalert/web.qtpl:267
|
||||
qw422016.E().S(k)
|
||||
//line app/vmalert/web.qtpl:267
|
||||
qw422016.N().S(`:</b><br>
|
||||
<p>`)
|
||||
//line app/vmalert/web.qtpl:268
|
||||
qw422016.E().S(alert.Annotations[k])
|
||||
//line app/vmalert/web.qtpl:268
|
||||
qw422016.N().S(`</p>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:269
|
||||
}
|
||||
//line app/vmalert/web.qtpl:269
|
||||
qw422016.N().S(`
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="container border-bottom p-2">
|
||||
<div class="row">
|
||||
<div class="col-2">
|
||||
Group
|
||||
</div>
|
||||
<div class="col">
|
||||
<a target="_blank" href="/groups#group-`)
|
||||
//line app/vmalert/web.qtpl:279
|
||||
qw422016.E().S(alert.GroupID)
|
||||
//line app/vmalert/web.qtpl:279
|
||||
qw422016.N().S(`">`)
|
||||
//line app/vmalert/web.qtpl:279
|
||||
qw422016.E().S(alert.GroupID)
|
||||
//line app/vmalert/web.qtpl:279
|
||||
qw422016.N().S(`</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:283
|
||||
tpl.StreamFooter(qw422016)
|
||||
//line app/vmalert/web.qtpl:283
|
||||
qw422016.N().S(`
|
||||
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:285
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:285
|
||||
func WriteAlert(qq422016 qtio422016.Writer, alert *APIAlert) {
|
||||
//line app/vmalert/web.qtpl:285
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/web.qtpl:285
|
||||
StreamAlert(qw422016, alert)
|
||||
//line app/vmalert/web.qtpl:285
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/web.qtpl:285
|
||||
}
|
||||
|
||||
//line app/vmalert/web.qtpl:285
|
||||
func Alert(alert *APIAlert) string {
|
||||
//line app/vmalert/web.qtpl:285
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/web.qtpl:285
|
||||
WriteAlert(qb422016, alert)
|
||||
//line app/vmalert/web.qtpl:285
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/web.qtpl:285
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/web.qtpl:285
|
||||
return qs422016
|
||||
//line app/vmalert/web.qtpl:285
|
||||
}
|
@ -9,6 +9,7 @@ import (

type APIAlert struct {
	ID         string `json:"id"`
	Name       string `json:"name"`
	RuleID     string `json:"rule_id"`
	GroupID    string `json:"group_id"`
	Expression string `json:"expression"`
	State      string `json:"state"`

@ -59,3 +60,9 @@ type APIRecordingRule struct {
	LastExec time.Time         `json:"last_exec"`
	Labels   map[string]string `json:"labels"`
}

// GroupAlerts represents a group of alerts for WEB view
type GroupAlerts struct {
	Group  APIGroup
	Alerts []*APIAlert
}

@ -230,6 +230,8 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
	Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password
  -httpListenAddr string
	TCP address to listen for http connections (default ":8427")
  -logInvalidAuthTokens
	Whether to log requests with invalid auth tokens. Such requests are always counted at vmagent_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
  -loggerDisableTimestamps
	Whether to disable writing timestamps in logs
  -loggerErrorsPerSecondLimit int

@ -2,6 +2,7 @@ package main

import (
	"flag"
	"fmt"
	"net/http"
	"net/http/httputil"
	"net/url"

@ -21,6 +22,8 @@ var (
	httpListenAddr         = flag.String("httpListenAddr", ":8427", "TCP address to listen for http connections")
	maxIdleConnsPerBackend = flag.Int("maxIdleConnsPerBackend", 100, "The maximum number of idle connections vmauth can open per each backend host")
	reloadAuthKey          = flag.String("reloadAuthKey", "", "Auth key for /-/reload http endpoint. It must be passed as authKey=...")
	logInvalidAuthTokens   = flag.Bool("logInvalidAuthTokens", false, "Whether to log requests with invalid auth tokens. "+
		`Such requests are always counted at vmagent_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page`)
)

func main() {

@ -71,7 +74,13 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	ac := authConfig.Load().(map[string]*UserInfo)
	ui := ac[authToken]
	if ui == nil {
		httpserver.Errorf(w, r, "cannot find the provided auth token %q in config", authToken)
		invalidAuthTokenRequests.Inc()
		if *logInvalidAuthTokens {
			httpserver.Errorf(w, r, "cannot find the provided auth token %q in config", authToken)
		} else {
			errStr := fmt.Sprintf("cannot find the provided auth token %q in config", authToken)
			http.Error(w, errStr, http.StatusBadRequest)
		}
		return true
	}
	ui.requests.Inc()

@ -99,7 +108,11 @@ func proxyRequest(w http.ResponseWriter, r *http.Request) {
	reverseProxy.ServeHTTP(w, r)
}

var configReloadRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/-/reload"}`)
var (
	configReloadRequests     = metrics.NewCounter(`vmagent_http_requests_total{path="/-/reload"}`)
	invalidAuthTokenRequests = metrics.NewCounter(`vmagent_http_request_errors_total{reason="invalid_auth_token"}`)
	missingRouteRequests     = metrics.NewCounter(`vmagent_http_request_errors_total{reason="missing_route"}`)
)

var reverseProxy = &httputil.ReverseProxy{
	Director: func(r *http.Request) {

@ -53,5 +53,6 @@ func createTargetURL(ui *UserInfo, uOrig *url.URL) (*url.URL, error) {
	if ui.URLPrefix != nil {
		return ui.URLPrefix.mergeURLs(&u), nil
	}
	missingRouteRequests.Inc()
	return nil, fmt.Errorf("missing route for %q", u.String())
}

@ -19,7 +19,7 @@ Backed up data can be restored with [vmrestore](https://docs.victoriametrics.com

See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.

See also [vmbackupmanager](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) tool built on top of `vmbackup`. This tool simplifies
See also [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html) tool built on top of `vmbackup`. This tool simplifies
creation of hourly, daily, weekly and monthly backups.

@ -89,7 +89,7 @@ or from any day (`YYYYMMDD` backups). Note that hourly backup shouldn't run when

Do not forget to remove old snapshots and backups when they are no longer needed, in order to save storage costs.

See also [vmbackupmanager tool](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for automating smart backups.
See also [vmbackupmanager tool](https://docs.victoriametrics.com/vmbackupmanager.html) for automating smart backups.

## How does it work?

@ -34,8 +34,8 @@ to the data source and common list of flags for destination (prefixed with `vm`

```
./vmctl influx --help
OPTIONS:
   --influx-addr value   Influx server addr (default: "http://localhost:8086")
   --influx-user value   Influx user [$INFLUX_USERNAME]
   --influx-addr value   InfluxDB server addr (default: "http://localhost:8086")
   --influx-user value   InfluxDB user [$INFLUX_USERNAME]
   ...
   --vm-addr vmctl       VictoriaMetrics address to perform import requests.
                         Should be the same as --httpListenAddr value for single-node version or vminsert component.

@ -216,16 +216,16 @@ Found 40000 timeseries to import. Continue? [Y/n] y

### Data mapping

Vmctl maps Influx data the same way as VictoriaMetrics does by using the following rules:
Vmctl maps InfluxDB data the same way as VictoriaMetrics does by using the following rules:

* `influx-database` arg is mapped into `db` label value unless `db` tag exists in the Influx line.
* `influx-database` arg is mapped into `db` label value unless `db` tag exists in the InfluxDB line.
* Field names are mapped to time series names prefixed with {measurement}{separator} value,
  where {separator} equals _ by default.
  It can be changed with the `--influx-measurement-field-separator` command-line flag.
* Field values are mapped to time series values.
* Tags are mapped to Prometheus labels format as-is.

For example, the following Influx line:
For example, the following InfluxDB line:
```
foo,tag1=value1,tag2=value2 field1=12,field2=40
```
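Under the default `_` separator, this line is expected to map into two time series, one per field, with the tags becoming labels (a sketch of the expected result; the `db` label added from `--influx-database` is omitted here):

```
foo_field1{tag1="value1", tag2="value2"} 12
foo_field2{tag1="value1", tag2="value2"} 40
```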

@ -294,7 +294,7 @@ if flags `--prom-filter-time-start` or `--prom-filter-time-end` were set. The ex
Please note that stats do not take timeseries or samples filtering into account. This is done during the import process.

The importing process takes the snapshot blocks revealed by the Explore procedure and processes them one by one,
accumulating timeseries and samples. Please note, that `vmctl` relies on responses from Influx on this stage,
accumulating timeseries and samples. Please note that `vmctl` relies on responses from InfluxDB at this stage,
so ensure that Explore queries are executed without errors or limits. Please see this
[issue](https://github.com/VictoriaMetrics/vmctl/issues/30) for details.
The data is processed in chunks and then sent to VM.

@ -484,7 +484,7 @@ See more details for cluster version [here](https://github.com/VictoriaMetrics/V

## Tuning

### Influx mode
### InfluxDB mode

The flag `--influx-concurrency` controls how many concurrent requests may be sent to InfluxDB while fetching
timeseries. Please set it wisely to avoid overwhelming InfluxDB.

@ -189,26 +189,26 @@ var (
	&cli.StringFlag{
		Name:  influxAddr,
		Value: "http://localhost:8086",
		Usage: "Influx server addr",
		Usage: "InfluxDB server addr",
	},
	&cli.StringFlag{
		Name:    influxUser,
		Usage:   "Influx user",
		Usage:   "InfluxDB user",
		EnvVars: []string{"INFLUX_USERNAME"},
	},
	&cli.StringFlag{
		Name:    influxPassword,
		Usage:   "Influx user password",
		Usage:   "InfluxDB user password",
		EnvVars: []string{"INFLUX_PASSWORD"},
	},
	&cli.StringFlag{
		Name:     influxDB,
		Usage:    "Influx database",
		Usage:    "InfluxDB database",
		Required: true,
	},
	&cli.StringFlag{
		Name:  influxRetention,
		Usage: "Influx retention policy",
		Usage: "InfluxDB retention policy",
		Value: "autogen",
	},
	&cli.IntFlag{

@ -223,7 +223,7 @@ var (
	},
	&cli.StringFlag{
		Name: influxFilterSeries,
		Usage: "Influx filter expression to select series. E.g. \"from cpu where arch='x86' AND hostname='host_2753'\".\n" +
		Usage: "InfluxDB filter expression to select series. E.g. \"from cpu where arch='x86' AND hostname='host_2753'\".\n" +
			"See for details https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#show-series",
	},
	&cli.StringFlag{

@ -20,8 +20,8 @@ import (
)

var (
	measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol")
	skipSingleField           = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if Influx line contains only a single field")
	measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for '{measurement}{separator}{field_name}' metric name when inserted via InfluxDB line protocol")
	skipSingleField           = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field")
	skipMeasurement           = flag.Bool("influxSkipMeasurement", false, "Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'")
)

@ -35,7 +35,7 @@ import (

var (
	graphiteListenAddr = flag.String("graphiteListenAddr", "", "TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty")
	influxListenAddr   = flag.String("influxListenAddr", "", "TCP and UDP address to listen for Influx line protocol data. Usually :8189 must be set. Doesn't work if empty. "+
	influxListenAddr   = flag.String("influxListenAddr", "", "TCP and UDP address to listen for InfluxDB line protocol data. Usually :8189 must be set. Doesn't work if empty. "+
		"This flag isn't needed when ingesting data over HTTP - just send it to http://<victoriametrics>:8428/write")
	opentsdbListenAddr = flag.String("opentsdbListenAddr", "", "TCP and UDP address to listen for OpenTSDB metrics. "+
		"Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. "+

@ -85,12 +85,6 @@ var vmuiFileServer = http.FileServer(http.FS(vmuiFiles))

// RequestHandler handles remote read API requests
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	// vmui access.
	if strings.HasPrefix(r.URL.Path, "/vmui") {
		vmuiFileServer.ServeHTTP(w, r)
		return true
	}

	startTime := time.Now()
	defer requestDuration.UpdateDuration(startTime)

@ -153,11 +147,32 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	//
	// See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format
	switch {
	case strings.HasPrefix(path, "/prometheus"):
	case strings.HasPrefix(path, "/prometheus/"):
		path = path[len("/prometheus"):]
	case strings.HasPrefix(path, "/graphite"):
	case strings.HasPrefix(path, "/graphite/"):
		path = path[len("/graphite"):]
	}
	// vmui access.
	if strings.HasPrefix(path, "/vmui") {
		r.URL.Path = path
		vmuiFileServer.ServeHTTP(w, r)
		return true
	}
	if strings.HasPrefix(path, "/graph") {
		// This is needed for serving /graph URLs from Prometheus datasource in Grafana.
		if path == "/graph" {
			// Redirect to /graph/, otherwise vmui redirects to /vmui/, which can be inaccessible in user env.
			// Use relative redirect, since the hostname and path prefix may be incorrect if VictoriaMetrics
			// is hidden behind vmauth or similar proxy.
			_ = r.ParseForm()
			newURL := "graph/?" + r.Form.Encode()
			http.Redirect(w, r, newURL, http.StatusFound)
			return true
		}
		r.URL.Path = strings.Replace(path, "/graph/", "/vmui/", 1)
		vmuiFileServer.ServeHTTP(w, r)
		return true
	}

	if strings.HasPrefix(path, "/api/v1/label/") {
		s := path[len("/api/v1/label/"):]

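A minimal standalone sketch (not code from this repo) of why the trailing slash matters in these prefix checks: without it, an unrelated path such as `/prometheusfoo/...` would also match and get its first 11 characters stripped.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	paths := []string{"/prometheus/api/v1/query", "/prometheusfoo/api/v1/query"}
	for _, p := range paths {
		loose := strings.HasPrefix(p, "/prometheus")   // old check: matches both paths
		strict := strings.HasPrefix(p, "/prometheus/") // new check: matches only the real prefix
		fmt.Printf("%s loose=%v strict=%v\n", p, loose, strict)
	}
}
```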
@ -1256,7 +1256,6 @@ func setupTfss(tr storage.TimeRange, tagFilterss [][]storage.TagFilter, deadline
			}
		}
		tfss = append(tfss, tfs)
		tfss = append(tfss, tfs.Finalize()...)
	}
	return tfss, nil
}

@ -576,7 +576,7 @@ func aggrFuncCountValues(afa *aggrFuncArg) ([]*timeseries, error) {
		var dst timeseries
		dst.CopyFromShallowTimestamps(tss[0])
		dst.MetricName.RemoveTag(dstLabel)
		dst.MetricName.AddTag(dstLabel, strconv.FormatFloat(v, 'g', -1, 64))
		dst.MetricName.AddTag(dstLabel, strconv.FormatFloat(v, 'f', -1, 64))
		for i := range dst.Values {
			count := 0
			for _, ts := range tss {

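The verb change matters for large values: `'g'` falls back to scientific notation, which would leak into `count_values()` label values, while `'f'` keeps the plain decimal form. A standalone sketch:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	v := 772424014.0
	fmt.Println(strconv.FormatFloat(v, 'g', -1, 64)) // 7.72424014e+08 - scientific notation
	fmt.Println(strconv.FormatFloat(v, 'f', -1, 64)) // 772424014 - plain decimal, suitable for a label value
}
```

This is exactly the case exercised by the new `count_values_big_numbers` test below.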
@ -6242,12 +6242,13 @@ func TestExecSuccess(t *testing.T) {
	})
	t.Run(`rollup_candlestick()`, func(t *testing.T) {
		t.Parallel()
		q := `sort(rollup_candlestick(round(rand(0),0.01)[:10s]))`
		q := `sort(rollup_candlestick(alias(round(rand(0),0.01),"foobar")[:10s]))`
		r1 := netstorage.Result{
			MetricName: metricNameExpected,
			Values:     []float64{0.02, 0.02, 0.03, 0, 0.03, 0.02},
			Timestamps: timestampsExpected,
		}
		r1.MetricName.MetricGroup = []byte("foobar")
		r1.MetricName.Tags = []storage.Tag{{
			Key:   []byte("rollup"),
			Value: []byte("low"),

@ -6257,6 +6258,7 @@ func TestExecSuccess(t *testing.T) {
			Values:     []float64{0.9, 0.32, 0.82, 0.13, 0.28, 0.86},
			Timestamps: timestampsExpected,
		}
		r2.MetricName.MetricGroup = []byte("foobar")
		r2.MetricName.Tags = []storage.Tag{{
			Key:   []byte("rollup"),
			Value: []byte("open"),

@ -6266,6 +6268,7 @@ func TestExecSuccess(t *testing.T) {
			Values:     []float64{0.1, 0.04, 0.49, 0.46, 0.57, 0.92},
			Timestamps: timestampsExpected,
		}
		r3.MetricName.MetricGroup = []byte("foobar")
		r3.MetricName.Tags = []storage.Tag{{
			Key:   []byte("rollup"),
			Value: []byte("close"),

@ -6275,6 +6278,7 @@ func TestExecSuccess(t *testing.T) {
			Values:     []float64{0.9, 0.94, 0.97, 0.93, 0.98, 0.92},
			Timestamps: timestampsExpected,
		}
		r4.MetricName.MetricGroup = []byte("foobar")
		r4.MetricName.Tags = []storage.Tag{{
			Key:   []byte("rollup"),
			Value: []byte("high"),

@ -6658,6 +6662,37 @@ func TestExecSuccess(t *testing.T) {
		resultExpected := []netstorage.Result{r1, r2, r3, r4}
		f(q, resultExpected)
	})
	t.Run(`count_values_big_numbers`, func(t *testing.T) {
		t.Parallel()
		q := `sort_by_label(
			count_values("xxx", (alias(772424014, "first"), alias(772424230, "second"))),
			"xxx"
		)`
		r1 := netstorage.Result{
			MetricName: metricNameExpected,
			Values:     []float64{1, 1, 1, 1, 1, 1},
			Timestamps: timestampsExpected,
		}
		r1.MetricName.Tags = []storage.Tag{
			{
				Key:   []byte("xxx"),
				Value: []byte("772424014"),
			},
		}
		r2 := netstorage.Result{
			MetricName: metricNameExpected,
			Values:     []float64{1, 1, 1, 1, 1, 1},
			Timestamps: timestampsExpected,
		}
		r2.MetricName.Tags = []storage.Tag{
			{
				Key:   []byte("xxx"),
				Value: []byte("772424230"),
			},
		}
		resultExpected := []netstorage.Result{r1, r2}
		f(q, resultExpected)
	})
	t.Run(`count_values`, func(t *testing.T) {
		t.Parallel()
		q := `count_values("xxx", label_set(10, "foo", "bar") or label_set(time()/100, "foo", "bar", "baz", "xx"))`

@ -198,6 +198,7 @@ var rollupFuncsKeepMetricGroup = map[string]bool{
	"first_over_time":    true,
	"last_over_time":     true,
	"mode_over_time":     true,
	"rollup_candlestick": true,
}

func getRollupAggrFuncNames(expr metricsql.Expr) ([]string, error) {

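With `rollup_candlestick` added to `rollupFuncsKeepMetricGroup`, the metric name of the input series is now kept on the results, which is what the updated `rollup_candlestick()` test above asserts via `alias(..., "foobar")`. For example, a query such as:

```
sort(rollup_candlestick(alias(round(rand(0),0.01),"foobar")[:10s]))
```

is expected to return four series all named `foobar`, distinguished only by the `rollup` tag (`open`, `close`, `low`, `high`).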
@ -1,17 +1,17 @@
{
  "files": {
    "main.css": "./static/css/main.6452b577.chunk.css",
    "main.js": "./static/js/main.e5416b79.chunk.js",
    "runtime-main.js": "./static/js/runtime-main.0270250c.js",
    "static/js/2.63374ed0.chunk.js": "./static/js/2.63374ed0.chunk.js",
    "static/js/3.a5d02d16.chunk.js": "./static/js/3.a5d02d16.chunk.js",
    "main.js": "./static/js/main.801aa0ec.chunk.js",
    "runtime-main.js": "./static/js/runtime-main.55798746.js",
    "static/js/2.fd2c2c30.chunk.js": "./static/js/2.fd2c2c30.chunk.js",
    "static/js/3.c36fc28c.chunk.js": "./static/js/3.c36fc28c.chunk.js",
    "index.html": "./index.html",
    "static/js/2.63374ed0.chunk.js.LICENSE.txt": "./static/js/2.63374ed0.chunk.js.LICENSE.txt"
    "static/js/2.fd2c2c30.chunk.js.LICENSE.txt": "./static/js/2.fd2c2c30.chunk.js.LICENSE.txt"
  },
  "entrypoints": [
    "static/js/runtime-main.0270250c.js",
    "static/js/2.63374ed0.chunk.js",
    "static/js/runtime-main.55798746.js",
    "static/js/2.fd2c2c30.chunk.js",
    "static/css/main.6452b577.chunk.css",
    "static/js/main.e5416b79.chunk.js"
    "static/js/main.801aa0ec.chunk.js"
  ]
}

@ -1 +1 @@
|
|||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/main.6452b577.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"a5d02d16"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([])</script><script src="./static/js/2.63374ed0.chunk.js"></script><script src="./static/js/main.e5416b79.chunk.js"></script></body></html>
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/main.6452b577.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"c36fc28c"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([])</script><script src="./static/js/2.fd2c2c30.chunk.js"></script><script src="./static/js/main.801aa0ec.chunk.js"></script></body></html>
|
2
app/vmselect/vmui/static/js/2.fd2c2c30.chunk.js
Normal file
File diff suppressed because one or more lines are too long
|
@ -1 +1 @@
|
|||
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{431:function(t,n,e){"use strict";e.r(n),e.d(n,"getCLS",(function(){return l})),e.d(n,"getFCP",(function(){return g})),e.d(n,"getFID",(function(){return h})),e.d(n,"getLCP",(function(){return y})),e.d(n,"getTTFB",(function(){return F}));var i,a,r=function(){return"".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)},o=function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:-1;return{name:t,value:n,delta:0,entries:[],id:r(),isFinal:!1}},u=function(t,n){try{if(PerformanceObserver.supportedEntryTypes.includes(t)){var e=new PerformanceObserver((function(t){return t.getEntries().map(n)}));return e.observe({type:t,buffered:!0}),e}}catch(t){}},s=!1,c=!1,d=function(t){s=!t.persisted},f=function(){addEventListener("pagehide",d),addEventListener("beforeunload",(function(){}))},p=function(t){var n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];c||(f(),c=!0),addEventListener("visibilitychange",(function(n){var e=n.timeStamp;"hidden"===document.visibilityState&&t({timeStamp:e,isUnloading:s})}),{capture:!0,once:n})},v=function(t,n,e,i){var a;return function(){e&&n.isFinal&&e.disconnect(),n.value>=0&&(i||n.isFinal||"hidden"===document.visibilityState)&&(n.delta=n.value-(a||0),(n.delta||n.isFinal||void 0===a)&&(t(n),a=n.value))}},l=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("CLS",0),a=function(t){t.hadRecentInput||(i.value+=t.value,i.entries.push(t),n())},r=u("layout-shift",a);r&&(n=v(t,i,r,e),p((function(t){var e=t.isUnloading;r.takeRecords().map(a),e&&(i.isFinal=!0),n()})))},m=function(){return void 0===i&&(i="hidden"===document.visibilityState?0:1/0,p((function(t){var n=t.timeStamp;return i=n}),!0)),{get timeStamp(){return i}}},g=function(t){var n,e=o("FCP"),i=m(),a=u("paint",(function(t){"first-contentful-paint"===t.name&&t.startTime<i.timeStamp&&(e.value=t.startTime,e.isFinal=!0,e.entries.push(t),n())}));a&&(n=v(t,e,a))},h=function(t){var n=o("FID"),e=m(),i=function(t){t.startTime<e.timeStamp&&(n.value=t.processingStart-t.startTime,n.entries.push(t),n.isFinal=!0,r())},a=u("first-input",i),r=v(t,n,a);a?p((function(){a.takeRecords().map(i),a.disconnect()}),!0):window.perfMetrics&&window.perfMetrics.onFirstInputDelay&&window.perfMetrics.onFirstInputDelay((function(t,i){i.timeStamp<e.timeStamp&&(n.value=t,n.isFinal=!0,n.entries=[{entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+t}],r())}))},S=function(){return a||(a=new Promise((function(t){return["scroll","keydown","pointerdown"].map((function(n){addEventListener(n,t,{once:!0,passive:!0,capture:!0})}))}))),a},y=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("LCP"),a=m(),r=function(t){var e=t.startTime;e<a.timeStamp?(i.value=e,i.entries.push(t)):i.isFinal=!0,n()},s=u("largest-contentful-paint",r);if(s){n=v(t,i,s,e);var c=function(){i.isFinal||(s.takeRecords().map(r),i.isFinal=!0,n())};S().then(c),p(c,!0)}},F=function(t){var n,e=o("TTFB");n=function(){try{var n=performance.getEntriesByType("navigation")[0]||function(){var t=performance.timing,n={entryType:"navigation",startTime:0};for(var e in t)"navigationStart"!==e&&"toJSON"!==e&&(n[e]=Math.max(t[e]-t.navigationStart,0));return n}();e.value=e.delta=n.responseStart,e.entries=[n],e.isFinal=!0,t(e)}catch(t){}},"complete"===document.readyState?setTimeout(n,0):addEventListener("pageshow",n)}}}]);
|
||||
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{432:function(t,n,e){"use strict";e.r(n),e.d(n,"getCLS",(function(){return l})),e.d(n,"getFCP",(function(){return g})),e.d(n,"getFID",(function(){return h})),e.d(n,"getLCP",(function(){return y})),e.d(n,"getTTFB",(function(){return F}));var i,a,r=function(){return"".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)},o=function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:-1;return{name:t,value:n,delta:0,entries:[],id:r(),isFinal:!1}},u=function(t,n){try{if(PerformanceObserver.supportedEntryTypes.includes(t)){var e=new PerformanceObserver((function(t){return t.getEntries().map(n)}));return e.observe({type:t,buffered:!0}),e}}catch(t){}},s=!1,c=!1,d=function(t){s=!t.persisted},f=function(){addEventListener("pagehide",d),addEventListener("beforeunload",(function(){}))},p=function(t){var n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];c||(f(),c=!0),addEventListener("visibilitychange",(function(n){var e=n.timeStamp;"hidden"===document.visibilityState&&t({timeStamp:e,isUnloading:s})}),{capture:!0,once:n})},v=function(t,n,e,i){var a;return function(){e&&n.isFinal&&e.disconnect(),n.value>=0&&(i||n.isFinal||"hidden"===document.visibilityState)&&(n.delta=n.value-(a||0),(n.delta||n.isFinal||void 0===a)&&(t(n),a=n.value))}},l=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("CLS",0),a=function(t){t.hadRecentInput||(i.value+=t.value,i.entries.push(t),n())},r=u("layout-shift",a);r&&(n=v(t,i,r,e),p((function(t){var e=t.isUnloading;r.takeRecords().map(a),e&&(i.isFinal=!0),n()})))},m=function(){return void 0===i&&(i="hidden"===document.visibilityState?0:1/0,p((function(t){var n=t.timeStamp;return i=n}),!0)),{get timeStamp(){return i}}},g=function(t){var n,e=o("FCP"),i=m(),a=u("paint",(function(t){"first-contentful-paint"===t.name&&t.startTime<i.timeStamp&&(e.value=t.startTime,e.isFinal=!0,e.entries.push(t),n())}));a&&(n=v(t,e,a))},h=function(t){var n=o("FID"),e=m(),i=function(t){t.startTime<e.timeStamp&&(n.value=t.processingStart-t.startTime,n.entries.push(t),n.isFinal=!0,r())},a=u("first-input",i),r=v(t,n,a);a?p((function(){a.takeRecords().map(i),a.disconnect()}),!0):window.perfMetrics&&window.perfMetrics.onFirstInputDelay&&window.perfMetrics.onFirstInputDelay((function(t,i){i.timeStamp<e.timeStamp&&(n.value=t,n.isFinal=!0,n.entries=[{entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+t}],r())}))},S=function(){return a||(a=new Promise((function(t){return["scroll","keydown","pointerdown"].map((function(n){addEventListener(n,t,{once:!0,passive:!0,capture:!0})}))}))),a},y=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("LCP"),a=m(),r=function(t){var e=t.startTime;e<a.timeStamp?(i.value=e,i.entries.push(t)):i.isFinal=!0,n()},s=u("largest-contentful-paint",r);if(s){n=v(t,i,s,e);var c=function(){i.isFinal||(s.takeRecords().map(r),i.isFinal=!0,n())};S().then(c),p(c,!0)}},F=function(t){var n,e=o("TTFB");n=function(){try{var n=performance.getEntriesByType("navigation")[0]||function(){var t=performance.timing,n={entryType:"navigation",startTime:0};for(var e in t)"navigationStart"!==e&&"toJSON"!==e&&(n[e]=Math.max(t[e]-t.navigationStart,0));return n}();e.value=e.delta=n.responseStart,e.entries=[n],e.isFinal=!0,t(e)}catch(t){}},"complete"===document.readyState?setTimeout(n,0):addEventListener("pageshow",n)}}}]);
|
1
app/vmselect/vmui/static/js/main.801aa0ec.chunk.js
Normal file
File diff suppressed because one or more lines are too long
|
@ -1 +1 @@
|
|||
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"a5d02d16"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([]);
|
||||
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"c36fc28c"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([]);
|
|
@ -1,4 +1,4 @@
|
|||
FROM golang:1.16.7 as build-web-stage
|
||||
FROM golang:1.17.1 as build-web-stage
|
||||
COPY build /build
|
||||
|
||||
WORKDIR /build
|
||||
|
@ -6,7 +6,7 @@ COPY web/ /build/
|
|||
RUN GOOS=linux GOARCH=amd64 GO111MODULE=on CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
|
||||
GOOS=windows GOARCH=amd64 GO111MODULE=on CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/
|
||||
|
||||
FROM alpine:3.14.1
|
||||
FROM alpine:3.14.2
|
||||
USER root
|
||||
|
||||
COPY --from=build-web-stage /build/web-amd64 /app/web
|
||||
|
|
55
app/vmui/packages/vmui/package-lock.json
generated
|
@ -5,6 +5,7 @@
|
|||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "vmui",
|
||||
"version": "0.1.0",
|
||||
"dependencies": {
|
||||
"@codemirror/next": "~0.13.1",
|
||||
|
@ -18,6 +19,7 @@
|
|||
"@testing-library/user-event": "^12.2.2",
|
||||
"@types/d3": "^6.1.0",
|
||||
"@types/jest": "^26.0.15",
|
||||
"@types/lodash.get": "^4.4.6",
|
||||
"@types/node": "^12.19.4",
|
||||
"@types/qs": "^6.9.6",
|
||||
"@types/react": "^16.9.56",
|
||||
|
@ -26,6 +28,7 @@
|
|||
"codemirror-promql": "^0.10.2",
|
||||
"d3": "^6.2.0",
|
||||
"dayjs": "^1.10.4",
|
||||
"lodash.get": "^4.4.2",
|
||||
"qs": "^6.5.2",
|
||||
"react": "^17.0.1",
|
||||
"react-dom": "^17.0.1",
|
||||
|
@ -3150,6 +3153,19 @@
|
|||
"resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
|
||||
"integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4="
|
||||
},
|
||||
"node_modules/@types/lodash": {
|
||||
"version": "4.14.172",
|
||||
"resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.172.tgz",
|
||||
"integrity": "sha512-/BHF5HAx3em7/KkzVKm3LrsD6HZAXuXO1AJZQ3cRRBZj4oHZDviWPYu0aEplAqDFNHZPW6d3G7KN+ONcCCC7pw=="
|
||||
},
|
||||
"node_modules/@types/lodash.get": {
|
||||
"version": "4.4.6",
|
||||
"resolved": "https://registry.npmjs.org/@types/lodash.get/-/lodash.get-4.4.6.tgz",
|
||||
"integrity": "sha512-E6zzjR3GtNig8UJG/yodBeJeIOtgPkMgsLjDU3CbgCAPC++vJ0eCMnJhVpRZb/ENqEFlov1+3K9TKtY4UdWKtQ==",
|
||||
"dependencies": {
|
||||
"@types/lodash": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/minimatch": {
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.3.tgz",
|
||||
|
@ -8752,7 +8768,8 @@
|
|||
"node_modules/flatten": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/flatten/-/flatten-1.0.3.tgz",
|
||||
"integrity": "sha512-dVsPA/UwQ8+2uoFe5GHtiBMu48dWLTdsuEd7CKGlZlD78r1TTWBvDuFaFGKCo/ZfEr95Uk56vZoX86OsHkUeIg=="
|
||||
"integrity": "sha512-dVsPA/UwQ8+2uoFe5GHtiBMu48dWLTdsuEd7CKGlZlD78r1TTWBvDuFaFGKCo/ZfEr95Uk56vZoX86OsHkUeIg==",
|
||||
"deprecated": "flatten is deprecated in favor of utility frameworks such as lodash."
|
||||
},
|
||||
"node_modules/flush-write-stream": {
|
||||
"version": "1.1.1",
|
||||
|
@ -12190,6 +12207,7 @@
|
|||
"version": "0.11.2",
|
||||
"resolved": "https://registry.npmjs.org/lezer/-/lezer-0.11.2.tgz",
|
||||
"integrity": "sha512-ktbo5G+sMY6qOXaeCbC4Z5Cs0vUTx5H4KkjnjTCi/LHay8taBp88dcUXjLS5XV7E/I3+y1zZGgIXhua82Z0Ljw==",
|
||||
"deprecated": "This package has been replaced by @lezer/lr",
|
||||
"dependencies": {
|
||||
"lezer-tree": "^0.11.0"
|
||||
}
|
||||
|
@ -12198,6 +12216,7 @@
|
|||
"version": "0.11.1",
|
||||
"resolved": "https://registry.npmjs.org/lezer-cpp/-/lezer-cpp-0.11.1.tgz",
|
||||
"integrity": "sha512-+hPUqZSJd+iUaxP2nNqZxMfGrwyRtGCsGLnxHJf0e9pjVxMQ96+nGDyKPArcAkT5n12OQwMi5g4IxidEpJ1JXw==",
|
||||
"deprecated": "This package has been replaced by @lezer/cpp",
|
||||
"dependencies": {
|
||||
"lezer": "^0.11.1"
|
||||
}
|
||||
|
@ -12206,6 +12225,7 @@
|
|||
"version": "0.11.1",
|
||||
"resolved": "https://registry.npmjs.org/lezer-css/-/lezer-css-0.11.1.tgz",
|
||||
"integrity": "sha512-uspAxtw56H200p1MKQyozY6vy/uooNBJP3zPhMuOjE+Q5qNgiLDAK9G6eW9/qGNLifRD2DhqTX0uAPiOaiXDtg==",
|
||||
"deprecated": "This package has been replaced by @lezer/css",
|
||||
"dependencies": {
|
||||
"lezer": "^0.11.1"
|
||||
}
|
||||
|
@ -12214,6 +12234,7 @@
|
|||
"version": "0.11.1",
|
||||
"resolved": "https://registry.npmjs.org/lezer-html/-/lezer-html-0.11.1.tgz",
|
||||
"integrity": "sha512-ub9oRgTrCQzgO3coLZlKbmODquIuR6VouBVzprxo17yeuhHbuq7c94pY7xQd6k+ied/dw7kMEROo6ekjDNtS6A==",
|
||||
"deprecated": "This package has been replaced by @lezer/html",
|
||||
"dependencies": {
|
||||
"lezer": "^0.11.1"
|
||||
}
|
||||
|
@ -12222,6 +12243,7 @@
|
|||
"version": "0.11.1",
|
||||
"resolved": "https://registry.npmjs.org/lezer-java/-/lezer-java-0.11.1.tgz",
|
||||
"integrity": "sha512-g319H9R+gh/epYat2ajn67wbL72BuyrAJMNIh2qWnmwPHLPsFZNmYyUXuCcqqOuhrQwvc7HwN7ZXK03oCHH5DA==",
|
||||
"deprecated": "This package has been replaced by @lezer/java",
|
||||
"dependencies": {
|
||||
"lezer": "^0.11.1"
|
||||
}
|
||||
|
@ -12230,6 +12252,7 @@
|
|||
"version": "0.11.1",
|
||||
"resolved": "https://registry.npmjs.org/lezer-javascript/-/lezer-javascript-0.11.1.tgz",
|
||||
"integrity": "sha512-XAlZe8BirVBPB7SHTg975W4HQfuV/ZRgqVry+iklCqVR9cqRZYQMd98EHpf/j0J1NyvOFw6FJL3kPrq1Zglidw==",
|
||||
"deprecated": "This package has been replaced by @lezer/javascript",
|
||||
"dependencies": {
|
||||
"lezer": "^0.11.1"
|
||||
}
|
||||
|
@ -12238,6 +12261,7 @@
|
|||
"version": "0.11.1",
|
||||
"resolved": "https://registry.npmjs.org/lezer-json/-/lezer-json-0.11.1.tgz",
|
||||
"integrity": "sha512-ziT9OzwqlLskEIAVBVxq25tJEhDphOKoYk42cfZfzlm8sL12jBugHdkEVuO9BiVfGIh1aj1wJYFxpP9UMf8WKw==",
|
||||
"deprecated": "This package has been replaced by @lezer/json",
|
||||
"dependencies": {
|
||||
"lezer": "^0.11.1"
|
||||
}
|
||||
|
@ -12254,6 +12278,7 @@
|
|||
"version": "0.11.1",
|
||||
"resolved": "https://registry.npmjs.org/lezer-python/-/lezer-python-0.11.1.tgz",
|
||||
"integrity": "sha512-cfopeLETQiKzBmjsENUmz0a8jCIqQkAeCeakypdX3lvMffEK6EIi0y8S8+Zb4TQbdwuzz3jB9nd8dmof1LGPxw==",
|
||||
"deprecated": "This package has been replaced by @lezer/python",
|
||||
"dependencies": {
|
||||
"lezer": "^0.11.1"
|
||||
}
|
||||
|
@ -12262,6 +12287,7 @@
|
|||
"version": "0.11.1",
|
||||
"resolved": "https://registry.npmjs.org/lezer-rust/-/lezer-rust-0.11.1.tgz",
|
||||
"integrity": "sha512-Nac9eu9HSuop854qhE2EKPye7qbUtIlQTwWMwwm6hL60Mm6xwzwvaLxnQkQljl2OyQQ6xMoBTUemLIGMFztdCw==",
|
||||
"deprecated": "This package has been replaced by @lezer/rust",
|
||||
"dependencies": {
|
||||
"lezer": "^0.11.0"
|
||||
}
|
||||
|
@ -12269,12 +12295,14 @@
|
|||
"node_modules/lezer-tree": {
|
||||
"version": "0.11.1",
|
||||
"resolved": "https://registry.npmjs.org/lezer-tree/-/lezer-tree-0.11.1.tgz",
|
||||
"integrity": "sha512-AC1FmSzxL/cxV8LrbPMW6+8P/CaG8AxRql1XjzIfLkTQHY3LUan6tG8aieB/OvZmz0NuQpgfw6yDegXFNzVK4A=="
|
||||
"integrity": "sha512-AC1FmSzxL/cxV8LrbPMW6+8P/CaG8AxRql1XjzIfLkTQHY3LUan6tG8aieB/OvZmz0NuQpgfw6yDegXFNzVK4A==",
|
||||
"deprecated": "This package has been replaced by @lezer/common"
|
||||
},
|
||||
"node_modules/lezer-xml": {
|
||||
"version": "0.11.1",
|
||||
"resolved": "https://registry.npmjs.org/lezer-xml/-/lezer-xml-0.11.1.tgz",
|
||||
"integrity": "sha512-FoJX8xvF80zunwWGYyPsDSmEJTKuOvipIQ5BSrn7tBaC6LTEvZ/q9mkew/3/dqpjdQfDhW8DCSoDvb/rFvqdnA==",
|
||||
"deprecated": "This package has been replaced by @lezer/xml",
|
||||
"dependencies": {
|
||||
"lezer": "^0.11.1"
|
||||
}
|
||||
|
@ -12359,6 +12387,11 @@
|
|||
"resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz",
|
||||
"integrity": "sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0="
|
||||
},
|
||||
"node_modules/lodash.get": {
|
||||
"version": "4.4.2",
|
||||
"resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz",
|
||||
"integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk="
|
||||
},
|
||||
"node_modules/lodash.memoize": {
|
||||
"version": "4.1.2",
|
||||
"resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
|
||||
|
@ -23499,6 +23532,19 @@
|
|||
"resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
|
||||
"integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4="
|
||||
},
|
||||
"@types/lodash": {
|
||||
"version": "4.14.172",
|
||||
"resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.172.tgz",
|
||||
"integrity": "sha512-/BHF5HAx3em7/KkzVKm3LrsD6HZAXuXO1AJZQ3cRRBZj4oHZDviWPYu0aEplAqDFNHZPW6d3G7KN+ONcCCC7pw=="
|
||||
},
|
||||
"@types/lodash.get": {
|
||||
"version": "4.4.6",
|
||||
"resolved": "https://registry.npmjs.org/@types/lodash.get/-/lodash.get-4.4.6.tgz",
|
||||
"integrity": "sha512-E6zzjR3GtNig8UJG/yodBeJeIOtgPkMgsLjDU3CbgCAPC++vJ0eCMnJhVpRZb/ENqEFlov1+3K9TKtY4UdWKtQ==",
|
||||
"requires": {
|
||||
"@types/lodash": "*"
|
||||
}
|
||||
},
|
||||
"@types/minimatch": {
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.3.tgz",
|
||||
|
@ -30851,6 +30897,11 @@
|
|||
"resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz",
|
||||
"integrity": "sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0="
|
||||
},
|
||||
"lodash.get": {
|
||||
"version": "4.4.2",
|
||||
"resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz",
|
||||
"integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk="
|
||||
},
|
||||
"lodash.memoize": {
|
||||
"version": "4.1.2",
|
||||
"resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
"@testing-library/user-event": "^12.2.2",
|
||||
"@types/d3": "^6.1.0",
|
||||
"@types/jest": "^26.0.15",
|
||||
"@types/lodash.get": "^4.4.6",
|
||||
"@types/node": "^12.19.4",
|
||||
"@types/qs": "^6.9.6",
|
||||
"@types/react": "^16.9.56",
|
||||
|
@ -23,6 +24,7 @@
|
|||
"codemirror-promql": "^0.10.2",
|
||||
"d3": "^6.2.0",
|
||||
"dayjs": "^1.10.4",
|
||||
"lodash.get": "^4.4.2",
|
||||
"qs": "^6.5.2",
|
||||
"react": "^17.0.1",
|
||||
"react-dom": "^17.0.1",
|
||||
|
|
|
@ -67,7 +67,6 @@ export const useFetchQuery = (): {
|
|||
headers
|
||||
});
|
||||
if (response.ok) {
|
||||
saveToStorage("PREFERRED_URL", serverUrl);
|
||||
saveToStorage("LAST_QUERY", query);
|
||||
const resp = await response.json();
|
||||
setError(undefined);
|
||||
|
@ -87,4 +86,4 @@ export const useFetchQuery = (): {
|
|||
liveData,
|
||||
error
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -3,6 +3,7 @@ import {TimeParams, TimePeriod} from "../../types";
|
|||
import {dateFromSeconds, getDurationFromPeriod, getTimeperiodForDuration} from "../../utils/time";
|
||||
import {getFromStorage} from "../../utils/storage";
|
||||
import {getDefaultServer} from "../../utils/default-server-url";
|
||||
import {getQueryStringValue} from "../../utils/query-string";
|
||||
|
||||
export interface TimeState {
|
||||
duration: string;
|
||||
|
@ -32,14 +33,16 @@ export type Action =
|
|||
| { type: "TOGGLE_AUTOREFRESH"}
|
||||
| { type: "TOGGLE_AUTOCOMPLETE"}
|
||||
|
||||
const duration = getQueryStringValue("g0.range_input", "1h");
|
||||
const endInput = getQueryStringValue("g0.end_input", undefined);
|
||||
|
||||
export const initialState: AppState = {
|
||||
serverUrl: getFromStorage("PREFERRED_URL") as string || getDefaultServer(), // https://demo.promlabs.com or https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus",
|
||||
serverUrl: getDefaultServer(),
|
||||
displayType: "chart",
|
||||
query: getFromStorage("LAST_QUERY") as string || "\n", // demo_memory_usage_bytes
|
||||
query: getQueryStringValue("g0.expr", getFromStorage("LAST_QUERY") as string || "\n"), // demo_memory_usage_bytes
|
||||
time: {
|
||||
duration: "1h",
|
||||
period: getTimeperiodForDuration("1h")
|
||||
duration,
|
||||
period: getTimeperiodForDuration(duration, endInput && new Date(endInput))
|
||||
},
|
||||
queryControls: {
|
||||
autoRefresh: false,
|
||||
|
@ -131,4 +134,4 @@ export function reducer(state: AppState, action: Action): AppState {
|
|||
default:
|
||||
throw new Error();
|
||||
}
|
||||
}
|
||||
}
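The hunk above wires `vmui`'s initial state to Prometheus-compatible query args (`g0.expr`, `g0.range_input`, `g0.end_input`). A worked example (the URL is hypothetical) of how the new code decodes them:

```ts
// Hypothetical URL: /vmui/?g0.expr=up&g0.range_input=30m&g0.end_input=2021-09-14T10:00:00
const duration = getQueryStringValue("g0.range_input", "1h");     // -> "30m"
const query = getQueryStringValue("g0.expr", "\n");               // -> "up"
const endInput = getQueryStringValue("g0.end_input", undefined);  // -> "2021-09-14T10:00:00"
// time.period then becomes getTimeperiodForDuration(duration, endInput && new Date(endInput))
```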
|
||||
|
|
|
@ -4,6 +4,7 @@ export interface TimeParams {
|
|||
start: number; // timestamp in seconds
|
||||
end: number; // timestamp in seconds
|
||||
step?: number; // seconds
|
||||
date: string; // end input date
|
||||
}
|
||||
|
||||
export interface TimePeriod {
|
||||
|
|
|
@ -1,6 +1,3 @@
|
|||
export const getDefaultServer = (): string => {
|
||||
const {href} = window.location;
|
||||
const regexp = /^http.+\/vmui/;
|
||||
const [result] = href.match(regexp) || ["https://"];
|
||||
return result.replace("vmui", "prometheus");
|
||||
};
|
||||
return window.location.href.replace(/\/(?:prometheus\/)?(?:graph|vmui)\/.*/, "/prometheus/");
|
||||
};
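The rewritten `getDefaultServer` above collapses the old match-and-replace dance into a single `replace` call. A minimal sketch of what the new expression yields (both input URLs are invented for illustration):

```ts
// Strips everything from "/graph/" or "/vmui/" (optionally prefixed with
// "/prometheus/") and substitutes "/prometheus/".
const toDefaultServer = (href: string): string =>
  href.replace(/\/(?:prometheus\/)?(?:graph|vmui)\/.*/, "/prometheus/");

toDefaultServer("http://localhost:8428/vmui/?g0.expr=up");
// -> "http://localhost:8428/prometheus/"
toDefaultServer("http://example.com/prometheus/graph/?g0.expr=up");
// -> "http://example.com/prometheus/"
```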
|
||||
|
|
|
@ -1,49 +1,61 @@
|
|||
import qs from "qs";
|
||||
import get from "lodash.get";
|
||||
|
||||
const decoder = (value: string) => {
|
||||
if (/^(\d+|\d*\.\d+)$/.test(value)) {
|
||||
return parseFloat(value);
|
||||
}
|
||||
|
||||
const keywords = {
|
||||
true: true,
|
||||
false: false,
|
||||
null: null,
|
||||
undefined: undefined,
|
||||
};
|
||||
if (value in keywords) {
|
||||
return keywords[value as keyof typeof keywords];
|
||||
}
|
||||
|
||||
return decodeURI(value);
|
||||
const stateToUrlParams = {
|
||||
"query": "g0.expr",
|
||||
"time.duration": "g0.range_input",
|
||||
"time.period.date": "g0.end_input",
|
||||
"time.period.step": "g0.step_input",
|
||||
"stacked": "g0.stacked",
|
||||
};
|
||||
|
||||
// TODO need function for detect types.
|
||||
// const decoder = (value: string) => {
|
||||
// This function does not parse dates
|
||||
// if (/^(\d+|\d*\.\d+)$/.test(value)) {
|
||||
// return parseFloat(value);
|
||||
// }
|
||||
//
|
||||
// const keywords = {
|
||||
// true: true,
|
||||
// false: false,
|
||||
// null: null,
|
||||
// undefined: undefined,
|
||||
// };
|
||||
// if (value in keywords) {
|
||||
// return keywords[value as keyof typeof keywords];
|
||||
// }
|
||||
//
|
||||
// return decodeURI(value);
|
||||
// };
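The commented-out `decoder` and its TODO call for a type-detecting parser that, unlike the old one, leaves date strings alone. One possible shape for such a function, purely as a sketch (nothing below is part of the commit; `typedDecoder` is a hypothetical name):

```ts
const typedDecoder = (value: string): unknown => {
  // keep ISO-like dates as strings so new Date(value) works downstream
  if (/^\d{4}-\d{2}-\d{2}/.test(value)) return value;
  // plain integers and decimals become numbers
  if (/^(\d+|\d*\.\d+)$/.test(value)) return parseFloat(value);
  // keyword literals become their JS values
  const keywords: Record<string, unknown> = {true: true, false: false, null: null, undefined: undefined};
  if (value in keywords) return keywords[value];
  return decodeURI(value);
};
```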
|
||||
|
||||
export const setQueryStringWithoutPageReload = (qsValue: string): void => {
|
||||
const w = window;
|
||||
if (w) {
|
||||
const newurl = w.location.protocol +
|
||||
"//" +
|
||||
w.location.host +
|
||||
w.location.pathname +
|
||||
"?" +
|
||||
qsValue;
|
||||
const newurl = `${w.location.protocol}//${w.location.host}${w.location.pathname}?${qsValue}`;
|
||||
w.history.pushState({ path: newurl }, "", newurl);
|
||||
}
|
||||
};
|
||||
|
||||
export const setQueryStringValue = (
|
||||
newValue: Record<string, unknown>,
|
||||
queryString = window.location.search
|
||||
): void => {
|
||||
const values = qs.parse(queryString, { ignoreQueryPrefix: true, decoder });
|
||||
const newQsValue = qs.stringify({ ...values, ...newValue }, { encode: false });
|
||||
setQueryStringWithoutPageReload(newQsValue);
|
||||
export const setQueryStringValue = (newValue: Record<string, unknown>): void => {
|
||||
const queryMap = new Map(Object.entries(stateToUrlParams));
|
||||
const newQsValue: string[] = [];
|
||||
queryMap.forEach((queryKey, stateKey) => {
|
||||
const queryKeyEncoded = encodeURIComponent(queryKey);
|
||||
const value = get(newValue, stateKey, "") as string;
|
||||
if (value) {
|
||||
const valueEncoded = encodeURIComponent(value);
|
||||
newQsValue.push(`${queryKey}=${valueEncoded}`);
|
||||
}
|
||||
});
|
||||
setQueryStringWithoutPageReload(newQsValue.join("&"));
|
||||
};
|
||||
|
||||
export const getQueryStringValue = (
|
||||
key: string,
|
||||
defaultValue?: any,
|
||||
queryString = window.location.search
|
||||
): unknown => {
|
||||
const values = qs.parse(queryString, { ignoreQueryPrefix: true, decoder });
|
||||
return values[key];
|
||||
};
|
||||
) => {
|
||||
const values = qs.parse(queryString, { ignoreQueryPrefix: true });
|
||||
return get(values, key, defaultValue || "");
|
||||
};
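For reference, a hypothetical round trip through the rewritten helpers above (every value is invented; the output shown assumes the `stateToUrlParams` mapping from this commit):

```ts
setQueryStringValue({
  query: "rate(node_network_receive_bytes_total)",
  time: {duration: "1h", period: {date: "2021-09-14T10:00:00", step: 120}},
});
// pushes roughly:
// ?g0.expr=rate(node_network_receive_bytes_total)&g0.range_input=1h
//  &g0.end_input=2021-09-14T10%3A00%3A00&g0.step_input=120

getQueryStringValue("g0.range_input", "1h"); // -> "1h"
```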
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
export type StorageKeys = "PREFERRED_URL"
|
||||
| "LAST_QUERY"
|
||||
export type StorageKeys = "LAST_QUERY"
|
||||
| "BASIC_AUTH_DATA"
|
||||
| "BEARER_AUTH_DATA"
|
||||
| "AUTH_TYPE"
|
||||
|
|
|
@ -5,7 +5,7 @@ import duration from "dayjs/plugin/duration";
|
|||
|
||||
dayjs.extend(duration);
|
||||
|
||||
const MAX_ITEMS_PER_CHART = 30; // TODO: make dependent from screen size
|
||||
const MAX_ITEMS_PER_CHART = window.screen.availWidth / 2;
|
||||
|
||||
export const supportedDurations = [
|
||||
{long: "days", short: "d", possible: "day"},
|
||||
|
@ -51,18 +51,17 @@ export const getTimeperiodForDuration = (dur: string, date?: Date): TimeParams =
|
|||
}, {});
|
||||
|
||||
const delta = dayjs.duration(durObject).asSeconds();
|
||||
const step = Math.ceil(delta / MAX_ITEMS_PER_CHART);
|
||||
|
||||
return {
|
||||
start: n - delta,
|
||||
end: n,
|
||||
step: delta / MAX_ITEMS_PER_CHART
|
||||
step: step,
|
||||
date: formatDateForNativeInput((date || new Date()))
|
||||
};
|
||||
};
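The new `step` above is an integer number of seconds derived from the available screen width. A back-of-the-envelope check (the 1920px screen width is an assumption):

```ts
// 1h window (3600s) on a 1920px-wide screen:
const MAX_ITEMS_PER_CHART = 1920 / 2;               // 960 points
const step = Math.ceil(3600 / MAX_ITEMS_PER_CHART); // ceil(3.75) = 4s per point
// the old code would have sent the fractional step 3.75 to the API
```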
|
||||
|
||||
export const formatDateForNativeInput = (date: Date): string => {
|
||||
const isoString = dayjs(date).format("YYYY-MM-DD[T]HH:mm:ss");
|
||||
return isoString;
|
||||
};
|
||||
export const formatDateForNativeInput = (date: Date): string => dayjs(date).format("YYYY-MM-DD[T]HH:mm:ss");
|
||||
|
||||
export const getDurationFromPeriod = (p: TimePeriod): string => {
|
||||
const dur = dayjs.duration(p.to.valueOf() - p.from.valueOf());
|
||||
|
|
|
@ -2,9 +2,9 @@
|
|||
|
||||
DOCKER_NAMESPACE := victoriametrics
|
||||
|
||||
ROOT_IMAGE ?= alpine:3.14.1
|
||||
CERTS_IMAGE := alpine:3.14.1
|
||||
GO_BUILDER_IMAGE := golang:1.17.0
|
||||
ROOT_IMAGE ?= alpine:3.14.2
|
||||
CERTS_IMAGE := alpine:3.14.2
|
||||
GO_BUILDER_IMAGE := golang:1.17.1
|
||||
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr : _)
|
||||
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr : _)-$(shell echo $(CERTS_IMAGE) | tr : _)
|
||||
|
||||
|
|
|
@ -4,6 +4,8 @@ sort: 16
|
|||
|
||||
# Articles
|
||||
|
||||
See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
|
||||
|
||||
## Third-party articles and slides about VictoriaMetrics
|
||||
|
||||
* [Scaling to trillions of metric data points](https://engineering.razorpay.com/scaling-to-trillions-of-metric-data-points-f569a5b654f2)
|
||||
|
@ -42,7 +44,8 @@ sort: 16
|
|||
* [Multi-tenancy monitoring system for Kubernetes cluster using VictoriaMetrics and operators](https://blog.kintone.io/entry/2021/03/31/175256)
|
||||
* [Monitoring as Code based on VictoriaMetrics and Grafana (in Russian)](https://habr.com/ru/post/568090/)
|
||||
* [Push Prometheus metrics to VictoriaMetrics or other exporters](https://pythonawesome.com/push-prometheus-metrics-to-victoriametrics-or-other-exporters/)
|
||||
|
||||
* [Install and configure VictoriaMetrics on Debian](https://www.vultr.com/docs/install-and-configure-victoriametrics-on-debian)
|
||||
* [Choosing a Time Series Database for High Cardinality Aggregations](https://abiosgaming.com/press/high-cardinality-aggregations/)
|
||||
|
||||
## Our articles
|
||||
|
||||
|
|
|
@ -6,6 +6,33 @@ sort: 15
|
|||
|
||||
## tip
|
||||
|
||||
* FEATURE: vmalert: add web UI with the list of alerting groups, alerts and alert statuses. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1602).
|
||||
* FEATURE: vmalert: add `-rule.maxResolveDuration` command-line flag, which could be used for limiting the auto-resolve duration for the alerting rule. By default it is limited to 3x evaluation interval. This could be too high for big evaluation intervals. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1586).
|
||||
* FEATURE: vmalert: add support for Bearer token authorization for `-datasource.url`, `-remoteRead.url` and `-remoteWrite.url`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1608).
|
||||
* FEATURE: vmagent: send stale markers for disappeared metrics like Prometheus does. Previously stale markers were sent only when the scrape target disappears or when it becomes temporarily unavailable. See [these docs](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) for details.
|
||||
* FEATURE: vmagent: add ability to set `series_limit` option for a particular scrape target via `__series_limit__` label. This allows setting the limit on the number of time series on a per-target basis. See [these docs](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter) for details.
|
||||
* FEATURE: vmagent: add ability to set `stream_parse` option for a particular scrape target via `__stream_parse__` label. This allows managing the stream parsing mode on a per-target basis. See [these docs](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode) for details.
|
||||
* FEATURE: vmagent: add ability to set `scrape_interval` and `scrape_timeout` options for a particular target via `__scrape_interval__` and `__scrape_timeout__` labels in the same way as Prometheus 2.30 does. See [this pull request](https://github.com/prometheus/prometheus/pull/8911).
|
||||
* FEATURE: vmagent: generate `scrape_timeout_seconds` metric per each scrape target, so the target saturation could be calculated with `scrape_duration_seconds / scrape_timeout_seconds`. See the corresponding [pull request from Prometheus 2.30](https://github.com/prometheus/prometheus/pull/9247).
|
||||
* FEATURE: vmagent: reduce CPU usage when calculating the number of newly added series per scrape (this number is sent to remote storage in `scrape_series_added` metric).
|
||||
* FEATURE: vmagent: reduce CPU usage when applying `series_limit` to scrape targets with constant set of metrics. See more information about `series_limit` [here](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
|
||||
* FEATURE: vminsert: disable rerouting by default when a few `vmstorage` nodes start accepting data at a lower speed than the rest of the `vmstorage` nodes. This should improve VictoriaMetrics cluster stability during rolling restarts and during spikes in [time series churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). The rerouting can be enabled by passing the `-disableRerouting=false` command-line flag to `vminsert`.
|
||||
* FEATURE: vmauth: do not put invalid auth tokens into logs by default for security reasons. The logging can be re-enabled by passing the `-logInvalidAuthTokens` command-line flag to `vmauth`. Requests with invalid auth tokens are counted in the `vmagent_http_request_errors_total{reason="invalid_auth_token"}` metric exposed by `vmauth` at the `/metrics` page.
|
||||
* FEATURE: add new relabeling actions: `keep_metrics` and `drop_metrics`. This simplifies metrics filtering by metric names. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details.
|
||||
* FEATURE: allow splitting long `regex` in relabeling filters into an array of shorter regexps, which can be put into multiple lines for better readability and maintainability. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details.
|
||||
* FEATURE: optimize performance for queries with regexp filters on metric name like `{__name__=~"metric1|...|metricN"}`. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1610) from @faceair.
|
||||
* FEATURE: vmui: use Prometheus-compatible query args, so `vmui` could be accessed from graph editor in Grafana. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1619). Thanks to @Loori-R.
|
||||
* FEATURE: vmselect: automatically add missing port to `-storageNode` hostnames. For example, `-storageNode=vmstorage1,vmstorage2` is automatically translated to `-storageNode=vmstorage1:8401,vmstorage2:8401`. This simplifies [manual setup of VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-setup).
|
||||
* FEATURE: vminsert: automatically add missing port to `-storageNode` hostnames. For example, `-storageNode=vmstorage1,vmstorage2` is automatically translated to `-storageNode=vmstorage1:8400,vmstorage2:8400`. This simplifies [manual setup of VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-setup).
|
||||
|
||||
* BUGFIX: properly handle queries with multiple filters matching empty labels such as `metric{label1=~"foo|",label2="bar|"}`. This filter must match the following series: `metric`, `metric{label1="foo"}`, `metric{label2="bar"}` and `metric{label1="foo",label2="bar"}`. Previously it was matching only `metric{label1="foo",label2="bar"}`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601).
|
||||
* BUGFIX: vmselect: reset connection timeouts after each request to `vmstorage`. This should prevent `cannot read data in 0.000 seconds: unexpected EOF` warnings in logs. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1562). Thanks to @mxlxm.
|
||||
* BUGFIX: keep metric name for time series returned from [rollup_candlestick](https://docs.victoriametrics.com/MetricsQL.html#rollup_candlestick) function, since the returned series don't change the meaning of the original series. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1600).
|
||||
* BUGFIX: use Prometheus-compatible label value formatting for the [count_values](https://docs.victoriametrics.com/MetricsQL.html#count_values) function. Previously big values could be improperly formatted, which could break query results that rely on label values, such as `... on(label) count_values("label", ...)`.
|
||||
* BUGFIX: vmagent: properly use `https` scheme for wildcard TLS certificates for `role: ingress` targets in Kubernetes service discovery. See [this issue](https://github.com/prometheus/prometheus/issues/8902).
|
||||
* BUGFIX: vmagent: support host networking mode for `docker_sd_config`. See [this issue](https://github.com/prometheus/prometheus/issues/9116).
|
||||
* BUGFIX: fix non-repeatable results from `quantile_over_time()` function when the number of input samples exceeds 1000. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1612).
|
||||
|
||||
|
||||
## [v1.65.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.65.0)
|
||||
|
||||
|
@ -229,7 +256,7 @@ Thanks to @johnseekins!
|
|||
* `process_resident_memory_peak_bytes` - peak RSS usage for the process.
|
||||
* `process_virtual_memory_peak_bytes` - peak virtual memory usage for the process.
|
||||
* FEATURE: accept and enforce `extra_label=<label_name>=<label_value>` query arg at [Graphite APIs](https://docs.victoriametrics.com/#graphite-api-usage).
|
||||
* FEATURE: use Influx field as metric name if measurement is empty and `-influxSkipSingleField` command-line is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1139).
|
||||
* FEATURE: use InfluxDB field as metric name if measurement is empty and `-influxSkipSingleField` command-line is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1139).
|
||||
* FEATURE: vmagent: add `-promscrape.consul.waitTime` command-line flag for tuning the maximum wait time for Consul service discovery. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1144).
|
||||
* FEATURE: vmagent: add `vm_promscrape_discovery_kubernetes_stale_resource_versions_total` metric for monitoring the frequency of `too old resource version` errors during Kubernetes service discovery.
|
||||
* FEATURE: single-node VictoriaMetrics: log metrics with timestamps older than `-search.cacheTimestampOffset` compared to the current time. See [these docs](https://docs.victoriametrics.com/#backfilling) for details.
|
||||
|
@ -638,7 +665,7 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
|
|||
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/728 .
|
||||
* FEATURE: vmalert: make `-maxIdleConnections` configurable for datasource HTTP client. This option can be used for minimizing connection churn.
|
||||
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/795 .
|
||||
* FEATURE: add `-influx.maxLineSize` command-line flag for configuring the maximum size for a single Influx line during parsing.
|
||||
* FEATURE: add `-influx.maxLineSize` command-line flag for configuring the maximum size for a single InfluxDB line during parsing.
|
||||
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/807
|
||||
|
||||
* BUGFIX: properly handle `inf` values during [background merge of LSM parts](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
|
||||
|
|
|
@ -132,8 +132,8 @@ ROOT_IMAGE=scratch make package
|
|||
A minimal cluster must contain the following nodes:
|
||||
|
||||
* a single `vmstorage` node with `-retentionPeriod` and `-storageDataPath` flags
|
||||
* a single `vminsert` node with `-storageNode=<vmstorage_host>:8400`
|
||||
* a single `vmselect` node with `-storageNode=<vmstorage_host>:8401`
|
||||
* a single `vminsert` node with `-storageNode=<vmstorage_host>`
|
||||
* a single `vmselect` node with `-storageNode=<vmstorage_host>`
|
||||
|
||||
It is recommended to run at least two nodes for each service
|
||||
for high availability purposes.
|
||||
|
@ -192,7 +192,7 @@ It is recommended setting up alerts in [vmalert](https://docs.victoriametrics.co
|
|||
where `projectID` is also an arbitrary 32-bit integer. If `projectID` isn't set, then it equals `0`.
|
||||
- `<suffix>` may have the following values:
|
||||
- `prometheus` and `prometheus/api/v1/write` - for inserting data with [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write)
|
||||
- `influx/write` and `influx/api/v2/write` - for inserting data with [Influx line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/).
|
||||
- `influx/write` and `influx/api/v2/write` - for inserting data with [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/).
|
||||
- `opentsdb/api/put` - for accepting [OpenTSDB HTTP /api/put requests](http://opentsdb.net/docs/build/html/api_http/put.html).
|
||||
This handler is disabled by default. It is exposed on a distinct TCP address set via `-opentsdbHTTPListenAddr` command-line flag.
|
||||
See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-opentsdb-data-via-http-apiput-requests) for details.
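To make the tenant URL scheme above concrete, here is a hedged TypeScript sketch (host name, account ID, measurement and field are all placeholders; the `:8480` vminsert port matches the flag docs below) that writes one sample via the InfluxDB line protocol endpoint:

```ts
// <accountID>=42; projectID is omitted, so it defaults to 0
const url = "http://vminsert:8480/insert/42/influx/write";
const line = "cpu_usage,host=web1 user=12.5"; // timestamp may be omitted
fetch(url, {method: "POST", body: line})
  .then(r => console.log(r.status)); // expect a 2xx status; verify for your setup
```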
|
||||
|
@ -271,8 +271,8 @@ Cluster performance and capacity scales with adding new nodes.
|
|||
Steps to add `vmstorage` node:
|
||||
|
||||
1. Start new `vmstorage` node with the same `-retentionPeriod` as existing nodes in the cluster.
|
||||
2. Gradually restart all the `vmselect` nodes with new `-storageNode` arg containing `<new_vmstorage_host>:8401`.
|
||||
3. Gradually restart all the `vminsert` nodes with new `-storageNode` arg containing `<new_vmstorage_host>:8400`.
|
||||
2. Gradually restart all the `vmselect` nodes with new `-storageNode` arg containing `<new_vmstorage_host>`.
|
||||
3. Gradually restart all the `vminsert` nodes with new `-storageNode` arg containing `<new_vmstorage_host>`.
|
||||
|
||||
|
||||
## Updating / reconfiguring cluster nodes
|
||||
|
@ -284,6 +284,8 @@ with new configs.
|
|||
Cluster should remain in working state if at least a single node of each type remains available during
|
||||
the update process. See [cluster availability](#cluster-availability) section for details.
|
||||
|
||||
See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
|
||||
|
||||
|
||||
## Cluster availability
|
||||
|
||||
|
@ -469,7 +471,7 @@ Below is the output for `/path/to/vminsert -help`:
|
|||
-csvTrimTimestamp duration
|
||||
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
-disableRerouting
|
||||
Whether to disable re-routing when some of the vmstorage nodes accept incoming data at a slower speed compared to other storage nodes. By default the re-routing is enabled. Disabled re-routing limits the ingestion rate by the slowest vmstorage node. On the other hand, disabled re-routing minimizes the number of active time series in the cluster
|
||||
Whether to disable re-routing when some of the vmstorage nodes accept incoming data at a slower speed compared to other storage nodes. Disabled re-routing limits the ingestion rate by the slowest vmstorage node. On the other hand, disabled re-routing minimizes the number of active time series in the cluster during rolling restarts and during spikes in series churn rate (default true)
|
||||
-enableTCP6
|
||||
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
|
||||
-envflag.enable
|
||||
|
@ -503,18 +505,18 @@ Below is the output for `/path/to/vminsert -help`:
|
|||
Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-influx.maxLineSize size
|
||||
The maximum size in bytes for a single Influx line during parsing
|
||||
The maximum size in bytes for a single InfluxDB line during parsing
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144)
|
||||
-influxListenAddr string
|
||||
TCP and UDP address to listen for Influx line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://<vminsert>:8480/insert/<accountID>/influx/write
|
||||
TCP and UDP address to listen for InfluxDB line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://<victoriametrics>:8428/write
|
||||
-influxMeasurementFieldSeparator string
|
||||
Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol (default "_")
|
||||
Separator for '{measurement}{separator}{field_name}' metric name when inserted via InfluxDB line protocol (default "_")
|
||||
-influxSkipMeasurement
|
||||
Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'
|
||||
-influxSkipSingleField
|
||||
Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for the metric name if the Influx line contains only a single field
|
||||
Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for the metric name if the InfluxDB line contains only a single field
|
||||
-influxTrimTimestamp duration
|
||||
Trim timestamps for Influx line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
-insert.maxQueueDuration duration
|
||||
The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
|
||||
-loggerDisableTimestamps
|
||||
|
@ -565,7 +567,7 @@ Below is the output for `/path/to/vminsert -help`:
|
|||
-sortLabels
|
||||
Whether to sort labels for incoming samples before writing them to storage. This may be needed for reducing memory usage at storage when the order of labels in incoming samples is random. For example, m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}. Enabled sorting for labels can slow down ingestion performance a bit
|
||||
-storageNode array
|
||||
Address of vmstorage nodes; usage: -storageNode=vmstorage-host1:8400 -storageNode=vmstorage-host2:8400
|
||||
Comma-separated addresses of vmstorage nodes; usage: -storageNode=vmstorage-host1,...,vmstorage-hostN
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-tls
|
||||
Whether to enable TLS (aka HTTPS) for incoming requests. -tlsCertFile and -tlsKeyFile must be set if -tls is set
|
||||
|
@ -679,10 +681,10 @@ Below is the output for `/path/to/vmselect -help`:
|
|||
-search.treatDotsAsIsInRegexps
|
||||
Whether to treat dots as is in regexp label filters used in queries. For example, foo{bar=~"a.b.c"} will be automatically converted to foo{bar=~"a\\.b\\.c"}, i.e. all the dots in regexp filters will be automatically escaped in order to match only dot char instead of matching any char. Dots in ".+", ".*" and ".{n}" regexps aren't escaped. This option is DEPRECATED in favor of {__graphite__="a.*.c"} syntax for selecting metrics matching the given Graphite metrics filter
|
||||
-selectNode array
|
||||
Addresses of vmselect nodes; usage: -selectNode=vmselect-host1:8481 -selectNode=vmselect-host2:8481
|
||||
Comma-separated addresses of vmselect nodes; usage: -selectNode=vmselect-host1,...,vmselect-hostN
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-storageNode array
|
||||
Addresses of vmstorage nodes; usage: -storageNode=vmstorage-host1:8401 -storageNode=vmstorage-host2:8401
|
||||
Comma-separated addresses of vmstorage nodes; usage: -storageNode=vmstorage-host1,...,vmstorage-hostN
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-tls
|
||||
Whether to enable TLS (aka HTTPS) for incoming requests. -tlsCertFile and -tlsKeyFile must be set if -tls is set
|
||||
|
|
|
@ -4,7 +4,7 @@ sort: 13
|
|||
|
||||
# MetricsQL
|
||||
|
||||
VictoriaMetrics implements MetricsQL - a query language inspired by [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/).
|
||||
[VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) implements MetricsQL - a query language inspired by [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/).
|
||||
MetricsQL is backwards-compatible with PromQL, so Grafana dashboards backed by Prometheus datasource should work the same after switching from Prometheus to VictoriaMetrics.
|
||||
[Standalone MetricsQL package](https://godoc.org/github.com/VictoriaMetrics/metricsql) can be used for parsing MetricsQL in external apps.
|
||||
|
||||
|
@ -24,7 +24,7 @@ Other PromQL functionality should work the same in MetricsQL. [File an issue](ht
|
|||
|
||||
MetricsQL implements [PromQL](https://medium.com/@valyala/promql-tutorial-for-beginners-9ab455142085) and provides additional functionality mentioned below, which is aimed at solving practical cases. Feel free to [file a feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you think MetricsQL misses certain useful functionality.
|
||||
|
||||
This functionality can be evaluated at [an editable Grafana dashboard](http://play-grafana.victoriametrics.com:3000/d/4ome8yJmz/node-exporter-on-victoriametrics-demo) or at your own [VcitoriaMetrics instance](https://docs.victoriametrics.com/#how-to-start-victoriametrics).
|
||||
This functionality can be evaluated at [an editable Grafana dashboard](https://play-grafana.victoriametrics.com/d/4ome8yJmz/node-exporter-on-victoriametrics-demo) or at your own [VictoriaMetrics instance](https://docs.victoriametrics.com/#how-to-start-victoriametrics).
|
||||
|
||||
- Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax. This is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but usually works faster and is easier to use when migrating from Graphite. VictoriaMetrics also can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
|
||||
- Lookbehind window in square brackets may be omitted. VictoriaMetrics automatically selects the lookbehind window depending on the current step used for building the graph (e.g. `step` query arg passed to [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries)). For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`. It is equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
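To make the omitted-lookbehind shorthand above concrete, here is a small sketch (the `localhost:8428` address assumes a default single-node instance; the timestamps are arbitrary) that sends such a query to the Prometheus-compatible `/api/v1/query_range` endpoint:

```ts
const base = "http://localhost:8428"; // assumed single-node default
const query = encodeURIComponent("rate(node_network_receive_bytes_total)");
// the lookbehind window is inferred from `step`, so no `[5m]` suffix is needed
fetch(`${base}/api/v1/query_range?query=${query}&start=1631600000&end=1631603600&step=300`)
  .then(r => r.json())
  .then(resp => console.log(resp.data.result));
```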
|
||||
|
|
289
docs/README.md
|
@ -12,16 +12,15 @@
|
|||
|
||||
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
||||
|
||||
It is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
[docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap package](https://snapcraft.io/victoriametrics)
|
||||
and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and see [how to start it](#how-to-start-victoriametrics).
|
||||
If you use Ubuntu, then just run `snap install victoriametrics` in order to install and run it.
|
||||
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
in [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), in [Snap packages](https://snapcraft.io/victoriametrics)
|
||||
and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and follow [these instructions](#how-to-start-victoriametrics).
|
||||
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
||||
|
||||
Cluster version is available [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
|
||||
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need paid enterprise support for VictoriaMetrics.
|
||||
See [features available for enterprise customers](https://victoriametrics.com/enterprise.html).
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
|
||||
See [features available in enterprise package](https://victoriametrics.com/enterprise.html).
|
||||
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||
|
||||
|
||||
|
@ -52,188 +51,101 @@ See also [articles and slides about VictoriaMetrics from our users](https://docs
|
|||
|
||||
## Prominent features
|
||||
|
||||
VictoriaMetrics has the following prominent features:

* It can be used as long-term storage for Prometheus. See [these docs](#prometheus-setup) for details.
* It can be used as drop-in replacement for Prometheus in Grafana, because it supports [Prometheus querying API](#prometheus-querying-api-usage).
* It can be used as drop-in replacement for Graphite in Grafana, because it supports [Graphite API](#graphite-api-usage).
* It features easy setup and operation:
  * VictoriaMetrics consists of a single [small executable](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d) without external dependencies.
  * All the configuration is done via explicit command-line flags with reasonable defaults.
  * All the data is stored in a single directory pointed by `-storageDataPath` command-line flag.
  * Easy and fast backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) to S3 or GCS can be done with [vmbackup](https://docs.victoriametrics.com/vmbackup.html) / [vmrestore](https://docs.victoriametrics.com/vmrestore.html) tools. See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
* It implements PromQL-based query language - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which provides improved functionality on top of PromQL.
* It provides global query view. Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics. Later this data may be queried via a single query.
* It provides high performance and good vertical and horizontal scalability for both [data ingestion](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and [data querying](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4). It [outperforms InfluxDB and TimescaleDB by up to 20x](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
* It [uses 10x less RAM than InfluxDB](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) and [up to 7x less RAM than Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f) when dealing with millions of unique time series (aka [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality)).
* It is optimized for time series with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
* It provides high data compression, so [up to 70x more data points](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4) may be crammed into limited storage compared to TimescaleDB and [up to 7x less storage space is required compared to Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f).
* It is optimized for storage with high-latency IO and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc). See [disk IO graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
* A single-node VictoriaMetrics may substitute moderately sized clusters built with competing solutions such as Thanos, M3DB, Cortex, InfluxDB or TimescaleDB. See [vertical scalability benchmarks](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae), [comparing Thanos to VictoriaMetrics cluster](https://medium.com/@valyala/comparing-thanos-to-victoriametrics-cluster-b193bea1683) and [Remote Write Storage Wars](https://promcon.io/2019-munich/talks/remote-write-storage-wars/) talk from [PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
* It protects the storage from data corruption on unclean shutdown (i.e. OOM, hardware reset or `kill -9`) thanks to [the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
* It supports metrics' scraping, ingestion and [backfilling](#backfilling) via the following protocols:
  * [Metrics scraping from Prometheus exporters](#how-to-scrape-prometheus-exporters-such-as-node-exporter).
  * [Prometheus remote write API](#prometheus-setup).
  * [Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format).
  * [InfluxDB line protocol](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) over HTTP, TCP and UDP.
  * [Graphite plaintext protocol](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
  * [OpenTSDB put message](#sending-data-via-telnet-put-protocol).
  * [HTTP OpenTSDB /api/put requests](#sending-opentsdb-data-via-http-apiput-requests).
  * [JSON line format](#how-to-import-data-in-json-line-format).
  * [Arbitrary CSV data](#how-to-import-csv-data).
  * [Native binary format](#how-to-import-data-in-native-format).
* It supports metrics' relabeling. See [these docs](#relabeling) for details.
* It can deal with [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/enterprise.html).
* It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).

## Operation

### Table of contents

* [How to start VictoriaMetrics](#how-to-start-victoriametrics)
  * [Environment variables](#environment-variables)
  * [Configuration with snap package](#configuration-with-snap-package)
* [Prometheus setup](#prometheus-setup)
* [Grafana setup](#grafana-setup)
* [How to upgrade VictoriaMetrics](#how-to-upgrade-victoriametrics)
* [How to apply new config to VictoriaMetrics](#how-to-apply-new-config-to-victoriametrics)
* [How to scrape Prometheus exporters such as node_exporter](#how-to-scrape-prometheus-exporters-such-as-node-exporter)
* [How to send data from InfluxDB-compatible agents such as Telegraf](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
* [How to send data from Graphite-compatible agents such as StatsD](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd)
* [Querying Graphite data](#querying-graphite-data)
* [How to send data from OpenTSDB-compatible agents](#how-to-send-data-from-opentsdb-compatible-agents)
* [Prometheus querying API usage](#prometheus-querying-api-usage)
  * [Prometheus querying API enhancements](#prometheus-querying-api-enhancements)
* [Graphite API usage](#graphite-api-usage)
  * [Graphite Render API usage](#graphite-render-api-usage)
  * [Graphite Metrics API usage](#graphite-metrics-api-usage)
  * [Graphite Tags API usage](#graphite-tags-api-usage)
* [How to build from sources](#how-to-build-from-sources)
  * [Development build](#development-build)
  * [Production build](#production-build)
  * [ARM build](#arm-build)
  * [Pure Go build (CGO_ENABLED=0)](#pure-go-build-cgo_enabled0)
  * [Building docker images](#building-docker-images)
* [Start with docker-compose](#start-with-docker-compose)
* [Setting up service](#setting-up-service)
* [How to work with snapshots](#how-to-work-with-snapshots)
* [How to delete time series](#how-to-delete-time-series)
* [Forced merge](#forced-merge)
* [How to export time series](#how-to-export-time-series)
  * [How to export data in native format](#how-to-export-data-in-native-format)
  * [How to export data in JSON line format](#how-to-export-data-in-json-line-format)
  * [How to export CSV data](#how-to-export-csv-data)
* [How to import time series data](#how-to-import-time-series-data)
  * [How to import data in native format](#how-to-import-data-in-native-format)
  * [How to import data in json line format](#how-to-import-data-in-json-line-format)
  * [How to import CSV data](#how-to-import-csv-data)
  * [How to import data in Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format)
* [Relabeling](#relabeling)
* [Federation](#federation)
* [Capacity planning](#capacity-planning)
* [High availability](#high-availability)
* [Deduplication](#deduplication)
* [Retention](#retention)
* [Multiple retentions](#multiple-retentions)
* [Downsampling](#downsampling)
* [Multi-tenancy](#multi-tenancy)
* [Scalability and cluster version](#scalability-and-cluster-version)
* [Alerting](#alerting)
* [Security](#security)
* [Tuning](#tuning)
* [Monitoring](#monitoring)
* [TSDB stats](#tsdb-stats)
* [Cardinality limiter](#cardinality-limiter)
* [Troubleshooting](#troubleshooting)
* [Data migration](#data-migration)
* [Backfilling](#backfilling)
* [Data updates](#data-updates)
* [Replication](#replication)
* [Backups](#backups)
* [Profiling](#profiling)
* [Integrations](#integrations)
* [Third-party contributions](#third-party-contributions)
* [Contacts](#contacts)
* [Community and contributions](#community-and-contributions)
* [Reporting bugs](#reporting-bugs)
* [VictoriaMetrics Logo](#victoria-metrics-logo)
  * [Logo Usage Guidelines](#logo-usage-guidelines)
    * [Font used](#font-used)
    * [Color Palette](#color-palette)
  * [We kindly ask](#we-kindly-ask)
* [List of command-line flags](#list-of-command-line-flags)

## How to start VictoriaMetrics

Just download [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or [Docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and start it with the desired command-line flags.

The following command-line flags are used the most:

* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory.
* `-retentionPeriod` - retention for stored data. Older data is automatically deleted. Default retention is 1 month. See [these docs](#retention) for more details.

Other flags have good enough default values, so set them only if you really need to change them. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).

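For example, the following command starts VictoriaMetrics with a custom data directory and 12 months of retention. This is a minimal sketch, assuming the binary name used by the release archives; adjust it to your download:

```bash
# -retentionPeriod is in months by default; data older than 12 months is dropped.
./victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics -retentionPeriod=12
```
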
See how to [ingest data to VictoriaMetrics](#how-to-import-time-series-data), how to [query VictoriaMetrics via Grafana](#grafana-setup), how to [query VictoriaMetrics via Graphite API](#graphite-api-usage) and how to [handle alerts](#alerting).

VictoriaMetrics accepts [Prometheus querying API requests](#prometheus-querying-api-usage) on port `8428` by default.

It is recommended to set up [monitoring](#monitoring) for VictoriaMetrics.

### Environment variables

Each flag value can be set via environment variables according to these rules:

* The `-envflag.enable` flag must be set.
* Each `.` char in flag name must be substituted with `_` (for example `-insert.maxQueueDuration <duration>` will translate to `insert_maxQueueDuration=<duration>`).
* For repeating flags an alternative syntax can be used by joining the different values into one using `,` char as separator (for example `-storageNode <nodeA> -storageNode <nodeB>` will translate to `storageNode=<nodeA>,<nodeB>`).
* Environment var prefix can be set via `-envflag.prefix` flag. For instance, if `-envflag.prefix=VM_`, then env vars must be prepended with `VM_`.

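For example, the following sketch applies the rules above to set the retention period via an environment variable instead of a command-line flag:

```bash
# Equivalent to passing -retentionPeriod=12 on the command line.
export VM_retentionPeriod=12
./victoria-metrics-prod -envflag.enable -envflag.prefix=VM_
```
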
### Configuration with snap package

Snap package for VictoriaMetrics is available [here](https://snapcraft.io/victoriametrics).

Command-line flags for Snap package can be set with the following command:

```text
echo 'FLAGS="-selfScrapeInterval=10s -search.logSlowQueryDuration=20s"' > $SNAP_DATA/var/snap/victoriametrics/current/extra_flags
snap restart victoriametrics
```

Or add the needed command-line flags to the file `$SNAP_DATA/var/snap/victoriametrics/current/extra_flags`.

Do not change the value for `-storageDataPath` flag, because the snap package has limited access to the host filesystem.

Changing scrape configuration is possible with a text editor:

```text
vi $SNAP_DATA/var/snap/victoriametrics/current/etc/victoriametrics-scrape-config.yaml
```

After changes were made, trigger config re-read with the command `curl 127.0.0.1:8248/-/reload`.

## Prometheus setup

Add the following lines to Prometheus config file (it is usually located at `/etc/prometheus/prometheus.yml`) in order to send data to VictoriaMetrics:

```yml
remote_write:
  - url: http://<victoriametrics-addr>:8428/api/v1/write
```

Prometheus writes incoming data to local storage and replicates it to remote storage.
This means that data remains available in local storage for `--storage.tsdb.retention.time` duration
even if remote storage is unavailable.

If you plan to send data to VictoriaMetrics from multiple Prometheus instances, then add the following lines into the `global` section
of [Prometheus config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file):

```yml
global:
  external_labels:
    datacenter: dc-123
```

This instructs Prometheus to add `datacenter=dc-123` label to each sample before sending it to remote storage.
The label name can be arbitrary - `datacenter` is just an example. The label value must be unique
across Prometheus instances, so time series can be filtered and grouped by this label.

For highly loaded Prometheus instances (200k+ samples per second) the following tuning may be applied:

```yaml
remote_write:
  - url: http://<victoriametrics-addr>:8428/api/v1/write
    queue_config:
      max_samples_per_send: 10000
      capacity: 20000
      max_shards: 30
```

Using remote write increases memory usage for Prometheus by up to ~25%. If you are experiencing issues with
too high memory consumption of Prometheus, then try to lower `max_samples_per_send` and `capacity` params. Keep in mind that these two params are tightly connected.
Read more about tuning remote write for Prometheus [here](https://prometheus.io/docs/practices/remote_write).

It is recommended to upgrade Prometheus to [v2.12.0](https://github.com/prometheus/prometheus/releases) or newer, since previous versions may have issues with `remote_write`.

Take a look also at [vmagent](https://docs.victoriametrics.com/vmagent.html) and [vmalert](https://docs.victoriametrics.com/vmalert.html),
which can be used as a faster and less resource-hungry alternative to Prometheus.

## Grafana setup

Create [Prometheus datasource](http://docs.grafana.org/datasources/prometheus/) in Grafana with the following url: `http://<victoriametrics-addr>:8428`

Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.

Then build graphs and dashboards for the created datasource using [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) or [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).

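The datasource can also be provisioned from a file instead of the Grafana UI. The sketch below uses Grafana's standard datasource provisioning format; the file path and the datasource name are assumptions, not VictoriaMetrics requirements:

```bash
# Provision a Prometheus-type datasource that points at VictoriaMetrics.
cat > /etc/grafana/provisioning/datasources/victoriametrics.yml <<'EOF'
apiVersion: 1
datasources:
  - name: VictoriaMetrics
    type: prometheus
    access: proxy
    url: http://<victoriametrics-addr>:8428
EOF
```
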
## How to upgrade VictoriaMetrics

It is safe upgrading VictoriaMetrics to new versions unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is safe skipping multiple versions during the upgrade unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features.

It is also safe downgrading to older versions unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise.

The following steps must be performed during the upgrade / downgrade procedure:

* Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it.
* Wait until the process stops. This can take a few seconds.
* Start the upgraded VictoriaMetrics.

Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details. The same applies also to [vmagent](https://docs.victoriametrics.com/vmagent.html).

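For example, a graceful upgrade could look like the following sketch. The binary name is assumed from the release archives; in practice a process manager such as systemd would normally handle this:

```bash
# Ask the running instance to stop gracefully and wait until it exits.
kill -INT "$(pidof victoria-metrics-prod)"
while pidof victoria-metrics-prod >/dev/null; do sleep 1; done
# Then start the upgraded binary with the same flags as before.
./victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics -retentionPeriod=12
```
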
## How to apply new config to VictoriaMetrics

VictoriaMetrics is configured via command-line flags, so it must be restarted when new command-line flags should be applied:

* Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it.
* Wait until the process stops. This can take a few seconds.
* Start VictoriaMetrics with the new command-line flags.

Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details. The same applies also to [vmagent](https://docs.victoriametrics.com/vmagent.html).

## How to scrape Prometheus exporters such as [node-exporter](https://github.com/prometheus/node_exporter)

VictoriaMetrics can be used as drop-in replacement for Prometheus for scraping targets configured in `prometheus.yml` config file according to [the specification](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file). Just set `-promscrape.config` command-line flag to the path to `prometheus.yml` config - and VictoriaMetrics should start scraping the configured targets. Currently the following [scrape_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) types are supported:

* [static_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config)
* [file_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config)
@@ -352,7 +255,7 @@
* [http_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config)

File a [feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you need support for other `*_sd_config` types.

The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.

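For example, the following sketch reuses an existing Prometheus scrape configuration:

```bash
# Scrape the targets defined in an existing prometheus.yml.
./victoria-metrics-prod -promscrape.config=/etc/prometheus/prometheus.yml
```
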
## How to send data from InfluxDB-compatible agents such as [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)

Use `http://<victoriametrics-addr>:8428` url instead of InfluxDB url in agents' configs. For instance, put the following lines into `Telegraf` config, so it sends data to VictoriaMetrics instead of InfluxDB:

```
[[outputs.influxdb]]
  urls = ["http://<victoriametrics-addr>:8428"]
```

Another option is to enable TCP and UDP receiver for InfluxDB line protocol via `-influxListenAddr` command-line flag
and stream plain InfluxDB line protocol data to the configured TCP and/or UDP addresses.

VictoriaMetrics performs the following transformations to the ingested InfluxDB data:

* [`db` query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value
unless `db` tag exists in the InfluxDB line.
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names.
* Field values are mapped to time series values.
* Tags are mapped to Prometheus labels as-is.

For example, the following InfluxDB line:

```raw
foo,tag1=value1,tag2=value2 field1=12,field2=40
```

unwraps into the following Prometheus data points:

```raw
foo_field1{tag1="value1", tag2="value2"} 12
foo_field2{tag1="value1", tag2="value2"} 40
```

Example for writing data with [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/)
to local VictoriaMetrics using `curl`:

```bash
curl -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://localhost:8428/write'
```

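The written data can then be read back via the [/api/v1/export](#how-to-export-data-in-json-line-format) endpoint. A sketch of such a query, where the series selector is just an example:

```bash
# Export all series whose names start with "measurement_".
curl -G 'http://localhost:8428/api/v1/export' -d 'match[]={__name__=~"measurement_.*"}'
```
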
The `/api/v1/export` endpoint should return the following response:

```
{"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1560272508147]}
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[1.23],"timestamps":[1560272508147]}
```

Note that InfluxDB line protocol expects [timestamps in *nanoseconds* by default](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/#timestamp),
while VictoriaMetrics stores them with *milliseconds* precision.

Extra labels may be added to all the written time series by passing `extra_label=name=value` query args.

The exported CSV data can be imported to VictoriaMetrics via [/api/v1/import/csv](#how-to-import-csv-data).

## How to import time series data

Time series data can be imported via any supported ingestion protocol:

* [Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). See [these docs](#prometheus-setup) for details.
* InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
* Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
* OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
* OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.

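As an illustration - a sketch only; see the per-protocol docs linked above for authoritative examples - JSON lines previously produced by `/api/v1/export` can be re-imported like this:

```bash
# POST previously exported JSON lines back into VictoriaMetrics.
curl -X POST 'http://localhost:8428/api/v1/import' -T exported_data.jsonl
```
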
## Relabeling

Example contents for `-relabelConfig` file:

```yml
- target_label: cluster
  replacement: dev
- action: drop
  source_labels: [__meta_kubernetes_pod_container_init]
  regex: true
```

See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details about relabeling in VictoriaMetrics.

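A sketch of wiring the example `-relabelConfig` file above into VictoriaMetrics; the file name is an example:

```bash
# Apply the relabeling rules to all ingested metrics.
./victoria-metrics-prod -relabelConfig=relabel_config.yml
```
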
## Multi-tenancy

Single-node VictoriaMetrics doesn't support multi-tenancy. Use [cluster version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy) instead.

## Cardinality limiter

VictoriaMetrics can limit the number of unique time series via the `-storage.maxHourlySeries` and `-storage.maxDailySeries` command-line flags. The exceeded limits can be [monitored](#monitoring).

These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).

See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).

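A sketch of enabling the series limiter mentioned above; the limit values are arbitrary examples:

```bash
# Cap unique series at 1M per hour and 5M per day; excess series are dropped.
./victoria-metrics-prod -storage.maxHourlySeries=1000000 -storage.maxDailySeries=5000000
```
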
Pass `-help` to VictoriaMetrics in order to see the list of supported command-line flags with their description:

    -influx.databaseNames array
        Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb
        Supports an array of values separated by comma or specified via multiple flags.
    -influx.maxLineSize size
        The maximum size in bytes for a single InfluxDB line during parsing
        Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144)
    -influxListenAddr string
        TCP and UDP address to listen for InfluxDB line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://<victoriametrics>:8428/write
    -influxMeasurementFieldSeparator string
        Separator for '{measurement}{separator}{field_name}' metric name when inserted via InfluxDB line protocol (default "_")
    -influxSkipMeasurement
        Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'
    -influxSkipSingleField
        Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field
    -influxTrimTimestamp duration
        Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
    -insert.maxQueueDuration duration
        The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
    -logNewSeries
@ -16,16 +16,15 @@ sort: 1
|
|||
|
||||
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
||||
|
||||
It is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
[docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap package](https://snapcraft.io/victoriametrics)
|
||||
and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and see [how to start it](#how-to-start-victoriametrics).
|
||||
If you use Ubuntu, then just run `snap install victoriametrics` in order to install and run it.
|
||||
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
in [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), in [Snap packages](https://snapcraft.io/victoriametrics)
|
||||
and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics follow [these instructions](#how-to-start-victoriametrics).
|
||||
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
||||
|
||||
Cluster version is available [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
|
||||
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need paid enterprise support for VictoriaMetrics.
|
||||
See [features available for enterprise customers](https://victoriametrics.com/enterprise.html).
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
|
||||
See [features available in enterprise package](https://victoriametrics.com/enterprise.html).
|
||||
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||
|
||||
|
||||
|
@ -56,188 +55,101 @@ See also [articles and slides about VictoriaMetrics from our users](https://docs
|
|||
|
||||
## Prominent features
|
||||
|
||||
* VictoriaMetrics can be used as long-term storage for Prometheus or for [vmagent](https://docs.victoriametrics.com/vmagent.html).
|
||||
See [these docs](#prometheus-setup) for details.
|
||||
* VictoriaMetrics supports [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/), so it can be used as Prometheus drop-in replacement in Grafana.
|
||||
* VictoriaMetrics implements [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) query language backwards compatible with PromQL.
|
||||
* VictoriaMetrics provides global query view. Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics.
|
||||
Later this data may be queried via a single query.
|
||||
* High performance and good scalability for both [inserts](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b)
|
||||
and [selects](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4).
|
||||
[Outperforms InfluxDB and TimescaleDB by up to 20x](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
|
||||
* [Uses 10x less RAM than InfluxDB](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893)
|
||||
and [up to 7x less RAM than Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f)
|
||||
when dealing with millions of unique time series (aka [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality)).
|
||||
* Optimized for time series with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). Think about [prometheus-operator](https://github.com/coreos/prometheus-operator) metrics from frequent deployments in Kubernetes.
|
||||
* High data compression, so [up to 70x more data points](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4)
|
||||
may be crammed into limited storage comparing to TimescaleDB
|
||||
and [up to 7x less storage space is required comparing to Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f).
|
||||
* Optimized for storage with high-latency IO and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc).
|
||||
See [graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
|
||||
* A single-node VictoriaMetrics may substitute moderately sized clusters built with competing solutions such as Thanos, M3DB, Cortex, InfluxDB or TimescaleDB.
|
||||
See [vertical scalability benchmarks](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae),
|
||||
[comparing Thanos to VictoriaMetrics cluster](https://medium.com/@valyala/comparing-thanos-to-victoriametrics-cluster-b193bea1683)
|
||||
and [Remote Write Storage Wars](https://promcon.io/2019-munich/talks/remote-write-storage-wars/) talk
|
||||
from [PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
|
||||
* Easy operation:
|
||||
VictoriaMetrics has the following prominent features:
|
||||
|
||||
* It can be used as long-term storage for Prometheus. See [these docs](#prometheus-setup) for details.
|
||||
* It can be used as drop-in replacement for Prometheus in Grafana, because it supports [Prometheus querying API](#prometheus-querying-api-usage).
|
||||
* It can be used as drop-in replacement for Graphite in Grafana, because it supports [Graphite API](#graphite-api-usage).
|
||||
* It features easy setup and operation:
|
||||
* VictoriaMetrics consists of a single [small executable](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d) without external dependencies.
|
||||
* All the configuration is done via explicit command-line flags with reasonable defaults.
|
||||
* All the data is stored in a single directory pointed by `-storageDataPath` command-line flag.
|
||||
* Easy and fast backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282)
|
||||
to S3 or GCS with [vmbackup](https://docs.victoriametrics.com/vmbackup.html) / [vmrestore](https://docs.victoriametrics.com/vmrestore.html).
|
||||
See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
|
||||
* Storage is protected from corruption on unclean shutdown (i.e. OOM, hardware reset or `kill -9`) thanks to [the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
|
||||
* Supports metrics' scraping, ingestion and [backfilling](#backfilling) via the following protocols:
|
||||
* [Metrics from Prometheus exporters](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format)
|
||||
such as [node_exporter](https://github.com/prometheus/node_exporter). See [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter) for details.
|
||||
* [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write)
|
||||
* [InfluxDB line protocol](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) over HTTP, TCP and UDP.
|
||||
* [Graphite plaintext protocol](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon)
|
||||
if `-graphiteListenAddr` is set.
|
||||
* [OpenTSDB put message](#sending-data-via-telnet-put-protocol) if `-opentsdbListenAddr` is set.
|
||||
* [HTTP OpenTSDB /api/put requests](#sending-opentsdb-data-via-http-apiput-requests) if `-opentsdbHTTPListenAddr` is set.
|
||||
* [JSON line format](#how-to-import-data-in-json-line-format).
|
||||
* [Native binary format](#how-to-import-data-in-native-format).
|
||||
* Easy and fast backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) to S3 or GCS can be done with [vmbackup](https://docs.victoriametrics.com/vmbackup.html) / [vmrestore](https://docs.victoriametrics.com/vmrestore.html) tools. See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
|
||||
* It implements PromQL-based query language - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which provides improved functionality on top of PromQL.
|
||||
* It provides global query view. Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics. Later this data may be queried via a single query.
|
||||
* It provides high performance and good vertical and horizontal scalability for both [data ingestion](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and [data querying](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4). It [outperforms InfluxDB and TimescaleDB by up to 20x](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
|
||||
* It [uses 10x less RAM than InfluxDB](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) and [up to 7x less RAM than Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f) when dealing with millions of unique time series (aka [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality)).
|
||||
* It is optimized for time series with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
|
||||
* It provides high data compression, so [up to 70x more data points](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4) may be crammed into limited storage comparing to TimescaleDB and [up to 7x less storage space is required compared to Prometheus, Thanos or Cortex](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f).
|
||||
* It is optimized for storage with high-latency IO and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc). See [disk IO graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
|
||||
* A single-node VictoriaMetrics may substitute moderately sized clusters built with competing solutions such as Thanos, M3DB, Cortex, InfluxDB or TimescaleDB. See [vertical scalability benchmarks](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae), [comparing Thanos to VictoriaMetrics cluster](https://medium.com/@valyala/comparing-thanos-to-victoriametrics-cluster-b193bea1683) and [Remote Write Storage Wars](https://promcon.io/2019-munich/talks/remote-write-storage-wars/) talk from [PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
|
||||
* It protects the storage from data corruption on unclean shutdown (i.e. OOM, hardware reset or `kill -9`) thanks to [the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
|
||||
* It supports metrics' scraping, ingestion and [backfilling](#backfilling) via the following protocols:
|
||||
* [Metrics scraping from Prometheus exporters](#how-to-scrape-prometheus-exporters-such-as-node-exporter).
|
||||
* [Prometheus remote write API](#prometheus-setup).
|
||||
* [Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format).
|
||||
* [InfluxDB line protocol](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) over HTTP, TCP and UDP.
|
||||
* [Graphite plaintext protocol](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
|
||||
* [OpenTSDB put message](#sending-data-via-telnet-put-protocol).
|
||||
* [HTTP OpenTSDB /api/put requests](#sending-opentsdb-data-via-http-apiput-requests).
|
||||
* [JSON line format](#how-to-import-data-in-json-line-format).
|
||||
* [Arbitrary CSV data](#how-to-import-csv-data).
|
||||
* Supports metrics' relabeling. See [these docs](#relabeling) for details.
|
||||
* Can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues using [series limiter](#cardinality-limiter).
|
||||
* Ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/enterprise.html).
|
||||
* Has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
|
||||
* See also technical [Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
||||
* [Native binary format](#how-to-import-data-in-native-format).
|
||||
* It supports metrics' relabeling. See [these docs](#relabeling) for details.
|
||||
* It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
|
||||
* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/enterprise.html).
|
||||
* It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
|
||||
|
||||
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
||||
|
||||
|
||||
## Operation
|
||||
|
||||
### Table of contents
|
||||
|
||||
* [How to start VictoriaMetrics](#how-to-start-victoriametrics)
|
||||
* [Environment variables](#environment-variables)
|
||||
* [Configuration with snap package](#configuration-with-snap-package)
|
||||
* [Prometheus setup](#prometheus-setup)
|
||||
* [Grafana setup](#grafana-setup)
|
||||
* [How to upgrade VictoriaMetrics](#how-to-upgrade-victoriametrics)
|
||||
* [How to apply new config to VictoriaMetrics](#how-to-apply-new-config-to-victoriametrics)
|
||||
* [How to scrape Prometheus exporters such as node_exporter](#how-to-scrape-prometheus-exporters-such-as-node-exporter)
|
||||
* [How to send data from InfluxDB-compatible agents such as Telegraf](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
* [How to send data from Graphite-compatible agents such as StatsD](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd)
* [Querying Graphite data](#querying-graphite-data)
* [How to send data from OpenTSDB-compatible agents](#how-to-send-data-from-opentsdb-compatible-agents)
* [Prometheus querying API usage](#prometheus-querying-api-usage)
* [Prometheus querying API enhancements](#prometheus-querying-api-enhancements)
* [Graphite API usage](#graphite-api-usage)
* [Graphite Render API usage](#graphite-render-api-usage)
* [Graphite Metrics API usage](#graphite-metrics-api-usage)
* [Graphite Tags API usage](#graphite-tags-api-usage)
* [How to build from sources](#how-to-build-from-sources)
* [Development build](#development-build)
* [Production build](#production-build)
* [ARM build](#arm-build)
* [Pure Go build (CGO_ENABLED=0)](#pure-go-build-cgo_enabled0)
* [Building docker images](#building-docker-images)
* [Start with docker-compose](#start-with-docker-compose)
* [Setting up service](#setting-up-service)
* [How to work with snapshots](#how-to-work-with-snapshots)
* [How to delete time series](#how-to-delete-time-series)
* [Forced merge](#forced-merge)
* [How to export time series](#how-to-export-time-series)
* [How to export data in native format](#how-to-export-data-in-native-format)
* [How to export data in JSON line format](#how-to-export-data-in-json-line-format)
* [How to export CSV data](#how-to-export-csv-data)
* [How to import time series data](#how-to-import-time-series-data)
* [How to import data in native format](#how-to-import-data-in-native-format)
* [How to import data in JSON line format](#how-to-import-data-in-json-line-format)
* [How to import CSV data](#how-to-import-csv-data)
* [How to import data in Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format)
* [Relabeling](#relabeling)
* [Federation](#federation)
* [Capacity planning](#capacity-planning)
* [High availability](#high-availability)
* [Deduplication](#deduplication)
* [Retention](#retention)
* [Multiple retentions](#multiple-retentions)
* [Downsampling](#downsampling)
* [Multi-tenancy](#multi-tenancy)
* [Scalability and cluster version](#scalability-and-cluster-version)
* [Alerting](#alerting)
* [Security](#security)
* [Tuning](#tuning)
* [Monitoring](#monitoring)
* [TSDB stats](#tsdb-stats)
* [Cardinality limiter](#cardinality-limiter)
* [Troubleshooting](#troubleshooting)
* [Data migration](#data-migration)
* [Backfilling](#backfilling)
* [Data updates](#data-updates)
* [Replication](#replication)
* [Backups](#backups)
* [Profiling](#profiling)
* [Integrations](#integrations)
* [Third-party contributions](#third-party-contributions)
* [Contacts](#contacts)
* [Community and contributions](#community-and-contributions)
* [Reporting bugs](#reporting-bugs)
* [VictoriaMetrics Logo](#victoria-metrics-logo)
* [Logo Usage Guidelines](#logo-usage-guidelines)
* [Font used](#font-used)
* [Color Palette](#color-palette)
* [We kindly ask](#we-kindly-ask)
* [List of command-line flags](#list-of-command-line-flags)
## How to start VictoriaMetrics
Just download [VictoriaMetrics executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or [Docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and start it with the desired command-line flags.

The following command-line flags are used the most:

* `-storageDataPath` - VictoriaMetrics stores all the data in this directory. The default path is `victoria-metrics-data` in the current working directory.
* `-retentionPeriod` - retention for stored data. Older data is automatically deleted. The default retention is 1 month. See [these docs](#retention) for more details.

Other flags have good enough default values, so set them only if you really need to change them. Pass `-help` to see [all the available flags with their descriptions and default values](#list-of-command-line-flags).

See how to [ingest data to VictoriaMetrics](#how-to-import-time-series-data), how to [query VictoriaMetrics via Grafana](#grafana-setup), how to [query VictoriaMetrics via Graphite API](#graphite-api-usage) and how to [handle alerts](#alerting).

VictoriaMetrics accepts [Prometheus querying API requests](#prometheus-querying-api-usage) on port `8428` by default.
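For instance, an instant query can be sent to the Prometheus-compatible querying API once VictoriaMetrics is running. A minimal sketch, assuming a local instance on the default port and that a metric named `up` has already been ingested (the metric name is just an illustration):

```bash
# Query the current value of the `up` series via the Prometheus-compatible API.
curl 'http://localhost:8428/api/v1/query?query=up'
```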
It is recommended setting up [monitoring](#monitoring) for VictoriaMetrics.
### Environment variables
Each flag value can be set via environment variables according to the following rules:

* The `-envflag.enable` flag must be set.
* Each `.` char in the flag name must be substituted with `_` (for example `-insert.maxQueueDuration <duration>` will translate to `insert_maxQueueDuration=<duration>`).
* For repeating flags an alternative syntax can be used by joining the different values into one using the `,` char as separator (for example `-storageNode <nodeA> -storageNode <nodeB>` will translate to `storageNode=<nodeA>,<nodeB>`).
* An environment var prefix can be set via the `-envflag.prefix` flag. For instance, if `-envflag.prefix=VM_`, then env vars must be prepended with `VM_`.
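Putting these rules together, a minimal sketch of starting VictoriaMetrics with flags taken from the environment (the flag values are placeholders; the binary name may differ depending on the release archive):

```bash
# VM_retentionPeriod maps to -retentionPeriod and VM_storageDataPath maps
# to -storageDataPath thanks to the VM_ prefix.
VM_retentionPeriod=3 VM_storageDataPath=/var/lib/victoria-metrics \
  ./victoria-metrics-prod -envflag.enable -envflag.prefix=VM_
```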
### Configuration with snap package
Snap package for VictoriaMetrics is available [here](https://snapcraft.io/victoriametrics).

Command-line flags for the Snap package can be set with the following command:

```text
echo 'FLAGS="-selfScrapeInterval=10s -search.logSlowQueryDuration=20s"' > $SNAP_DATA/var/snap/victoriametrics/current/extra_flags
snap restart victoriametrics
```

Or add the needed command-line flags to the file `$SNAP_DATA/var/snap/victoriametrics/current/extra_flags`.

Do not change the value of the `-storageDataPath` flag, because the snap package has limited access to the host filesystem.

Changing the scrape configuration is possible with a text editor:

```text
vi $SNAP_DATA/var/snap/victoriametrics/current/etc/victoriametrics-scrape-config.yaml
```

After changes are made, trigger a config re-read with the command `curl 127.0.0.1:8428/-/reload`.
## Prometheus setup
Add the following lines to the Prometheus config file (it is usually located at `/etc/prometheus/prometheus.yml`) in order to send data to VictoriaMetrics:

```yml
remote_write:
  - url: http://<victoriametrics-addr>:8428/api/v1/write
```

Prometheus writes incoming data to local storage and replicates it to remote storage. This means that data remains available in local storage for `--storage.tsdb.retention.time` duration even if remote storage is unavailable.

If you plan sending data to VictoriaMetrics from multiple Prometheus instances, then add the following lines into the `global` section of [Prometheus config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file):

```yml
global:
  external_labels:
    datacenter: dc-123
```

This instructs Prometheus to add `datacenter=dc-123` label to each sample before sending it to remote storage. The label name can be arbitrary - `datacenter` is just an example. The label value must be unique across Prometheus instances, so time series could be filtered and grouped by this label.

For highly loaded Prometheus instances (200k+ samples per second) the following tuning may be applied:

```yaml
remote_write:
  - url: http://<victoriametrics-addr>:8428/api/v1/write
    queue_config:
      max_samples_per_send: 10000
      capacity: 20000
      max_shards: 30
```

Using remote write increases memory usage for Prometheus by up to ~25%. If you are experiencing issues with too high memory consumption of Prometheus, then try to lower the `max_samples_per_send` and `capacity` params. Keep in mind that these two params are tightly connected. Read more about tuning remote write for Prometheus [here](https://prometheus.io/docs/practices/remote_write).

It is recommended upgrading Prometheus to [v2.12.0](https://github.com/prometheus/prometheus/releases) or newer, since previous versions may have issues with `remote_write`.

Take a look also at [vmagent](https://docs.victoriametrics.com/vmagent.html) and [vmalert](https://docs.victoriametrics.com/vmalert.html), which can be used as a faster and less resource-hungry alternative to Prometheus.
## Grafana setup

Create a Prometheus datasource in Grafana with the following URL:

```url
http://<victoriametrics-addr>:8428
```

Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.

Then build graphs and dashboards for the created datasource using [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) or [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
## How to upgrade VictoriaMetrics
It is safe upgrading VictoriaMetrics to new versions unless the [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is safe skipping multiple versions during the upgrade unless the release notes say otherwise. It is recommended performing regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features.

It is also safe downgrading to older versions unless the [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise.

The following steps must be performed during the upgrade / downgrade procedure:

* Send `SIGINT` signal to the VictoriaMetrics process in order to gracefully stop it.
* Wait until the process stops. This can take a few seconds.
* Start the upgraded VictoriaMetrics.

Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details. The same applies also to [vmagent](https://docs.victoriametrics.com/vmagent.html).
## How to apply new config to VictoriaMetrics
VictoriaMetrics is configured via command-line flags, so it must be restarted when new command-line flags should be applied:

* Send `SIGINT` signal to the VictoriaMetrics process in order to gracefully stop it.
* Wait until the process stops. This can take a few seconds.
* Start VictoriaMetrics with the new command-line flags.

Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details. The same applies also to [vmagent](https://docs.victoriametrics.com/vmagent.html).
## How to scrape Prometheus exporters such as [node-exporter](https://github.com/prometheus/node_exporter)
VictoriaMetrics can be used as drop-in replacement for Prometheus for scraping targets configured in `prometheus.yml` config file according to [the specification](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file). Just set the `-promscrape.config` command-line flag to the path to `prometheus.yml` config - and VictoriaMetrics should start scraping the configured targets. Currently the following [scrape_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) types are supported:

* [static_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config)
* [file_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config)
* [http_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config)

File a [feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you need support for other `*_sd_config` types.

The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
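For instance, a minimal sketch of scraping a locally running node-exporter (the config path, job name and target address are placeholders; the binary name may differ depending on the release archive):

```yml
# /etc/prometheus/prometheus.yml
scrape_configs:
- job_name: 'node-exporter'
  static_configs:
  - targets: ['localhost:9100']
```

```bash
/path/to/victoria-metrics-prod -promscrape.config=/etc/prometheus/prometheus.yml
```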
## How to send data from InfluxDB-compatible agents such as [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)

For instance, put the following lines into `Telegraf` config, so it sends data to VictoriaMetrics instead of InfluxDB:

```
[[outputs.influxdb]]
  urls = ["http://<victoriametrics-addr>:8428"]
```

Another option is to enable TCP and UDP receiver for InfluxDB line protocol via the `-influxListenAddr` command-line flag and stream plain InfluxDB line protocol data to the configured TCP and/or UDP addresses.

VictoriaMetrics performs the following transformations to the ingested InfluxDB data:

* [`db` query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value unless `db` tag exists in the InfluxDB line.
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names.
* Field values are mapped to time series values.
* Tags are mapped to Prometheus labels as-is.

For example, the following InfluxDB line:

```raw
foo,tag1=value1,tag2=value2 field1=12,field2=40
```

is converted into the following Prometheus-format data points:

```raw
foo_field1{tag1="value1", tag2="value2"} 12
foo_field2{tag1="value1", tag2="value2"} 40
```

Example for writing data with [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/) to local VictoriaMetrics using `curl`:

```bash
curl -d 'measurement,tag1=value1,tag2=value2 field1=123 field2=1.23' -X POST 'http://localhost:8428/write'
```

The `/api/v1/export` endpoint should return the following response:

```
{"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1560272508147]}
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[1.23],"timestamps":[1560272508147]}
```

Note that InfluxDB line protocol expects [timestamps in *nanoseconds* by default](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/#timestamp), while VictoriaMetrics stores them with *milliseconds* precision.

Extra labels may be added to all the written time series by passing `extra_label=name=value` query args.
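For instance, a minimal sketch of such a request (assuming a local single-node instance on the default port; `foo` and `bar` are placeholder label name and value):

```bash
# Every series written by this request gets an additional {foo="bar"} label.
curl -d 'measurement,tag1=value1 field1=10' -X POST 'http://localhost:8428/write?extra_label=foo=bar'
```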
The exported CSV data can be imported to VictoriaMetrics via [/api/v1/import/csv](#how-to-import-csv-data).

## How to import time series data

Time series data can be imported via any supported ingestion protocol:

* [Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). See [these docs](#prometheus-setup) for details.
* InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
* Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
* OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
* OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
## Relabeling

Example contents for the `-relabelConfig` file:

```yml
# Drop the metric (or the scrape target) with the `{__meta_kubernetes_pod_container_init="true"}` label.
- action: drop
  source_labels: [__meta_kubernetes_pod_container_init]
  regex: true
```
See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details about relabeling in VictoriaMetrics.
## Federation
## Multi-tenancy
Single-node VictoriaMetrics doesn't support multi-tenancy. Use the [cluster version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy) instead.
## Scalability and cluster version
The exceeded limits can be [monitored](#monitoring) with the following metrics: `vm_hourly_series_limit_rows_dropped_total` and `vm_daily_series_limit_rows_dropped_total`.

These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).

See also the more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
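The limits themselves are set via command-line flags when starting VictoriaMetrics. A minimal sketch, assuming the `-storage.maxHourlySeries` and `-storage.maxDailySeries` flags (available in recent releases; verify with `-help` for your version, and treat the values as placeholders):

```bash
# Cap the number of unique series seen over the last hour and the last day.
/path/to/victoria-metrics-prod -storage.maxHourlySeries=1000000 -storage.maxDailySeries=5000000
```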
## Troubleshooting
Pass `-help` to VictoriaMetrics in order to see the list of supported command-line flags with their description:

```
-influx.databaseNames array
   Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb
   Supports an array of values separated by comma or specified via multiple flags.
-influx.maxLineSize size
   The maximum size in bytes for a single InfluxDB line during parsing
   Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144)
-influxListenAddr string
   TCP and UDP address to listen for InfluxDB line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://<victoriametrics>:8428/write
-influxMeasurementFieldSeparator string
   Separator for '{measurement}{separator}{field_name}' metric name when inserted via InfluxDB line protocol (default "_")
-influxSkipMeasurement
   Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'
-influxSkipSingleField
   Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field
-influxTrimTimestamp duration
   Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-insert.maxQueueDuration duration
   The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
-logNewSeries
```
# vmagent

Additional features are available in `vmagent`, such as the ability to push metrics instead of pulling them. See [Quick Start](#quick-start) for details.

* Can add, remove and modify labels (aka tags) via Prometheus relabeling. Can filter data before sending it to remote storage. See [these docs](#relabeling) for details.
* Accepts data via all ingestion protocols supported by VictoriaMetrics:
  * InfluxDB line protocol via `http://<vmagent>:8429/write`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf).
  * Graphite plaintext protocol if `-graphiteListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd).
  * OpenTSDB telnet and http protocols if `-opentsdbListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents).
  * Prometheus remote write protocol via `http://<vmagent>:8429/api/v1/write`.
  * JSON lines import protocol via `http://<vmagent>:8429/api/v1/import`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format).
  * Native data import protocol via `http://<vmagent>:8429/api/v1/import/native`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-native-format).
  * Prometheus exposition format via `http://<vmagent>:8429/api/v1/import/prometheus`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-prometheus-exposition-format) for details.
  * Arbitrary CSV data via `http://<vmagent>:8429/api/v1/import/csv`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-csv-data).
* Can replicate collected metrics simultaneously to multiple remote storage systems.
* Works smoothly in environments with unstable connections to remote storage. If the remote storage is unavailable, the collected metrics are buffered at `-remoteWrite.tmpDataPath` and sent to the remote storage as soon as the connection is restored.

Example command line:

```
/path/to/vmagent -promscrape.config=/path/to/prometheus.yml -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
```

If you only need to collect InfluxDB data, then the following command is sufficient:

```
/path/to/vmagent -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
```

Then send InfluxDB data to `http://vmagent-host:8429`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for more details.
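For instance, a minimal sketch of pushing a single InfluxDB line to a locally running `vmagent` (the measurement, tags and field are placeholders):

```bash
# vmagent accepts InfluxDB line protocol over HTTP on /write (port 8429 by default).
curl -d 'foo,tag1=value1 field1=12' -X POST 'http://localhost:8429/write'
```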
`vmagent` is also available in [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags).
Labels can be added to metrics by the following mechanisms:
## Relabeling
`vmagent` and VictoriaMetrics support Prometheus-compatible relabeling. They provide the following additional actions on top of the actions from [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config):

* `replace_all`: replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`.
* `labelmap_all`: replaces all of the occurrences of `regex` in all the label names with the `replacement`.
* `keep_if_equal`: keeps the entry if all the label values from `source_labels` are equal.
* `drop_if_equal`: drops the entry if all the label values from `source_labels` are equal.
* `keep_metrics`: keeps all the metrics with names matching the given `regex`.
* `drop_metrics`: drops all the metrics with names matching the given `regex`.
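For instance, a minimal sketch of `replace_all` following the semantics described above (the label name and characters are placeholders):

```yaml
# Replace every "-" char in the "instance" label value with "_"
# and store the result back into the "instance" label.
- action: replace_all
  source_labels: [instance]
  target_label: instance
  regex: "-"
  replacement: "_"
```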
The `regex` value can be split into multiple lines for improved readability and maintainability. These lines are automatically joined with the `|` char when parsed. For example, the following configs are equivalent:

```yaml
- action: keep_metrics
  regex: "metric_a|metric_b|foo_.+"
```

```yaml
- action: keep_metrics
  regex:
  - "metric_a"
  - "metric_b"
  - "foo_.+"
```

The relabeling can be defined in the following places:
You can read more about relabeling in the following articles:
## Prometheus staleness markers
`vmagent` sends [Prometheus staleness markers](https://www.robustperception.io/staleness-and-promql) to `-remoteWrite.url` in the following cases:

* If they are passed to `vmagent` via [Prometheus remote_write protocol](#prometheus-remote_write-proxy).
* If the metric disappears from the list of scraped metrics, then a stale marker is sent for this particular metric.
* If the scrape target becomes temporarily unavailable, then stale markers are sent for all the metrics scraped from this target.
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.
* Stale markers are sent for all the scraped metrics on graceful shutdown of `vmagent`.

Prometheus staleness markers aren't sent in [stream parsing mode](#stream-parsing-mode) or if the `-promscrape.noStaleMarkers` command-line flag is set.
## Stream parsing mode
By default `vmagent` reads the full response from the scrape target into memory, then parses it, applies [relabeling](#relabeling) and then pushes the resulting metrics to the configured `-remoteWrite.url`. This mode works well for the majority of cases when the scrape target exposes a small number of metrics (e.g. less than 10 thousand). But this mode may take large amounts of memory when the scrape target exposes a big number of metrics. In this case it is recommended enabling stream parsing mode. When this mode is enabled, `vmagent` reads the response from the scrape target in chunks, then immediately processes every chunk and pushes the processed metrics to remote storage. This allows saving memory when scraping targets that expose millions of metrics. Stream parsing mode may be enabled in the following places:

- Via `-promscrape.streamParse` command-line flag. In this case all the scrape targets defined in the file pointed by `-promscrape.config` are scraped in stream parsing mode.
- Via `stream_parse: true` option at `scrape_configs` section. In this case all the scrape targets defined in this section are scraped in stream parsing mode.
- Via `__stream_parse__=true` label, which can be set via [relabeling](#relabeling) at `relabel_configs` section. In this case stream parsing mode is enabled for the corresponding scrape targets. Typical use case: to set the label via [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) for targets exposing a big number of metrics.

Examples:

```yml
scrape_configs:
- job_name: 'big-federate'
  stream_parse: true
  static_configs:
  - targets:
    - big-prometeus1
    - big-prometeus2
  honor_labels: true
  metrics_path: /federate
  params:
    'match[]': ['{__name__!=""}']
```

Note that the `sample_limit` option doesn't prevent data from being pushed to remote storage if stream parsing is enabled, because the parsed data is pushed to remote storage as soon as it is parsed.
## Cardinality limiter
By default `vmagent` doesn't limit the number of time series each scrape target can expose. The limit can be enforced in the following places:

- Via `-promscrape.seriesLimitPerTarget` command-line option. This limit is applied individually to all the scrape targets defined in the file pointed by `-promscrape.config`.
- Via `series_limit` config option at `scrape_config` section. This limit is applied individually to all the scrape targets defined in the given `scrape_config`.
- Via `__series_limit__` label, which can be set with [relabeling](#relabeling) at `relabel_configs` section. This limit is applied to the corresponding scrape targets. Typical use case: to set the limit via [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) for targets which may expose too high number of time series.

All the scraped metrics are dropped for time series exceeding the given limit. The exceeded limit can be [monitored](#monitoring) via the `promscrape_series_limit_rows_dropped_total` metric.

See also the `sample_limit` option at [scrape_config section](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
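For instance, a minimal sketch of a per-job series limit (the job name, target address and limit value are placeholders):

```yml
scrape_configs:
- job_name: 'big-app'
  # Drop samples for new time series once this target exposes more than 1000 unique series.
  series_limit: 1000
  static_configs:
  - targets: ['big-app-host:8080']
```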
It may be useful to perform `vmagent` rolling update without any scrape loss.

* `vmagent` drops data blocks if remote storage replies with `400 Bad Request` and `409 Conflict` HTTP responses. The number of dropped blocks can be monitored via the `vmagent_remotewrite_packets_dropped_total` metric exported at [/metrics page](#monitoring).
* Use `-remoteWrite.queues=1` when `-remoteWrite.url` points to remote storage, which doesn't accept out-of-order samples (aka data backfilling). Such storage systems include Prometheus, Cortex and Thanos, which typically emit `out of order sample` errors. The best solution is to use remote storage with [backfilling support](https://docs.victoriametrics.com/#backfilling).
* `vmagent` buffers scraped data at the `-remoteWrite.tmpDataPath` directory until it is sent to `-remoteWrite.url`. The directory can grow large when remote storage is unavailable for extended periods of time and if `-remoteWrite.maxDiskUsagePerURL` isn't set.
See the docs at https://docs.victoriametrics.com/vmagent.html .

```
-influx.databaseNames array
   Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb
   Supports an array of values separated by comma or specified via multiple flags.
-influx.maxLineSize size
   The maximum size in bytes for a single InfluxDB line during parsing
   Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144)
-influxListenAddr string
   TCP and UDP address to listen for InfluxDB line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://<vmagent>:8429/write
-influxMeasurementFieldSeparator string
   Separator for '{measurement}{separator}{field_name}' metric name when inserted via InfluxDB line protocol (default "_")
-influxSkipMeasurement
   Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'
-influxSkipSingleField
   Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field
-influxTrimTimestamp duration
   Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-insert.maxQueueDuration duration
   The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
-loggerDisableTimestamps
```
# vmalert

* by default, rules execution is sequential within one group, but persisting of execution results to remote storage is asynchronous. Hence, the user shouldn't rely on chaining of recording rules when the result of a previous recording rule is reused in the next one;
## QuickStart
If `-clusterMode` is enabled, then `-datasource.url`, `-remoteRead.url` and `-remoteWrite.url` must contain only the hostname without tenant id. For example: `-datasource.url=http://vmselect:8481`. `vmalert` automatically adds the specified tenant to the urls per each recording rule in this case.

The enterprise version of vmalert is available in `vmutils-*-enterprise.tar.gz` files at the [release page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) and in `*-enterprise` tags at [Docker Hub](https://hub.docker.com/r/victoriametrics/vmalert/tags).
### WEB
`vmalert` runs a web-server (`-httpListenAddr`) for serving metrics and alerts endpoints (see the sketch after this list):

* `http://<vmalert-addr>` - UI;
* `http://<vmalert-addr>/api/v1/groups` - list of all loaded groups and rules;
* `http://<vmalert-addr>/api/v1/alerts` - list of all active alerts;
* `http://<vmalert-addr>/api/v1/<groupID>/<alertID>/status` - get alert status by ID.
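For instance, a minimal sketch of inspecting a locally running `vmalert` (assuming the default `-httpListenAddr` port `8880`; verify the port for your deployment):

```bash
# List all loaded groups and rules, then all active alerts.
curl 'http://localhost:8880/api/v1/groups'
curl 'http://localhost:8880/api/v1/alerts'
```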
The shortlist of configuration flags is the following:

```
-datasource.appendTypePrefix
   Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.
-datasource.basicAuth.password string
   Optional basic auth password for -datasource.url
-datasource.basicAuth.passwordFile string
   Optional path to basic auth password to use for -datasource.url
-datasource.basicAuth.username string
   Optional basic auth username for -datasource.url
-datasource.bearerToken string
   Optional bearer auth token to use for -datasource.url.
-datasource.bearerTokenFile string
   Optional path to bearer token file to use for -datasource.url.
-datasource.lookback duration
   Lookback defines how far into the past to look when evaluating queries. For example, if the datasource.lookback=5m then param "time" with value now()-5m will be added to every query.
-datasource.maxIdleConnections int
-pprofAuthKey string
   Auth key for /debug/pprof. It overrides httpAuth settings
-remoteRead.basicAuth.password string
   Optional basic auth password for -remoteRead.url
-remoteRead.basicAuth.passwordFile string
   Optional path to basic auth password to use for -remoteRead.url
-remoteRead.basicAuth.username string
   Optional basic auth username for -remoteRead.url
-remoteRead.bearerToken string
   Optional bearer auth token to use for -remoteRead.url.
-remoteRead.bearerTokenFile string
   Optional path to bearer token file to use for -remoteRead.url.
-remoteRead.ignoreRestoreErrors
   Whether to ignore errors from remote storage when restoring alerts state on startup. (default true)
-remoteRead.lookback duration
-remoteRead.url string
   Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has been successfully persisted its state. E.g. http://127.0.0.1:8428
-remoteWrite.basicAuth.password string
   Optional basic auth password for -remoteWrite.url
-remoteWrite.basicAuth.passwordFile string
   Optional path to basic auth password to use for -remoteWrite.url
-remoteWrite.basicAuth.username string
   Optional basic auth username for -remoteWrite.url
-remoteWrite.bearerToken string
   Optional bearer auth token to use for -remoteWrite.url.
-remoteWrite.bearerTokenFile string
   Optional path to bearer token file to use for -remoteWrite.url.
-remoteWrite.concurrency int
   Defines number of writers for concurrent writing into remote querier (default 1)
-remoteWrite.disablePathAppend
-rule value
   Supports an array of values separated by comma or specified via multiple flags.
-rule.configCheckInterval duration
   Interval for checking for changes in '-rule' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
-rule.maxResolveDuration duration
   Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group.
-rule.validateExpressions
   Whether to validate rules expressions via MetricsQL engine (default true)
-rule.validateTemplates
```
# vmauth

See the docs at https://docs.victoriametrics.com/vmauth.html .

```
-httpAuth.username string
   Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password
-httpListenAddr string
   TCP address to listen for http connections (default ":8427")
-logInvalidAuthTokens
   Whether to log requests with invalid auth tokens. Such requests are always counted at vmagent_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
-loggerDisableTimestamps
   Whether to disable writing timestamps in logs
-loggerErrorsPerSecondLimit int
```
# vmbackup

Backed up data can be restored with [vmrestore](https://docs.victoriametrics.com/vmrestore.html).

See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.

See also the [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html) tool built on top of `vmbackup`. This tool simplifies creation of hourly, daily, weekly and monthly backups.
or from any day (`YYYYMMDD` backups). Note that the hourly backup shouldn't run when the daily backup is running.

Do not forget to remove old snapshots and backups when they are no longer needed in order to save storage costs.

See also the [vmbackupmanager tool](https://docs.victoriametrics.com/vmbackupmanager.html) for automating smart backups.
## How does it work?
# vmctl

Flags are split into a list specific to the data source and a common list for the destination (prefixed with `vm`):

```
./vmctl influx --help
OPTIONS:
   --influx-addr value   InfluxDB server addr (default: "http://localhost:8086")
   --influx-user value   InfluxDB user [$INFLUX_USERNAME]
...
   --vm-addr vmctl       VictoriaMetrics address to perform import requests.
                         Should be the same as --httpListenAddr value for single-node version or vminsert component.
```
### Data mapping
Vmctl maps InfluxDB data the same way as VictoriaMetrics does by using the following rules:

* `influx-database` arg is mapped into `db` label value unless `db` tag exists in the InfluxDB line.
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with the `--influx-measurement-field-separator` command-line flag.
* Field values are mapped to time series values.
* Tags are mapped to Prometheus labels as-is.

For example, the following InfluxDB line:

```
foo,tag1=value1,tag2=value2 field1=12,field2=40
```
if flags `--prom-filter-time-start` or `--prom-filter-time-end` were set. Please note that the stats do not take into account timeseries or samples filtering. This will be done during the importing process.

The importing process takes the snapshot blocks revealed from the Explore procedure and processes them one by one, accumulating timeseries and samples. Please note that `vmctl` relies on responses from InfluxDB at this stage, so ensure that Explore queries are executed without errors or limits. Please see this [issue](https://github.com/VictoriaMetrics/vmctl/issues/30) for details. The data is processed in chunks and then sent to VM.

See more details for the cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
## Tuning
### InfluxDB mode

The flag `--influx-concurrency` controls how many concurrent requests may be sent to InfluxDB while fetching timeseries. Please set it wisely to avoid overwhelming InfluxDB.
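For instance, a minimal sketch of an InfluxDB migration run (addresses, database name and concurrency value are placeholders; the flags follow the `--influx-*` / `--vm-*` naming shown in the `--help` output above, and `--influx-database` corresponds to the `influx-database` arg from the data mapping rules):

```bash
./vmctl influx \
  --influx-addr http://localhost:8086 \
  --influx-database telegraf \
  --influx-concurrency 4 \
  --vm-addr http://localhost:8428
```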
22 go.mod

```
module github.com/VictoriaMetrics/VictoriaMetrics

require (
	cloud.google.com/go v0.94.1 // indirect
	cloud.google.com/go/storage v1.16.1
	github.com/VictoriaMetrics/fastcache v1.7.0

	// Do not use the original github.com/valyala/fasthttp because of issues
	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
	github.com/VictoriaMetrics/fasthttp v1.1.0
	github.com/VictoriaMetrics/metrics v1.18.0
	github.com/VictoriaMetrics/metricsql v0.21.0
	github.com/VividCortex/ewma v1.2.0 // indirect
	github.com/aws/aws-sdk-go v1.40.41
	github.com/cespare/xxhash/v2 v2.1.2
	github.com/cheggaaa/pb/v3 v3.0.8
	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
	github.com/golang/snappy v0.0.4
	github.com/influxdata/influxdb v1.9.3
	github.com/klauspost/compress v1.13.5
	github.com/mattn/go-isatty v0.0.14 // indirect
	github.com/mattn/go-runewidth v0.0.13 // indirect
	github.com/oklog/ulid v1.3.1
	github.com/prometheus/common v0.30.0 // indirect
	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
	github.com/urfave/cli/v2 v2.3.0
	github.com/valyala/fastjson v1.6.3
	github.com/valyala/fastrand v1.1.0
	github.com/valyala/fasttemplate v1.2.1
	github.com/valyala/gozstd v1.12.0
	github.com/valyala/histogram v1.2.0
	github.com/valyala/quicktemplate v1.7.0
	go.uber.org/atomic v1.9.0 // indirect
	golang.org/x/net v0.0.0-20210908191846-a5e095526f91
	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
	golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0
	golang.org/x/text v0.3.7 // indirect
	google.golang.org/api v0.56.0
	google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af // indirect
	gopkg.in/yaml.v2 v2.4.0
)
```
61 go.sum

```
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.94.1 h1:DwuSvDZ1pTYGbXo8yOJevCTr3BoBlE+OVkHAKiYQUXc=
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VictoriaMetrics/fastcache v1.7.0 h1:E6GibaGI685TafrI7E/QqZPkMsOzRw+3gpICQx08ISg=
github.com/VictoriaMetrics/fastcache v1.7.0/go.mod h1:n7Sl+ioh/HlWeYHLSIBIE8TcZFHg/+xgvomWSS5xuEE=
github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a/ueoLdLL0=
github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ=
github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
github.com/VictoriaMetrics/metrics v1.18.0 h1:vov5NxDHRSXFbdiH4dYLYEjKLoAXXSQ7hcnG8TSD9JQ=
github.com/VictoriaMetrics/metrics v1.18.0/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
github.com/VictoriaMetrics/metricsql v0.21.0 h1:wA/IVfRFQaThy4bM1kAmPiCR0BkWv4tEXD9lBF+GPdU=
github.com/VictoriaMetrics/metricsql v0.21.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.41 h1:v/Y4bB8+wHCONtKV+fuHTzLiqC08lk8e9HqYhRB9PBQ=
github.com/aws/aws-sdk-go v1.40.41/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc=
github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8=
```
|
||||
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/valyala/gozstd v1.12.0 h1:CVG/hZKv3VqgiesiDrFrkgTIwDr5+9yaRaFDgMso5lI=
|
||||
github.com/valyala/gozstd v1.12.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
|
||||
github.com/valyala/histogram v1.1.2 h1:vOk5VrGjMBIoPR5k6wA8vBaC8toeJ8XO0yfRjFEc1h8=
|
||||
github.com/valyala/histogram v1.1.2/go.mod h1:CZAr6gK9dbD7hYx2s8WSPh0p5x5wETjC+2b3PJVtEdg=
|
||||
github.com/valyala/quicktemplate v1.6.3 h1:O7EuMwuH7Q94U2CXD6sOX8AYHqQqWtmIk690IhmpkKA=
|
||||
github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
|
||||
github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
|
||||
github.com/valyala/quicktemplate v1.7.0 h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTcjMJzCM=
|
||||
github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
|
||||
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
|
@ -1016,6 +1018,7 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
|
@ -1109,10 +1112,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
|||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw=
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210908191846-a5e095526f91 h1:E8wdt+zBjoxD3MA65wEc3pl25BsTi7tbkpwc4ANThjc=
|
||||
golang.org/x/net v0.0.0-20210908191846-a5e095526f91/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1198,7 +1202,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1216,7 +1219,6 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1228,8 +1230,8 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e h1:XMgFehsDnnLGtjvjOfqWSUzt0alpTR1RSEuznObga2c=
|
||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 h1:xrCZDmdtoloIiooiA9q0OQb9r8HejIHYoHGhGCe1pGg=
|
||||
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1372,6 +1374,7 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk
|
|||
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
|
||||
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
|
||||
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI=
|
||||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
|
@ -1440,8 +1443,10 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr
|
|||
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
|
||||
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210825212027-de86158e7fda/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 h1:z+ErRPu0+KS02Td3fOAgdX+lnPDh/VyaABEJPD4JRQs=
|
||||
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af h1:aLMMXFYqw01RA6XJim5uaN+afqNNjc9P8HPAbnpnc5s=
|
||||
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
|
|
@@ -256,7 +256,7 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
http.Error(w, errMsg, http.StatusServiceUnavailable)
return
case "/ping":
// This is needed for compatibility with Influx agents.
// This is needed for compatibility with InfluxDB agents.
// See https://docs.influxdata.com/influxdb/v1.7/tools/api/#ping-http-endpoint
status := http.StatusNoContent
if verbose := r.FormValue("verbose"); verbose == "true" {
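The `/ping` hunk above only renames Influx to InfluxDB in a comment, but the endpoint itself is worth a sketch: InfluxDB agents probe `/ping` before writing, expecting `204 No Content` by default, or `200` with a small JSON body when `verbose=true` is passed (see the linked InfluxDB docs). The handler below is a minimal standalone sketch of that contract; the exact verbose response body is an assumption, since this diff does not show it.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// pingHandler sketches an InfluxDB-compatible /ping endpoint: 204 No Content
// by default, 200 plus a small JSON body when ?verbose=true is passed.
// The JSON payload here is illustrative only.
func pingHandler(w http.ResponseWriter, r *http.Request) {
	if r.FormValue("verbose") == "true" {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"version":"unknown"}`) // implicit 200 OK
		return
	}
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/ping", pingHandler)
	log.Fatal(http.ListenAndServe(":8428", nil))
}
```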
@@ -24,7 +24,7 @@ var (
writeErrorsUDP = metrics.NewCounter(`vm_ingestserver_request_errors_total{type="influx", name="write", net="udp"}`)
)

// Server accepts Influx line protocol over TCP and UDP.
// Server accepts InfluxDB line protocol over TCP and UDP.
type Server struct {
addr string
lnTCP net.Listener

@@ -33,22 +33,22 @@ type Server struct {
cm ingestserver.ConnsMap
}

// MustStart starts Influx server on the given addr.
// MustStart starts InfluxDB server on the given addr.
//
// The incoming connections are processed with insertHandler.
//
// MustStop must be called on the returned server when it is no longer needed.
func MustStart(addr string, insertHandler func(r io.Reader) error) *Server {
logger.Infof("starting TCP Influx server at %q", addr)
logger.Infof("starting TCP InfluxDB server at %q", addr)
lnTCP, err := netutil.NewTCPListener("influx", addr)
if err != nil {
logger.Fatalf("cannot start TCP Influx server at %q: %s", addr, err)
logger.Fatalf("cannot start TCP InfluxDB server at %q: %s", addr, err)
}

logger.Infof("starting UDP Influx server at %q", addr)
logger.Infof("starting UDP InfluxDB server at %q", addr)
lnUDP, err := net.ListenPacket(netutil.GetUDPNetwork(), addr)
if err != nil {
logger.Fatalf("cannot start UDP Influx server at %q: %s", addr, err)
logger.Fatalf("cannot start UDP InfluxDB server at %q: %s", addr, err)
}

s := &Server{

@@ -61,30 +61,30 @@ func MustStart(addr string, insertHandler func(r io.Reader) error) *Server {
go func() {
defer s.wg.Done()
s.serveTCP(insertHandler)
logger.Infof("stopped TCP Influx server at %q", addr)
logger.Infof("stopped TCP InfluxDB server at %q", addr)
}()
s.wg.Add(1)
go func() {
defer s.wg.Done()
s.serveUDP(insertHandler)
logger.Infof("stopped UDP Influx server at %q", addr)
logger.Infof("stopped UDP InfluxDB server at %q", addr)
}()
return s
}

// MustStop stops the server.
func (s *Server) MustStop() {
logger.Infof("stopping TCP Influx server at %q...", s.addr)
logger.Infof("stopping TCP InfluxDB server at %q...", s.addr)
if err := s.lnTCP.Close(); err != nil {
logger.Errorf("cannot close TCP Influx server: %s", err)
logger.Errorf("cannot close TCP InfluxDB server: %s", err)
}
logger.Infof("stopping UDP Influx server at %q...", s.addr)
logger.Infof("stopping UDP InfluxDB server at %q...", s.addr)
if err := s.lnUDP.Close(); err != nil {
logger.Errorf("cannot close UDP Influx server: %s", err)
logger.Errorf("cannot close UDP InfluxDB server: %s", err)
}
s.cm.CloseAll()
s.wg.Wait()
logger.Infof("TCP and UDP Influx servers at %q have been stopped", s.addr)
logger.Infof("TCP and UDP InfluxDB servers at %q have been stopped", s.addr)
}

func (s *Server) serveTCP(insertHandler func(r io.Reader) error) {

@@ -102,9 +102,9 @@ func (s *Server) serveTCP(insertHandler func(r io.Reader) error) {
if strings.Contains(err.Error(), "use of closed network connection") {
break
}
logger.Fatalf("unrecoverable error when accepting TCP Influx connections: %s", err)
logger.Fatalf("unrecoverable error when accepting TCP InfluxDB connections: %s", err)
}
logger.Fatalf("unexpected error when accepting TCP Influx connections: %s", err)
logger.Fatalf("unexpected error when accepting TCP InfluxDB connections: %s", err)
}
if !s.cm.Add(c) {
_ = c.Close()

@@ -120,7 +120,7 @@ func (s *Server) serveTCP(insertHandler func(r io.Reader) error) {
writeRequestsTCP.Inc()
if err := insertHandler(c); err != nil {
writeErrorsTCP.Inc()
logger.Errorf("error in TCP Influx conn %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err)
logger.Errorf("error in TCP InfluxDB conn %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err)
}
}()
}

@@ -153,14 +153,14 @@ func (s *Server) serveUDP(insertHandler func(r io.Reader) error) {
break
}
}
logger.Errorf("cannot read Influx UDP data: %s", err)
logger.Errorf("cannot read InfluxDB UDP data: %s", err)
continue
}
bb.B = bb.B[:n]
writeRequestsUDP.Inc()
if err := insertHandler(bb.NewReader()); err != nil {
writeErrorsUDP.Inc()
logger.Errorf("error in UDP Influx conn %q<->%q: %s", s.lnUDP.LocalAddr(), addr, err)
logger.Errorf("error in UDP InfluxDB conn %q<->%q: %s", s.lnUDP.LocalAddr(), addr, err)
continue
}
}
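For context on how this ingest server is wired up: MustStart's signature is visible in the hunks above, so a minimal consumer looks like the sketch below. The handler body is an assumption for illustration; the real insert handler parses InfluxDB line protocol rather than discarding it.

```go
package main

import (
	"io"
	"log"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/influx"
)

func main() {
	// insertHandler receives each TCP connection (or UDP packet) as an io.Reader.
	// A real handler would parse InfluxDB line protocol; this one just counts bytes.
	srv := influx.MustStart(":8089", func(r io.Reader) error {
		n, err := io.Copy(io.Discard, r)
		log.Printf("received %d bytes of InfluxDB line protocol", n)
		return err
	})
	time.Sleep(time.Minute) // serve for a while
	srv.MustStop()          // close TCP+UDP listeners and wait for in-flight handlers
}
```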
@@ -14,13 +14,61 @@ import (
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
type RelabelConfig struct {
SourceLabels []string `yaml:"source_labels,flow,omitempty"`
Separator *string `yaml:"separator,omitempty"`
TargetLabel string `yaml:"target_label,omitempty"`
Regex *string `yaml:"regex,omitempty"`
Modulus uint64 `yaml:"modulus,omitempty"`
Replacement *string `yaml:"replacement,omitempty"`
Action string `yaml:"action,omitempty"`
SourceLabels []string `yaml:"source_labels,flow,omitempty"`
Separator *string `yaml:"separator,omitempty"`
TargetLabel string `yaml:"target_label,omitempty"`
Regex *MultiLineRegex `yaml:"regex,omitempty"`
Modulus uint64 `yaml:"modulus,omitempty"`
Replacement *string `yaml:"replacement,omitempty"`
Action string `yaml:"action,omitempty"`
}

// MultiLineRegex contains a regex, which can be split into multiple lines.
//
// These lines are joined with "|" then.
// For example:
//
// regex:
// - foo
// - bar
//
// is equivalent to:
//
// regex: "foo|bar"
type MultiLineRegex struct {
s string
}

// UnmarshalYAML unmarshals mlr from YAML passed to f.
func (mlr *MultiLineRegex) UnmarshalYAML(f func(interface{}) error) error {
var v interface{}
if err := f(&v); err != nil {
return err
}
var a []string
switch x := v.(type) {
case string:
a = []string{x}
case []interface{}:
a = make([]string, len(x))
for i, xx := range x {
s, ok := xx.(string)
if !ok {
return fmt.Errorf("`regex` must contain array of strings; got %T", xx)
}
a[i] = s
}
default:
return fmt.Errorf("unexpected type for `regex`: %T; want string or []string", v)
}
mlr.s = strings.Join(a, "|")
return nil
}

// MarshalYAML marshals mlr to YAML.
func (mlr *MultiLineRegex) MarshalYAML() (interface{}, error) {
a := strings.Split(mlr.s, "|")
return a, nil
}

// ParsedConfigs represents parsed relabel configs.

@@ -107,18 +155,19 @@ func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
regexCompiled := defaultRegexForRelabelConfig
regexOriginalCompiled := defaultOriginalRegexForRelabelConfig
if rc.Regex != nil {
regex := *rc.Regex
regex := rc.Regex.s
regexOrig := regex
if rc.Action != "replace_all" && rc.Action != "labelmap_all" {
regex = "^(?:" + *rc.Regex + ")$"
regex = "^(?:" + regex + ")$"
}
re, err := regexp.Compile(regex)
if err != nil {
return nil, fmt.Errorf("cannot parse `regex` %q: %w", regex, err)
}
regexCompiled = re
reOriginal, err := regexp.Compile(*rc.Regex)
reOriginal, err := regexp.Compile(regexOrig)
if err != nil {
return nil, fmt.Errorf("cannot parse `regex` %q: %w", *rc.Regex, err)
return nil, fmt.Errorf("cannot parse `regex` %q: %w", regexOrig, err)
}
regexOriginalCompiled = reOriginal
}

@@ -169,6 +218,24 @@ func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
if modulus < 1 {
return nil, fmt.Errorf("unexpected `modulus` for `action=hashmod`: %d; must be greater than 0", modulus)
}
case "keep_metrics":
if rc.Regex == nil || rc.Regex.s == "" {
return nil, fmt.Errorf("`regex` must be non-empty for `action=keep_metrics`")
}
if len(sourceLabels) > 0 {
return nil, fmt.Errorf("`source_labels` must be empty for `action=keep_metrics`; got %q", sourceLabels)
}
sourceLabels = []string{"__name__"}
action = "keep"
case "drop_metrics":
if rc.Regex == nil || rc.Regex.s == "" {
return nil, fmt.Errorf("`regex` must be non-empty for `action=drop_metrics`")
}
if len(sourceLabels) > 0 {
return nil, fmt.Errorf("`source_labels` must be empty for `action=drop_metrics`; got %q", sourceLabels)
}
sourceLabels = []string{"__name__"}
action = "drop"
case "labelmap":
case "labelmap_all":
case "labeldrop":
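The net effect of MultiLineRegex is easiest to see through a YAML round-trip, mirroring the new TestRelabelConfigMarshalUnmarshal in the next file. Below is a minimal sketch, assuming the exported RelabelConfig type from lib/promrelabel and the gopkg.in/yaml.v2 dependency already used above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
	"gopkg.in/yaml.v2"
)

func main() {
	// A single-string regex decodes into the same MultiLineRegex as a list:
	// internally list items are joined with "|".
	data := `
- action: keep_metrics
  regex: foo|bar
`
	var rcs []promrelabel.RelabelConfig
	if err := yaml.UnmarshalStrict([]byte(data), &rcs); err != nil {
		log.Fatalf("cannot unmarshal: %s", err)
	}
	out, err := yaml.Marshal(&rcs)
	if err != nil {
		log.Fatalf("cannot marshal: %s", err)
	}
	// MarshalYAML splits on "|" again, so the config round-trips as a list:
	// - regex:
	//   - foo
	//   - bar
	//   action: keep_metrics
	fmt.Printf("%s", out)
}
```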
@@ -3,16 +3,46 @@ package promrelabel
import (
"reflect"
"testing"

"gopkg.in/yaml.v2"
)

func TestRelabelConfigMarshalUnmarshal(t *testing.T) {
f := func(data, resultExpected string) {
t.Helper()
var rcs []RelabelConfig
if err := yaml.UnmarshalStrict([]byte(data), &rcs); err != nil {
t.Fatalf("cannot unmarshal %q: %s", data, err)
}
result, err := yaml.Marshal(&rcs)
if err != nil {
t.Fatalf("cannot marshal %q: %s", data, err)
}
if string(result) != resultExpected {
t.Fatalf("unexpected marshaled data; got\n%q\nwant\n%q", result, resultExpected)
}
}
f(``, "[]\n")
f(`
- action: keep
  regex: foobar
`, "- regex:\n  - foobar\n  action: keep\n")
f(`
- regex:
  - 'fo.+'
  - '.*ba[r-z]a'
`, "- regex:\n  - fo.+\n  - .*ba[r-z]a\n")
f(`- regex: foo|bar`, "- regex:\n  - foo\n  - bar\n")
}

func TestLoadRelabelConfigsSuccess(t *testing.T) {
path := "testdata/relabel_configs_valid.yml"
pcs, err := LoadRelabelConfigs(path, false)
if err != nil {
t.Fatalf("cannot load relabel configs from %q: %s", path, err)
}
if n := pcs.Len(); n != 9 {
t.Fatalf("unexpected number of relabel configs loaded from %q; got %d; want %d", path, n, 9)
if n := pcs.Len(); n != 12 {
t.Fatalf("unexpected number of relabel configs loaded from %q; got %d; want %d", path, n, 12)
}
}

@@ -85,7 +115,9 @@ func TestParseRelabelConfigsFailure(t *testing.T) {
{
SourceLabels: []string{"aaa"},
TargetLabel: "xxx",
Regex: strPtr("foo[bar"),
Regex: &MultiLineRegex{
s: "foo[bar",
},
},
})
})

@@ -191,8 +223,40 @@ func TestParseRelabelConfigsFailure(t *testing.T) {
},
})
})
}

func strPtr(s string) *string {
return &s
t.Run("drop_metrics-missing-regex", func(t *testing.T) {
f([]RelabelConfig{
{
Action: "drop_metrics",
},
})
})
t.Run("drop_metrics-non-empty-source-labels", func(t *testing.T) {
f([]RelabelConfig{
{
Action: "drop_metrics",
SourceLabels: []string{"foo"},
Regex: &MultiLineRegex{
s: "bar",
},
},
})
})
t.Run("keep_metrics-missing-regex", func(t *testing.T) {
f([]RelabelConfig{
{
Action: "keep_metrics",
},
})
})
t.Run("keep_metrics-non-empty-source-labels", func(t *testing.T) {
f([]RelabelConfig{
{
Action: "keep_metrics",
SourceLabels: []string{"foo"},
Regex: &MultiLineRegex{
s: "bar",
},
},
})
})
}
@@ -564,6 +564,37 @@ func TestApplyRelabelConfigs(t *testing.T) {
},
})
})
t.Run("keep_metrics-miss", func(t *testing.T) {
f(`
- action: keep_metrics
  regex:
  - foo
  - bar
`, []prompbmarshal.Label{
{
Name: "__name__",
Value: "xxx",
},
}, true, []prompbmarshal.Label{})
})
t.Run("keep_metrics-hit", func(t *testing.T) {
f(`
- action: keep_metrics
  regex:
  - foo
  - bar
`, []prompbmarshal.Label{
{
Name: "__name__",
Value: "foo",
},
}, true, []prompbmarshal.Label{
{
Name: "__name__",
Value: "foo",
},
})
})
t.Run("drop-miss", func(t *testing.T) {
f(`
- action: drop

@@ -610,6 +641,37 @@ func TestApplyRelabelConfigs(t *testing.T) {
},
}, true, []prompbmarshal.Label{})
})
t.Run("drop_metrics-miss", func(t *testing.T) {
f(`
- action: drop_metrics
  regex:
  - foo
  - bar
`, []prompbmarshal.Label{
{
Name: "__name__",
Value: "xxx",
},
}, true, []prompbmarshal.Label{
{
Name: "__name__",
Value: "xxx",
},
})
})
t.Run("drop_metrics-hit", func(t *testing.T) {
f(`
- action: drop_metrics
  regex:
  - foo
  - bar
`, []prompbmarshal.Label{
{
Name: "__name__",
Value: "foo",
},
}, true, []prompbmarshal.Label{})
})
t.Run("hashmod-miss", func(t *testing.T) {
f(`
- action: hashmod
@@ -22,3 +22,13 @@
  source_labels: [foo, bar]
- action: drop_if_equal
  source_labels: [foo, bar]
- action: keep
  source_labels: [__name__]
  regex:
  - 'fo.*o'
  - 'bar'
  - 'baz.+'
- action: keep_metrics
  regex: [foo bar baz]
- action: drop_metrics
  regex: "foo|bar|baz"
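Note how these configs behave once compiled: parseRelabelConfig (see the config.go hunks above) anchors the regex with `^(?:...)$` for all actions except replace_all and labelmap_all, so keep_metrics/drop_metrics match whole metric names, not substrings. A quick standalone demonstration:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// `regex: "foo|bar|baz"` under action: drop_metrics compiles to an
	// anchored alternation, exactly as parseRelabelConfig builds it.
	re := regexp.MustCompile("^(?:foo|bar|baz)$")
	fmt.Println(re.MatchString("foo"))    // true: metric is dropped
	fmt.Println(re.MatchString("foobar")) // false: metric is kept
}
```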
@@ -155,7 +155,7 @@ func newClient(sw *ScrapeWork) *client {
}

func (c *client) GetStreamReader() (*streamReader, error) {
deadline := time.Now().Add(c.hc.ReadTimeout)
deadline := time.Now().Add(c.sc.Timeout)
ctx, cancel := context.WithDeadline(context.Background(), deadline)
req, err := http.NewRequestWithContext(ctx, "GET", c.scrapeURL, nil)
if err != nil {
@@ -7,6 +7,7 @@ import (
"net/url"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"

@@ -985,7 +986,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
}
}

labels := mergeLabels(swc.jobName, swc.scheme, target, swc.metricsPath, extraLabels, swc.externalLabels, metaLabels, swc.params)
labels := mergeLabels(swc, target, extraLabels, metaLabels)
var originalLabels []prompbmarshal.Label
if !*dropOriginalLabels {
originalLabels = append([]prompbmarshal.Label{}, labels...)

@@ -1048,12 +1049,49 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
})
promrelabel.SortLabels(labels)
}
// Read __scrape_interval__ and __scrape_timeout__ from labels.
scrapeInterval := swc.scrapeInterval
if s := promrelabel.GetLabelValueByName(labels, "__scrape_interval__"); len(s) > 0 {
d, err := time.ParseDuration(s)
if err != nil {
return nil, fmt.Errorf("cannot parse __scrape_interval__=%q: %w", s, err)
}
scrapeInterval = d
}
scrapeTimeout := swc.scrapeTimeout
if s := promrelabel.GetLabelValueByName(labels, "__scrape_timeout__"); len(s) > 0 {
d, err := time.ParseDuration(s)
if err != nil {
return nil, fmt.Errorf("cannot parse __scrape_timeout__=%q: %w", s, err)
}
scrapeTimeout = d
}
// Read series_limit option from __series_limit__ label.
// See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
seriesLimit := swc.seriesLimit
if s := promrelabel.GetLabelValueByName(labels, "__series_limit__"); len(s) > 0 {
n, err := strconv.Atoi(s)
if err != nil {
return nil, fmt.Errorf("cannot parse __series_limit__=%q: %w", s, err)
}
seriesLimit = n
}
// Read stream_parse option from __stream_parse__ label.
// See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
streamParse := swc.streamParse
if s := promrelabel.GetLabelValueByName(labels, "__stream_parse__"); len(s) > 0 {
b, err := strconv.ParseBool(s)
if err != nil {
return nil, fmt.Errorf("cannot parse __stream_parse__=%q: %w", s, err)
}
streamParse = b
}
// Reduce memory usage by interning all the strings in labels.
internLabelStrings(labels)
sw := &ScrapeWork{
ScrapeURL: scrapeURL,
ScrapeInterval: swc.scrapeInterval,
ScrapeTimeout: swc.scrapeTimeout,
ScrapeInterval: scrapeInterval,
ScrapeTimeout: scrapeTimeout,
HonorLabels: swc.honorLabels,
HonorTimestamps: swc.honorTimestamps,
DenyRedirects: swc.denyRedirects,

@@ -1066,10 +1104,10 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
SampleLimit: swc.sampleLimit,
DisableCompression: swc.disableCompression,
DisableKeepAlive: swc.disableKeepAlive,
StreamParse: swc.streamParse,
StreamParse: streamParse,
ScrapeAlignInterval: swc.scrapeAlignInterval,
ScrapeOffset: swc.scrapeOffset,
SeriesLimit: swc.seriesLimit,
SeriesLimit: seriesLimit,

jobNameOriginal: swc.jobName,
}

@@ -1123,17 +1161,19 @@ func getParamsFromLabels(labels []prompbmarshal.Label, paramsOrig map[string][]s
return m
}

func mergeLabels(job, scheme, target, metricsPath string, extraLabels, externalLabels, metaLabels map[string]string, params map[string][]string) []prompbmarshal.Label {
func mergeLabels(swc *scrapeWorkConfig, target string, extraLabels, metaLabels map[string]string) []prompbmarshal.Label {
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
m := make(map[string]string, 4+len(externalLabels)+len(params)+len(extraLabels)+len(metaLabels))
for k, v := range externalLabels {
m := make(map[string]string, 4+len(swc.externalLabels)+len(swc.params)+len(extraLabels)+len(metaLabels))
for k, v := range swc.externalLabels {
m[k] = v
}
m["job"] = job
m["job"] = swc.jobName
m["__address__"] = target
m["__scheme__"] = scheme
m["__metrics_path__"] = metricsPath
for k, args := range params {
m["__scheme__"] = swc.scheme
m["__metrics_path__"] = swc.metricsPath
m["__scrape_interval__"] = swc.scrapeInterval.String()
m["__scrape_timeout__"] = swc.scrapeTimeout.String()
for k, args := range swc.params {
if len(args) == 0 {
continue
}
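The per-target override logic added to getScrapeWork reads `__scrape_interval__` and `__scrape_timeout__` from the relabeled target labels and falls back to the scrape_config defaults. Below is a standalone sketch of that precedence rule; the real code operates on prompbmarshal.Label slices via promrelabel.GetLabelValueByName, so the map-based helper here is an illustration only.

```go
package main

import (
	"fmt"
	"time"
)

// durationFromLabel mirrors the override logic in getScrapeWork: if the
// (possibly relabeled) target carries the named duration label, its value
// wins over the scrape_config default; a malformed value is an error.
func durationFromLabel(labels map[string]string, name string, def time.Duration) (time.Duration, error) {
	s := labels[name]
	if s == "" {
		return def, nil
	}
	d, err := time.ParseDuration(s)
	if err != nil {
		return 0, fmt.Errorf("cannot parse %s=%q: %w", name, s, err)
	}
	return d, nil
}

func main() {
	labels := map[string]string{"__scrape_timeout__": "5s"}
	interval, _ := durationFromLabel(labels, "__scrape_interval__", time.Minute)
	timeout, _ := durationFromLabel(labels, "__scrape_timeout__", 10*time.Second)
	fmt.Println(interval, timeout) // 1m0s 5s
}
```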
@@ -152,6 +152,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "instance",
Value: "8.8.8.8",

@@ -581,6 +589,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "__vm_filepath",
Value: "",

@@ -621,6 +637,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "__vm_filepath",
Value: "",

@@ -661,6 +685,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "__vm_filepath",
Value: "",

@@ -723,6 +755,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "instance",
Value: "foo.bar:1234",

@@ -766,6 +806,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "datacenter",
Value: "foobar",

@@ -794,7 +842,7 @@ global:
  scrape_timeout: 34s
scrape_configs:
- job_name: foo
  scrape_interval: 543s
  scrape_interval: 54s
  scrape_timeout: 12s
  metrics_path: /foo/bar
  scheme: https

@@ -809,6 +857,7 @@ scrape_configs:
  - targets: ["foo.bar", "aaa"]
    labels:
      x: y
      __scrape_timeout__: "5s"
- job_name: qwer
  tls_config:
    server_name: foobar

@@ -821,8 +870,8 @@ scrape_configs:
`, []*ScrapeWork{
{
ScrapeURL: "https://foo.bar:443/foo/bar?p=x%26y&p=%3D",
ScrapeInterval: 543 * time.Second,
ScrapeTimeout: 12 * time.Second,
ScrapeInterval: 54 * time.Second,
ScrapeTimeout: 5 * time.Second,
HonorLabels: true,
HonorTimestamps: true,
DenyRedirects: true,

@@ -843,6 +892,14 @@ scrape_configs:
Name: "__scheme__",
Value: "https",
},
{
Name: "__scrape_interval__",
Value: "54s",
},
{
Name: "__scrape_timeout__",
Value: "5s",
},
{
Name: "instance",
Value: "foo.bar:443",

@@ -863,8 +920,8 @@ scrape_configs:
},
{
ScrapeURL: "https://aaa:443/foo/bar?p=x%26y&p=%3D",
ScrapeInterval: 543 * time.Second,
ScrapeTimeout: 12 * time.Second,
ScrapeInterval: 54 * time.Second,
ScrapeTimeout: 5 * time.Second,
HonorLabels: true,
HonorTimestamps: true,
DenyRedirects: true,

@@ -885,6 +942,14 @@ scrape_configs:
Name: "__scheme__",
Value: "https",
},
{
Name: "__scrape_interval__",
Value: "54s",
},
{
Name: "__scrape_timeout__",
Value: "5s",
},
{
Name: "instance",
Value: "aaa:443",

@@ -920,6 +985,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "8s",
},
{
Name: "__scrape_timeout__",
Value: "8s",
},
{
Name: "instance",
Value: "1.2.3.4:80",

@@ -953,6 +1026,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "8s",
},
{
Name: "__scrape_timeout__",
Value: "8s",
},
{
Name: "instance",
Value: "foobar:80",

@@ -1024,6 +1105,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "hash",
Value: "82",

@@ -1095,6 +1184,14 @@ scrape_configs:
Name: "__scheme__",
Value: "mailto",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "instance",
Value: "fake.addr",

@@ -1180,6 +1277,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "instance",
Value: "foo.bar:1234",

@@ -1221,6 +1326,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "instance",
Value: "foo.bar:1234",

@@ -1258,6 +1371,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "instance",
Value: "foo.bar:1234",

@@ -1313,6 +1434,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "foo",
Value: "bar",

@@ -1341,10 +1470,8 @@ scrape_configs:
  sample_limit: 100
  disable_keepalive: true
  disable_compression: true
  stream_parse: true
  scrape_align_interval: 1s
  scrape_offset: 0.5s
  series_limit: 123
  static_configs:
  - targets:
    - 192.168.1.2 # SNMP device.

@@ -1358,6 +1485,10 @@ scrape_configs:
    target_label: instance
  - target_label: __address__
    replacement: 127.0.0.1:9116 # The SNMP exporter's real hostname:port.
  - target_label: __series_limit__
    replacement: 1234
  - target_label: __stream_parse__
    replacement: true
`, []*ScrapeWork{
{
ScrapeURL: "http://127.0.0.1:9116/snmp?module=if_mib&target=192.168.1.2",

@@ -1384,6 +1515,22 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "__series_limit__",
Value: "1234",
},
{
Name: "__stream_parse__",
Value: "true",
},
{
Name: "instance",
Value: "192.168.1.2",

@@ -1401,7 +1548,7 @@ scrape_configs:
StreamParse: true,
ScrapeAlignInterval: time.Second,
ScrapeOffset: 500 * time.Millisecond,
SeriesLimit: 123,
SeriesLimit: 1234,
jobNameOriginal: "snmp",
},
})

@@ -1431,6 +1578,14 @@ scrape_configs:
Name: "__scheme__",
Value: "http",
},
{
Name: "__scrape_interval__",
Value: "1m0s",
},
{
Name: "__scrape_timeout__",
Value: "10s",
},
{
Name: "instance",
Value: "foo.bar:1234",
@@ -13,8 +13,9 @@ import (
var configMap = discoveryutils.NewConfigMap()

type apiConfig struct {
client *discoveryutils.Client
port int
client *discoveryutils.Client
port int
hostNetworkingHost string

// filtersQueryArg contains escaped `filters` query arg to add to each request to Docker Swarm API.
filtersQueryArg string

@@ -29,9 +30,14 @@ func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
}

func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
hostNetworkingHost := sdc.HostNetworkingHost
if hostNetworkingHost == "" {
hostNetworkingHost = "localhost"
}
cfg := &apiConfig{
port: sdc.Port,
filtersQueryArg: getFiltersQueryArg(sdc.Filters),
port: sdc.Port,
hostNetworkingHost: hostNetworkingHost,
filtersQueryArg: getFiltersQueryArg(sdc.Filters),
}
if cfg.port == 0 {
cfg.port = 80
@@ -39,7 +39,7 @@ func getContainersLabels(cfg *apiConfig) ([]map[string]string, error) {
if err != nil {
return nil, err
}
return addContainersLabels(containers, networkLabels, cfg.port), nil
return addContainersLabels(containers, networkLabels, cfg.port, cfg.hostNetworkingHost), nil
}

func getContainers(cfg *apiConfig) ([]container, error) {

@@ -58,7 +58,7 @@ func parseContainers(data []byte) ([]container, error) {
return containers, nil
}

func addContainersLabels(containers []container, networkLabels map[string]map[string]string, defaultPort int) []map[string]string {
func addContainersLabels(containers []container, networkLabels map[string]map[string]string, defaultPort int, hostNetworkingHost string) []map[string]string {
var ms []map[string]string
for i := range containers {
c := &containers[i]

@@ -86,8 +86,12 @@ func addContainersLabels(containers []container, networkLabels map[string]map[st
}
if !added {
// Use fallback port when no exposed ports are available or if all are non-TCP
addr := hostNetworkingHost
if c.HostConfig.NetworkMode != "host" {
addr = discoveryutils.JoinHostPort(n.IPAddress, defaultPort)
}
m := map[string]string{
"__address__": discoveryutils.JoinHostPort(n.IPAddress, defaultPort),
"__address__": addr,
"__meta_docker_network_ip": n.IPAddress,
}
addCommonLabels(m, c, networkLabels[n.NetworkID])
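The address fallback added above picks `host_networking_host` for containers in host networking mode, since such containers have no per-network IP worth joining with a port. Below is a standalone sketch of the selection rule (the real code uses discoveryutils.JoinHostPort; this sketch substitutes the standard library's net.JoinHostPort):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// targetAddr mirrors the fallback in addContainersLabels: for containers
// running with NetworkMode=host, discovery uses host_networking_host
// ("localhost" by default); otherwise it joins the network IP with the port.
func targetAddr(networkMode, ip string, port int, hostNetworkingHost string) string {
	if networkMode == "host" {
		return hostNetworkingHost
	}
	return net.JoinHostPort(ip, strconv.Itoa(port))
}

func main() {
	fmt.Println(targetAddr("bridge", "172.17.0.2", 8012, "foobar")) // 172.17.0.2:8012
	fmt.Println(targetAddr("host", "172.17.0.2", 8012, "foobar"))   // foobar
}
```

These two outputs match the `__address__` values expected by the NetworkMode!=host and NetworkMode=host test cases in the next file.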
@@ -317,6 +317,118 @@ func Test_addContainerLabels(t *testing.T) {
want []map[string]string
wantErr bool
}{
{
name: "NetworkMode!=host",
c: container{
ID: "90bc3b31aa13da5c0b11af2e228d54b38428a84e25d4e249ae9e9c95e51a0700",
Names: []string{"/crow-server"},
Labels: map[string]string{
"com.docker.compose.config-hash": "c9f0bd5bb31921f94cff367d819a30a0cc08d4399080897a6c5cd74b983156ec",
"com.docker.compose.container-number": "1",
"com.docker.compose.oneoff": "False",
"com.docker.compose.project": "crowserver",
"com.docker.compose.service": "crow-server",
"com.docker.compose.version": "1.11.2",
},
HostConfig: struct {
NetworkMode string
}{
NetworkMode: "bridge",
},
NetworkSettings: struct {
Networks map[string]struct {
IPAddress string
NetworkID string
}
}{
Networks: map[string]struct {
IPAddress string
NetworkID string
}{
"host": {
IPAddress: "172.17.0.2",
NetworkID: "1dd8d1a8bef59943345c7231d7ce8268333ff5a8c5b3c94881e6b4742b447634",
},
},
},
},
want: []map[string]string{
{
"__address__": "172.17.0.2:8012",
"__meta_docker_container_id": "90bc3b31aa13da5c0b11af2e228d54b38428a84e25d4e249ae9e9c95e51a0700",
"__meta_docker_container_label_com_docker_compose_config_hash": "c9f0bd5bb31921f94cff367d819a30a0cc08d4399080897a6c5cd74b983156ec",
"__meta_docker_container_label_com_docker_compose_container_number": "1",
"__meta_docker_container_label_com_docker_compose_oneoff": "False",
"__meta_docker_container_label_com_docker_compose_project": "crowserver",
"__meta_docker_container_label_com_docker_compose_service": "crow-server",
"__meta_docker_container_label_com_docker_compose_version": "1.11.2",
"__meta_docker_container_name": "/crow-server",
"__meta_docker_container_network_mode": "bridge",
"__meta_docker_network_id": "1dd8d1a8bef59943345c7231d7ce8268333ff5a8c5b3c94881e6b4742b447634",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.17.0.2",
"__meta_docker_network_name": "bridge",
"__meta_docker_network_scope": "local",
},
},
},
{
name: "NetworkMode=host",
c: container{
ID: "90bc3b31aa13da5c0b11af2e228d54b38428a84e25d4e249ae9e9c95e51a0700",
Names: []string{"/crow-server"},
Labels: map[string]string{
"com.docker.compose.config-hash": "c9f0bd5bb31921f94cff367d819a30a0cc08d4399080897a6c5cd74b983156ec",
"com.docker.compose.container-number": "1",
"com.docker.compose.oneoff": "False",
"com.docker.compose.project": "crowserver",
"com.docker.compose.service": "crow-server",
"com.docker.compose.version": "1.11.2",
},
HostConfig: struct {
NetworkMode string
}{
NetworkMode: "host",
},
NetworkSettings: struct {
Networks map[string]struct {
IPAddress string
NetworkID string
}
}{
Networks: map[string]struct {
IPAddress string
NetworkID string
}{
"host": {
IPAddress: "172.17.0.2",
NetworkID: "1dd8d1a8bef59943345c7231d7ce8268333ff5a8c5b3c94881e6b4742b447634",
},
},
},
},
want: []map[string]string{
{
"__address__": "foobar",
"__meta_docker_container_id": "90bc3b31aa13da5c0b11af2e228d54b38428a84e25d4e249ae9e9c95e51a0700",
"__meta_docker_container_label_com_docker_compose_config_hash": "c9f0bd5bb31921f94cff367d819a30a0cc08d4399080897a6c5cd74b983156ec",
"__meta_docker_container_label_com_docker_compose_container_number": "1",
"__meta_docker_container_label_com_docker_compose_oneoff": "False",
"__meta_docker_container_label_com_docker_compose_project": "crowserver",
"__meta_docker_container_label_com_docker_compose_service": "crow-server",
"__meta_docker_container_label_com_docker_compose_version": "1.11.2",
"__meta_docker_container_name": "/crow-server",
"__meta_docker_container_network_mode": "host",
"__meta_docker_network_id": "1dd8d1a8bef59943345c7231d7ce8268333ff5a8c5b3c94881e6b4742b447634",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.17.0.2",
"__meta_docker_network_name": "bridge",
"__meta_docker_network_scope": "local",
},
},
},
{
name: "get labels from a container",
c: container{

@@ -391,7 +503,7 @@ func Test_addContainerLabels(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
labelsMap := addContainersLabels([]container{tt.c}, networkLabels, 80)
labelsMap := addContainersLabels([]container{tt.c}, networkLabels, 8012, "foobar")
if (err != nil) != tt.wantErr {
t.Errorf("addContainersLabels() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -18,9 +18,10 @@ var SDCheckInterval = flag.Duration("promscrape.dockerSDCheckInterval", 30*time.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#docker_sd_config
type SDConfig struct {
Host string `yaml:"host"`
Port int `yaml:"port,omitempty"`
Filters []Filter `yaml:"filters,omitempty"`
Host string `yaml:"host"`
Port int `yaml:"port,omitempty"`
Filters []Filter `yaml:"filters,omitempty"`
HostNetworkingHost string `yaml:"host_networking_host,omitempty"`

HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
ProxyURL proxy.URL `yaml:"proxy_url,omitempty"`
@@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"io"
"strings"
)

func (ig *Ingress) key() string {

@@ -88,19 +89,10 @@ type HTTPIngressPath struct {
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ingress
func (ig *Ingress) getTargetLabels(gw *groupWatcher) []map[string]string {
tlsHosts := make(map[string]bool)
for _, tls := range ig.Spec.TLS {
for _, host := range tls.Hosts {
tlsHosts[host] = true
}
}
var ms []map[string]string
for _, r := range ig.Spec.Rules {
paths := getIngressRulePaths(r.HTTP.Paths)
scheme := "http"
if tlsHosts[r.Host] {
scheme = "https"
}
scheme := getSchemeForHost(r.Host, ig.Spec.TLS)
for _, path := range paths {
m := getLabelsForIngressPath(ig, scheme, r.Host, path)
ms = append(ms, m)

@@ -109,6 +101,33 @@ func (ig *Ingress) getTargetLabels(gw *groupWatcher) []map[string]string {
return ms
}

func getSchemeForHost(host string, tlss []IngressTLS) string {
for _, tls := range tlss {
for _, hostPattern := range tls.Hosts {
if matchesHostPattern(hostPattern, host) {
return "https"
}
}
}
return "http"
}

func matchesHostPattern(pattern, host string) bool {
if pattern == host {
return true
}
if !strings.HasPrefix(pattern, "*.") {
return false
}
pattern = pattern[len("*."):]
n := strings.IndexByte(host, '.')
if n < 0 {
return false
}
host = host[n+1:]
return pattern == host
}

func getLabelsForIngressPath(ig *Ingress, scheme, host, path string) map[string]string {
m := map[string]string{
"__address__": host,
@@ -8,6 +8,26 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
)

func TestMatchesHostPattern(t *testing.T) {
f := func(pattern, host string, resultExpected bool) {
t.Helper()
result := matchesHostPattern(pattern, host)
if result != resultExpected {
t.Fatalf("unexpected result for matchesHostPattern(%q, %q); got %v; want %v", pattern, host, result, resultExpected)
}
}
f("", "", true)
f("", "foo", false)
f("foo", "", false)
f("localhost", "localhost", true)
f("localhost", "localhost2", false)
f("*.foo", "bar", false)
f("foo.bar", "foo.bar", true)
f("foo.baz", "foo.bar", false)
f("a.x.yyy", "b.x.yyy", false)
f("*.x.yyy", "b.x.yyy", true)
}

func TestParseIngressListFailure(t *testing.T) {
f := func(s string) {
t.Helper()
@ -6,6 +6,7 @@ import (
|
|||
"math"
|
||||
"math/bits"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -68,6 +69,8 @@ type ScrapeWork struct {
|
|||
// * __address__
|
||||
// * __scheme__
|
||||
// * __metrics_path__
|
||||
// * __scrape_interval__
|
||||
// * __scrape_timeout__
|
||||
// * __param_<name>
|
||||
// * __meta_*
|
||||
// * user-defined labels set via `relabel_configs` section in `scrape_config`
|
||||
|
@ -178,9 +181,10 @@ type scrapeWork struct {
|
|||
|
||||
tmpRow parser.Row
|
||||
|
||||
// the seriesMap, seriesAdded and labelsHashBuf are used for fast calculation of `scrape_series_added` metric.
|
||||
    seriesMap map[uint64]struct{}
    seriesAdded int

    // This flag is set to true if series_limit is exceeded.
    seriesLimitExceeded bool

    // labelsHashBuf is used for calculating the hash on series labels
    labelsHashBuf []byte

    // Optional limiter on the number of unique series per scrape target.

@@ -195,7 +199,6 @@ type scrapeWork struct {
    prevLabelsLen int

    // lastScrape holds the last response from scrape target.
    // It is used for generating Prometheus stale markers.
    lastScrape []byte
}

@@ -248,7 +251,7 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}) {
    select {
    case <-stopCh:
        t := time.Now().UnixNano() / 1e6
        sw.sendStaleMarkersForLastScrape(t, true)
        sw.sendStaleSeries("", t, true)
        if sw.seriesLimiter != nil {
            sw.seriesLimiter.MustStop()
        }

@@ -307,6 +310,8 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
    up := 1
    wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
    bodyString := bytesutil.ToUnsafeString(body.B)
    lastScrape := bytesutil.ToUnsafeString(sw.lastScrape)
    areIdenticalSeries := parser.AreIdenticalSeriesFast(lastScrape, bodyString)
    if err != nil {
        up = 0
        scrapesFailed.Inc()

@@ -327,26 +332,39 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
        err = fmt.Errorf("the response from %q exceeds sample_limit=%d; "+
            "either reduce the sample count for the target or increase sample_limit", sw.Config.ScrapeURL, sw.Config.SampleLimit)
    }
    sw.updateSeriesAdded(wc)
    seriesAdded := sw.finalizeSeriesAdded(samplesPostRelabeling)
    if up == 0 {
        bodyString = ""
    }
    seriesAdded := 0
    if !areIdenticalSeries {
        // The returned value for seriesAdded may be bigger than the real number of added series
        // if some series were removed during relabeling.
        // This is a trade-off between performance and accuracy.
        seriesAdded = sw.getSeriesAdded(bodyString)
    }
    if sw.seriesLimitExceeded || !areIdenticalSeries {
        if sw.applySeriesLimit(wc) {
            sw.seriesLimitExceeded = true
        }
    }
    sw.addAutoTimeseries(wc, "up", float64(up), scrapeTimestamp)
    sw.addAutoTimeseries(wc, "scrape_duration_seconds", duration, scrapeTimestamp)
    sw.addAutoTimeseries(wc, "scrape_samples_scraped", float64(samplesScraped), scrapeTimestamp)
    sw.addAutoTimeseries(wc, "scrape_samples_post_metric_relabeling", float64(samplesPostRelabeling), scrapeTimestamp)
    sw.addAutoTimeseries(wc, "scrape_series_added", float64(seriesAdded), scrapeTimestamp)
    sw.addAutoTimeseries(wc, "scrape_timeout_seconds", sw.Config.ScrapeTimeout.Seconds(), scrapeTimestamp)
    sw.pushData(&wc.writeRequest)
    sw.prevLabelsLen = len(wc.labels)
    wc.reset()
    writeRequestCtxPool.Put(wc)
    // body must be released only after wc is released, since wc refers to body.
    sw.prevBodyLen = len(body.B)
    if !areIdenticalSeries {
        sw.sendStaleSeries(bodyString, scrapeTimestamp, false)
    }
    sw.lastScrape = append(sw.lastScrape[:0], bodyString...)
    leveledbytebufferpool.Put(body)
    tsmGlobal.Update(sw.Config, sw.ScrapeGroup, up == 1, realTimestamp, int64(duration*1000), samplesScraped, err)
    if up == 0 {
        bodyString = ""
        sw.sendStaleMarkersForLastScrape(scrapeTimestamp, false)
    }
    sw.updateLastScrape(bodyString)
    return err
}
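The rewritten scrapeInternal hinges on parser.AreIdenticalSeriesFast: when consecutive scrapes expose the same series set, both getSeriesAdded and stale-marker generation are skipped entirely. A minimal sketch of that fast-path check, assuming the parser alias resolves to this repo's lib/protoparser/prometheus package:

package main

import (
    "fmt"

    parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
)

func main() {
    lastScrape := "foo{bar=\"baz\"} 1\nup 1\n"
    currScrape := "foo{bar=\"baz\"} 42\nup 1\n" // same series, new values
    if parser.AreIdenticalSeriesFast(lastScrape, currScrape) {
        // Common case: skip per-series accounting and stale-marker generation.
        fmt.Println("series set unchanged")
    }
}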

@@ -384,7 +402,6 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
        return fmt.Errorf("the response from %q exceeds sample_limit=%d; "+
            "either reduce the sample count for the target or increase sample_limit", sw.Config.ScrapeURL, sw.Config.SampleLimit)
    }
    sw.updateSeriesAdded(wc)
    sw.pushData(&wc.writeRequest)
    wc.resetNoRows()
    return nil

@@ -405,12 +422,14 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
        }
        scrapesFailed.Inc()
    }
    seriesAdded := sw.finalizeSeriesAdded(samplesPostRelabeling)
    sw.addAutoTimeseries(wc, "up", float64(up), scrapeTimestamp)
    sw.addAutoTimeseries(wc, "scrape_duration_seconds", duration, scrapeTimestamp)
    sw.addAutoTimeseries(wc, "scrape_samples_scraped", float64(samplesScraped), scrapeTimestamp)
    sw.addAutoTimeseries(wc, "scrape_samples_post_metric_relabeling", float64(samplesPostRelabeling), scrapeTimestamp)
    sw.addAutoTimeseries(wc, "scrape_series_added", float64(seriesAdded), scrapeTimestamp)
    // scrape_series_added isn't calculated in streaming mode,
    // since it may need unlimited amounts of memory when scraping targets with millions of exposed metrics.
    sw.addAutoTimeseries(wc, "scrape_series_added", 0, scrapeTimestamp)
    sw.addAutoTimeseries(wc, "scrape_timeout_seconds", sw.Config.ScrapeTimeout.Seconds(), scrapeTimestamp)
    sw.pushData(&wc.writeRequest)
    sw.prevLabelsLen = len(wc.labels)
    wc.reset()

@@ -487,11 +506,16 @@ func (wc *writeRequestCtx) resetNoRows() {

var writeRequestCtxPool leveledWriteRequestCtxPool

func (sw *scrapeWork) updateSeriesAdded(wc *writeRequestCtx) {
    if sw.seriesMap == nil {
        sw.seriesMap = make(map[uint64]struct{}, len(wc.writeRequest.Timeseries))
func (sw *scrapeWork) getSeriesAdded(currScrape string) int {
    if currScrape == "" {
        return 0
    }
    m := sw.seriesMap
    lastScrape := bytesutil.ToUnsafeString(sw.lastScrape)
    bodyString := parser.GetRowsDiff(currScrape, lastScrape)
    return strings.Count(bodyString, "\n")
}

func (sw *scrapeWork) applySeriesLimit(wc *writeRequestCtx) bool {
    seriesLimit := *seriesLimitPerTarget
    if sw.Config.SeriesLimit > 0 {
        seriesLimit = sw.Config.SeriesLimit

@@ -500,42 +524,44 @@ func (sw *scrapeWork) updateSeriesAdded(wc *writeRequestCtx) {
        sw.seriesLimiter = bloomfilter.NewLimiter(seriesLimit, 24*time.Hour)
    }
    hsl := sw.seriesLimiter
    if hsl == nil {
        return false
    }
    dstSeries := wc.writeRequest.Timeseries[:0]
    job := sw.Config.Job()
    limitExceeded := false
    for _, ts := range wc.writeRequest.Timeseries {
        h := sw.getLabelsHash(ts.Labels)
        if hsl != nil && !hsl.Add(h) {
        if !hsl.Add(h) {
            // The limit on the number of hourly unique series per scrape target has been exceeded.
            // Drop the metric.
            metrics.GetOrCreateCounter(fmt.Sprintf(`promscrape_series_limit_rows_dropped_total{job=%q,target=%q}`,
                sw.Config.jobNameOriginal, sw.Config.ScrapeURL)).Inc()
            metrics.GetOrCreateCounter(fmt.Sprintf(`promscrape_series_limit_rows_dropped_total{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
                sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL)).Inc()
            limitExceeded = true
            continue
        }
        dstSeries = append(dstSeries, ts)
        if _, ok := m[h]; !ok {
            m[h] = struct{}{}
            sw.seriesAdded++
        }
    }
    wc.writeRequest.Timeseries = dstSeries
    return limitExceeded
}
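applySeriesLimit lazily creates a time-windowed limiter via bloomfilter.NewLimiter(seriesLimit, 24*time.Hour) and drops rows whose label hash no longer fits. A standalone sketch of the same API as used above; the xxhash-based hash below is a stand-in assumption for the real sw.getLabelsHash:

package main

import (
    "fmt"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/bloomfilter"
    "github.com/cespare/xxhash/v2"
)

func main() {
    // Allow at most 2 unique series per 24h window, mirroring the limiter above.
    limiter := bloomfilter.NewLimiter(2, 24*time.Hour)
    defer limiter.MustStop()
    for _, series := range []string{`a{x="1"}`, `b{x="2"}`, `c{x="3"}`} {
        h := xxhash.Sum64String(series) // assumption: stand-in for sw.getLabelsHash
        if !limiter.Add(h) {
            fmt.Printf("series limit exceeded, dropping %s\n", series)
        }
    }
}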

func (sw *scrapeWork) updateLastScrape(response string) {
func (sw *scrapeWork) sendStaleSeries(currScrape string, timestamp int64, addAutoSeries bool) {
    if *noStaleMarkers {
        return
    }
    sw.lastScrape = append(sw.lastScrape[:0], response...)
}

func (sw *scrapeWork) sendStaleMarkersForLastScrape(timestamp int64, addAutoSeries bool) {
    bodyString := bytesutil.ToUnsafeString(sw.lastScrape)
    if len(bodyString) == 0 && !addAutoSeries {
        return
    lastScrape := bytesutil.ToUnsafeString(sw.lastScrape)
    bodyString := lastScrape
    if currScrape != "" {
        bodyString = parser.GetRowsDiff(lastScrape, currScrape)
    }
    wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
    wc.rows.UnmarshalWithErrLogger(bodyString, sw.logError)
    srcRows := wc.rows.Rows
    for i := range srcRows {
        sw.addRowToTimeseries(wc, &srcRows[i], timestamp, true)
    wc := &writeRequestCtx{}
    if bodyString != "" {
        wc.rows.Unmarshal(bodyString)
        srcRows := wc.rows.Rows
        for i := range srcRows {
            sw.addRowToTimeseries(wc, &srcRows[i], timestamp, true)
        }
    }
    if addAutoSeries {
        sw.addAutoTimeseries(wc, "up", 0, timestamp)

@@ -556,17 +582,6 @@ func (sw *scrapeWork) sendStaleMarkersForLastScrape(timestamp int64, addAutoSeri
        }
    }
    sw.pushData(&wc.writeRequest)
    writeRequestCtxPool.Put(wc)
}
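sendStaleSeries derives the set of vanished series from the previous and current scrape bodies via parser.GetRowsDiff and re-emits them so they can be marked stale downstream (Prometheus represents staleness with a special NaN value; that part happens inside addRowToTimeseries and isn't shown in this diff). A sketch of just the diff step, assuming the lib/protoparser/prometheus import path:

package main

import (
    "fmt"

    parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
)

func main() {
    lastScrape := "foo{bar=\"baz\"} 1\ngone_metric 7\n"
    currScrape := "foo{bar=\"baz\"} 2\n"
    // Rows present in the last scrape but missing from the current one
    // are the candidates for stale markers.
    fmt.Printf("%q\n", parser.GetRowsDiff(lastScrape, currScrape)) // "gone_metric 0\n"
}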

func (sw *scrapeWork) finalizeSeriesAdded(lastScrapeSize int) int {
    seriesAdded := sw.seriesAdded
    sw.seriesAdded = 0
    if len(sw.seriesMap) > 4*lastScrapeSize {
        // Reset seriesMap, since it occupies more than 4x metrics collected during the last scrape.
        sw.seriesMap = make(map[uint64]struct{}, lastScrapeSize)
    }
    return seriesAdded
}

func (sw *scrapeWork) getLabelsHash(labels []prompbmarshal.Label) uint64 {
@@ -4,6 +4,7 @@ import (
    "fmt"
    "strings"
    "testing"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"

@@ -44,11 +45,14 @@ func TestScrapeWorkScrapeInternalFailure(t *testing.T) {
scrape_duration_seconds 0 123
scrape_samples_post_metric_relabeling 0 123
scrape_series_added 0 123
scrape_timeout_seconds 42 123
`
    timeseriesExpected := parseData(dataExpected)

    var sw scrapeWork
    sw.Config = &ScrapeWork{}
    sw.Config = &ScrapeWork{
        ScrapeTimeout: time.Second * 42,
    }

    readDataCalls := 0
    sw.ReadData = func(dst []byte) ([]byte, error) {

@@ -133,17 +137,22 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
        }
    }

    f(``, &ScrapeWork{}, `
    f(``, &ScrapeWork{
        ScrapeTimeout: time.Second * 42,
    }, `
up 1 123
scrape_samples_scraped 0 123
scrape_duration_seconds 0 123
scrape_samples_post_metric_relabeling 0 123
scrape_series_added 0 123
scrape_timeout_seconds 42 123
`)
    f(`
foo{bar="baz",empty_label=""} 34.45 3
abc -2
`, &ScrapeWork{}, `
`, &ScrapeWork{
        ScrapeTimeout: time.Second * 42,
    }, `
foo{bar="baz"} 34.45 123
abc -2 123
up 1 123

@@ -151,11 +160,13 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
scrape_duration_seconds 0 123
scrape_samples_post_metric_relabeling 2 123
scrape_series_added 2 123
scrape_timeout_seconds 42 123
`)
    f(`
foo{bar="baz"} 34.45 3
abc -2
`, &ScrapeWork{
        ScrapeTimeout:   time.Second * 42,
        HonorTimestamps: true,
        Labels: []prompbmarshal.Label{
            {

@@ -171,12 +182,14 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
scrape_duration_seconds{foo="x"} 0 123
scrape_samples_post_metric_relabeling{foo="x"} 2 123
scrape_series_added{foo="x"} 2 123
scrape_timeout_seconds{foo="x"} 42 123
`)
    f(`
foo{job="orig",bar="baz"} 34.45
bar{y="2",job="aa",a="b",job="bb",x="1"} -3e4 2345
`, &ScrapeWork{
        HonorLabels: false,
        ScrapeTimeout: time.Second * 42,
        HonorLabels:   false,
        Labels: []prompbmarshal.Label{
            {
                Name: "job",

@@ -191,13 +204,15 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
scrape_duration_seconds{job="override"} 0 123
scrape_samples_post_metric_relabeling{job="override"} 2 123
scrape_series_added{job="override"} 2 123
scrape_timeout_seconds{job="override"} 42 123
`)
    // Empty instance override. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/453
    f(`
no_instance{instance="",job="some_job",label="val1",test=""} 5555
test_with_instance{instance="some_instance",job="some_job",label="val2",test=""} 1555
`, &ScrapeWork{
        HonorLabels: true,
        ScrapeTimeout: time.Second * 42,
        HonorLabels:   true,
        Labels: []prompbmarshal.Label{
            {
                Name: "instance",

@@ -216,12 +231,14 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
scrape_duration_seconds{instance="foobar",job="xxx"} 0 123
scrape_samples_post_metric_relabeling{instance="foobar",job="xxx"} 2 123
scrape_series_added{instance="foobar",job="xxx"} 2 123
scrape_timeout_seconds{instance="foobar",job="xxx"} 42 123
`)
    f(`
no_instance{instance="",job="some_job",label="val1",test=""} 5555
test_with_instance{instance="some_instance",job="some_job",label="val2",test=""} 1555
`, &ScrapeWork{
        HonorLabels: false,
        ScrapeTimeout: time.Second * 42,
        HonorLabels:   false,
        Labels: []prompbmarshal.Label{
            {
                Name: "instance",

@@ -240,12 +257,14 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
scrape_duration_seconds{instance="foobar",job="xxx"} 0 123
scrape_samples_post_metric_relabeling{instance="foobar",job="xxx"} 2 123
scrape_series_added{instance="foobar",job="xxx"} 2 123
scrape_timeout_seconds{instance="foobar",job="xxx"} 42 123
`)
    f(`
foo{job="orig",bar="baz"} 34.45
bar{job="aa",a="b",job="bb"} -3e4 2345
`, &ScrapeWork{
        HonorLabels: true,
        ScrapeTimeout: time.Second * 42,
        HonorLabels:   true,
        Labels: []prompbmarshal.Label{
            {
                Name: "job",

@@ -260,12 +279,14 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
scrape_duration_seconds{job="override"} 0 123
scrape_samples_post_metric_relabeling{job="override"} 2 123
scrape_series_added{job="override"} 2 123
scrape_timeout_seconds{job="override"} 42 123
`)
    f(`
foo{bar="baz"} 34.44
bar{a="b",c="d"} -3e4
`, &ScrapeWork{
        HonorLabels: true,
        ScrapeTimeout: time.Second * 42,
        HonorLabels:   true,
        Labels: []prompbmarshal.Label{
            {
                Name: "job",

@@ -292,6 +313,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
scrape_duration_seconds{job="xx"} 0 123
scrape_samples_post_metric_relabeling{job="xx"} 2 123
scrape_series_added{job="xx"} 2 123
scrape_timeout_seconds{job="xx"} 42 123
`)
    f(`
foo{bar="baz"} 34.44

@@ -299,7 +321,8 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
dropme{foo="bar"} 334
dropme{xxx="yy",ss="dsf"} 843
`, &ScrapeWork{
        HonorLabels: true,
        ScrapeTimeout: time.Second * 42,
        HonorLabels:   true,
        Labels: []prompbmarshal.Label{
            {
                Name: "job",

@@ -325,21 +348,24 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
scrape_samples_scraped{job="xx",instance="foo.com"} 4 123
scrape_duration_seconds{job="xx",instance="foo.com"} 0 123
scrape_samples_post_metric_relabeling{job="xx",instance="foo.com"} 1 123
scrape_series_added{job="xx",instance="foo.com"} 1 123
scrape_series_added{job="xx",instance="foo.com"} 4 123
scrape_timeout_seconds{job="xx",instance="foo.com"} 42 123
`)
    f(`
foo{bar="baz"} 34.44
bar{a="b",c="d"} -3e4
`, &ScrapeWork{
        HonorLabels: true,
        SampleLimit: 1,
        SeriesLimit: 123,
        ScrapeTimeout: time.Second * 42,
        HonorLabels:   true,
        SampleLimit:   1,
        SeriesLimit:   123,
    }, `
up 0 123
scrape_samples_scraped 2 123
scrape_duration_seconds 0 123
scrape_samples_post_metric_relabeling 2 123
scrape_series_added 0 123
scrape_timeout_seconds 42 123
`)
}

|
|||
tagsPool, fieldsPool, err = r.unmarshal(s, tagsPool, fieldsPool, noEscapeChars)
|
||||
if err != nil {
|
||||
dst = dst[:len(dst)-1]
|
||||
logger.Errorf("cannot unmarshal Influx line %q: %s; skipping it", s, err)
|
||||
logger.Errorf("cannot unmarshal InfluxDB line %q: %s; skipping it", s, err)
|
||||
invalidLines.Inc()
|
||||
}
|
||||
return dst, tagsPool, fieldsPool
|
||||
|
@ -312,7 +312,7 @@ func parseFieldValue(s string, hasQuotedFields bool) (float64, error) {
|
|||
if len(s) < 2 || s[len(s)-1] != '"' {
|
||||
return 0, fmt.Errorf("missing closing quote for quoted field value %s", s)
|
||||
}
|
||||
// Try converting quoted string to number, since sometimes Influx agents
|
||||
// Try converting quoted string to number, since sometimes InfluxDB agents
|
||||
// send numbers as strings.
|
||||
s = s[1 : len(s)-1]
|
||||
return fastfloat.ParseBestEffort(s), nil
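The quoted-value path above leans on fastfloat.ParseBestEffort from github.com/valyala/fastfloat, which never returns an error and yields 0 for unparseable input. A usage sketch:

package main

import (
    "fmt"

    "github.com/valyala/fastfloat"
)

func main() {
    fmt.Println(fastfloat.ParseBestEffort("123.45")) // 123.45
    fmt.Println(fastfloat.ParseBestEffort("1e3"))    // 1000
    fmt.Println(fastfloat.ParseBestEffort("abc"))    // 0 — best-effort: no error on garbage
}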

@@ -16,8 +16,8 @@ import (
)

var (
    maxLineSize = flagutil.NewBytes("influx.maxLineSize", 256*1024, "The maximum size in bytes for a single Influx line during parsing")
    trimTimestamp = flag.Duration("influxTrimTimestamp", time.Millisecond, "Trim timestamps for Influx line protocol data to this duration. "+
    maxLineSize = flagutil.NewBytes("influx.maxLineSize", 256*1024, "The maximum size in bytes for a single InfluxDB line during parsing")
    trimTimestamp = flag.Duration("influxTrimTimestamp", time.Millisecond, "Trim timestamps for InfluxDB line protocol data to this duration. "+
        "Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data")
)
@@ -2,6 +2,7 @@ package prometheus

import (
    "fmt"
    "strconv"
    "strings"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"

@@ -364,3 +365,214 @@ func prevBackslashesCount(s string) int {
    }
    return n
}

// GetRowsDiff returns rows from s1 which are missing in s2.
//
// The returned rows have the default value 0 and no timestamps.
func GetRowsDiff(s1, s2 string) string {
    var r1, r2 Rows
    r1.Unmarshal(s1)
    r2.Unmarshal(s2)
    rows1 := r1.Rows
    rows2 := r2.Rows
    m := make(map[string]bool, len(rows2))
    for i := range rows2 {
        r := &rows2[i]
        key := marshalMetricNameWithTags(r)
        m[key] = true
    }
    var diff []byte
    for i := range rows1 {
        r := &rows1[i]
        key := marshalMetricNameWithTags(r)
        if !m[key] {
            diff = append(diff, key...)
            diff = append(diff, " 0\n"...)
        }
    }
    return string(diff)
}
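A usage sketch for GetRowsDiff, matching the cases exercised by TestGetRowsDiff further down (import path assumed to be this repo's lib/protoparser/prometheus):

package main

import (
    "fmt"

    prom "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
)

func main() {
    // "foo 123" has no counterpart in the second set, so it is returned with value 0.
    fmt.Printf("%q\n", prom.GetRowsDiff("foo 123", "bar 3")) // "foo 0\n"
    // Tags are part of the series identity.
    fmt.Printf("%q\n", prom.GetRowsDiff(`foo{x="y"} 1`, `foo{x="z"} 1`)) // "foo{x=\"y\"} 0\n"
}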

func marshalMetricNameWithTags(r *Row) string {
    if len(r.Tags) == 0 {
        return r.Metric
    }
    var b []byte
    b = append(b, r.Metric...)
    b = append(b, '{')
    for i, t := range r.Tags {
        b = append(b, t.Key...)
        b = append(b, '=')
        b = strconv.AppendQuote(b, t.Value)
        if i+1 < len(r.Tags) {
            b = append(b, ',')
        }
    }
    b = append(b, '}')
    return string(b)
}
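marshalMetricNameWithTags builds the canonical series key used in the diff map above; strconv.AppendQuote keeps label-value escaping identical on both sides of the comparison. An in-package sketch (the helper is unexported, so this only illustrates its input and output):

// In-package sketch, using the Row/Tag fields as they appear above:
r := Row{Metric: "foo", Tags: []Tag{{Key: "bar", Value: `b"az`}}}
fmt.Println(marshalMetricNameWithTags(&r)) // foo{bar="b\"az"}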

// AreIdenticalSeriesFast returns true if s1 and s2 contain identical Prometheus series with possibly different values.
//
// This function is optimized for speed.
func AreIdenticalSeriesFast(s1, s2 string) bool {
    for {
        if len(s1) == 0 {
            // The last byte of s1 has been reached.
            return len(s2) == 0
        }
        if len(s2) == 0 {
            // The last byte of s2 has been reached, while s1 has non-empty contents.
            return false
        }

        // Extract the next pair of lines from s1 and s2.
        var x1, x2 string
        n1 := strings.IndexByte(s1, '\n')
        if n1 < 0 {
            x1 = s1
            s1 = ""
        } else {
            x1 = s1[:n1]
            s1 = s1[n1+1:]
        }
        if n := strings.IndexByte(x1, '#'); n >= 0 {
            // Drop comment.
            x1 = x1[:n]
        }
        n2 := strings.IndexByte(s2, '\n')
        if n2 < 0 {
            if n1 >= 0 {
                return false
            }
            x2 = s2
            s2 = ""
        } else {
            if n1 < 0 {
                return false
            }
            x2 = s2[:n2]
            s2 = s2[n2+1:]
        }
        if n := strings.IndexByte(x2, '#'); n >= 0 {
            // Drop comment.
            x2 = x2[:n]
        }

        // Skip whitespace in front of lines.
        for len(x1) > 0 && x1[0] == ' ' {
            if len(x2) == 0 || x2[0] != ' ' {
                return false
            }
            x1 = x1[1:]
            x2 = x2[1:]
        }
        if len(x1) == 0 {
            // The last byte of x1 has been reached.
            if len(x2) != 0 {
                return false
            }
            continue
        }
        if len(x2) == 0 {
            // The last byte of x2 has been reached, while x1 has non-empty contents.
            return false
        }
        // Compare metric names.
        n := strings.IndexByte(x1, ' ')
        if n < 0 {
            // Invalid Prometheus line - it must contain at least a single space between metric name and value.
            return false
        }
        n++
        if n > len(x2) || x1[:n] != x2[:n] {
            // Metric names mismatch.
            return false
        }
        x1 = x1[n:]
        x2 = x2[n:]

        // The space could belong to the metric name in the following cases:
        //   foo {bar="baz"} 1
        //   foo{ bar="baz"} 2
        //   foo{bar="baz", aa="b"} 3
        //   foo{bar="b az"} 4
        //   foo 5
        // Continue comparing the remaining parts until space or newline.
        for {
            n1 := strings.IndexByte(x1, ' ')
            if n1 < 0 {
                // Fast path.
                // Treat x1 as a value.
                // Skip values at x1 and x2.
                n2 := strings.IndexByte(x2, ' ')
                if n2 >= 0 {
                    // x2 contains additional parts.
                    return false
                }
                break
            }
            n1++
            // Slow path.
            // The x1[:n1] can be either a part of the metric name or a value if a timestamp is present:
            //   foo 12 34
            if isNumeric(x1[:n1-1]) {
                // Skip the numeric part (most likely a value before a timestamp) in x1 and x2.
                n2 := strings.IndexByte(x2, ' ')
                if n2 < 0 {
                    // x2 contains fewer parts than x1.
                    return false
                }
                n2++
                if !isNumeric(x2[:n2-1]) {
                    // x1 contains a numeric part, while x2 contains a non-numeric part.
                    return false
                }
                x1 = x1[n1:]
                x2 = x2[n2:]
            } else {
                // The non-numeric part from x1 must match the corresponding part from x2.
                if n1 > len(x2) || x1[:n1] != x2[:n1] {
                    // Parts mismatch.
                    return false
                }
                x1 = x1[n1:]
                x2 = x2[n1:]
            }
        }
    }
}
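AreIdenticalSeriesFast in a nutshell, mirroring TestAreIdenticalSeriesFast below (import path assumed to be lib/protoparser/prometheus):

package main

import (
    "fmt"

    prom "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
)

func main() {
    fmt.Println(prom.AreIdenticalSeriesFast("foo 1", "foo 2"))          // true: same series, values differ
    fmt.Println(prom.AreIdenticalSeriesFast("foo 1", "bar 1"))          // false: metric name differs
    fmt.Println(prom.AreIdenticalSeriesFast("foo 1 2", "foo 234 4334")) // true: timestamps may differ too
    fmt.Println(prom.AreIdenticalSeriesFast("foo 2", "foo 3 4"))        // false: timestamp present on one side only
}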

func isNumeric(s string) bool {
    for i := 0; i < len(s); i++ {
        if numericChars[s[i]] {
            continue
        }
        if i == 0 && s == "NaN" || s == "nan" || s == "Inf" || s == "inf" {
            return true
        }
        if i == 1 && (s[0] == '-' || s[0] == '+') && (s[1:] == "Inf" || s[1:] == "inf") {
            return true
        }
        return false
    }
    return true
}

var numericChars = [256]bool{
    '0': true,
    '1': true,
    '2': true,
    '3': true,
    '4': true,
    '5': true,
    '6': true,
    '7': true,
    '8': true,
    '9': true,
    '-': true,
    '+': true,
    'e': true,
    'E': true,
    '.': true,
}
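isNumeric drives the value/timestamp skipping above. A few sample inputs, matching the test data for AreIdenticalSeriesFast (in-package sketch, since the helper is unexported):

// isNumeric("-3.4e-5") == true  — digits, signs, exponent and dot all come from numericChars
// isNumeric("NaN")     == true  — special-cased at i == 0
// isNumeric("+Inf")    == true  — signed infinity is special-cased at i == 1
// isNumeric("nansf")   == false — anything else is rejected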

@@ -6,6 +6,87 @@ import (
    "testing"
)

func TestGetRowsDiff(t *testing.T) {
    f := func(s1, s2, resultExpected string) {
        t.Helper()
        result := GetRowsDiff(s1, s2)
        if result != resultExpected {
            t.Fatalf("unexpected result for GetRowsDiff(%q, %q); got %q; want %q", s1, s2, result, resultExpected)
        }
    }
    f("", "", "")
    f("", "foo 1", "")
    f(" ", "foo 1", "")
    f("foo 123", "", "foo 0\n")
    f("foo 123", "bar 3", "foo 0\n")
    f("foo 123", "bar 3\nfoo 344", "")
    f("foo{x=\"y\", z=\"a a a\"} 123", "bar 3\nfoo{x=\"y\", z=\"b b b\"} 344", "foo{x=\"y\",z=\"a a a\"} 0\n")
    f("foo{bar=\"baz\"} 123\nx 3.4 5\ny 5 6", "x 34 342", "foo{bar=\"baz\"} 0\ny 0\n")
}

func TestAreIdenticalSeriesFast(t *testing.T) {
    f := func(s1, s2 string, resultExpected bool) {
        t.Helper()
        result := AreIdenticalSeriesFast(s1, s2)
        if result != resultExpected {
            t.Fatalf("unexpected result for AreIdenticalSeries(%q, %q); got %v; want %v", s1, s2, result, resultExpected)
        }
    }
    f("", "", true)
    f("", "a 1", false)   // different number of metrics
    f(" ", " a 1", false) // different number of metrics
    f("a 1", "", false)   // different number of metrics
    f(" a 1", " ", false) // different number of metrics
    f("foo", "foo", false) // missing value
    f("foo 1", "foo 1", true)
    f("foo 1", "foo 2", true)
    f("foo 1 ", "foo 2 ", true)
    f("foo 1 ", "foo 2  ", false) // different number of spaces
    f("foo 1  ", "foo 2 ", false) // different number of spaces
    f("foo nan", "foo -inf", true)
    f("foo 1 # coment x", "foo 2 #comment y", true)
    f(" foo 1", " foo 1", true)
    f(" foo 1", "  foo 1", false) // different number of spaces in front of metric
    f("  foo 1", " foo 1", false) // different number of spaces in front of metric
    f("foo 1", "bar 1", false)  // different metric name
    f("foo 1", "fooo 1", false) // different metric name
    f("foo 123", "foo 32.32", true)
    f(`foo{bar="x"} -3.3e-6`, `foo{bar="x"} 23343`, true)
    f(`foo{} 1`, `foo{} 234`, true)
    f(`foo {x="y x" } 234`, `foo {x="y x" } 43.342`, true)
    f(`foo {x="y x"} 234`, `foo{x="y x"} 43.342`, false) // different spaces
    f("foo 2\nbar 3", "foo 34.43\nbar -34.3", true)
    f("foo 2\nbar 3", "foo 34.43\nbarz -34.3", false) // different metric names
    f("\nfoo 13\n", "\nfoo 3.4\n", true)
    f("\nfoo 13", "\nfoo 3.4\n", false) // different number of blank lines
    f("\nfoo 13\n", "\nfoo 3.4", false) // different number of blank lines
    f("\n\nfoo 1", "\n\nfoo 34.43", true)
    f("\n\nfoo 3434\n", "\n\nfoo 43\n", true)
    f("\nfoo 1", "\n\nfoo 34.43", false) // different number of blank lines
    f("#foo{bar}", "#baz", true)
    f("", "#baz", false)             // different number of comments
    f("#foo{bar}", "", false)        // different number of comments
    f("#foo{bar}", "bar 3", false)   // different number of comments
    f("foo{bar} 2", "#bar 3", false) // different number of comments
    f("#foo\n", "#bar", false)       // different number of blank lines
    f("#foo{bar}\n#baz", "#baz\n#xdsfds dsf", true)
    f("# foo\nbar 234\nbaz{x=\"y\", z=\"\"} 3", "# foo\nbar 3.3\nbaz{x=\"y\", z=\"\"} 4323", true)
    f("# foo\nbar 234\nbaz{x=\"z\", z=\"\"} 3", "# foo\nbar 3.3\nbaz{x=\"y\", z=\"\"} 4323", false) // different label value
    f("foo {bar=\"xfdsdsffdsa\"} 1", "foo {x=\"y\"} 2", false) // different labels
    f("foo {x=\"z\"} 1", "foo {x=\"y\"} 2", false)             // different label value

    // Lines with timestamps
    f("foo 1 2", "foo 234 4334", true)
    f("foo 2", "foo 3 4", false) // missing timestamp
    f("foo 2 1", "foo 3", false) // missing timestamp
    f("foo{bar=\"b az\"} 2 5", "foo{bar=\"b az\"} +6.3 7.43", true)
    f("foo{bar=\"b az\"} 2 5 # comment ss ", "foo{bar=\"b az\"} +6.3 7.43 # comment as ", true)
    f("foo{bar=\"b az\"} 2 5 #comment", "foo{bar=\"b az\"} +6.3 7.43 #comment {foo=\"bar\"} 21.44", true)
    f("foo{bar=\"b az\"} +Inf 5", "foo{bar=\"b az\"} NaN 7.43", true)
    f("foo{bar=\"b az\"} +Inf 5", "foo{bar=\"b az\"} nan 7.43", true)
    f("foo{bar=\"b az\"} +Inf 5", "foo{bar=\"b az\"} nansf 7.43", false) // invalid value
}

func TestPrevBackslashesCount(t *testing.T) {
    f := func(s string, nExpected int) {
        t.Helper()

@@ -105,6 +186,10 @@ func TestRowsUnmarshalFailure(t *testing.T) {
    // empty metric name
    f(`{foo="bar"}`)

    // Invalid quotes for label value
    f(`{foo='bar'} 23`)
    f("{foo=`bar`} 23")

    // Missing value
    f("aaa")
    f(" aaa")

@@ -5,6 +5,116 @@ import (
    "testing"
)

func BenchmarkAreIdenticalSeriesFast(b *testing.B) {
    b.Run("identical-series-no-timestamps", func(b *testing.B) {
        s := `
# HELP machine_cpu_cores Number of logical CPU cores.
# TYPE machine_cpu_cores gauge
machine_cpu_cores{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 4
# HELP machine_cpu_physical_cores Number of physical CPU cores.
# TYPE machine_cpu_physical_cores gauge
machine_cpu_physical_cores{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 2
# HELP machine_cpu_sockets Number of CPU sockets.
# TYPE machine_cpu_sockets gauge
machine_cpu_sockets{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 1
# HELP machine_memory_bytes Amount of memory installed on the machine.
# TYPE machine_memory_bytes gauge
machine_memory_bytes{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 1.6706146304e+10
# HELP machine_nvm_avg_power_budget_watts NVM power budget.
# TYPE machine_nvm_avg_power_budget_watts gauge
machine_nvm_avg_power_budget_watts{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 0
# HELP machine_nvm_capacity NVM capacity value labeled by NVM mode (memory mode or app direct mode).
# TYPE machine_nvm_capacity gauge
machine_nvm_capacity{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",mode="app_direct_mode",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 0
machine_nvm_capacity{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",mode="memory_mode",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 0
# HELP machine_scrape_error 1 if there was an error while getting machine metrics, 0 otherwise.
# TYPE machine_scrape_error gauge
machine_scrape_error 0
`
        benchmarkAreIdenticalSeriesFast(b, s, s, true)
    })
    b.Run("different-series-no-timestamps", func(b *testing.B) {
        s := `
# HELP machine_cpu_cores Number of logical CPU cores.
# TYPE machine_cpu_cores gauge
machine_cpu_cores{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 4
# HELP machine_cpu_physical_cores Number of physical CPU cores.
# TYPE machine_cpu_physical_cores gauge
machine_cpu_physical_cores{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 2
# HELP machine_cpu_sockets Number of CPU sockets.
# TYPE machine_cpu_sockets gauge
machine_cpu_sockets{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 1
# HELP machine_memory_bytes Amount of memory installed on the machine.
# TYPE machine_memory_bytes gauge
machine_memory_bytes{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 1.6706146304e+10
# HELP machine_nvm_avg_power_budget_watts NVM power budget.
# TYPE machine_nvm_avg_power_budget_watts gauge
machine_nvm_avg_power_budget_watts{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 0
# HELP machine_nvm_capacity NVM capacity value labeled by NVM mode (memory mode or app direct mode).
# TYPE machine_nvm_capacity gauge
machine_nvm_capacity{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",mode="app_direct_mode",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 0
machine_nvm_capacity{boot_id="a1b49bdb-4c2a-4943-9ab3-363a316e9260",machine_id="857143c2dbea4a179223627cf9f47d06",mode="memory_mode",system_uuid="03a75ec7-5105-421a-8b8a-3d7190f6e890"} 0
# HELP machine_scrape_error 1 if there was an error while getting machine metrics, 0 otherwise.
# TYPE machine_scrape_error gauge
machine_scrape_error 0
`
        benchmarkAreIdenticalSeriesFast(b, s, s+"\nfoo 1", false)
    })
    b.Run("identical-series-with-timestamps", func(b *testing.B) {
        s := `
container_ulimits_soft{container="",id="/kubelet/kubepods/burstable/pod48ea6dbad93797db01928fb7884b8154/49d928b5e3e3398730c9ce9de02171bb139b5bf2f485b153d9a293114a5762a3",image="sha256:0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da",name="49d928b5e3e3398730c9ce9de02171bb139b5bf2f485b153d9a293114a5762a3",namespace="kube-system",pod="kube-apiserver-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113856793
container_ulimits_soft{container="",id="/kubelet/kubepods/burstable/pod69cd289b4ed80ced4f95a59ff60fa102/602a9be3cad5ca8aa57bdbb4a947ddd3b1b229b6e54c7acbb6906de061d51d05",image="sha256:0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da",name="602a9be3cad5ca8aa57bdbb4a947ddd3b1b229b6e54c7acbb6906de061d51d05",namespace="kube-system",pod="kube-scheduler-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113855488
container_ulimits_soft{container="",id="/kubelet/kubepods/burstable/pod86744a0c8ef8da0d937493e4ed918cda/2f1a3706328f86337864f7c2c7100aabf9cabf03fef5518e883380977372d53f",image="sha256:0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da",name="2f1a3706328f86337864f7c2c7100aabf9cabf03fef5518e883380977372d53f",namespace="kube-system",pod="kube-controller-manager-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113858430
container_ulimits_soft{container="",id="/kubelet/kubepods/burstable/poda4a6a8d4c9c0100deb8dc3a1d3adfa32/a84ce063fb5cab82bb938151e9fa1e98ad875c3cf5dad88d797d4c65c6229c13",image="sha256:0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da",name="a84ce063fb5cab82bb938151e9fa1e98ad875c3cf5dad88d797d4c65c6229c13",namespace="kube-system",pod="etcd-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113850216
container_ulimits_soft{container="",id="/kubelet/kubepods/poda922c399-764c-4614-8a2d-84bdd6765ffc/ec6b156815cc77c389fe08a4be82603514c8929a9827b8ba27f9cb9c0b57b067",image="sha256:0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da",name="ec6b156815cc77c389fe08a4be82603514c8929a9827b8ba27f9cb9c0b57b067",namespace="kube-system",pod="kindnet-nj4p9",ulimit="max_open_files"} 1.048576e+06 1631113865193
container_ulimits_soft{container="etcd",id="/docker/6b7c234cfe92a0924e54e2a51d9607a5893a38ed14c7161f324863eeaa2fb985/kubelet/kubepods/burstable/poda4a6a8d4c9c0100deb8dc3a1d3adfa32/0cd86529af0ca0e389ed657b2c0a20f03275cf6d9e0cd52fe4c1f90b96037de7",image="k8s.gcr.io/etcd:3.4.13-0",name="0cd86529af0ca0e389ed657b2c0a20f03275cf6d9e0cd52fe4c1f90b96037de7",namespace="kube-system",pod="etcd-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113855044
container_ulimits_soft{container="etcd",id="/kubelet/kubepods/burstable/poda4a6a8d4c9c0100deb8dc3a1d3adfa32/0cd86529af0ca0e389ed657b2c0a20f03275cf6d9e0cd52fe4c1f90b96037de7",image="k8s.gcr.io/etcd:3.4.13-0",name="0cd86529af0ca0e389ed657b2c0a20f03275cf6d9e0cd52fe4c1f90b96037de7",namespace="kube-system",pod="etcd-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113867411
container_ulimits_soft{container="kindnet-cni",id="/docker/6b7c234cfe92a0924e54e2a51d9607a5893a38ed14c7161f324863eeaa2fb985/kubelet/kubepods/poda922c399-764c-4614-8a2d-84bdd6765ffc/b38094619c14a9f921e2d10fb0f84433bea774aeb223ba19dade527e1c46de22",image="docker.io/kindest/kindnetd:v20210119-d5ef916d",name="b38094619c14a9f921e2d10fb0f84433bea774aeb223ba19dade527e1c46de22",namespace="kube-system",pod="kindnet-nj4p9",ulimit="max_open_files"} 1.048576e+06 1631113868404
container_ulimits_soft{container="kindnet-cni",id="/kubelet/kubepods/poda922c399-764c-4614-8a2d-84bdd6765ffc/b38094619c14a9f921e2d10fb0f84433bea774aeb223ba19dade527e1c46de22",image="docker.io/kindest/kindnetd:v20210119-d5ef916d",name="b38094619c14a9f921e2d10fb0f84433bea774aeb223ba19dade527e1c46de22",namespace="kube-system",pod="kindnet-nj4p9",ulimit="max_open_files"} 1.048576e+06 1631113862176
container_ulimits_soft{container="kube-apiserver",id="/docker/6b7c234cfe92a0924e54e2a51d9607a5893a38ed14c7161f324863eeaa2fb985/kubelet/kubepods/burstable/pod48ea6dbad93797db01928fb7884b8154/4026cf5500d96c6e274a2607b507891abc21f7b1577e29c9400cfb0f0ce5d8aa",image="k8s.gcr.io/kube-apiserver:v1.20.2",name="4026cf5500d96c6e274a2607b507891abc21f7b1577e29c9400cfb0f0ce5d8aa",namespace="kube-system",pod="kube-apiserver-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113865919
container_ulimits_soft{container="kube-apiserver",id="/kubelet/kubepods/burstable/pod48ea6dbad93797db01928fb7884b8154/4026cf5500d96c6e274a2607b507891abc21f7b1577e29c9400cfb0f0ce5d8aa",image="k8s.gcr.io/kube-apiserver:v1.20.2",name="4026cf5500d96c6e274a2607b507891abc21f7b1577e29c9400cfb0f0ce5d8aa",namespace="kube-system",pod="kube-apiserver-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113863531
container_ulimits_soft{container="kube-controller-manager",id="/docker/6b7c234cfe92a0924e54e2a51d9607a5893a38ed14c7161f324863eeaa2fb985/kubelet/kubepods/burstable/pod86744a0c8ef8da0d937493e4ed918cda/04b0948ab58f83013fed7611f0ffadb13ff7336561c91606644848f60405771b",image="k8s.gcr.io/kube-controller-manager:v1.20.2",name="04b0948ab58f83013fed7611f0ffadb13ff7336561c91606644848f60405771b",namespace="kube-system",pod="kube-controller-manager-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113868172
container_ulimits_soft{container="kube-controller-manager",id="/kubelet/kubepods/burstable/pod86744a0c8ef8da0d937493e4ed918cda/04b0948ab58f83013fed7611f0ffadb13ff7336561c91606644848f60405771b",image="k8s.gcr.io/kube-controller-manager:v1.20.2",name="04b0948ab58f83013fed7611f0ffadb13ff7336561c91606644848f60405771b",namespace="kube-system",pod="kube-controller-manager-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113860485
container_ulimits_soft{container="kube-scheduler",id="/docker/6b7c234cfe92a0924e54e2a51d9607a5893a38ed14c7161f324863eeaa2fb985/kubelet/kubepods/burstable/pod69cd289b4ed80ced4f95a59ff60fa102/d9627625c8d60d859f2a13f9ed66c77c9767368e18eb5669fe1a85d600e43f9b",image="k8s.gcr.io/kube-scheduler:v1.20.2",name="d9627625c8d60d859f2a13f9ed66c77c9767368e18eb5669fe1a85d600e43f9b",namespace="kube-system",pod="kube-scheduler-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113857794
container_ulimits_soft{container="kube-scheduler",id="/kubelet/kubepods/burstable/pod69cd289b4ed80ced4f95a59ff60fa102/d9627625c8d60d859f2a13f9ed66c77c9767368e18eb5669fe1a85d600e43f9b",image="k8s.gcr.io/kube-scheduler:v1.20.2",name="d9627625c8d60d859f2a13f9ed66c77c9767368e18eb5669fe1a85d600e43f9b",namespace="kube-system",pod="kube-scheduler-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113868640
`
        benchmarkAreIdenticalSeriesFast(b, s, s, true)
    })
    b.Run("different-series-with-timestamps", func(b *testing.B) {
        s := `
container_ulimits_soft{container="",id="/kubelet/kubepods/burstable/pod48ea6dbad93797db01928fb7884b8154/49d928b5e3e3398730c9ce9de02171bb139b5bf2f485b153d9a293114a5762a3",image="sha256:0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da",name="49d928b5e3e3398730c9ce9de02171bb139b5bf2f485b153d9a293114a5762a3",namespace="kube-system",pod="kube-apiserver-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113856793
container_ulimits_soft{container="",id="/kubelet/kubepods/burstable/pod69cd289b4ed80ced4f95a59ff60fa102/602a9be3cad5ca8aa57bdbb4a947ddd3b1b229b6e54c7acbb6906de061d51d05",image="sha256:0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da",name="602a9be3cad5ca8aa57bdbb4a947ddd3b1b229b6e54c7acbb6906de061d51d05",namespace="kube-system",pod="kube-scheduler-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113855488
container_ulimits_soft{container="",id="/kubelet/kubepods/burstable/pod86744a0c8ef8da0d937493e4ed918cda/2f1a3706328f86337864f7c2c7100aabf9cabf03fef5518e883380977372d53f",image="sha256:0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da",name="2f1a3706328f86337864f7c2c7100aabf9cabf03fef5518e883380977372d53f",namespace="kube-system",pod="kube-controller-manager-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113858430
container_ulimits_soft{container="",id="/kubelet/kubepods/burstable/poda4a6a8d4c9c0100deb8dc3a1d3adfa32/a84ce063fb5cab82bb938151e9fa1e98ad875c3cf5dad88d797d4c65c6229c13",image="sha256:0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da",name="a84ce063fb5cab82bb938151e9fa1e98ad875c3cf5dad88d797d4c65c6229c13",namespace="kube-system",pod="etcd-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113850216
container_ulimits_soft{container="",id="/kubelet/kubepods/poda922c399-764c-4614-8a2d-84bdd6765ffc/ec6b156815cc77c389fe08a4be82603514c8929a9827b8ba27f9cb9c0b57b067",image="sha256:0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da",name="ec6b156815cc77c389fe08a4be82603514c8929a9827b8ba27f9cb9c0b57b067",namespace="kube-system",pod="kindnet-nj4p9",ulimit="max_open_files"} 1.048576e+06 1631113865193
container_ulimits_soft{container="etcd",id="/docker/6b7c234cfe92a0924e54e2a51d9607a5893a38ed14c7161f324863eeaa2fb985/kubelet/kubepods/burstable/poda4a6a8d4c9c0100deb8dc3a1d3adfa32/0cd86529af0ca0e389ed657b2c0a20f03275cf6d9e0cd52fe4c1f90b96037de7",image="k8s.gcr.io/etcd:3.4.13-0",name="0cd86529af0ca0e389ed657b2c0a20f03275cf6d9e0cd52fe4c1f90b96037de7",namespace="kube-system",pod="etcd-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113855044
container_ulimits_soft{container="etcd",id="/kubelet/kubepods/burstable/poda4a6a8d4c9c0100deb8dc3a1d3adfa32/0cd86529af0ca0e389ed657b2c0a20f03275cf6d9e0cd52fe4c1f90b96037de7",image="k8s.gcr.io/etcd:3.4.13-0",name="0cd86529af0ca0e389ed657b2c0a20f03275cf6d9e0cd52fe4c1f90b96037de7",namespace="kube-system",pod="etcd-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113867411
container_ulimits_soft{container="kindnet-cni",id="/docker/6b7c234cfe92a0924e54e2a51d9607a5893a38ed14c7161f324863eeaa2fb985/kubelet/kubepods/poda922c399-764c-4614-8a2d-84bdd6765ffc/b38094619c14a9f921e2d10fb0f84433bea774aeb223ba19dade527e1c46de22",image="docker.io/kindest/kindnetd:v20210119-d5ef916d",name="b38094619c14a9f921e2d10fb0f84433bea774aeb223ba19dade527e1c46de22",namespace="kube-system",pod="kindnet-nj4p9",ulimit="max_open_files"} 1.048576e+06 1631113868404
container_ulimits_soft{container="kindnet-cni",id="/kubelet/kubepods/poda922c399-764c-4614-8a2d-84bdd6765ffc/b38094619c14a9f921e2d10fb0f84433bea774aeb223ba19dade527e1c46de22",image="docker.io/kindest/kindnetd:v20210119-d5ef916d",name="b38094619c14a9f921e2d10fb0f84433bea774aeb223ba19dade527e1c46de22",namespace="kube-system",pod="kindnet-nj4p9",ulimit="max_open_files"} 1.048576e+06 1631113862176
container_ulimits_soft{container="kube-apiserver",id="/docker/6b7c234cfe92a0924e54e2a51d9607a5893a38ed14c7161f324863eeaa2fb985/kubelet/kubepods/burstable/pod48ea6dbad93797db01928fb7884b8154/4026cf5500d96c6e274a2607b507891abc21f7b1577e29c9400cfb0f0ce5d8aa",image="k8s.gcr.io/kube-apiserver:v1.20.2",name="4026cf5500d96c6e274a2607b507891abc21f7b1577e29c9400cfb0f0ce5d8aa",namespace="kube-system",pod="kube-apiserver-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113865919
container_ulimits_soft{container="kube-apiserver",id="/kubelet/kubepods/burstable/pod48ea6dbad93797db01928fb7884b8154/4026cf5500d96c6e274a2607b507891abc21f7b1577e29c9400cfb0f0ce5d8aa",image="k8s.gcr.io/kube-apiserver:v1.20.2",name="4026cf5500d96c6e274a2607b507891abc21f7b1577e29c9400cfb0f0ce5d8aa",namespace="kube-system",pod="kube-apiserver-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113863531
container_ulimits_soft{container="kube-controller-manager",id="/docker/6b7c234cfe92a0924e54e2a51d9607a5893a38ed14c7161f324863eeaa2fb985/kubelet/kubepods/burstable/pod86744a0c8ef8da0d937493e4ed918cda/04b0948ab58f83013fed7611f0ffadb13ff7336561c91606644848f60405771b",image="k8s.gcr.io/kube-controller-manager:v1.20.2",name="04b0948ab58f83013fed7611f0ffadb13ff7336561c91606644848f60405771b",namespace="kube-system",pod="kube-controller-manager-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113868172
container_ulimits_soft{container="kube-controller-manager",id="/kubelet/kubepods/burstable/pod86744a0c8ef8da0d937493e4ed918cda/04b0948ab58f83013fed7611f0ffadb13ff7336561c91606644848f60405771b",image="k8s.gcr.io/kube-controller-manager:v1.20.2",name="04b0948ab58f83013fed7611f0ffadb13ff7336561c91606644848f60405771b",namespace="kube-system",pod="kube-controller-manager-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113860485
container_ulimits_soft{container="kube-scheduler",id="/docker/6b7c234cfe92a0924e54e2a51d9607a5893a38ed14c7161f324863eeaa2fb985/kubelet/kubepods/burstable/pod69cd289b4ed80ced4f95a59ff60fa102/d9627625c8d60d859f2a13f9ed66c77c9767368e18eb5669fe1a85d600e43f9b",image="k8s.gcr.io/kube-scheduler:v1.20.2",name="d9627625c8d60d859f2a13f9ed66c77c9767368e18eb5669fe1a85d600e43f9b",namespace="kube-system",pod="kube-scheduler-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113857794
container_ulimits_soft{container="kube-scheduler",id="/kubelet/kubepods/burstable/pod69cd289b4ed80ced4f95a59ff60fa102/d9627625c8d60d859f2a13f9ed66c77c9767368e18eb5669fe1a85d600e43f9b",image="k8s.gcr.io/kube-scheduler:v1.20.2",name="d9627625c8d60d859f2a13f9ed66c77c9767368e18eb5669fe1a85d600e43f9b",namespace="kube-system",pod="kube-scheduler-kind-control-plane",ulimit="max_open_files"} 1.048576e+06 1631113868640
`
        benchmarkAreIdenticalSeriesFast(b, s, s+"\nfoo 1", false)
    })
}

func benchmarkAreIdenticalSeriesFast(b *testing.B, s1, s2 string, expectedResult bool) {
    b.SetBytes(int64(len(s1)))
    b.ReportAllocs()
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            result := AreIdenticalSeriesFast(s1, s2)
            if result != expectedResult {
                panic(fmt.Errorf("unexpected result; got %v; want %v", result, expectedResult))
            }
        }
    })
}

func BenchmarkRowsUnmarshal(b *testing.B) {
    s := `cpu_usage{mode="user"} 1.23
cpu_usage{mode="system"} 23.344

@@ -2447,7 +2447,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
    }
    for i, tfw := range tfws {
        tf := tfw.tf
        if tf.isNegative {
        if tf.isNegative || tf.isEmptyMatch {
            tfwsRemaining = append(tfwsRemaining, tfw)
            continue
        }

@@ -2552,7 +2552,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
        return nil, err
    }
    storeFilterLoopsCount(&tfw, filterLoopsCount)
    if tf.isNegative {
    if tf.isNegative || tf.isEmptyMatch {
        metricIDs.Subtract(m)
    } else {
        metricIDs.Intersect(m)

@@ -2708,6 +2708,7 @@ func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64,
        logger.Panicf("BUG: unexpected tf.prefix %q; must start with commonPrefix %q", tf.prefix, commonPrefix)
    }
    kb := kbPool.Get()
    defer kbPool.Put(kb)
    if date != 0 {
        // Use per-date search.
        kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)

@@ -2721,8 +2722,27 @@ func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64,
    tfNew.isNegative = false // isNegative for the original tf is handled by the caller.
    tfNew.prefix = kb.B
    metricIDs, loopsCount, err := is.getMetricIDsForTagFilter(&tfNew, maxMetrics, maxLoopsCount)
    kbPool.Put(kb)
    return metricIDs, loopsCount, err
    if err != nil {
        return nil, loopsCount, err
    }
    if tf.isNegative || !tf.isEmptyMatch {
        return metricIDs, loopsCount, nil
    }
    // The tag filter matches an empty label, such as {foo=~"bar|"}.
    // Convert it to a negative filter, which matches {foo=~".+",foo!~"bar|"}.
    // This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601
    // See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/395
    maxLoopsCount -= loopsCount
    if err := tfNew.Init(kb.B, tf.key, []byte(".+"), false, true); err != nil {
        logger.Panicf(`BUG: cannot init tag filter: {%q=~".+"}: %s`, tf.key, err)
    }
    m, lc, err := is.getMetricIDsForTagFilter(&tfNew, maxMetrics, maxLoopsCount)
    loopsCount += lc
    if err != nil {
        return nil, loopsCount, err
    }
    m.Subtract(metricIDs)
    return m, loopsCount, nil
}
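A toy model of the conversion above, assuming three series: a with foo="bar", b with foo="baz", and c without the foo label. The index computes m = {foo=~".+"} minus {foo=~"bar|"}; the caller then subtracts m, which leaves exactly the series matched by {foo=~"bar|"}:

package main

import "fmt"

func main() {
    all := []string{"a", "b", "c"}
    fooAny := map[string]bool{"a": true, "b": true} // series where foo is set: {foo=~".+"}
    fooBar := map[string]bool{"a": true}            // series where foo matches "bar"
    m := map[string]bool{}                          // {foo=~".+"} minus {foo=~"bar|"}
    for id := range fooAny {
        if !fooBar[id] {
            m[id] = true
        }
    }
    for _, id := range all {
        if !m[id] { // the caller's Subtract(m)
            fmt.Println(id) // prints a and c: foo equals "bar" or foo is missing
        }
    }
}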

func (is *indexSearch) getLoopsCountAndTimestampForDateFilter(date uint64, tf *tagFilter) (int64, int64, uint64) {

@@ -866,9 +866,6 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
    if err := tfs.Add(nil, []byte(re), false, true); err != nil {
        return fmt.Errorf("cannot create regexp tag filter for Graphite wildcard")
    }
    if tfsNew := tfs.Finalize(); len(tfsNew) > 0 {
        return fmt.Errorf("unexpected non-empty tag filters returned by TagFilters.Finalize: %v", tfsNew)
    }
    tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
    if err != nil {
        return fmt.Errorf("cannot search by regexp tag filter for Graphite wildcard: %w", err)

@@ -877,6 +874,43 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
        return fmt.Errorf("tsids is missing in regexp for Graphite wildcard tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn)
    }

    // Search with a filter matching empty tag (a single filter)
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601
    tfs.Reset()
    if err := tfs.Add(nil, mn.MetricGroup, false, false); err != nil {
        return fmt.Errorf("cannot create tag filter for MetricGroup: %w", err)
    }
    if err := tfs.Add([]byte("non-existent-tag"), []byte("foo|"), false, true); err != nil {
        return fmt.Errorf("cannot create regexp tag filter for non-existing tag: %w", err)
    }
    tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
    if err != nil {
        return fmt.Errorf("cannot search with a filter matching empty tag: %w", err)
    }
    if !testHasTSID(tsidsFound, tsid) {
        return fmt.Errorf("tsids is missing when matching a filter with empty tag tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn)
    }

    // Search with filters matching empty tags (multiple filters)
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601
    tfs.Reset()
    if err := tfs.Add(nil, mn.MetricGroup, false, false); err != nil {
        return fmt.Errorf("cannot create tag filter for MetricGroup: %w", err)
    }
    if err := tfs.Add([]byte("non-existent-tag1"), []byte("foo|"), false, true); err != nil {
        return fmt.Errorf("cannot create regexp tag filter for non-existing tag1: %w", err)
    }
    if err := tfs.Add([]byte("non-existent-tag2"), []byte("bar|"), false, true); err != nil {
        return fmt.Errorf("cannot create regexp tag filter for non-existing tag2: %w", err)
    }
    tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
    if err != nil {
        return fmt.Errorf("cannot search with multiple filters matching empty tags: %w", err)
    }
    if !testHasTSID(tsidsFound, tsid) {
        return fmt.Errorf("tsids is missing when matching multiple filters with empty tags tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn)
    }

    // Search with regexps.
    tfs.Reset()
    if err := tfs.Add(nil, mn.MetricGroup, false, true); err != nil {
@@ -18,58 +18,102 @@ import (
//
// This converts `foo{bar="baz",x=~"a.+"}` to `{foo=bar="baz",foo=x=~"a.+"}` filter.
func convertToCompositeTagFilterss(tfss []*TagFilters) []*TagFilters {
    tfssNew := make([]*TagFilters, len(tfss))
    for i, tfs := range tfss {
        tfssNew[i] = convertToCompositeTagFilters(tfs)
    tfssNew := make([]*TagFilters, 0, len(tfss))
    for _, tfs := range tfss {
        tfssNew = append(tfssNew, convertToCompositeTagFilters(tfs)...)
    }
    return tfssNew
}

func convertToCompositeTagFilters(tfs *TagFilters) *TagFilters {
    // Search for metric name filter, which must be used for creating composite filters.
    var name []byte
func convertToCompositeTagFilters(tfs *TagFilters) []*TagFilters {
    var tfssCompiled []*TagFilters
    // Search for filters on metric name, which will be used for creating composite filters.
    var names [][]byte
    hasPositiveFilter := false
    for _, tf := range tfs.tfs {
        if len(tf.key) == 0 && !tf.isNegative && !tf.isRegexp {
            name = tf.value
        } else if !tf.isNegative {
        if len(tf.key) == 0 {
            if !tf.isNegative && !tf.isRegexp {
                names = [][]byte{tf.value}
            } else if !tf.isNegative && tf.isRegexp && len(tf.orSuffixes) > 0 {
                // Split the filter {__name__=~"name1|...|nameN", other_filters}
                // into name1{other_filters}, ..., nameN{other_filters}
                // and generate composite filters for each of them.
                names = names[:0] // override the previous filters on metric name
                for _, orSuffix := range tf.orSuffixes {
                    names = append(names, []byte(orSuffix))
                }
            }
        } else if !tf.isNegative && !tf.isEmptyMatch {
            hasPositiveFilter = true
        }
    }
    if len(name) == 0 {
    if len(names) == 0 {
        atomic.AddUint64(&compositeFilterMissingConversions, 1)
        return tfs
        return []*TagFilters{tfs}
    }
    tfsNew := make([]tagFilter, 0, len(tfs.tfs))

    // Create composite filters for the found names.
    var compositeKey []byte
    compositeFilters := 0
    for _, tf := range tfs.tfs {
        if len(tf.key) == 0 {
            if !hasPositiveFilter || tf.isNegative || tf.isRegexp || string(tf.value) != string(name) {
                tfsNew = append(tfsNew, tf)
    for _, name := range names {
        compositeFilters := 0
        tfsNew := make([]tagFilter, 0, len(tfs.tfs))
        for _, tf := range tfs.tfs {
            if len(tf.key) == 0 {
                if !hasPositiveFilter || tf.isNegative {
                    // Negative filters on metric name cannot be used for building composite filter, so leave them as is.
                    tfsNew = append(tfsNew, tf)
                    continue
                }
                if tf.isRegexp {
                    matchName := false
                    for _, orSuffix := range tf.orSuffixes {
                        if orSuffix == string(name) {
                            matchName = true
                            break
                        }
                    }
                    if !matchName {
                        // Leave as is the regexp filter on metric name if it doesn't match the current name.
                        tfsNew = append(tfsNew, tf)
                        continue
                    }
                    // Skip the tf, since its part (name) is used as a prefix in composite filter.
                    continue
                }
                if string(tf.value) != string(name) {
                    // Leave as is the filter on another metric name.
                    tfsNew = append(tfsNew, tf)
                    continue
                }
                // Skip the tf, since it is used as a prefix in composite filter.
                continue
            }
            continue
            if string(tf.key) == "__graphite__" || bytes.Equal(tf.key, graphiteReverseTagKey) {
                // Leave as is __graphite__ filters, since they cannot be used for building composite filter.
                tfsNew = append(tfsNew, tf)
                continue
            }
            // Create composite filter on (name, tf)
            compositeKey = marshalCompositeTagKey(compositeKey[:0], name, tf.key)
            var tfNew tagFilter
            if err := tfNew.Init(tfs.commonPrefix, compositeKey, tf.value, tf.isNegative, tf.isRegexp); err != nil {
                logger.Panicf("BUG: unexpected error when creating composite tag filter for name=%q and key=%q: %s", name, tf.key, err)
            }
            tfsNew = append(tfsNew, tfNew)
            compositeFilters++
        }
        if string(tf.key) == "__graphite__" || bytes.Equal(tf.key, graphiteReverseTagKey) {
            tfsNew = append(tfsNew, tf)
            continue
        if compositeFilters == 0 {
            // Cannot use tfsNew, since it doesn't contain composite filters, e.g. it may match broader set of series.
            // Fall back to the original tfs.
            atomic.AddUint64(&compositeFilterMissingConversions, 1)
            return []*TagFilters{tfs}
        }
        compositeKey = marshalCompositeTagKey(compositeKey[:0], name, tf.key)
        var tfNew tagFilter
        if err := tfNew.Init(tfs.commonPrefix, compositeKey, tf.value, tf.isNegative, tf.isRegexp); err != nil {
            logger.Panicf("BUG: unexpected error when creating composite tag filter for name=%q and key=%q: %s", name, tf.key, err)
        }
        tfsNew = append(tfsNew, tfNew)
        compositeFilters++
        tfsCompiled := NewTagFilters()
        tfsCompiled.tfs = tfsNew
        tfssCompiled = append(tfssCompiled, tfsCompiled)
    }
    if compositeFilters == 0 {
        atomic.AddUint64(&compositeFilterMissingConversions, 1)
        return tfs
    }
    tfsCompiled := NewTagFilters()
    tfsCompiled.tfs = tfsNew
    atomic.AddUint64(&compositeFilterSuccessConversions, 1)
    return tfsCompiled
    return tfssCompiled
}
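The net effect of the name splitting above: a selector such as {__name__=~"name1|nameN",bar="baz"} now yields one composite filter set per name instead of falling back to the generic path. A toy sketch of the split (plain strings, not the real tagFilter types):

package main

import "fmt"

func main() {
    // {__name__=~"name1|nameN", bar="baz"} is handled per name:
    names := []string{"name1", "nameN"}
    for _, name := range names {
        // Each per-name set carries composite filters of the form {name=bar="baz"}.
        fmt.Printf("%s{bar=\"baz\"} -> {%s=bar=\"baz\"}\n", name, name)
    }
}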

var (

@@ -102,8 +146,6 @@ func (tfs *TagFilters) AddGraphiteQuery(query []byte, paths []string, isNegative
// Add adds the given tag filter to tfs.
//
// MetricGroup must be encoded with nil key.
//
// Finalize must be called after tfs is constructed.
func (tfs *TagFilters) Add(key, value []byte, isNegative, isRegexp bool) error {
    // Verify whether tag filter is empty.
    if len(value) == 0 {

@@ -157,38 +199,6 @@ func (tfs *TagFilters) addTagFilter() *tagFilter {
    return &tfs.tfs[len(tfs.tfs)-1]
}

// Finalize finalizes tfs and may return complementary TagFilters,
// which must be added to the resulting set of tag filters.
func (tfs *TagFilters) Finalize() []*TagFilters {
    var tfssNew []*TagFilters
    for i := range tfs.tfs {
        tf := &tfs.tfs[i]
        if !tf.isNegative && tf.isEmptyMatch {
            // tf matches empty value, so it must be accompanied with `key!~".+"` tag filter
            // in order to match time series without the given label.
            tfssNew = append(tfssNew, tfs.cloneWithNegativeFilter(tf))
        }
    }
    return tfssNew
}

func (tfs *TagFilters) cloneWithNegativeFilter(tfNegative *tagFilter) *TagFilters {
    tfsNew := NewTagFilters()
    for i := range tfs.tfs {
        tf := &tfs.tfs[i]
        if tf == tfNegative {
            if err := tfsNew.Add(tf.key, []byte(".+"), true, true); err != nil {
                logger.Panicf("BUG: unexpected error when creating a tag filter key=~'.+': %s", err)
            }
        } else {
            if err := tfsNew.Add(tf.key, tf.value, tf.isNegative, tf.isRegexp); err != nil {
                logger.Panicf("BUG: unexpected error when cloning a tag filter %s: %s", tf, err)
            }
        }
    }
    return tfsNew
}

// String returns human-readable value for tfs.
func (tfs *TagFilters) String() string {
    if len(tfs.tfs) == 0 {