mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-12-01 14:47:38 +00:00

Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

commit e2c4578751

999 changed files with 165069 additions and 107929 deletions
.gitignore (vendored): 3 changes

@@ -19,4 +19,5 @@
 .DS_store
 Gemfile.lock
-/_site
+_site
 _site
+*.tmp
Makefile: 13 changes

@@ -397,14 +397,21 @@ check-licenses: install-wwhrd
	wwhrd check -f .wwhrd.yml

copy-docs:
-	echo "---\nsort: ${ORDER}\n---\n" > ${DST}
+	echo '' > ${DST}
+	@if [ ${ORDER} -ne 0 ]; then \
+		echo "---\nsort: ${ORDER}\n---\n" > ${DST}; \
+	fi
	cat ${SRC} >> ${DST}
+	sed -i='.tmp' 's/<img src=\"docs\//<img src=\"/' ${DST}
+	rm -rf docs/*.tmp

# Copies docs for all components and adds the order tag.
+# For ORDER=0 it adds no order tag.
+# Images starting with <img src="docs/ are replaced with <img src="
# Cluster docs are supposed to be ordered as 9th.
-# For The rest of docs is ordered manually.t
+# The rest of docs is ordered manually.
docs-sync:
-	cp README.md docs/README.md
+	SRC=README.md DST=docs/README.md ORDER=0 $(MAKE) copy-docs
	SRC=README.md DST=docs/Single-server-VictoriaMetrics.md ORDER=1 $(MAKE) copy-docs
	SRC=app/vmagent/README.md DST=docs/vmagent.md ORDER=3 $(MAKE) copy-docs
	SRC=app/vmalert/README.md DST=docs/vmalert.md ORDER=4 $(MAKE) copy-docs
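For reference, the `copy-docs` helper above can also be invoked standalone; a hypothetical example (the SRC, DST and ORDER values are illustrative, not taken from the diff):

```console
SRC=app/vmbackup/README.md DST=docs/vmbackup.md ORDER=5 make copy-docs
```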
README.md: 126 changes

@@ -279,7 +279,7 @@ When querying the [backfilled data](https://docs.victoriametrics.com/#backfillin

 VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by clicking the `Override step value` checkbox.

-VMUI allows investigating correlations between two queries on the same graph. Just click `+` button, enter the second query in the newly appeared input field and press `Ctrl+Enter`. Results for both queries should be displayed simultaneously on the same graph. Every query has its own vertical scale, which is displayed on the left and the right side of the graph. Lines for the second query are dashed.
+VMUI allows investigating correlations between multiple queries on the same graph. Just click the `Add Query` button, enter an additional query in the newly appeared input field and press `Ctrl+Enter`. Results for all the queries should be displayed simultaneously on the same graph.

 See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
@@ -311,7 +311,7 @@ matching the specified [series selector](https://prometheus.io/docs/prometheus/l
 Cardinality explorer is built on top of [/api/v1/status/tsdb](#tsdb-stats).

+See [cardinality explorer playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/cardinality).

 See the example of using the cardinality explorer [here](https://victoriametrics.com/blog/cardinality-explorer/).

 ## How to apply new config to VictoriaMetrics

@@ -337,100 +337,86 @@ See also [vmagent](https://docs.victoriametrics.com/vmagent.html), which can be

 ## How to send data from DataDog agent

-VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/) via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.
+VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/)
+or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/)
+via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics)
+at `/datadog/api/v1/series` path.

-### Single-node VictoriaMetrics:
+### Sending metrics to VictoriaMetrics

-Run DataDog agent with environment variable `DD_DD_URL=http://victoriametrics-host:8428/datadog`. Alternatively, set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://victoriametrics-host:8428/datadog`.
+DataDog agent allows configuring destinations for metrics sending via ENV variable `DD_DD_URL`
+or via [configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) in section `dd_url`.

-### Cluster version of VictoriaMetrics:
-
-Run DataDog agent with environment variable `DD_DD_URL=http://vinsert-host:8480/insert/0/datadog`. Alternatively, set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `DD_DD_URL=http://vinsert-host:8480/insert/0/datadog`.
-
-VictoriaMetrics doesn't check `DD_API_KEY` param, so it can be set to arbitrary value.
-
-Example of how to send data to VictoriaMetrics via [DataDog "submit metrics"](https://docs.victoriametrics.com/url-examples.html#datadogapiv1series) from command line:
-
-### Single-node VictoriaMetrics:
-
-```console
-echo '
-{
-  "series": [
-    {
-      "host": "test.example.com",
-      "interval": 20,
-      "metric": "system.load.1",
-      "points": [[
-        0,
-        0.5
-      ]],
-      "tags": [
-        "environment:test"
-      ],
-      "type": "rate"
-    }
-  ]
-}
-' | curl -X POST --data-binary @- http://victoriametrics-host:8428/datadog/api/v1/series
-```
-
-### Cluster version of VictoriaMetrics:
+<p align="center">
+  <img src="Single-server-VictoriaMetrics-sending_DD_metrics_to_VM.png" width="800">
+</p>

+To configure DataDog agent via ENV variable add the following prefix:
+
+<div class="with-copy" markdown="1">

-```console
-echo '
-{
-  "series": [
-    {
-      "host": "test.example.com",
-      "interval": 20,
-      "metric": "system.load.1",
-      "points": [[
-        0,
-        0.5
-      ]],
-      "tags": [
-        "environment:test"
-      ],
-      "type": "rate"
-    }
-  ]
-}
-' | curl -X POST --data-binary @- http://vminsert-host:8480/insert/0/datadog/api/v1/series
-```
+```console
+DD_DD_URL=http://victoriametrics:8428/datadog
+```

+</div>

+_Choose correct URL for VictoriaMetrics [here](https://docs.victoriametrics.com/url-examples.html#datadog)._

-The imported data can be read via [export API](https://docs.victoriametrics.com/url-examples.html#apiv1export):
-
-### Single-node VictoriaMetrics:
+To configure DataDog agent via [configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files)
+add the following line:

+<div class="with-copy" markdown="1">

-```console
-curl http://victoriametrics-host:8428/api/v1/export -d 'match[]=system.load.1'
-```
+```console
+dd_url: http://victoriametrics:8428/datadog
+```

+</div>

-### Cluster version of VictoriaMetrics:
+vmagent also can accept Datadog metrics format. Depending on where vmagent will forward data,
+pick [single-node or cluster URL](https://docs.victoriametrics.com/url-examples.html#datadog) formats.
+
+### Sending metrics to Datadog and VictoriaMetrics
+
+DataDog allows configuring [Dual Shipping](https://docs.datadoghq.com/agent/guide/dual-shipping/) for metrics
+sending via ENV variable `DD_ADDITIONAL_ENDPOINTS` or via configuration file `additional_endpoints`.
+
+<p align="center">
+  <img src="Single-server-VictoriaMetrics-sending_DD_metrics_to_VM_and_DD.png" width="800">
+</p>
+
+Run DataDog using the following ENV variable with VictoriaMetrics as additional metrics receiver:
+
+<div class="with-copy" markdown="1">

-```console
-curl http://vmselect-host:8481/select/0/prometheus/api/v1/export -d 'match[]=system.load.1'
-```
+```console
+DD_ADDITIONAL_ENDPOINTS='{\"http://victoriametrics:8428/datadog\"}'
+```

+</div>

-This command should return the following output if everything is OK:
+_Choose correct URL for VictoriaMetrics [here](https://docs.victoriametrics.com/url-examples.html#datadog)._

+To configure DataDog Dual Shipping via [configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files)
+add the following line:
+
+<div class="with-copy" markdown="1">

-```json
-{"metric":{"__name__":"system.load.1","environment":"test","host":"test.example.com"},"values":[0.5],"timestamps":[1632833641000]}
-```
+```console
+additional_endpoints: http://victoriametrics:8428/datadog
+```

+</div>

+### Send via cURL
+
+See how to send data to VictoriaMetrics via
+[DataDog "submit metrics"](https://docs.victoriametrics.com/url-examples.html#datadogapiv1series) from command line.
+
+The imported data can be read via [export API](https://docs.victoriametrics.com/url-examples.html#apiv1export).
+
+### Additional details
+
+VictoriaMetrics automatically sanitizes metric names for the data ingested via DataDog protocol
+according to [DataDog metric naming recommendations](https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics).
@@ -451,7 +437,7 @@ See [these docs](https://docs.victoriametrics.com/vmagent.html#adding-labels-to-

 ## How to send data from InfluxDB-compatible agents such as [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)

-Use `http://<victoriametric-addr>:8428` url instead of InfluxDB url in agents' configs.
+Use `http://<victoriametrics-addr>:8428` url instead of InfluxDB url in agents' configs.
 For instance, put the following lines into `Telegraf` config, so it sends data to VictoriaMetrics instead of InfluxDB:

 ```toml
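 # Hypothetical sketch of the Telegraf output the text above refers to; the
 # concrete lines are truncated in this diff view. It points Telegraf's
 # InfluxDB output plugin at VictoriaMetrics:
 [[outputs.influxdb]]
   urls = ["http://<victoriametrics-addr>:8428"]
 ```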
app/vmagent/README.md

@@ -1,8 +1,11 @@
 # vmagent

-`vmagent` is a tiny but mighty agent which helps you collect metrics from various sources
+`vmagent` is a tiny agent which helps you collect metrics from various sources,
+[relabel and filter the collected metrics](#relabeling)
 and store them in [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
-or any other Prometheus-compatible storage systems with Prometheus `remote_write` protocol support.
+or any other storage systems via Prometheus `remote_write` protocol.

+See [Quick Start](#quick-start) for details.
+
 <img alt="vmagent" src="vmagent.png">

@@ -16,27 +19,40 @@ additionally to [discovering Prometheus-compatible targets and scraping metrics

 ## Features

-* Can be used as a drop-in replacement for Prometheus for scraping targets such as [node_exporter](https://github.com/prometheus/node_exporter). See [Quick Start](#quick-start) for details.
-* Can read data from Kafka. See [these docs](#reading-metrics-from-kafka).
-* Can write data to Kafka. See [these docs](#writing-metrics-to-kafka).
+* Can be used as a drop-in replacement for Prometheus for discovering and scraping targets such as [node_exporter](https://github.com/prometheus/node_exporter).
+  Note that single-node VictoriaMetrics can also discover and scrape Prometheus-compatible targets in the same way as `vmagent` does -
+  see [these docs](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter).
 * Can add, remove and modify labels (aka tags) via Prometheus relabeling. Can filter data before sending it to remote storage. See [these docs](#relabeling) for details.
-* Accepts data via all the ingestion protocols supported by VictoriaMetrics - see [these docs](#how-to-push-data-to-vmagent).
-* Can replicate collected metrics simultaneously to multiple remote storage systems.
+* Can accept data via all the ingestion protocols supported by VictoriaMetrics - see [these docs](#how-to-push-data-to-vmagent).
+* Can replicate collected metrics simultaneously to multiple remote storage systems -
+  see [these docs](#replication-and-high-availability).
 * Works smoothly in environments with unstable connections to remote storage. If the remote storage is unavailable, the collected metrics
   are buffered at `-remoteWrite.tmpDataPath`. The buffered metrics are sent to remote storage as soon as the connection
   to the remote storage is repaired. The maximum disk usage for the buffer can be limited with `-remoteWrite.maxDiskUsagePerURL`.
-* Uses lower amounts of RAM, CPU, disk IO and network bandwidth compared with Prometheus.
+* Uses lower amounts of RAM, CPU, disk IO and network bandwidth than Prometheus.
 * Scrape targets can be spread among multiple `vmagent` instances when big number of targets must be scraped. See [these docs](#scraping-big-number-of-targets).
-* Can efficiently scrape targets that expose millions of time series such as [/federate endpoint in Prometheus](https://prometheus.io/docs/prometheus/latest/federation/). See [these docs](#stream-parsing-mode).
-* Can deal with [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues by limiting the number of unique time series at scrape time and before sending them to remote storage systems. See [these docs](#cardinality-limiter).
 * Can load scrape configs from multiple files. See [these docs](#loading-scrape-configs-from-multiple-files).
+* Can efficiently scrape targets that expose millions of time series such as [/federate endpoint in Prometheus](https://prometheus.io/docs/prometheus/latest/federation/).
+  See [these docs](#stream-parsing-mode).
+* Can deal with [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality)
+  and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues by limiting the number of unique time series at scrape time
+  and before sending them to remote storage systems. See [these docs](#cardinality-limiter).
+* Can write collected metrics to multiple tenants. See [these docs](#multitenancy).
+* Can read data from Kafka. See [these docs](#reading-metrics-from-kafka).
+* Can write data to Kafka. See [these docs](#writing-metrics-to-kafka).

 ## Quick Start

-Please download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) (`vmagent` is also available in [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags)), unpack it and pass the following flags to the `vmagent` binary in order to start scraping Prometheus-compatible targets:
+Please download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
+(`vmagent` is also available in [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags)),
+unpack it and pass the following flags to the `vmagent` binary in order to start scraping Prometheus-compatible targets:

-* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`). The path can point either to local file or to http url. `vmagent` doesn't support some sections of Prometheus config file, so you may need either to delete these sections or to run `vmagent` with `-promscrape.config.strictParse=false` command-line flag, so `vmagent` ignores unsupported sections. See [the list of unsupported sections](#unsupported-prometheus-config-sections).
-* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics, the `-remoteWrite.url` argument can be specified multiple times to replicate data concurrently to an arbitrary number of remote storage systems.
+* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`).
+  The path can point either to local file or to http url. `vmagent` doesn't support some sections of Prometheus config file,
+  so you may need either to delete these sections or to run `vmagent` with `-promscrape.config.strictParse=false` command-line flag.
+  In this case `vmagent` ignores unsupported sections. See [the list of unsupported sections](#unsupported-prometheus-config-sections).
+* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics, the `-remoteWrite.url` argument can be specified
+  multiple times to replicate data concurrently to an arbitrary number of remote storage systems. See [various use cases](#use-cases).

 Example command line:

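The command itself is cut off by the hunk boundary here. A minimal sketch combining the two flags described above (paths and URL are illustrative, not taken from the diff):

```console
/path/to/vmagent -promscrape.config=/path/to/prometheus.yml -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
```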
@@ -46,7 +62,12 @@ Example command line:

 See [how to scrape Prometheus-compatible targets](#how-to-collect-metrics-in-prometheus-format) for more details.

-If you don't need to scrape Prometheus-compatible targets, then the `-promscrape.config` option isn't needed. For example, the following command is sufficient for accepting data via [supported "push"-based protocols](#how-to-push-data-to-vmagent) and sending it to the provided `-remoteWrite.url`:
+If you use single-node VictoriaMetrics, then you can discover and scrape Prometheus-compatible targets directly from VictoriaMetrics
+without the need to use `vmagent` - see [these docs](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter).
+
+If you don't need to scrape Prometheus-compatible targets, then the `-promscrape.config` option isn't needed.
+For example, the following command is sufficient for accepting data via [supported push-based protocols](#how-to-push-data-to-vmagent)
+and sending it to the provided `-remoteWrite.url`:

 ```console
 /path/to/vmagent -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
@@ -58,7 +79,8 @@ Pass `-help` to `vmagent` in order to see [the full list of supported command-li

 ## How to push data to vmagent

-`vmagent` supports [the same set of push-based data ingestion protocols as VictoriaMetrics does](https://docs.victoriametrics.com/#how-to-import-time-series-data) additionally to pull-based Prometheus-compatible targets' scraping:
+`vmagent` supports [the same set of push-based data ingestion protocols as VictoriaMetrics does](https://docs.victoriametrics.com/#how-to-import-time-series-data)
+additionally to pull-based Prometheus-compatible targets' scraping:

 * DataDog "submit metrics" API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent).
 * InfluxDB line protocol via `http://<vmagent>:8429/write`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf).
@@ -73,10 +95,10 @@ Pass `-help` to `vmagent` in order to see [the full list of supported command-li
 ## Configuration update

 `vmagent` should be restarted in order to update config options set via command-line args.
-`vmagent` supports multiple approaches for reloading configs from updated config files such as `-promscrape.config`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`:
+`vmagent` supports multiple approaches for reloading configs from updated config files such as
+`-promscrape.config`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`:

-* Sending `SUGHUP` signal to `vmagent` process:
+* Sending `SIGHUP` signal to `vmagent` process:

   ```console
   kill -SIGHUP `pidof vmagent`
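   # The remaining reload approaches are truncated in this diff view; one of
   # them, assuming vmagent's documented HTTP reload endpoint, is:
   curl http://vmagent-host:8429/-/reload
   ```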
@@ -106,13 +128,16 @@ See [these docs](#how-to-collect-metrics-in-prometheus-format) for details.

 ### Flexible metrics relay

-`vmagent` can accept metrics in [various popular data ingestion protocols](#how-to-push-data-to-vmagent), apply [relabeling](#relabeling) to the accepted metrics (for example, change metric names/labels or drop unneeded metrics) and then forward the relabeled metrics to other remote storage systems, which support Prometheus `remote_write` protocol (including other `vmagent` instances).
+`vmagent` can accept metrics in [various popular data ingestion protocols](#how-to-push-data-to-vmagent), apply [relabeling](#relabeling)
+to the accepted metrics (for example, change metric names/labels or drop unneeded metrics) and then forward the relabeled metrics
+to other remote storage systems, which support Prometheus `remote_write` protocol (including other `vmagent` instances).

 ### Replication and high availability

 `vmagent` replicates the collected metrics among multiple remote storage instances configured via `-remoteWrite.url` args.
 If a single remote storage instance temporarily is out of service, then the collected data remains available in another remote storage instance.
-`vmagent` buffers the collected data in files at `-remoteWrite.tmpDataPath` until the remote storage becomes available again and then it sends the buffered data to the remote storage in order to prevent data gaps.
+`vmagent` buffers the collected data in files at `-remoteWrite.tmpDataPath` until the remote storage becomes available again
+and then it sends the buffered data to the remote storage in order to prevent data gaps.

 ### Relabeling and filtering

@@ -136,7 +161,11 @@ Also, Basic Auth can be enabled for the incoming `remote_write` requests with `-

 ### remote_write for clustered version

-While `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets, writes are always performed in Promethes remote_write protocol. Therefore for the [clustered version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html), `-remoteWrite.url` the command-line flag should be configured as `<schema>://<vminsert-host>:8480/insert/<accountID>/prometheus/api/v1/write` according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). There is also support for multitenant writes. See [these docs](#multitenancy).
+While `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets,
+writes are always performed in Prometheus remote_write protocol. Therefore for the [clustered version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html),
+the `-remoteWrite.url` command-line flag should be configured as `<schema>://<vminsert-host>:8480/insert/<accountID>/prometheus/api/v1/write`
+according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format).
+There is also support for multitenant writes. See [these docs](#multitenancy).

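For illustration, a hypothetical `vmagent` invocation following the URL format above, writing to tenant `42` of a cluster (hostname and accountID are made up):

```console
/path/to/vmagent -remoteWrite.url=https://vminsert-host:8480/insert/42/prometheus/api/v1/write
```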
 ## Multitenancy

@@ -144,12 +173,24 @@ By default `vmagent` collects the data without tenant identifiers and routes it

 [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) supports writing data to multiple tenants
 specified via special labels - see [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy-via-labels).
-This allows specifying tenant ids via [relabeling](#relabeling) and writing multitenant data to a single `-remoteWrite.url=http://<vminsert-addr>/insert/multitenant/api/v1/write`.
+This allows specifying tenant ids via [relabeling](#relabeling) and writing multitenant data
+to a single `-remoteWrite.url=http://<vminsert-addr>/insert/multitenant/prometheus/api/v1/write`.

-[Multitenancy](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy) support is enabled when `-remoteWrite.multitenantURL` command-line flag is set. In this case `vmagent` accepts multitenant data at `http://vmagent:8429/insert/<accountID>/...` in the same way as cluster version of VictoriaMetrics does according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format) and routes it to `<-remoteWrite.multitenantURL>/insert/<accountID>/prometheus/api/v1/write`. If multiple `-remoteWrite.multitenantURL` command-line options are set, then `vmagent` replicates the collected data across all the configured urls. This allows using a single `vmagent` instance in front of VictoriaMetrics clusters for processing the data from all the tenants.
+`vmagent` can accept data from the same multitenant endpoints as `vminsert` from [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html)
+does according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format) and route the accepted data
+to the corresponding [tenants](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy) in VictoriaMetrics cluster
+pointed by the `-remoteWrite.multitenantURL` command-line flag. For example, if `-remoteWrite.multitenantURL` is set to `http://vminsert-service`,
+then `vmagent` would accept multitenant data at `http://vmagent:8429/insert/<accountID>/...` endpoints in the same way
+as [VictoriaMetrics cluster does](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format) and route
+it to `http://vminsert-service/insert/<accountID>/prometheus/api/v1/write`.

-If `-remoteWrite.multitenantURL` command-line flag is set and `vmagent` is configured to scrape Prometheus-compatible targets (e.g. if `-promscrape.config` command-line flag is set)
-then `vmagent` reads tenantID from `__tenant_id__` label for the discovered targets and routes all the metrics from this target to the given `__tenant_id__`, e.g. to the url `<-remoteWrite.multitnenatURL>/insert/<__tenant_id__>/prometheus/api/v1/write`.
+If multiple `-remoteWrite.multitenantURL` command-line options are set, then `vmagent` replicates the collected data across all the configured urls.
+This allows using a single `vmagent` instance in front of multiple VictoriaMetrics clusters.
+
+If `-remoteWrite.multitenantURL` command-line flag is set and `vmagent` is configured to scrape Prometheus-compatible targets
+(e.g. if `-promscrape.config` command-line flag is set) then `vmagent` reads tenantID from `__tenant_id__` label
+for the discovered targets and routes all the metrics from this target to the given `__tenant_id__`,
+e.g. to the url `<-remoteWrite.multitenantURL>/insert/<__tenant_id__>/prometheus/api/v1/write`.

 For example, the following relabeling rule instructs sending metrics to tenantID defined in the `prometheus.io/tenant` annotation of Kubernetes pod deployment:
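The rule itself is truncated by the hunk boundary. A hypothetical sketch, assuming the standard Kubernetes service discovery meta-label for pod annotations:

```yaml
# Copy the pod annotation value into the special __tenant_id__ label
# (hypothetical sketch; the actual rule is not shown in this diff view):
- action: replace
  source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_tenant]
  target_label: __tenant_id__
```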
@@ -184,7 +225,8 @@ See [the list of supported service discovery types for Prometheus scrape targets

 `vmagent` supports the following additional options in [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) section:

-* `headers` - a list of HTTP headers to send to scrape target with each scrape request. This can be used when the scrape target needs custom authorization and authentication. For example:
+* `headers` - a list of HTTP headers to send to scrape target with each scrape request. This can be used when the scrape target
+  needs custom authorization and authentication. For example:

   ```yaml
   scrape_configs:
@@ -194,11 +236,14 @@ scrape_configs:
     - "My-Auth: TopSecret"
   ```

-* `disable_compression: true` for disabling response compression on a per-job basis. By default `vmagent` requests compressed responses from scrape targets for saving network bandwidth.
-* `disable_keepalive: true` for disabling [HTTP keep-alive connections](https://en.wikipedia.org/wiki/HTTP_persistent_connection) on a per-job basis. By default `vmagent` uses keep-alive connections to scrape targets for reducing overhead on connection re-establishing.
+* `disable_compression: true` for disabling response compression on a per-job basis. By default `vmagent` requests compressed responses
+  from scrape targets for saving network bandwidth.
+* `disable_keepalive: true` for disabling [HTTP keep-alive connections](https://en.wikipedia.org/wiki/HTTP_persistent_connection)
+  on a per-job basis. By default `vmagent` uses keep-alive connections to scrape targets for reducing overhead on connection re-establishing.
 * `series_limit: N` for limiting the number of unique time series a single scrape target can expose. See [these docs](#cardinality-limiter).
 * `stream_parse: true` for scraping targets in a streaming manner. This may be useful when targets export big number of metrics. See [these docs](#stream-parsing-mode).
-* `scrape_align_interval: duration` for aligning scrapes to the given interval instead of using random offset in the range `[0 ... scrape_interval]` for scraping each target. The random offset helps spreading scrapes evenly in time.
+* `scrape_align_interval: duration` for aligning scrapes to the given interval instead of using random offset
+  in the range `[0 ... scrape_interval]` for scraping each target. The random offset helps spreading scrapes evenly in time.
 * `scrape_offset: duration` for specifying the exact offset for scraping instead of using random offset in the range `[0 ... scrape_interval]`.
 * `relabel_debug: true` for enabling debug logging during relabeling of the discovered targets. See [these docs](#relabeling).
 * `metric_relabel_debug: true` for enabling debug logging during relabeling of the scraped metrics. See [these docs](#relabeling).
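A hypothetical `scrape_config` combining several of the options above (job name and target are illustrative):

```yaml
scrape_configs:
- job_name: big-exporter
  stream_parse: true         # parse scrape responses in a streaming manner
  disable_keepalive: true    # re-establish the connection for each scrape
  series_limit: 10000        # cap the number of unique time series per target
  scrape_align_interval: 1m  # align scrapes to one-minute boundaries
  static_configs:
  - targets: ["host123:9100"]
```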
@@ -208,7 +253,10 @@ See [scrape_configs docs](https://docs.victoriametrics.com/sd_configs.html#scrap

 ## Loading scrape configs from multiple files

-`vmagent` supports loading [scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file and from `https://config-server/scrape_config.yml` url:
+`vmagent` supports loading [scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) from multiple files specified
+in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent`
+loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file
+and from `https://config-server/scrape_config.yml` url:

 ```yml
 scrape_config_files:
@@ -217,7 +265,8 @@ scrape_config_files:
 - https://config-server/scrape_config.yml
 ```

-Every referred file can contain arbitrary number of [supported scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs). There is no need in specifying top-level `scrape_configs` section in these files. For example:
+Every referred file can contain arbitrary number of [supported scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
+There is no need in specifying top-level `scrape_configs` section in these files. For example:

 ```yml
 - job_name: foo
@@ -234,24 +283,34 @@ Every referred file can contain arbitrary number of [supported scrape configs](h

 `vmagent` doesn't support the following sections in Prometheus config file passed to `-promscrape.config` command-line flag:

-* [remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). This section is substituted with various `-remoteWrite*` command-line flags. See [the full list of flags](#advanced-usage). The `remote_write` section isn't supported in order to reduce possible confusion when `vmagent` is used for accepting incoming metrics via [supported push protocols](#how-to-push-data-to-vmagent). In this case the `-promscrape.config` file isn't needed.
-* `remote_read`. This section isn't supported at all, since `vmagent` doesn't provide Prometheus querying API. It is expected that the querying API is provided by the remote storage specified via `-remoteWrite.url` such as VictoriaMetrics. See [Prometheus querying API docs for VictoriaMetrics](https://docs.victoriametrics.com/#prometheus-querying-api-usage).
+* [remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). This section is substituted
+  with various `-remoteWrite*` command-line flags. See [the full list of flags](#advanced-usage). The `remote_write` section isn't supported
+  in order to reduce possible confusion when `vmagent` is used for accepting incoming metrics via [supported push protocols](#how-to-push-data-to-vmagent).
+  In this case the `-promscrape.config` file isn't needed.
+* `remote_read`. This section isn't supported at all, since `vmagent` doesn't provide Prometheus querying API.
+  It is expected that the querying API is provided by the remote storage specified via `-remoteWrite.url` such as VictoriaMetrics.
+  See [Prometheus querying API docs for VictoriaMetrics](https://docs.victoriametrics.com/#prometheus-querying-api-usage).
 * `rule_files` and `alerting`. These sections are supported by [vmalert](https://docs.victoriametrics.com/vmalert.html).

 The list of supported service discovery types is available [here](#how-to-collect-metrics-in-prometheus-format).

-Additionally `vmagent` doesn't support `refresh_interval` option at service discovery sections. This option is substituted with `-promscrape.*CheckInterval` command-line options, which are specific per each service discovery type. See [the full list of command-line flags for vmagent](#advanced-usage).
+Additionally `vmagent` doesn't support `refresh_interval` option at service discovery sections.
+This option is substituted with `-promscrape.*CheckInterval` command-line options, which are specific per each service discovery type.
+See [the full list of command-line flags for vmagent](#advanced-usage).

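To illustrate the substitution described above: a `remote_write` section like the following sketch is not read from `-promscrape.config`; the equivalent destination is passed via a flag instead (URL is illustrative):

```yaml
# Not supported inside -promscrape.config:
remote_write:
  - url: http://victoria-metrics:8428/api/v1/write
# Equivalent vmagent flag:
#   /path/to/vmagent -remoteWrite.url=http://victoria-metrics:8428/api/v1/write
```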
 ## Adding labels to metrics

 Extra labels can be added to metrics collected by `vmagent` via the following mechanisms:

-* The `global -> external_labels` section in `-promscrape.config` file. These labels are added only to metrics scraped from targets configured in the `-promscrape.config` file. They aren't added to metrics collected via other [data ingestion protocols](#how-to-push-data-to-vmagent).
-* The `-remoteWrite.label` command-line flag. These labels are added to all the collected metrics before sending them to `-remoteWrite.url`. For example, the following command starts `vmagent`, which adds `{datacenter="foobar"}` label to all the metrics pushed to all the configured remote storage systems (all the `-remoteWrite.url` flag values):
+* The `global -> external_labels` section in `-promscrape.config` file. These labels are added only to metrics scraped from targets configured
+  in the `-promscrape.config` file. They aren't added to metrics collected via other [data ingestion protocols](#how-to-push-data-to-vmagent).
+* The `-remoteWrite.label` command-line flag. These labels are added to all the collected metrics before sending them to `-remoteWrite.url`.
+  For example, the following command starts `vmagent`, which adds `{datacenter="foobar"}` label to all the metrics pushed
+  to all the configured remote storage systems (all the `-remoteWrite.url` flag values):

-```
-/path/to/vmagent -remoteWrite.label=datacenter=foobar ...
-```
+  ```
+  /path/to/vmagent -remoteWrite.label=datacenter=foobar ...
+  ```

 * Via relabeling. See [these docs](#relabeling).

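A minimal sketch of the `global -> external_labels` mechanism from the first list item (label name and value are illustrative):

```yaml
global:
  external_labels:
    datacenter: foobar
```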
@@ -260,59 +319,87 @@ Extra labels can be added to metrics collected by `vmagent` via the following me

 `vmagent` automatically generates the following metrics per each scrape of every [Prometheus-compatible target](#how-to-collect-metrics-in-prometheus-format):

-* `up` - this metric exposes `1` value on successful scrape and `0` value on unsuccessful scrape. This allows monitoring failing scrapes with the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html):
+* `up` - this metric exposes `1` value on successful scrape and `0` value on unsuccessful scrape. This allows monitoring
+  failing scrapes with the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html):

   ```metricsql
   up == 0
   ```

-* `scrape_duration_seconds` - the duration of the scrape for the given target. This allows monitoring slow scrapes. For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns scrapes, which take more than 1.5 seconds to complete:
+* `scrape_duration_seconds` - the duration of the scrape for the given target. This allows monitoring slow scrapes.
+  For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns scrapes,
+  which take more than 1.5 seconds to complete:

   ```metricsql
   scrape_duration_seconds > 1.5
   ```

-* `scrape_timeout_seconds` - the configured timeout for the current scrape target (aka `scrape_timeout`). This allows detecting targets with scrape durations close to the configured scrape timeout. For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns targets (identified by `instance` label), which take more than 80% of the configured `scrape_timeout` during scrapes:
+* `scrape_timeout_seconds` - the configured timeout for the current scrape target (aka `scrape_timeout`).
+  This allows detecting targets with scrape durations close to the configured scrape timeout.
+  For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns targets (identified by `instance` label),
+  which take more than 80% of the configured `scrape_timeout` during scrapes:

   ```metricsql
   scrape_duration_seconds / scrape_timeout_seconds > 0.8
   ```

-* `scrape_samples_scraped` - the number of samples (aka metrics) parsed per each scrape. This allows detecting targets, which expose too many metrics. For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns targets, which expose more than 10000 metrics:
+* `scrape_samples_scraped` - the number of samples (aka metrics) parsed per each scrape. This allows detecting targets,
+  which expose too many metrics. For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html)
+  returns targets, which expose more than 10000 metrics:

   ```metricsql
   scrape_samples_scraped > 10000
   ```

-* `scrape_samples_limit` - the configured limit on the number of metrics the given target can expose. The limit can be set via `sample_limit` option at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs). This metric is exposed only if the `sample_limit` is set. This allows detecting targets, which expose too many metrics compared to the configured `sample_limit`. For example, the following query returns targets (identified by `instance` label), which expose more than 80% metrics compared to the configed `sample_limit`:
+* `scrape_samples_limit` - the configured limit on the number of metrics the given target can expose.
+  The limit can be set via `sample_limit` option at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
+  This metric is exposed only if the `sample_limit` is set. This allows detecting targets,
+  which expose too many metrics compared to the configured `sample_limit`. For example, the following query
+  returns targets (identified by `instance` label), which expose more than 80% metrics compared to the configured `sample_limit`:

   ```metricsql
   scrape_samples_scraped / scrape_samples_limit > 0.8
   ```

-* `scrape_samples_post_metric_relabeling` - the number of samples (aka metrics) left after applying metric-level relabeling from `metric_relabel_configs` section (see [relabeling docs](#relabeling) for more details). This allows detecting targets with too many metrics after the relabeling. For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns targets with more than 10000 metrics after the relabeling:
+* `scrape_samples_post_metric_relabeling` - the number of samples (aka metrics) left after applying metric-level relabeling
+  from `metric_relabel_configs` section (see [relabeling docs](#relabeling) for more details).
+  This allows detecting targets with too many metrics after the relabeling.
+  For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns targets
+  with more than 10000 metrics after the relabeling:

   ```metricsql
   scrape_samples_post_metric_relabeling > 10000
   ```

-* `scrape_series_added` - **an approximate** number of new series the given target generates during the current scrape. This metric allows detecting targets (identified by `instance` label), which lead to [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns targets, which generate more than 1000 new series during the last hour:
+* `scrape_series_added` - **an approximate** number of new series the given target generates during the current scrape.
+  This metric allows detecting targets (identified by `instance` label),
+  which lead to [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+  For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns targets,
+  which generate more than 1000 new series during the last hour:

   ```metricsql
   sum_over_time(scrape_series_added[1h]) > 1000
   ```

-  `vmagent` sets `scrape_series_added` to zero when it runs with `-promscrape.noStaleMarkers` command-line option (e.g. when [staleness markers](#prometheus-staleness-markers) are disabled).
+  `vmagent` sets `scrape_series_added` to zero when it runs with `-promscrape.noStaleMarkers` command-line option
+  (e.g. when [staleness markers](#prometheus-staleness-markers) are disabled).

-* `scrape_series_limit` - the limit on the number of unique time series the given target can expose according to [these docs](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter). This metric is exposed only if the series limit is set.
+* `scrape_series_limit` - the limit on the number of unique time series the given target can expose according to [these docs](#cardinality-limiter).
+  This metric is exposed only if the series limit is set.

-* `scrape_series_current` - the number of unique series the given target exposed so far. This metric is exposed only if the series limit is set according to [these docs](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter). This metric allows alerting when the number of exposed series by the given target reaches the limit. For example, the following query would alert when the target exposes more than 90% of unique series compared to the configured limit.
+* `scrape_series_current` - the number of unique series the given target exposed so far.
+  This metric is exposed only if the series limit is set according to [these docs](#cardinality-limiter).
+  This metric allows alerting when the number of exposed series by the given target reaches the limit.
+  For example, the following query would alert when the target exposes more than 90% of unique series compared to the configured limit.

   ```metricsql
   scrape_series_current / scrape_series_limit > 0.9
   ```

-* `scrape_series_limit_samples_dropped` - exposes the number of dropped samples during the scrape because of the exceeded limit on the number of unique series. This metric is exposed only if the series limit is set according to [these docs](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter). This metric allows alerting when scraped samples are dropped because of the exceeded limit. For example, the following query alerts when at least a single sample is dropped because of the exceeded limit during the last hour:
+* `scrape_series_limit_samples_dropped` - exposes the number of dropped samples during the scrape because of the exceeded limit
+  on the number of unique series. This metric is exposed only if the series limit is set according to [these docs](#cardinality-limiter).
+  This metric allows alerting when scraped samples are dropped because of the exceeded limit.
+  For example, the following query alerts when at least a single sample is dropped because of the exceeded limit during the last hour:

   ```metricsql
   sum_over_time(scrape_series_limit_samples_dropped[1h]) > 0
   ```
@@ -321,14 +408,36 @@ Extra labels can be added to metrics collected by `vmagent` via the following me

 ## Relabeling

-VictoriaMetrics components support [Prometheus-compatible relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) with [additional enhancements](#relabeling-enhancements) at various stages of data processing. The relabeling can be defined in the following places processed by `vmagent`:
+VictoriaMetrics components support [Prometheus-compatible relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
+with [additional enhancements](#relabeling-enhancements). The relabeling can be defined in the following places processed by `vmagent`:

-* At the `scrape_config -> relabel_configs` section in `-promscrape.config` file. This relabeling is used for modifying labels in discovered targets and for dropping unneded targets. This relabeling can be debugged by passing `relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs target labels before and after the relabeling and then drops the logged target.
-* At the `scrape_config -> metric_relabel_configs` section in `-promscrape.config` file. This relabeling is used for modifying labels in scraped metrics and for dropping unneeded metrics. This relabeling can be debugged by passing `metric_relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs metrics before and after the relabeling and then drops the logged metrics.
-* At the `-remoteWrite.relabelConfig` file. This relabeling is used for modifying labels for all the collected metrics (inluding [metrics obtained via push-based protocols](#how-to-push-data-to-vmagent)) and for dropping unneeded metrics before sending them to all the configured `-remoteWrite.url` addresses. This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
-* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is used for modifying labels for metrics and for dropping unneeded metrics before sending them to a particular `-remoteWrite.url`. This relabeling can be debugged by passing `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.
-
-All the files with relabeling configs can contain special placeholders in the form `%{ENV_VAR}`, which are replaced by the corresponding environment variable values.
+* At the `scrape_config -> relabel_configs` section in `-promscrape.config` file.
+  This relabeling is used for modifying labels in discovered targets and for dropping unneeded targets.
+  See [relabeling cookbook](https://docs.victoriametrics.com/relabeling.html) for details.
+
+  This relabeling can be debugged by passing `relabel_debug: true` option to the corresponding `scrape_config` section.
+  In this case `vmagent` logs target labels before and after the relabeling and then drops the logged target.
+
+* At the `scrape_config -> metric_relabel_configs` section in `-promscrape.config` file.
+  This relabeling is used for modifying labels in scraped metrics and for dropping unneeded metrics.
+  See [relabeling cookbook](https://docs.victoriametrics.com/relabeling.html) for details.
+
+  This relabeling can be debugged by passing `metric_relabel_debug: true` option to the corresponding `scrape_config` section.
+  In this case `vmagent` logs metrics before and after the relabeling and then drops the logged metrics.
+
+* At the `-remoteWrite.relabelConfig` file. This relabeling is used for modifying labels for all the collected metrics
+  (including [metrics obtained via push-based protocols](#how-to-push-data-to-vmagent)) and for dropping unneeded metrics
+  before sending them to all the configured `-remoteWrite.url` addresses.
+  This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`.
+  In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
+
+* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is used for modifying labels for metrics
+  and for dropping unneeded metrics before sending them to a particular `-remoteWrite.url`.
+  This relabeling can be debugged by passing `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`.
+  In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.
+
+All the files with relabeling configs can contain special placeholders in the form `%{ENV_VAR}`,
+which are replaced by the corresponding environment variable values.

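A hypothetical launch combining the file-based relabeling flags described above (paths and URL are illustrative):

```console
/path/to/vmagent -remoteWrite.url=http://victoria-metrics:8428/api/v1/write \
  -remoteWrite.relabelConfig=/etc/vmagent/relabel.yml \
  -remoteWrite.relabelDebug
```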
 The following articles contain useful information about Prometheus relabeling:

@@ -345,7 +454,11 @@ The following articles contain useful information about Prometheus relabeling:

 ## Relabeling enhancements

-* The `replacement` option can refer arbitrary labels via {% raw %}`{{label_name}}`{% endraw %} placeholders. Such placeholders are substituted with the corresponding label value. For example, the following relabeling rule sets `instance-job` label value to `host123-foo` when applied to the metric with `{instance="host123",job="foo"}` labels:
+`vmagent` provides the following enhancements on top of Prometheus-compatible relabeling:
+
+* The `replacement` option can refer arbitrary labels via {% raw %}`{{label_name}}`{% endraw %} placeholders.
+  Such placeholders are substituted with the corresponding label value. For example, the following relabeling rule
+  sets `instance-job` label value to `host123-foo` when applied to the metric with `{instance="host123",job="foo"}` labels:

   {% raw %}
   ```yaml
@@ -354,11 +467,13 @@ The following articles contain useful information about Prometheus relabeling:
   ```
   {% endraw %}

-* An optional `if` filter can be used for conditional relabeling. The `if` filter may contain arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). For example, the following relabeling rule drops metrics, which don't match `foo{bar="baz"}` series selector, while leaving the rest of metrics:
+* An optional `if` filter can be used for conditional relabeling. The `if` filter may contain
+  arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors).
+  For example, the following relabeling rule drops metrics, which don't match `foo{bar="baz"}` series selector, while leaving the rest of metrics:

   ```yaml
-  - action: keep
-    if: 'foo{bar="baz"}'
+  - if: 'foo{bar="baz"}'
+    action: keep
   ```

 This is equivalent to the less clear Prometheus-compatible relabeling rule:
@@ -369,7 +484,8 @@ The following articles contain useful information about Prometheus relabeling:
     regex: 'foo;baz'
   ```

-* The `regex` value can be split into multiple lines for improved readability and maintainability. These lines are automatically joined with `|` char when parsed. For example, the following configs are equivalent:
+* The `regex` value can be split into multiple lines for improved readability and maintainability.
+  These lines are automatically joined with `|` char when parsed. For example, the following configs are equivalent:

   ```yaml
   - action: keep_metrics
@@ -384,9 +500,12 @@ The following articles contain useful information about Prometheus relabeling:
     - "foo_.+"
   ```

-* VictoriaMetrics provides the following additional relabeling actions on top of standard actions from the [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config):
+* VictoriaMetrics provides the following additional relabeling actions on top of standard actions
+  from the [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config):

-  * `replace_all` replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurrences of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`):
+  * `replace_all` replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement`
+    and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurrences
+    of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`):

     ```yaml
     - action: replace_all
@@ -396,7 +515,9 @@ The following articles contain useful information about Prometheus relabeling:
       replacement: "_"
     ```

-  * `labelmap_all` replaces all of the occurrences of `regex` in all the label names with the `replacement`. For example, the following relabeling config replaces all the occurrences of `-` char in all the label names with `_` char (e.g. `foo-bar-baz` label name is transformed into `foo_bar_baz`):
+  * `labelmap_all` replaces all of the occurrences of `regex` in all the label names with the `replacement`.
+    For example, the following relabeling config replaces all the occurrences of `-` char in all the label names
+    with `_` char (e.g. `foo-bar-baz` label name is transformed into `foo_bar_baz`):

     ```yaml
     - action: labelmap_all
@ -404,28 +525,35 @@ The following articles contain useful information about Prometheus relabeling:
      replacement: "_"
    ```

  * `keep_if_equal`: keeps the entry if all the label values from `source_labels` are equal,
    while dropping all the other entries. For example, the following relabeling config keeps targets
    if they contain equal values for `instance` and `host` labels, while dropping all the other targets:

    ```yaml
    - action: keep_if_equal
      source_labels: ["instance", "host"]
    ```

  * `drop_if_equal`: drops the entry if all the label values from `source_labels` are equal,
    while keeping all the other entries. For example, the following relabeling config drops targets
    if they contain equal values for `instance` and `host` labels, while keeping all the other targets:

    ```yaml
    - action: drop_if_equal
      source_labels: ["instance", "host"]
    ```

  * `keep_metrics`: keeps all the metrics with names matching the given `regex`,
    while dropping all the other metrics. For example, the following relabeling config keeps metrics
    with `foo` and `bar` names, while dropping all the other metrics:

    ```yaml
    - action: keep_metrics
      regex: "foo|bar"
    ```

  * `drop_metrics`: drops all the metrics with names matching the given `regex`, while keeping all the other metrics.
    For example, the following relabeling config drops metrics with `foo` and `bar` names, while leaving all the other metrics:

    ```yaml
    - action: drop_metrics
@ -475,17 +603,32 @@ Additionally, the `action: graphite` relabeling rules usually work much faster t

* If the scrape target becomes temporarily unavailable, then stale markers are sent for all the metrics scraped from this target.
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.

Prometheus staleness markers' tracking needs additional memory, since it must store the previous response body per each scrape target
in order to compare it to the current response body. The memory usage may be reduced by passing `-promscrape.noStaleMarkers`
command-line flag to `vmagent`. This disables staleness tracking. This also disables tracking the number of new time series
per each scrape with the auto-generated `scrape_series_added` metric. See [these docs](#automatically-generated-metrics) for details.

## Stream parsing mode

By default `vmagent` reads the full response body from scrape target into memory, then parses it, applies [relabeling](#relabeling)
and then pushes the resulting metrics to the configured `-remoteWrite.url`. This mode works well for the majority of cases
when the scrape target exposes a small number of metrics (e.g. less than 10 thousand). But this mode may take big amounts of memory
when the scrape target exposes a big number of metrics. In this case it is recommended to enable stream parsing mode.
When this mode is enabled, `vmagent` reads the response from the scrape target in chunks, then immediately processes every chunk
and pushes the processed metrics to remote storage. This allows saving memory when scraping targets that expose millions of metrics.

Stream parsing mode is automatically enabled for scrape targets returning response bodies with sizes bigger than
the `-promscrape.minResponseSizeForStreamParse` command-line flag value. Additionally,
stream parsing mode can be explicitly enabled in the following places:

* Via `-promscrape.streamParse` command-line flag. In this case all the scrape targets defined
  in the file pointed by `-promscrape.config` are scraped in stream parsing mode.
* Via `stream_parse: true` option at `scrape_configs` section. In this case all the scrape targets defined
  in this section are scraped in stream parsing mode.
* Via `__stream_parse__=true` label, which can be set via [relabeling](#relabeling) at `relabel_configs` section.
  In this case stream parsing mode is enabled for the corresponding scrape targets.
  Typical use case: to set the label via [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
  for targets exposing big number of metrics.

Examples:

@ -503,7 +646,8 @@ scrape_configs:
      'match[]': ['{__name__!=""}']
```

Note that `sample_limit` and `series_limit` [scrape_config options](https://docs.victoriametrics.com/sd_configs.html#scrape_configs)
cannot be used in stream parsing mode because the parsed data is pushed to remote storage as soon as it is parsed.
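
As a quick illustration of the `stream_parse: true` option described above, here is a minimal sketch of a scrape config (the job name and target address are illustrative):

```yaml
scrape_configs:
- job_name: big-exporter
  # Parse and push the scraped response in chunks instead of buffering it fully in memory.
  stream_parse: true
  static_configs:
  - targets: ["big-exporter-host:9100"]
```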

## Scraping big number of targets

@ -519,7 +663,8 @@ spread scrape targets among a cluster of two `vmagent` instances:
/path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=1 -promscrape.config=/path/to/config.yml ...
```

The `-promscrape.cluster.memberNum` can be set to a StatefulSet pod name when `vmagent` runs in Kubernetes.
The pod name must end with a number in the range `0 ... promscrape.cluster.membersCount-1`. For example, `-promscrape.cluster.memberNum=vmagent-0`.

By default each scrape target is scraped only by a single `vmagent` instance in the cluster. If there is a need for replicating scrape targets among multiple `vmagent` instances,
then `-promscrape.cluster.replicationFactor` command-line flag must be set to the desired number of replicas. For example, the following commands
@ -589,9 +734,14 @@ scrape_configs:

By default `vmagent` doesn't limit the number of time series each scrape target can expose. The limit can be enforced in the following places:

* Via `-promscrape.seriesLimitPerTarget` command-line option. This limit is applied individually
  to all the scrape targets defined in the file pointed by `-promscrape.config`.
* Via `series_limit` config option at `scrape_config` section. This limit is applied individually
  to all the scrape targets defined in the given `scrape_config`.
* Via `__series_limit__` label, which can be set with [relabeling](#relabeling) at `relabel_configs` section.
  This limit is applied to the corresponding scrape targets. Typical use case: to set the limit
  via [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) for targets,
  which may expose too high number of time series.

See also `sample_limit` option at [scrape_config section](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
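
For instance, a minimal sketch of the `series_limit` option in action (the job name, target address and limit value are illustrative):

```yaml
scrape_configs:
- job_name: my-app
  # Drop samples for new series once this target exposes more than 1000 unique series.
  series_limit: 1000
  static_configs:
  - targets: ["my-app-host:8080"]
```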
@ -611,12 +761,16 @@ These metrics allow building the following alerting rules:
- `sum_over_time(scrape_series_limit_samples_dropped[1h]) > 0` - alerts when some samples are dropped because the series limit on a particular target is reached.

By default `vmagent` doesn't limit the number of time series written to remote storage systems specified at `-remoteWrite.url`.
The limit can be enforced by setting the following command-line flags:

* `-remoteWrite.maxHourlySeries` - limits the number of unique time series `vmagent` can write to remote storage systems during the last hour.
  Useful for limiting the number of active time series.
* `-remoteWrite.maxDailySeries` - limits the number of unique time series `vmagent` can write to remote storage systems during the last day.
  Useful for limiting daily churn rate.

Both limits can be set simultaneously. If any of these limits is reached, then samples for new time series are dropped instead of sending
them to remote storage systems. A sample of dropped series is put in the log with `WARNING` level.
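
For example, a sketch of enforcing both limits at once (the URL and the limit values are illustrative):

```console
/path/to/vmagent -remoteWrite.url=http://localhost:8428/api/v1/write \
  -remoteWrite.maxHourlySeries=100000 \
  -remoteWrite.maxDailySeries=300000
```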

`vmagent` exposes the following metrics at `http://vmagent:8429/metrics` page (see [monitoring docs](#monitoring) for details):

@ -633,21 +787,25 @@ See also [cardinality explorer docs](https://docs.victoriametrics.com/#cardinali

## Monitoring

`vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page.
We recommend setting up regular scraping of this page either through `vmagent` itself or by Prometheus
so that the exported metrics may be analyzed later.

Use official [Grafana dashboard](https://grafana.com/grafana/dashboards/12683) for `vmagent` state overview.
Graphs on this dashboard contain useful hints - hover the `i` icon at the top left corner of each graph in order to read it.
If you have suggestions for improvements or have found a bug - please open an issue on GitHub or add a review to the dashboard.
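
A minimal sketch of scraping these metrics with `vmagent` itself (the host name is illustrative):

```yaml
scrape_configs:
- job_name: vmagent
  static_configs:
  - targets: ["vmagent-host:8429"]
```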

`vmagent` also exports the status for various targets at the following handlers:

* `http://vmagent-host:8429/targets`. This handler returns human-readable status for every active target.
  This page is easy to query from the command line with `wget`, `curl` or similar tools.
  It accepts optional `show_original_labels=1` query arg which shows the original labels per each target before applying the relabeling.
  This information may be useful for debugging target relabeling.
* `http://vmagent-host:8429/api/v1/targets`. This handler returns data compatible with [the corresponding page from Prometheus API](https://prometheus.io/docs/prometheus/latest/querying/api/#targets).
* `http://vmagent-host:8429/ready`. This handler returns http 200 status code when `vmagent` finishes its initialization for all service_discovery configs.
  It may be useful to perform `vmagent` rolling update without any scrape loss.

`vmagent` also exports the status for various targets at the following pages:

* `http://vmagent-host:8429/targets`. This page shows the current status for every active target.
* `http://vmagent-host:8429/service-discovery`. This page shows the list of discovered targets with the discovered `__meta_*` labels
  according to [these docs](https://docs.victoriametrics.com/sd_configs.html).
  This page may help when debugging target [relabeling](#relabeling).
* `http://vmagent-host:8429/api/v1/targets`. This handler returns JSON response
  compatible with [the corresponding page from Prometheus API](https://prometheus.io/docs/prometheus/latest/querying/api/#targets).
* `http://vmagent-host:8429/ready`. This handler returns http 200 status code when `vmagent` finishes
  its initialization for all the [service_discovery configs](https://docs.victoriametrics.com/sd_configs.html).
  It may be useful to perform `vmagent` rolling update without any scrape loss.
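
  For example, a readiness check sketch built on the `/ready` handler described above (the host name is illustrative):

  ```console
  # curl -f exits with a non-zero code until vmagent reports readiness.
  curl -f http://vmagent-host:8429/ready
  ```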
## Troubleshooting
@ -660,24 +818,40 @@ It may be useful to perform `vmagent` rolling update without any scrape loss.
* Disabling staleness tracking with `-promscrape.noStaleMarkers` option. See [these docs](#prometheus-staleness-markers).
* Enabling stream parsing mode if `vmagent` scrapes targets with millions of metrics per target. See [these docs](#stream-parsing-mode).
* Reducing the number of output queues with `-remoteWrite.queues` command-line option.
* Reducing the amounts of RAM vmagent can use for in-memory buffering with `-memory.allowedPercent` or `-memory.allowedBytes` command-line option.
  Another option is to reduce memory limits in Docker and/or Kubernetes if `vmagent` runs under these systems.
* Reducing the number of CPU cores vmagent can use by passing `GOMAXPROCS=N` environment variable to `vmagent`,
  where `N` is the desired limit on CPU cores. Another option is to reduce CPU limits in Docker or Kubernetes if `vmagent` runs under these systems.
* Passing `-promscrape.dropOriginalLabels` command-line option to `vmagent`, so it drops `"discoveredLabels"` and `"droppedTargets"`
  lists at `/api/v1/targets` page. This reduces memory usage when scraping big number of targets at the cost
  of reduced debuggability for improperly configured per-target relabeling.

* When `vmagent` scrapes many unreliable targets, it can flood the error log with scrape errors. These errors can be suppressed
  by passing `-promscrape.suppressScrapeErrors` command-line flag to `vmagent`. The most recent scrape error per each target can be observed at `http://vmagent-host:8429/targets`
  and `http://vmagent-host:8429/api/v1/targets`.

* The `/api/v1/targets` page could be useful for debugging the relabeling process for scrape targets.
  This page contains original labels for targets dropped during relabeling (see "droppedTargets" section in the page output). By default the `-promscrape.maxDroppedTargets` targets are shown here. If your setup drops more targets during relabeling, then increase `-promscrape.maxDroppedTargets` command-line flag value to see all the dropped targets. Note that tracking each dropped target requires up to 10Kb of RAM. Therefore big values for `-promscrape.maxDroppedTargets` may result in increased memory usage if a big number of scrape targets are dropped during relabeling.

* The `/service-discovery` page could be useful for debugging the relabeling process for scrape targets.
  This page contains original labels for targets dropped during relabeling.
  By default the `-promscrape.maxDroppedTargets` targets are shown here. If your setup drops more targets during relabeling,
  then increase `-promscrape.maxDroppedTargets` command-line flag value to see all the dropped targets.
  Note that tracking each dropped target requires up to 10Kb of RAM. Therefore big values for `-promscrape.maxDroppedTargets`
  may result in increased memory usage if a big number of scrape targets are dropped during relabeling.
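
  A quick sketch of inspecting the dropped targets from the command line, assuming the Prometheus-compatible response layout (the host name is illustrative; `jq` is optional):

  ```console
  # Targets dropped during relabeling are listed under "droppedTargets".
  curl -s http://vmagent-host:8429/api/v1/targets | jq '.data.droppedTargets'
  ```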

* We recommend you increase `-remoteWrite.queues` if `vmagent_remotewrite_pending_data_bytes` metric exported
  at `http://vmagent-host:8429/metrics` page grows constantly. It is also recommended increasing `-remoteWrite.maxBlockSize`
  and `-remoteWrite.maxRowsPerBlock` command-line options in this case. This can improve data ingestion performance
  to the configured remote storage systems at the cost of higher memory usage.
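
  A sketch of such tuning (the values are illustrative and should be derived from your own monitoring data):

  ```console
  /path/to/vmagent -remoteWrite.url=http://localhost:8428/api/v1/write \
    -remoteWrite.queues=16 \
    -remoteWrite.maxBlockSize=16MB \
    -remoteWrite.maxRowsPerBlock=20000
  ```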

* If you see gaps in the data pushed by `vmagent` to remote storage when `-remoteWrite.maxDiskUsagePerURL` is set,
  try increasing `-remoteWrite.queues`. Such gaps may appear because `vmagent` cannot keep up with sending the collected data to remote storage.
  Therefore it starts dropping the buffered data if the on-disk buffer size exceeds `-remoteWrite.maxDiskUsagePerURL`.

* `vmagent` drops data blocks if remote storage replies with `400 Bad Request` and `409 Conflict` HTTP responses.
  The number of dropped blocks can be monitored via `vmagent_remotewrite_packets_dropped_total` metric exported at [/metrics page](#monitoring).

* Use `-remoteWrite.queues=1` when `-remoteWrite.url` points to remote storage, which doesn't accept out-of-order samples (aka data backfilling).
  Such storage systems include Prometheus, Cortex and Thanos, which typically emit `out of order sample` errors.
  The best solution is to use remote storage with [backfilling support](https://docs.victoriametrics.com/#backfilling) such as VictoriaMetrics.

* `vmagent` buffers scraped data at the `-remoteWrite.tmpDataPath` directory until it is sent to `-remoteWrite.url`.
  The directory can grow large when remote storage is unavailable for extended periods of time and if `-remoteWrite.maxDiskUsagePerURL` isn't set.

@ -735,25 +909,33 @@ See also [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting
* [Reading metrics from Kafka](#reading-metrics-from-kafka)
* [Writing metrics to Kafka](#writing-metrics-to-kafka)

The enterprise version of vmagent is available for evaluation at [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page
in `vmutils-...-enterprise.tar.gz` archives and in [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.

### Reading metrics from Kafka

[Enterprise version](https://victoriametrics.com/products/enterprise/) of `vmagent` can read metrics in various formats from Kafka messages.
These formats can be configured with `-kafka.consumer.topic.defaultFormat` or `-kafka.consumer.topic.format` command-line options. The following formats are supported:

* `promremotewrite` - [Prometheus remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write).
  Messages in this format can be sent by vmagent - see [these docs](#writing-metrics-to-kafka).
* `influx` - [InfluxDB line protocol format](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/).
* `prometheus` - [Prometheus text exposition format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format)
  and [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/specification/OpenMetrics.md).
* `graphite` - [Graphite plaintext format](https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol).
* `jsonline` - [JSON line format](https://docs.victoriametrics.com/#how-to-import-data-in-json-line-format).

Every Kafka message may contain multiple lines in `influx`, `prometheus`, `graphite` and `jsonline` format delimited by `\n`.

`vmagent` consumes messages from Kafka topics specified by `-kafka.consumer.topic` command-line flag. Multiple topics can be specified
by passing multiple `-kafka.consumer.topic` command-line flags to `vmagent`.

`vmagent` consumes messages from Kafka brokers specified by `-kafka.consumer.topic.brokers` command-line flag.
Multiple brokers can be specified per each `-kafka.consumer.topic` by passing a list of brokers delimited by `;`.
For example, `-kafka.consumer.topic.brokers=host1:9092;host2:9092`.

The following command starts `vmagent`, which reads metrics in InfluxDB line protocol format from Kafka broker at `localhost:9092`
from the topic `metrics-by-telegraf` and sends them to remote storage at `http://localhost:8428/api/v1/write`:

```console
./bin/vmagent -remoteWrite.url=http://localhost:8428/api/v1/write \
@ -774,7 +956,9 @@ data_format = "influx"

#### Command-line flags for Kafka consumer

These command-line flags are available only in [enterprise](https://victoriametrics.com/products/enterprise/) version of `vmagent`,
which can be downloaded for evaluation from [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page
(see `vmutils-...-enterprise.tar.gz` archives) and from [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.

```
  -kafka.consumer.topic array
@ -807,9 +991,13 @@ These command-line flags are available only in [enterprise](https://victoriametr

### Writing metrics to Kafka

[Enterprise version](https://victoriametrics.com/products/enterprise/) of `vmagent` writes data to Kafka with `at-least-once`
semantics if `-remoteWrite.url` contains e.g. Kafka url. For example, if `vmagent` is started with `-remoteWrite.url=kafka://localhost:9092/?topic=prom-rw`,
then it would send Prometheus remote_write messages to Kafka bootstrap server at `localhost:9092` with the topic `prom-rw`.
These messages can be read later from Kafka by another `vmagent` - see [these docs](#reading-metrics-from-kafka) for details.
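
For example, a sketch of the command described above (the broker address and topic are illustrative):

```console
/path/to/vmagent -remoteWrite.url=kafka://localhost:9092/?topic=prom-rw
```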

Additional Kafka options can be passed as query params to `-remoteWrite.url`. For instance, `kafka://localhost:9092/?topic=prom-rw&client.id=my-favorite-id`
sets `client.id` Kafka option to `my-favorite-id`. The full list of Kafka options is available [here](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).

#### Kafka broker authorization and authentication

@ -829,7 +1017,9 @@ Two types of auth are supported:

## How to build from sources

We recommend using [official binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmagent` is located in the `vmutils-...` archives.

It may be needed to build `vmagent` from source code when developing or testing a new feature or bugfix.

### Development build

@ -899,6 +1089,7 @@ curl http://0.0.0.0:8429/debug/pprof/profile > cpu.pprof

The command for collecting CPU profile waits for 30 seconds before returning.

The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).

It is safe to share the collected profiles from a security point of view, since they do not contain sensitive information.

## Advanced usage

@ -1188,9 +1379,10 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
        Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 8388608)
  -remoteWrite.maxDailySeries int
        The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
  -remoteWrite.maxDiskUsagePerURL size
  -remoteWrite.maxDiskUsagePerURL array
        The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500MB. Disk usage is unlimited if the value is set to 0
        Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
        Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB.
        Supports array of values separated by comma or specified via multiple flags.
  -remoteWrite.maxHourlySeries int
        The maximum number of unique series vmagent can send to remote storage systems during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
  -remoteWrite.maxRowsPerBlock int
@ -223,7 +223,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        path = strings.TrimSuffix(path, "/")
    }
    switch path {
    case "/api/v1/write":
    case "/prometheus/api/v1/write", "/api/v1/write":
        prometheusWriteRequests.Inc()
        if err := promremotewrite.InsertHandler(nil, r); err != nil {
            prometheusWriteErrors.Inc()
@ -232,7 +232,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        }
        w.WriteHeader(http.StatusNoContent)
        return true
    case "/api/v1/import":
    case "/prometheus/api/v1/import", "/api/v1/import":
        vmimportRequests.Inc()
        if err := vmimport.InsertHandler(nil, r); err != nil {
            vmimportErrors.Inc()
@ -241,7 +241,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        }
        w.WriteHeader(http.StatusNoContent)
        return true
    case "/api/v1/import/csv":
    case "/prometheus/api/v1/import/csv", "/api/v1/import/csv":
        csvimportRequests.Inc()
        if err := csvimport.InsertHandler(nil, r); err != nil {
            csvimportErrors.Inc()
@ -250,7 +250,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        }
        w.WriteHeader(http.StatusNoContent)
        return true
    case "/api/v1/import/prometheus":
    case "/prometheus/api/v1/import/prometheus", "/api/v1/import/prometheus":
        prometheusimportRequests.Inc()
        if err := prometheusimport.InsertHandler(nil, r); err != nil {
            prometheusimportErrors.Inc()
@ -259,7 +259,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        }
        w.WriteHeader(http.StatusNoContent)
        return true
    case "/api/v1/import/native":
    case "/prometheus/api/v1/import/native", "/api/v1/import/native":
        nativeimportRequests.Inc()
        if err := native.InsertHandler(nil, r); err != nil {
            nativeimportErrors.Inc()
@ -268,7 +268,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        }
        w.WriteHeader(http.StatusNoContent)
        return true
    case "/write", "/api/v2/write":
    case "/influx/write", "/influx/api/v2/write", "/write", "/api/v2/write":
        influxWriteRequests.Inc()
        if err := influx.InsertHandlerForHTTP(nil, r); err != nil {
            influxWriteErrors.Inc()
@ -277,7 +277,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        }
        w.WriteHeader(http.StatusNoContent)
        return true
    case "/query":
    case "/influx/query", "/query":
        influxQueryRequests.Inc()
        influxutils.WriteDatabaseNames(w)
        return true
@ -316,15 +316,21 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        w.Header().Set("Content-Type", "application/json")
        fmt.Fprintf(w, `{}`)
        return true
    case "/targets":
    case "/prometheus/targets", "/targets":
        promscrapeTargetsRequests.Inc()
        promscrape.WriteHumanReadableTargetsStatus(w, r)
        return true
    case "/service-discovery":
    case "/prometheus/service-discovery", "/service-discovery":
        promscrapeServiceDiscoveryRequests.Inc()
        promscrape.WriteServiceDiscovery(w, r)
        return true
    case "/target_response":
    case "/prometheus/api/v1/targets", "/api/v1/targets":
        promscrapeAPIV1TargetsRequests.Inc()
        w.Header().Set("Content-Type", "application/json")
        state := r.FormValue("state")
        promscrape.WriteAPIV1Targets(w, state)
        return true
    case "/prometheus/target_response", "/target_response":
        promscrapeTargetResponseRequests.Inc()
        if err := promscrape.WriteTargetResponse(w, r); err != nil {
            promscrapeTargetResponseErrors.Inc()
@ -332,7 +338,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
            return true
        }
        return true
    case "/config":
    case "/prometheus/config", "/config":
        if *configAuthKey != "" && r.FormValue("authKey") != *configAuthKey {
            err := &httpserver.ErrorWithStatusCode{
                Err: fmt.Errorf("The provided authKey doesn't match -configAuthKey"),
@ -345,7 +351,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
        promscrape.WriteConfigData(w)
        return true
    case "/api/v1/status/config":
    case "/prometheus/api/v1/status/config", "/api/v1/status/config":
        // See https://prometheus.io/docs/prometheus/latest/querying/api/#config
        if *configAuthKey != "" && r.FormValue("authKey") != *configAuthKey {
            err := &httpserver.ErrorWithStatusCode{
@ -361,13 +367,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        promscrape.WriteConfigData(&bb)
        fmt.Fprintf(w, `{"status":"success","data":{"yaml":%q}}`, bb.B)
        return true
    case "/api/v1/targets":
        promscrapeAPIV1TargetsRequests.Inc()
        w.Header().Set("Content-Type", "application/json")
        state := r.FormValue("state")
        promscrape.WriteAPIV1Targets(w, state)
        return true
    case "/-/reload":
    case "/prometheus/-/reload", "/-/reload":
        promscrapeConfigReloadRequests.Inc()
        procutil.SelfSIGHUP()
        w.WriteHeader(http.StatusOK)
@ -24,46 +24,46 @@ var (
        "By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data "+
        "is sent after temporary unavailability of the remote storage")
    sendTimeout = flagutil.NewArrayDuration("remoteWrite.sendTimeout", "Timeout for sending a single block of data to the corresponding -remoteWrite.url")
    proxyURL = flagutil.NewArray("remoteWrite.proxyURL", "Optional proxy URL for writing data to the corresponding -remoteWrite.url. "+
    proxyURL = flagutil.NewArrayString("remoteWrite.proxyURL", "Optional proxy URL for writing data to the corresponding -remoteWrite.url. "+
        "Supported proxies: http, https, socks5. Example: -remoteWrite.proxyURL=socks5://proxy:1234")

    tlsInsecureSkipVerify = flagutil.NewArrayBool("remoteWrite.tlsInsecureSkipVerify", "Whether to skip tls verification when connecting to the corresponding -remoteWrite.url")
    tlsCertFile = flagutil.NewArray("remoteWrite.tlsCertFile", "Optional path to client-side TLS certificate file to use when connecting "+
    tlsCertFile = flagutil.NewArrayString("remoteWrite.tlsCertFile", "Optional path to client-side TLS certificate file to use when connecting "+
        "to the corresponding -remoteWrite.url")
    tlsKeyFile = flagutil.NewArray("remoteWrite.tlsKeyFile", "Optional path to client-side TLS certificate key to use when connecting to the corresponding -remoteWrite.url")
    tlsCAFile = flagutil.NewArray("remoteWrite.tlsCAFile", "Optional path to TLS CA file to use for verifying connections to the corresponding -remoteWrite.url. "+
    tlsKeyFile = flagutil.NewArrayString("remoteWrite.tlsKeyFile", "Optional path to client-side TLS certificate key to use when connecting to the corresponding -remoteWrite.url")
    tlsCAFile = flagutil.NewArrayString("remoteWrite.tlsCAFile", "Optional path to TLS CA file to use for verifying connections to the corresponding -remoteWrite.url. "+
        "By default system CA is used")
    tlsServerName = flagutil.NewArray("remoteWrite.tlsServerName", "Optional TLS server name to use for connections to the corresponding -remoteWrite.url. "+
    tlsServerName = flagutil.NewArrayString("remoteWrite.tlsServerName", "Optional TLS server name to use for connections to the corresponding -remoteWrite.url. "+
        "By default the server name from -remoteWrite.url is used")

    headers = flagutil.NewArray("remoteWrite.headers", "Optional HTTP headers to send with each request to the corresponding -remoteWrite.url. "+
    headers = flagutil.NewArrayString("remoteWrite.headers", "Optional HTTP headers to send with each request to the corresponding -remoteWrite.url. "+
        "For example, -remoteWrite.headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding -remoteWrite.url. "+
        "Multiple headers must be delimited by '^^': -remoteWrite.headers='header1:value1^^header2:value2'")

    basicAuthUsername = flagutil.NewArray("remoteWrite.basicAuth.username", "Optional basic auth username to use for the corresponding -remoteWrite.url")
    basicAuthPassword = flagutil.NewArray("remoteWrite.basicAuth.password", "Optional basic auth password to use for the corresponding -remoteWrite.url")
    basicAuthPasswordFile = flagutil.NewArray("remoteWrite.basicAuth.passwordFile", "Optional path to basic auth password to use for the corresponding -remoteWrite.url. "+
    basicAuthUsername = flagutil.NewArrayString("remoteWrite.basicAuth.username", "Optional basic auth username to use for the corresponding -remoteWrite.url")
    basicAuthPassword = flagutil.NewArrayString("remoteWrite.basicAuth.password", "Optional basic auth password to use for the corresponding -remoteWrite.url")
    basicAuthPasswordFile = flagutil.NewArrayString("remoteWrite.basicAuth.passwordFile", "Optional path to basic auth password to use for the corresponding -remoteWrite.url. "+
        "The file is re-read every second")
    bearerToken = flagutil.NewArray("remoteWrite.bearerToken", "Optional bearer auth token to use for the corresponding -remoteWrite.url")
    bearerTokenFile = flagutil.NewArray("remoteWrite.bearerTokenFile", "Optional path to bearer token file to use for the corresponding -remoteWrite.url. "+
    bearerToken = flagutil.NewArrayString("remoteWrite.bearerToken", "Optional bearer auth token to use for the corresponding -remoteWrite.url")
    bearerTokenFile = flagutil.NewArrayString("remoteWrite.bearerTokenFile", "Optional path to bearer token file to use for the corresponding -remoteWrite.url. "+
        "The token is re-read from the file every second")

    oauth2ClientID = flagutil.NewArray("remoteWrite.oauth2.clientID", "Optional OAuth2 clientID to use for the corresponding -remoteWrite.url")
    oauth2ClientSecret = flagutil.NewArray("remoteWrite.oauth2.clientSecret", "Optional OAuth2 clientSecret to use for the corresponding -remoteWrite.url")
    oauth2ClientSecretFile = flagutil.NewArray("remoteWrite.oauth2.clientSecretFile", "Optional OAuth2 clientSecretFile to use for the corresponding -remoteWrite.url")
    oauth2TokenURL = flagutil.NewArray("remoteWrite.oauth2.tokenUrl", "Optional OAuth2 tokenURL to use for the corresponding -remoteWrite.url")
    oauth2Scopes = flagutil.NewArray("remoteWrite.oauth2.scopes", "Optional OAuth2 scopes to use for the corresponding -remoteWrite.url. Scopes must be delimited by ';'")
    oauth2ClientID = flagutil.NewArrayString("remoteWrite.oauth2.clientID", "Optional OAuth2 clientID to use for the corresponding -remoteWrite.url")
    oauth2ClientSecret = flagutil.NewArrayString("remoteWrite.oauth2.clientSecret", "Optional OAuth2 clientSecret to use for the corresponding -remoteWrite.url")
    oauth2ClientSecretFile = flagutil.NewArrayString("remoteWrite.oauth2.clientSecretFile", "Optional OAuth2 clientSecretFile to use for the corresponding -remoteWrite.url")
    oauth2TokenURL = flagutil.NewArrayString("remoteWrite.oauth2.tokenUrl", "Optional OAuth2 tokenURL to use for the corresponding -remoteWrite.url")
    oauth2Scopes = flagutil.NewArrayString("remoteWrite.oauth2.scopes", "Optional OAuth2 scopes to use for the corresponding -remoteWrite.url. Scopes must be delimited by ';'")

    awsUseSigv4 = flagutil.NewArrayBool("remoteWrite.aws.useSigv4", "Enables SigV4 request signing for the corresponding -remoteWrite.url. "+
        "It is expected that other -remoteWrite.aws.* command-line flags are set if sigv4 request signing is enabled")
    awsEC2Endpoint = flagutil.NewArray("remoteWrite.aws.ec2Endpoint", "Optional AWS EC2 API endpoint to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsSTSEndpoint = flagutil.NewArray("remoteWrite.aws.stsEndpoint", "Optional AWS STS API endpoint to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsRegion = flagutil.NewArray("remoteWrite.aws.region", "Optional AWS region to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsRoleARN = flagutil.NewArray("remoteWrite.aws.roleARN", "Optional AWS roleARN to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsAccessKey = flagutil.NewArray("remoteWrite.aws.accessKey", "Optional AWS AccessKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsService = flagutil.NewArray("remoteWrite.aws.service", "Optional AWS Service to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set. "+
    awsEC2Endpoint = flagutil.NewArrayString("remoteWrite.aws.ec2Endpoint", "Optional AWS EC2 API endpoint to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsSTSEndpoint = flagutil.NewArrayString("remoteWrite.aws.stsEndpoint", "Optional AWS STS API endpoint to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsRegion = flagutil.NewArrayString("remoteWrite.aws.region", "Optional AWS region to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsRoleARN = flagutil.NewArrayString("remoteWrite.aws.roleARN", "Optional AWS roleARN to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsAccessKey = flagutil.NewArrayString("remoteWrite.aws.accessKey", "Optional AWS AccessKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsService = flagutil.NewArrayString("remoteWrite.aws.service", "Optional AWS Service to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set. "+
        "Defaults to \"aps\"")
    awsSecretKey = flagutil.NewArray("remoteWrite.aws.secretKey", "Optional AWS SecretKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
    awsSecretKey = flagutil.NewArrayString("remoteWrite.aws.secretKey", "Optional AWS SecretKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
)

type client struct {
@ -13,14 +13,14 @@ import (
)

var (
    unparsedLabelsGlobal = flagutil.NewArray("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. "+
    unparsedLabelsGlobal = flagutil.NewArrayString("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. "+
        "Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage")
    relabelConfigPathGlobal = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabel_config entries. "+
        "The path can point either to local file or to http url. These entries are applied to all the metrics "+
        "before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details")
    relabelDebugGlobal = flag.Bool("remoteWrite.relabelDebug", false, "Whether to log metrics before and after relabeling with -remoteWrite.relabelConfig. "+
        "If the -remoteWrite.relabelDebug is enabled, then the metrics aren't sent to remote storage. This is useful for debugging the relabeling configs")
    relabelConfigPaths = flagutil.NewArray("remoteWrite.urlRelabelConfig", "Optional path to relabel config for the corresponding -remoteWrite.url. "+
    relabelConfigPaths = flagutil.NewArrayString("remoteWrite.urlRelabelConfig", "Optional path to relabel config for the corresponding -remoteWrite.url. "+
        "The path can point either to local file or to http url")
    relabelDebug = flagutil.NewArrayBool("remoteWrite.urlRelabelDebug", "Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. "+
        "If the -remoteWrite.urlRelabelDebug is enabled, then the metrics aren't sent to the corresponding -remoteWrite.url. "+
@ -26,10 +26,10 @@ import (
)

var (
    remoteWriteURLs = flagutil.NewArray("remoteWrite.url", "Remote storage URL to write data to. It must support Prometheus remote_write API. "+
    remoteWriteURLs = flagutil.NewArrayString("remoteWrite.url", "Remote storage URL to write data to. It must support Prometheus remote_write API. "+
        "It is recommended using VictoriaMetrics as remote storage. Example url: http://<victoriametrics-host>:8428/api/v1/write . "+
        "Pass multiple -remoteWrite.url flags in order to replicate data to multiple remote storage systems. See also -remoteWrite.multitenantURL")
    remoteWriteMultitenantURLs = flagutil.NewArray("remoteWrite.multitenantURL", "Base path for multitenant remote storage URL to write data to. "+
    remoteWriteMultitenantURLs = flagutil.NewArrayString("remoteWrite.multitenantURL", "Base path for multitenant remote storage URL to write data to. "+
        "See https://docs.victoriametrics.com/vmagent.html#multitenancy for details. Example url: http://<vminsert>:8480 . "+
        "Pass multiple -remoteWrite.multitenantURL flags in order to replicate data to multiple remote storage systems. See also -remoteWrite.url")
    tmpDataPath = flag.String("remoteWrite.tmpDataPath", "vmagent-remotewrite-data", "Path to directory where temporary data for remote write component is stored. "+
@ -38,7 +38,7 @@ var (
        "isn't enough for sending high volume of collected data to remote storage. Default value is 2 * numberOfAvailableCPUs")
    showRemoteWriteURL = flag.Bool("remoteWrite.showURL", false, "Whether to show -remoteWrite.url in the exported metrics. "+
        "It is hidden by default, since it can contain sensitive info such as auth key")
    maxPendingBytesPerURL = flagutil.NewBytes("remoteWrite.maxDiskUsagePerURL", 0, "The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath "+
    maxPendingBytesPerURL = flagutil.NewArrayBytes("remoteWrite.maxDiskUsagePerURL", "The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath "+
        "for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. "+
        "Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500MB. "+
        "Disk usage is unlimited if the value is set to 0")
@ -436,7 +436,8 @@ func newRemoteWriteCtx(argIdx int, at *auth.Token, remoteWriteURL *url.URL, maxI
    pqURL.Fragment = ""
    h := xxhash.Sum64([]byte(pqURL.String()))
    queuePath := fmt.Sprintf("%s/persistent-queue/%d_%016X", *tmpDataPath, argIdx+1, h)
    fq := persistentqueue.MustOpenFastQueue(queuePath, sanitizedURL, maxInmemoryBlocks, maxPendingBytesPerURL.N)
    maxPendingBytes := maxPendingBytesPerURL.GetOptionalArgOrDefault(argIdx, 0)
    fq := persistentqueue.MustOpenFastQueue(queuePath, sanitizedURL, maxInmemoryBlocks, maxPendingBytes)
    _ = metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_pending_data_bytes{path=%q, url=%q}`, queuePath, sanitizedURL), func() float64 {
        return float64(fq.GetPendingBytes())
    })
@ -331,6 +331,10 @@ groups:
# Rules for accountID=456, projectID=789
```

The results of alerting and recording rules contain `vm_account_id` and `vm_project_id` labels
if `-clusterMode` is enabled. These labels can be used during [templating](https://docs.victoriametrics.com/vmalert.html#templating),
and help to identify to which account or project the triggered alert or produced recording belongs.

If `-clusterMode` is enabled, then `-datasource.url`, `-remoteRead.url` and `-remoteWrite.url` must
contain only the hostname without tenant id. For example: `-datasource.url=http://vmselect:8481`.
`vmalert` automatically adds the specified tenant to urls per each recording rule in this case.
@ -810,8 +814,7 @@ The shortlist of configuration flags is the following:
  -evaluationInterval duration
        How often to evaluate the rules (default 1m0s)
  -external.alert.source string
        External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service.
        Supports templating. For example, link to Grafana: 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.
        External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . For example, link to Grafana: -external.alert.source='explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]' . If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used
        If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.
  -external.label array
        Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
@@ -28,7 +28,7 @@ import (
)

var (
rulePath = flagutil.NewArray("rule", `Path to the file with alert rules.
rulePath = flagutil.NewArrayString("rule", `Path to the file with alert rules.
Supports patterns. Flag can be specified multiple times.
Examples:
-rule="/path/to/file". Path to a single file with alerting rules
@@ -36,7 +36,7 @@ Examples:
absolute path to all .yaml files in root.
Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.`)

ruleTemplatesPath = flagutil.NewArray("rule.templates", `Path or glob pattern to location with go template definitions
ruleTemplatesPath = flagutil.NewArrayString("rule.templates", `Path or glob pattern to location with go template definitions
for rules annotations templating. Flag can be specified multiple times.
Examples:
-rule.templates="/path/to/file". Path to a single file with go templates
@@ -59,10 +59,12 @@ absolute path to all .tpl files in root.`)
resendDelay = flag.Duration("rule.resendDelay", 0, "Minimum amount of time to wait before resending an alert to notifier")

externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier")
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service.
Supports templating. For example, link to Grafana: 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.
If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.`)
externalLabels = flagutil.NewArray("external.label", "Optional label in the form 'Name=value' to add to all generated recording rules and alerts. "+
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager `+
`for cases where you want to build a custom link to Grafana, Prometheus or any other service. `+
`Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . `+
`For example, link to Grafana: -external.alert.source='explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]' . `+
`If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used`)
externalLabels = flagutil.NewArrayString("external.label", "Optional label in the form 'Name=value' to add to all generated recording rules and alerts. "+
"Pass multiple -label flags in order to add multiple label sets.")

remoteReadLookBack = flag.Duration("remoteRead.lookback", time.Hour, "Lookback defines how far to look into past for alerts timeseries."+
@@ -264,7 +266,7 @@ func getAlertURLGenerator(externalURL *url.URL, externalAlertSource string, vali
"tpl": externalAlertSource,
}
return func(alert notifier.Alert) string {
templated, err := alert.ExecTemplate(nil, nil, m)
templated, err := alert.ExecTemplate(nil, alert.Labels, m)
if err != nil {
logger.Errorf("can not exec source template %s", err)
}
@@ -34,7 +34,7 @@ func TestGetExternalURL(t *testing.T) {
}

func TestGetAlertURLGenerator(t *testing.T) {
testAlert := notifier.Alert{GroupID: 42, ID: 2, Value: 4}
testAlert := notifier.Alert{GroupID: 42, ID: 2, Value: 4, Labels: map[string]string{"tenant": "baz"}}
u, _ := url.Parse("https://victoriametrics.com/path")
fn, err := getAlertURLGenerator(u, "", false)
if err != nil {
@@ -48,11 +48,11 @@ func TestGetAlertURLGenerator(t *testing.T) {
if err == nil {
t.Errorf("expected template validation error got nil")
}
fn, err = getAlertURLGenerator(u, "foo?query={{$value}}", true)
fn, err = getAlertURLGenerator(u, "foo?query={{$value}}&ds={{ $labels.tenant }}", true)
if err != nil {
t.Errorf("unexpected error %s", err)
}
if exp := "https://victoriametrics.com/path/foo?query=4"; exp != fn(testAlert) {
if exp := "https://victoriametrics.com/path/foo?query=4&ds=baz"; exp != fn(testAlert) {
t.Errorf("unexpected url want %s, got %s", exp, fn(testAlert))
}
}
@@ -17,32 +17,32 @@ var (
configPath = flag.String("notifier.config", "", "Path to configuration file for notifiers")
suppressDuplicateTargetErrors = flag.Bool("notifier.suppressDuplicateTargetErrors", false, "Whether to suppress 'duplicate target' errors during discovery")

addrs = flagutil.NewArray("notifier.url", "Prometheus alertmanager URL, e.g. http://127.0.0.1:9093")
addrs = flagutil.NewArrayString("notifier.url", "Prometheus alertmanager URL, e.g. http://127.0.0.1:9093")

basicAuthUsername = flagutil.NewArray("notifier.basicAuth.username", "Optional basic auth username for -notifier.url")
basicAuthPassword = flagutil.NewArray("notifier.basicAuth.password", "Optional basic auth password for -notifier.url")
basicAuthPasswordFile = flagutil.NewArray("notifier.basicAuth.passwordFile", "Optional path to basic auth password file for -notifier.url")
basicAuthUsername = flagutil.NewArrayString("notifier.basicAuth.username", "Optional basic auth username for -notifier.url")
basicAuthPassword = flagutil.NewArrayString("notifier.basicAuth.password", "Optional basic auth password for -notifier.url")
basicAuthPasswordFile = flagutil.NewArrayString("notifier.basicAuth.passwordFile", "Optional path to basic auth password file for -notifier.url")

bearerToken = flagutil.NewArray("notifier.bearerToken", "Optional bearer token for -notifier.url")
bearerTokenFile = flagutil.NewArray("notifier.bearerTokenFile", "Optional path to bearer token file for -notifier.url")
bearerToken = flagutil.NewArrayString("notifier.bearerToken", "Optional bearer token for -notifier.url")
bearerTokenFile = flagutil.NewArrayString("notifier.bearerTokenFile", "Optional path to bearer token file for -notifier.url")

tlsInsecureSkipVerify = flagutil.NewArrayBool("notifier.tlsInsecureSkipVerify", "Whether to skip tls verification when connecting to -notifier.url")
tlsCertFile = flagutil.NewArray("notifier.tlsCertFile", "Optional path to client-side TLS certificate file to use when connecting to -notifier.url")
tlsKeyFile = flagutil.NewArray("notifier.tlsKeyFile", "Optional path to client-side TLS certificate key to use when connecting to -notifier.url")
tlsCAFile = flagutil.NewArray("notifier.tlsCAFile", "Optional path to TLS CA file to use for verifying connections to -notifier.url. "+
tlsCertFile = flagutil.NewArrayString("notifier.tlsCertFile", "Optional path to client-side TLS certificate file to use when connecting to -notifier.url")
tlsKeyFile = flagutil.NewArrayString("notifier.tlsKeyFile", "Optional path to client-side TLS certificate key to use when connecting to -notifier.url")
tlsCAFile = flagutil.NewArrayString("notifier.tlsCAFile", "Optional path to TLS CA file to use for verifying connections to -notifier.url. "+
"By default system CA is used")
tlsServerName = flagutil.NewArray("notifier.tlsServerName", "Optional TLS server name to use for connections to -notifier.url. "+
tlsServerName = flagutil.NewArrayString("notifier.tlsServerName", "Optional TLS server name to use for connections to -notifier.url. "+
"By default the server name from -notifier.url is used")

oauth2ClientID = flagutil.NewArray("notifier.oauth2.clientID", "Optional OAuth2 clientID to use for -notifier.url. "+
oauth2ClientID = flagutil.NewArrayString("notifier.oauth2.clientID", "Optional OAuth2 clientID to use for -notifier.url. "+
"If multiple args are set, then they are applied independently for the corresponding -notifier.url")
oauth2ClientSecret = flagutil.NewArray("notifier.oauth2.clientSecret", "Optional OAuth2 clientSecret to use for -notifier.url. "+
oauth2ClientSecret = flagutil.NewArrayString("notifier.oauth2.clientSecret", "Optional OAuth2 clientSecret to use for -notifier.url. "+
"If multiple args are set, then they are applied independently for the corresponding -notifier.url")
oauth2ClientSecretFile = flagutil.NewArray("notifier.oauth2.clientSecretFile", "Optional OAuth2 clientSecretFile to use for -notifier.url. "+
oauth2ClientSecretFile = flagutil.NewArrayString("notifier.oauth2.clientSecretFile", "Optional OAuth2 clientSecretFile to use for -notifier.url. "+
"If multiple args are set, then they are applied independently for the corresponding -notifier.url")
oauth2TokenURL = flagutil.NewArray("notifier.oauth2.tokenUrl", "Optional OAuth2 tokenURL to use for -notifier.url. "+
oauth2TokenURL = flagutil.NewArrayString("notifier.oauth2.tokenUrl", "Optional OAuth2 tokenURL to use for -notifier.url. "+
"If multiple args are set, then they are applied independently for the corresponding -notifier.url")
oauth2Scopes = flagutil.NewArray("notifier.oauth2.scopes", "Optional OAuth2 scopes to use for -notifier.url. Scopes must be delimited by ';'. "+
oauth2Scopes = flagutil.NewArrayString("notifier.oauth2.scopes", "Optional OAuth2 scopes to use for -notifier.url. Scopes must be delimited by ';'. "+
"If multiple args are set, then they are applied independently for the corresponding -notifier.url")
)
@@ -168,7 +168,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
if strings.HasPrefix(r.URL.Path, "/api/v1/") {
	redirectURL = alert.APILink()
}
httpserver.RedirectPermanent(w, "/"+redirectURL)
httpserver.Redirect(w, "/"+redirectURL)
return true
}
}
@@ -39,10 +39,19 @@ func createTargetURL(ui *UserInfo, uOrig *url.URL) (*url.URL, []Header, error) {
u := *uOrig
// Prevent from attacks with using `..` in r.URL.Path
u.Path = path.Clean(u.Path)
if !strings.HasSuffix(u.Path, "/") && strings.HasSuffix(uOrig.Path, "/") {
	// The path.Clean() removes the trailing slash.
	// Return it back if needed.
	// This should fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1752
	u.Path += "/"
}
if !strings.HasPrefix(u.Path, "/") {
	u.Path = "/" + u.Path
}
u.Path = strings.TrimSuffix(u.Path, "/")
if u.Path == "/" {
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1554
	u.Path = ""
}
for _, e := range ui.URLMap {
	for _, sp := range e.SrcPaths {
		if sp.match(u.Path) {
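The restored trailing slash above is needed because Go's `path.Clean` strips it, and a proxied backend may route `/dir/` differently from `/dir`. A tiny runnable demonstration of that stdlib behavior:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// path.Clean drops a trailing slash (except for the root path),
	// in addition to resolving ".." and "." elements.
	fmt.Println(path.Clean("/foo/bar/"))    // /foo/bar
	fmt.Println(path.Clean("/foo/../bar/")) // /bar
	fmt.Println(path.Clean("/"))            // /
}
```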
@@ -6,6 +6,7 @@ Supported storage systems for backups:

* [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
* [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<bucket>/<path/to/backup>`
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
* Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents from storing the backup into the directory pointed by `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.

@@ -179,7 +180,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
-customS3Endpoint string
Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO). S3 is used if not set
-dst string
Where to put the backup on the remote storage. Example: gs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir
Where to put the backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup/dir
-dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded
-enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
@@ -30,7 +30,7 @@ var (
snapshotDeleteURL = flag.String("snapshot.deleteURL", "", "VictoriaMetrics delete snapshot url. Optional. Will be generated from -snapshot.createURL if not provided. "+
"All created snapshots will be automatically deleted. Example: http://victoriametrics:8428/snapshot/delete")
dst = flag.String("dst", "", "Where to put the backup on the remote storage. "+
"Example: gs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir\n"+
"Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup/dir\n"+
"-dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded")
origin = flag.String("origin", "", "Optional origin directory on the remote storage with old backup for server-side copying when performing full backup. This speeds up full backups")
concurrency = flag.Int("concurrency", 10, "The number of concurrent workers. Higher concurrency may reduce backup duration")
@@ -1,7 +1,6 @@
# vmrestore

`vmrestore` restores data from backups created by [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
VictoriaMetrics `v1.29.0` and newer versions must be used for working with the restored data.

Restore process can be interrupted at any time. It is automatically resumed from the interruption point
when restarting `vmrestore` with the same args.
@@ -10,19 +9,28 @@ when restarting `vmrestore` with the same args.

VictoriaMetrics must be stopped during the restore process.

```console
vmrestore -src=gs://<bucket>/<path/to/backup> -storageDataPath=<local/path/to/restore>
Run the following command to restore backup from the given `-src` into the given `-storageDataPath`:

```console
vmrestore -src=<storageType>://<path/to/backup> -storageDataPath=<local/path/to/restore>
```

* `<bucket>` is [GCS bucket](https://cloud.google.com/storage/docs/creating-buckets) name.
* `<path/to/backup>` is the path to backup made with [vmbackup](https://docs.victoriametrics.com/vmbackup.html) on GCS bucket.
* `<storageType>://<path/to/backup>` is the path to backup made with [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
  `vmrestore` can restore backups from the following storage types:
  * [GCS](https://cloud.google.com/storage/). Example: `-src=gs://<bucket>/<path/to/backup>`
  * [S3](https://aws.amazon.com/s3/). Example: `-src=s3://<bucket>/<path/to/backup>`
  * [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `-src=azblob://<bucket>/<path/to/backup>`
  * Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/)
    or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
  * Local filesystem. Example: `-src=fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents from storing the backup
    into the directory pointed by `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.
* `<local/path/to/restore>` is the path to folder where data will be restored. This folder must be passed
  to VictoriaMetrics in `-storageDataPath` command-line flag after the restore process is complete.

The original `-storageDataPath` directory may contain old files. They will be substituted by the files from backup,
i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/questions/476041/how-do-i-make-rsync-delete-files-that-have-been-deleted-from-the-source-folder).


## Troubleshooting

* If `vmrestore` eats all the network bandwidth, then set `-maxBytesPerSecond` to the desired value.
@@ -154,7 +162,7 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
-skipBackupCompleteCheck
Whether to skip checking for 'backup complete' file in -src. This may be useful for restoring from old backups, which were created without 'backup complete' file
-src string
Source path with backup on the remote storage. Example: gs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir
Source path with backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup
-storageDataPath string
Destination path where backup must be restored. VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. In this case the contents of -storageDataPath dir is synchronized with -src contents, i.e. it works like 'rsync --delete' (default "victoria-metrics-data")
-tls
@@ -20,7 +20,7 @@ import (
var (
httpListenAddr = flag.String("httpListenAddr", ":8421", "TCP address for exporting metrics at /metrics page")
src = flag.String("src", "", "Source path with backup on the remote storage. "+
"Example: gs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir")
"Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup")
storageDataPath = flag.String("storageDataPath", "victoria-metrics-data", "Destination path where backup must be restored. "+
"VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. In this case the contents of -storageDataPath dir "+
"is synchronized with -src contents, i.e. it works like 'rsync --delete'")
@@ -54,6 +54,9 @@ func (bw *Writer) reset() {

// Write writes p to bw.
func (bw *Writer) Write(p []byte) (int, error) {
if len(p) == 0 {
	return 0, nil
}
bw.lock.Lock()
defer bw.lock.Unlock()
if bw.err != nil {
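The new early return for zero-length writes presumably pairs with the `scalableWriter.flush()` introduced later in this commit, which writes every per-worker buffer, including ones that never accumulated data; skipping empty writes avoids acquiring the writer lock for nothing.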
@@ -168,7 +168,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
_ = r.ParseForm()
path = strings.TrimPrefix(path, "/")
newURL := path + "/?" + r.Form.Encode()
httpserver.RedirectPermanent(w, newURL)
httpserver.Redirect(w, newURL)
return true
}
if strings.HasPrefix(path, "/vmui/") {
@@ -217,7 +217,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
// vmalert is accessed via an incomplete url without the trailing `/`. Redirect to the complete url.
// Use a relative redirect, since the hostname and path prefix may be incorrect if VictoriaMetrics
// is hidden behind vmauth or similar proxy.
httpserver.RedirectPermanent(w, "vmalert/")
httpserver.Redirect(w, "vmalert/")
return true
}
if strings.HasPrefix(path, "/vmalert/") {
@@ -925,7 +925,8 @@ var ssPool sync.Pool
// Data processing is immediately stopped if f returns non-nil error.
// It is the responsibility of f to call b.UnmarshalData before reading timestamps and values from the block.
// It is the responsibility of f to filter blocks according to the given tr.
func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline searchutils.Deadline, f func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error) error {
func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline searchutils.Deadline,
	f func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange, workerID uint) error) error {
qt = qt.NewChild("export blocks: %s", sq)
defer qt.Done()
if deadline.Exceeded() {
@@ -960,10 +961,10 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
var wg sync.WaitGroup
wg.Add(gomaxprocs)
for i := 0; i < gomaxprocs; i++ {
go func() {
go func(workerID uint) {
	defer wg.Done()
	for xw := range workCh {
		if err := f(&xw.mn, &xw.b, tr); err != nil {
		if err := f(&xw.mn, &xw.b, tr, workerID); err != nil {
			errGlobalLock.Lock()
			if errGlobal != nil {
				errGlobal = err
@@ -974,7 +975,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
		xw.reset()
		exportWorkPool.Put(xw)
	}
}()
}(uint(i))
}

// Feed workers with work
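The `workerID` argument threaded through `ExportBlocks` above lets callbacks keep per-worker state (buffers, counters) without a mutex on the hot path. A self-contained sketch of the same pattern, for illustration only (not VictoriaMetrics code):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4
	counts := make([]int, workers) // one slot per worker, so no locking is needed

	work := make(chan int)
	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func(workerID uint) {
			defer wg.Done()
			for range work {
				counts[workerID]++ // safe: only this worker touches its slot
			}
		}(uint(i))
	}
	for n := 0; n < 100; n++ {
		work <- n
	}
	close(work)
	wg.Wait()

	total := 0
	for _, c := range counts {
		total += c
	}
	fmt.Println("processed:", total) // processed: 100
}
```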
@@ -99,10 +99,10 @@
"values":[
{% if len(xb.values) > 0 %}
{% code values := xb.values %}
{%f= values[0] %}
{%= convertValueToSpecialJSON(values[0]) %}
{% code values = values[1:] %}
{% for _, v := range values %}
,{% if math.IsNaN(v) %}null{% else %}{%f= v %}{% endif %}
,{%= convertValueToSpecialJSON(v) %}
{% endfor %}
{% endif %}
],
@@ -126,49 +126,24 @@
}
{% endfunc %}

{% func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) %}
{% func ExportPromAPIHeader() %}
{
{% code
lines := 0
bytesTotal := 0
%}
"status":"success",
"data":{
"resultType":"matrix",
"result":[
{% code bb, ok := <-resultsCh %}
{% if ok %}
{%z= bb.B %}
{% code
lines++
bytesTotal += len(bb.B)
quicktemplate.ReleaseByteBuffer(bb)
%}
{% for bb := range resultsCh %}
,{%z= bb.B %}
{% code
lines++
bytesTotal += len(bb.B)
quicktemplate.ReleaseByteBuffer(bb)
%}
{% endfor %}
{% endif %}
{% endfunc %}

{% func ExportPromAPIFooter(qt *querytracer.Tracer) %}
]
}
{% code
qt.Donef("export format=promapi: lines=%d, bytes=%d", lines, bytesTotal)
qt.Donef("export format=promapi")
%}
{%= dumpQueryTrace(qt) %}
}
{% endfunc %}

{% func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) %}
{% for bb := range resultsCh %}
{%z= bb.B %}
{% code quicktemplate.ReleaseByteBuffer(bb) %}
{% endfor %}
{% endfunc %}

{% func prometheusMetricName(mn *storage.MetricName) %}
{%z= mn.MetricGroup %}
{% if len(mn.Tags) > 0 %}
@@ -183,4 +158,19 @@
}
{% endif %}
{% endfunc %}

{% func convertValueToSpecialJSON(v float64) %}
{% if math.IsNaN(v) %}
null
{% elseif math.IsInf(v, 0) %}
{% if v > 0 %}
"Infinity"
{% else %}
"-Infinity"
{% endif %}
{% else %}
{%f= v %}
{% endif %}
{% endfunc %}

{% endstripspace %}
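The `convertValueToSpecialJSON` template above maps values that plain JSON cannot represent: `NaN` becomes `null` and infinities become quoted strings. A standalone Go sketch mirroring that mapping (the function name here is illustrative, not the generated code):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// specialJSONValue mirrors the template's mapping:
// NaN -> null, +Inf -> "Infinity", -Inf -> "-Infinity", finite -> plain number.
func specialJSONValue(v float64) string {
	switch {
	case math.IsNaN(v):
		return "null"
	case math.IsInf(v, 1):
		return `"Infinity"`
	case math.IsInf(v, -1):
		return `"-Infinity"`
	default:
		return strconv.FormatFloat(v, 'f', -1, 64)
	}
}

func main() {
	for _, v := range []float64{1.5, math.NaN(), math.Inf(1), math.Inf(-1)} {
		fmt.Println(specialJSONValue(v)) // 1.5, null, "Infinity", "-Infinity"
	}
}
```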
@@ -295,7 +295,7 @@ func StreamExportJSONLine(qw422016 *qt422016.Writer, xb *exportBlock) {
values := xb.values

//line app/vmselect/prometheus/export.qtpl:102
qw422016.N().F(values[0])
streamconvertValueToSpecialJSON(qw422016, values[0])
//line app/vmselect/prometheus/export.qtpl:103
values = values[1:]

@@ -304,15 +304,7 @@ func StreamExportJSONLine(qw422016 *qt422016.Writer, xb *exportBlock) {
//line app/vmselect/prometheus/export.qtpl:104
qw422016.N().S(`,`)
//line app/vmselect/prometheus/export.qtpl:105
if math.IsNaN(v) {
//line app/vmselect/prometheus/export.qtpl:105
qw422016.N().S(`null`)
//line app/vmselect/prometheus/export.qtpl:105
} else {
//line app/vmselect/prometheus/export.qtpl:105
qw422016.N().F(v)
//line app/vmselect/prometheus/export.qtpl:105
}
streamconvertValueToSpecialJSON(qw422016, v)
//line app/vmselect/prometheus/export.qtpl:106
}
//line app/vmselect/prometheus/export.qtpl:107

@@ -415,184 +407,195 @@ func ExportPromAPILine(xb *exportBlock) string {
}

//line app/vmselect/prometheus/export.qtpl:129
func StreamExportPromAPIResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
func StreamExportPromAPIHeader(qw422016 *qt422016.Writer) {
//line app/vmselect/prometheus/export.qtpl:129
qw422016.N().S(`{`)
//line app/vmselect/prometheus/export.qtpl:132
lines := 0
bytesTotal := 0
qw422016.N().S(`{"status":"success","data":{"resultType":"matrix","result":[`)
//line app/vmselect/prometheus/export.qtpl:135
}

//line app/vmselect/prometheus/export.qtpl:134
qw422016.N().S(`"status":"success","data":{"resultType":"matrix","result":[`)
//line app/vmselect/prometheus/export.qtpl:139
bb, ok := <-resultsCh
//line app/vmselect/prometheus/export.qtpl:135
func WriteExportPromAPIHeader(qq422016 qtio422016.Writer) {
//line app/vmselect/prometheus/export.qtpl:135
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:135
StreamExportPromAPIHeader(qw422016)
//line app/vmselect/prometheus/export.qtpl:135
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:135
}

//line app/vmselect/prometheus/export.qtpl:140
if ok {
//line app/vmselect/prometheus/export.qtpl:141
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/export.qtpl:143
lines++
bytesTotal += len(bb.B)
quicktemplate.ReleaseByteBuffer(bb)
//line app/vmselect/prometheus/export.qtpl:135
func ExportPromAPIHeader() string {
//line app/vmselect/prometheus/export.qtpl:135
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:135
WriteExportPromAPIHeader(qb422016)
//line app/vmselect/prometheus/export.qtpl:135
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:135
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:135
return qs422016
//line app/vmselect/prometheus/export.qtpl:135
}

//line app/vmselect/prometheus/export.qtpl:147
for bb := range resultsCh {
//line app/vmselect/prometheus/export.qtpl:147
qw422016.N().S(`,`)
//line app/vmselect/prometheus/export.qtpl:148
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/export.qtpl:150
lines++
bytesTotal += len(bb.B)
quicktemplate.ReleaseByteBuffer(bb)

//line app/vmselect/prometheus/export.qtpl:154
}
//line app/vmselect/prometheus/export.qtpl:155
}
//line app/vmselect/prometheus/export.qtpl:155
//line app/vmselect/prometheus/export.qtpl:137
func StreamExportPromAPIFooter(qw422016 *qt422016.Writer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/export.qtpl:137
qw422016.N().S(`]}`)
//line app/vmselect/prometheus/export.qtpl:159
qt.Donef("export format=promapi: lines=%d, bytes=%d", lines, bytesTotal)
//line app/vmselect/prometheus/export.qtpl:141
qt.Donef("export format=promapi")

//line app/vmselect/prometheus/export.qtpl:161
//line app/vmselect/prometheus/export.qtpl:143
streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/export.qtpl:161
//line app/vmselect/prometheus/export.qtpl:143
qw422016.N().S(`}`)
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:145
}

//line app/vmselect/prometheus/export.qtpl:163
func WriteExportPromAPIResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:145
func WriteExportPromAPIFooter(qq422016 qtio422016.Writer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/export.qtpl:145
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:163
StreamExportPromAPIResponse(qw422016, resultsCh, qt)
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:145
StreamExportPromAPIFooter(qw422016, qt)
//line app/vmselect/prometheus/export.qtpl:145
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:145
}

//line app/vmselect/prometheus/export.qtpl:163
func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) string {
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:145
func ExportPromAPIFooter(qt *querytracer.Tracer) string {
//line app/vmselect/prometheus/export.qtpl:145
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:163
WriteExportPromAPIResponse(qb422016, resultsCh, qt)
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:145
WriteExportPromAPIFooter(qb422016, qt)
//line app/vmselect/prometheus/export.qtpl:145
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:145
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:145
return qs422016
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:145
}

//line app/vmselect/prometheus/export.qtpl:165
func StreamExportStdResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/export.qtpl:166
for bb := range resultsCh {
//line app/vmselect/prometheus/export.qtpl:167
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/export.qtpl:168
quicktemplate.ReleaseByteBuffer(bb)

//line app/vmselect/prometheus/export.qtpl:169
}
//line app/vmselect/prometheus/export.qtpl:170
}

//line app/vmselect/prometheus/export.qtpl:170
func WriteExportStdResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/export.qtpl:170
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:170
StreamExportStdResponse(qw422016, resultsCh, qt)
//line app/vmselect/prometheus/export.qtpl:170
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:170
}

//line app/vmselect/prometheus/export.qtpl:170
func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) string {
//line app/vmselect/prometheus/export.qtpl:170
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:170
WriteExportStdResponse(qb422016, resultsCh, qt)
//line app/vmselect/prometheus/export.qtpl:170
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:170
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:170
return qs422016
//line app/vmselect/prometheus/export.qtpl:170
}

//line app/vmselect/prometheus/export.qtpl:172
//line app/vmselect/prometheus/export.qtpl:147
func streamprometheusMetricName(qw422016 *qt422016.Writer, mn *storage.MetricName) {
//line app/vmselect/prometheus/export.qtpl:173
//line app/vmselect/prometheus/export.qtpl:148
qw422016.N().Z(mn.MetricGroup)
//line app/vmselect/prometheus/export.qtpl:174
//line app/vmselect/prometheus/export.qtpl:149
if len(mn.Tags) > 0 {
//line app/vmselect/prometheus/export.qtpl:174
//line app/vmselect/prometheus/export.qtpl:149
qw422016.N().S(`{`)
//line app/vmselect/prometheus/export.qtpl:176
//line app/vmselect/prometheus/export.qtpl:151
tags := mn.Tags

//line app/vmselect/prometheus/export.qtpl:177
//line app/vmselect/prometheus/export.qtpl:152
qw422016.N().Z(tags[0].Key)
//line app/vmselect/prometheus/export.qtpl:177
//line app/vmselect/prometheus/export.qtpl:152
qw422016.N().S(`=`)
//line app/vmselect/prometheus/export.qtpl:177
//line app/vmselect/prometheus/export.qtpl:152
qw422016.N().QZ(tags[0].Value)
//line app/vmselect/prometheus/export.qtpl:178
//line app/vmselect/prometheus/export.qtpl:153
tags = tags[1:]

//line app/vmselect/prometheus/export.qtpl:179
//line app/vmselect/prometheus/export.qtpl:154
for i := range tags {
//line app/vmselect/prometheus/export.qtpl:180
//line app/vmselect/prometheus/export.qtpl:155
tag := &tags[i]

//line app/vmselect/prometheus/export.qtpl:180
//line app/vmselect/prometheus/export.qtpl:155
qw422016.N().S(`,`)
//line app/vmselect/prometheus/export.qtpl:181
//line app/vmselect/prometheus/export.qtpl:156
qw422016.N().Z(tag.Key)
//line app/vmselect/prometheus/export.qtpl:181
//line app/vmselect/prometheus/export.qtpl:156
qw422016.N().S(`=`)
//line app/vmselect/prometheus/export.qtpl:181
//line app/vmselect/prometheus/export.qtpl:156
qw422016.N().QZ(tag.Value)
//line app/vmselect/prometheus/export.qtpl:182
//line app/vmselect/prometheus/export.qtpl:157
}
//line app/vmselect/prometheus/export.qtpl:182
//line app/vmselect/prometheus/export.qtpl:157
qw422016.N().S(`}`)
//line app/vmselect/prometheus/export.qtpl:184
//line app/vmselect/prometheus/export.qtpl:159
}
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
}

//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
func writeprometheusMetricName(qq422016 qtio422016.Writer, mn *storage.MetricName) {
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
streamprometheusMetricName(qw422016, mn)
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
}

//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
func prometheusMetricName(mn *storage.MetricName) string {
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
writeprometheusMetricName(qb422016, mn)
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
return qs422016
//line app/vmselect/prometheus/export.qtpl:185
//line app/vmselect/prometheus/export.qtpl:160
}

//line app/vmselect/prometheus/export.qtpl:162
func streamconvertValueToSpecialJSON(qw422016 *qt422016.Writer, v float64) {
//line app/vmselect/prometheus/export.qtpl:163
if math.IsNaN(v) {
//line app/vmselect/prometheus/export.qtpl:163
qw422016.N().S(`null`)
//line app/vmselect/prometheus/export.qtpl:165
} else if math.IsInf(v, 0) {
//line app/vmselect/prometheus/export.qtpl:166
if v > 0 {
//line app/vmselect/prometheus/export.qtpl:166
qw422016.N().S(`"Infinity"`)
//line app/vmselect/prometheus/export.qtpl:168
} else {
//line app/vmselect/prometheus/export.qtpl:168
qw422016.N().S(`"-Infinity"`)
//line app/vmselect/prometheus/export.qtpl:170
}
//line app/vmselect/prometheus/export.qtpl:171
} else {
//line app/vmselect/prometheus/export.qtpl:172
qw422016.N().F(v)
//line app/vmselect/prometheus/export.qtpl:173
}
//line app/vmselect/prometheus/export.qtpl:174
}

//line app/vmselect/prometheus/export.qtpl:174
func writeconvertValueToSpecialJSON(qq422016 qtio422016.Writer, v float64) {
//line app/vmselect/prometheus/export.qtpl:174
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:174
streamconvertValueToSpecialJSON(qw422016, v)
//line app/vmselect/prometheus/export.qtpl:174
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:174
}

//line app/vmselect/prometheus/export.qtpl:174
func convertValueToSpecialJSON(v float64) string {
//line app/vmselect/prometheus/export.qtpl:174
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:174
writeconvertValueToSpecialJSON(qb422016, v)
//line app/vmselect/prometheus/export.qtpl:174
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:174
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:174
return qs422016
//line app/vmselect/prometheus/export.qtpl:174
}
@@ -1,4 +1,6 @@
{% import (
"math"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
) %}

@@ -7,10 +9,25 @@
// Federate writes rs in /federate format.
// See https://prometheus.io/docs/prometheus/latest/federation/
{% func Federate(rs *netstorage.Result) %}
{% if len(rs.Timestamps) == 0 || len(rs.Values) == 0 %}{% return %}{% endif %}
{% code
values := rs.Values
timestamps := rs.Timestamps
%}
{% if len(timestamps) == 0 || len(values) == 0 %}{% return %}{% endif %}
{% code
lastValue := values[len(values)-1]
%}
{% if math.IsNaN(lastValue) %}
{% comment %}
This is most likely a staleness marker.
Return nothing after the staleness marker.
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3185
{% endcomment %}
{% return %}
{% endif %}
{%= prometheusMetricName(&rs.MetricName) %}{% space %}
{%f= rs.Values[len(rs.Values)-1] %}{% space %}
{%dl= rs.Timestamps[len(rs.Timestamps)-1] %}{% newline %}
{%f= lastValue %}{% space %}
{%dl= timestamps[len(timestamps)-1] %}{% newline %}
{% endfunc %}

{% endstripspace %}
@@ -6,70 +6,85 @@ package prometheus

//line app/vmselect/prometheus/federate.qtpl:1
import (
"math"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
)

// Federate writes rs in /federate format.// See https://prometheus.io/docs/prometheus/latest/federation/

//line app/vmselect/prometheus/federate.qtpl:9
//line app/vmselect/prometheus/federate.qtpl:11
import (
qtio422016 "io"

qt422016 "github.com/valyala/quicktemplate"
)

//line app/vmselect/prometheus/federate.qtpl:9
//line app/vmselect/prometheus/federate.qtpl:11
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)

//line app/vmselect/prometheus/federate.qtpl:9
//line app/vmselect/prometheus/federate.qtpl:11
func StreamFederate(qw422016 *qt422016.Writer, rs *netstorage.Result) {
//line app/vmselect/prometheus/federate.qtpl:10
if len(rs.Timestamps) == 0 || len(rs.Values) == 0 {
//line app/vmselect/prometheus/federate.qtpl:10
//line app/vmselect/prometheus/federate.qtpl:13
values := rs.Values
timestamps := rs.Timestamps

//line app/vmselect/prometheus/federate.qtpl:16
if len(timestamps) == 0 || len(values) == 0 {
//line app/vmselect/prometheus/federate.qtpl:16
return
//line app/vmselect/prometheus/federate.qtpl:10
//line app/vmselect/prometheus/federate.qtpl:16
}
//line app/vmselect/prometheus/federate.qtpl:11
//line app/vmselect/prometheus/federate.qtpl:18
lastValue := values[len(values)-1]

//line app/vmselect/prometheus/federate.qtpl:20
if math.IsNaN(lastValue) {
//line app/vmselect/prometheus/federate.qtpl:26
return
//line app/vmselect/prometheus/federate.qtpl:27
}
//line app/vmselect/prometheus/federate.qtpl:28
streamprometheusMetricName(qw422016, &rs.MetricName)
//line app/vmselect/prometheus/federate.qtpl:11
//line app/vmselect/prometheus/federate.qtpl:28
qw422016.N().S(` `)
//line app/vmselect/prometheus/federate.qtpl:12
qw422016.N().F(rs.Values[len(rs.Values)-1])
//line app/vmselect/prometheus/federate.qtpl:12
//line app/vmselect/prometheus/federate.qtpl:29
qw422016.N().F(lastValue)
//line app/vmselect/prometheus/federate.qtpl:29
qw422016.N().S(` `)
//line app/vmselect/prometheus/federate.qtpl:13
qw422016.N().DL(rs.Timestamps[len(rs.Timestamps)-1])
//line app/vmselect/prometheus/federate.qtpl:13
//line app/vmselect/prometheus/federate.qtpl:30
qw422016.N().DL(timestamps[len(timestamps)-1])
//line app/vmselect/prometheus/federate.qtpl:30
qw422016.N().S(`
`)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
}

//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
func WriteFederate(qq422016 qtio422016.Writer, rs *netstorage.Result) {
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
StreamFederate(qw422016, rs)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
}

//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
func Federate(rs *netstorage.Result) string {
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
WriteFederate(qb422016, rs)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
return qs422016
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
}
@ -5,9 +5,11 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter"
|
||||
|
@ -16,7 +18,6 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/querystats"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
|
@ -25,7 +26,6 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
"github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -84,23 +84,19 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
|
|||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
bw := bufferedwriter.Get(w)
|
||||
defer bufferedwriter.Put(bw)
|
||||
sw := newScalableWriter(bw)
|
||||
err = rss.RunParallel(nil, func(rs *netstorage.Result, workerID uint) error {
|
||||
if err := bw.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
bb := sw.getBuffer(workerID)
|
||||
WriteFederate(bb, rs)
|
||||
_, err := bw.Write(bb.B)
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
return err
|
||||
return sw.maybeFlushBuffer(bb)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error during sending data to remote client: %w", err)
|
||||
}
|
||||
if err := bw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return sw.flush()
|
||||
}
|
||||
|
||||
var federateDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/federate"}`)
|
||||
|
@ -125,15 +121,14 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
|
|||
w.Header().Set("Content-Type", "text/csv; charset=utf-8")
|
||||
bw := bufferedwriter.Get(w)
|
||||
defer bufferedwriter.Put(bw)
|
||||
|
||||
resultsCh := make(chan *quicktemplate.ByteBuffer, cgroup.AvailableCPUs())
|
||||
writeCSVLine := func(xb *exportBlock) {
|
||||
sw := newScalableWriter(bw)
|
||||
writeCSVLine := func(xb *exportBlock, workerID uint) error {
|
||||
if len(xb.timestamps) == 0 {
|
||||
return
|
||||
return nil
|
||||
}
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
bb := sw.getBuffer(workerID)
|
||||
WriteExportCSVLine(bb, xb, fieldNames)
|
||||
resultsCh <- bb
|
||||
return sw.maybeFlushBuffer(bb)
|
||||
}
|
||||
doneCh := make(chan error, 1)
|
||||
if !reduceMemUsage {
|
||||
|
@ -150,17 +145,18 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
|
|||
xb.mn = &rs.MetricName
|
||||
xb.timestamps = rs.Timestamps
|
||||
xb.values = rs.Values
|
||||
writeCSVLine(xb)
|
||||
if err := writeCSVLine(xb, workerID); err != nil {
|
||||
return err
|
||||
}
|
||||
xb.reset()
|
||||
exportBlockPool.Put(xb)
|
||||
return nil
|
||||
})
|
||||
close(resultsCh)
|
||||
doneCh <- err
|
||||
}()
|
||||
} else {
|
||||
go func() {
|
||||
err := netstorage.ExportBlocks(nil, sq, cp.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
|
||||
err := netstorage.ExportBlocks(nil, sq, cp.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange, workerID uint) error {
|
||||
if err := bw.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -170,29 +166,21 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
|
|||
xb := exportBlockPool.Get().(*exportBlock)
|
||||
xb.mn = mn
|
||||
xb.timestamps, xb.values = b.AppendRowsWithTimeRangeFilter(xb.timestamps[:0], xb.values[:0], tr)
|
||||
writeCSVLine(xb)
|
||||
if err := writeCSVLine(xb, workerID); err != nil {
|
||||
return err
|
||||
}
|
||||
xb.reset()
|
||||
exportBlockPool.Put(xb)
|
||||
return nil
|
||||
})
|
||||
close(resultsCh)
|
||||
doneCh <- err
|
||||
}()
|
||||
}
|
||||
// Consume all the data from resultsCh.
|
||||
for bb := range resultsCh {
|
||||
// Do not check for error in bw.Write, since this error is checked inside netstorage.ExportBlocks above.
|
||||
_, _ = bw.Write(bb.B)
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
}
|
||||
if err := bw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
err = <-doneCh
|
||||
if err != nil {
|
||||
return fmt.Errorf("error during sending the exported csv data to remote client: %w", err)
|
||||
}
|
||||
return nil
|
||||
return sw.flush()
|
||||
}
|
||||
|
||||
var exportCSVDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export/csv"}`)
|
||||
|
@ -210,6 +198,7 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Req
|
|||
w.Header().Set("Content-Type", "VictoriaMetrics/native")
|
||||
bw := bufferedwriter.Get(w)
|
||||
defer bufferedwriter.Put(bw)
|
||||
sw := newScalableWriter(bw)
|
||||
|
||||
// Marshal tr
|
||||
trBuf := make([]byte, 0, 16)
|
||||
|
@ -218,13 +207,13 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Req
|
|||
_, _ = bw.Write(trBuf)
|
||||
|
||||
// Marshal native blocks.
|
||||
err = netstorage.ExportBlocks(nil, sq, cp.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
|
||||
err = netstorage.ExportBlocks(nil, sq, cp.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange, workerID uint) error {
|
||||
if err := bw.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
dstBuf := bbPool.Get()
|
||||
bb := sw.getBuffer(workerID)
|
||||
dst := bb.B
|
||||
tmpBuf := bbPool.Get()
|
||||
dst := dstBuf.B
|
||||
tmp := tmpBuf.B
|
||||
|
||||
// Marshal mn
|
||||
|
@ -240,19 +229,13 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Req
|
|||
tmpBuf.B = tmp
|
||||
bbPool.Put(tmpBuf)
|
||||
|
||||
_, err := bw.Write(dst)
|
||||
|
||||
dstBuf.B = dst
|
||||
bbPool.Put(dstBuf)
|
||||
return err
|
||||
bb.B = dst
|
||||
return sw.maybeFlushBuffer(bb)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error during sending native data to remote client: %w", err)
|
||||
}
|
||||
if err := bw.Flush(); err != nil {
|
||||
return fmt.Errorf("error during flushing native data to remote client: %w", err)
|
||||
}
|
||||
return nil
|
||||
return sw.flush()
|
||||
}
|
||||
|
||||
var exportNativeDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export/native"}`)
|
||||
|
@ -279,31 +262,48 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
|||
var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)
|
||||
|
||||
func exportHandler(qt *querytracer.Tracer, w http.ResponseWriter, cp *commonParams, format string, maxRowsPerLine int, reduceMemUsage bool) error {
|
||||
writeResponseFunc := WriteExportStdResponse
writeLineFunc := func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
    bb := quicktemplate.AcquireByteBuffer()
bw := bufferedwriter.Get(w)
defer bufferedwriter.Put(bw)
sw := newScalableWriter(bw)
writeLineFunc := func(xb *exportBlock, workerID uint) error {
    bb := sw.getBuffer(workerID)
    WriteExportJSONLine(bb, xb)
    resultsCh <- bb
    return sw.maybeFlushBuffer(bb)
}
contentType := "application/stream+json; charset=utf-8"
if format == "prometheus" {
    contentType = "text/plain; charset=utf-8"
    writeLineFunc = func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
        bb := quicktemplate.AcquireByteBuffer()
    writeLineFunc = func(xb *exportBlock, workerID uint) error {
        bb := sw.getBuffer(workerID)
        WriteExportPrometheusLine(bb, xb)
        resultsCh <- bb
        return sw.maybeFlushBuffer(bb)
    }
} else if format == "promapi" {
    writeResponseFunc = WriteExportPromAPIResponse
    writeLineFunc = func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
        bb := quicktemplate.AcquireByteBuffer()
    WriteExportPromAPIHeader(bw)
    firstLineOnce := uint32(0)
    firstLineSent := uint32(0)
    writeLineFunc = func(xb *exportBlock, workerID uint) error {
        bb := sw.getBuffer(workerID)
        if atomic.CompareAndSwapUint32(&firstLineOnce, 0, 1) {
            // Send the first line to sw.bw
            WriteExportPromAPILine(bb, xb)
            _, err := sw.bw.Write(bb.B)
            bb.Reset()
            atomic.StoreUint32(&firstLineSent, 1)
            return err
        }
        for atomic.LoadUint32(&firstLineSent) == 0 {
            // Busy wait until the first line is sent to sw.bw
            runtime.Gosched()
        }
        bb.B = append(bb.B, ',')
        WriteExportPromAPILine(bb, xb)
        resultsCh <- bb
        return sw.maybeFlushBuffer(bb)
    }
}
if maxRowsPerLine > 0 {
    writeLineFuncOrig := writeLineFunc
    writeLineFunc = func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
    writeLineFunc = func(xb *exportBlock, workerID uint) error {
        valuesOrig := xb.values
        timestampsOrig := xb.timestamps
        values := valuesOrig
@@ -324,19 +324,19 @@ func exportHandler(qt *querytracer.Tracer, w http.ResponseWriter, cp *commonPara
            }
            xb.values = valuesChunk
            xb.timestamps = timestampsChunk
            writeLineFuncOrig(xb, resultsCh)
            if err := writeLineFuncOrig(xb, workerID); err != nil {
                return err
            }
        }
        xb.values = valuesOrig
        xb.timestamps = timestampsOrig
        return nil
    }
}

sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxExportSeries)
w.Header().Set("Content-Type", contentType)
bw := bufferedwriter.Get(w)
defer bufferedwriter.Put(bw)

resultsCh := make(chan *quicktemplate.ByteBuffer, cgroup.AvailableCPUs())
doneCh := make(chan error, 1)
if !reduceMemUsage {
    rss, err := netstorage.ProcessSearchQuery(qt, sq, cp.deadline)
@@ -353,19 +353,20 @@ func exportHandler(qt *querytracer.Tracer, w http.ResponseWriter, cp *commonPara
            xb.mn = &rs.MetricName
            xb.timestamps = rs.Timestamps
            xb.values = rs.Values
            writeLineFunc(xb, resultsCh)
            if err := writeLineFunc(xb, workerID); err != nil {
                return err
            }
            xb.reset()
            exportBlockPool.Put(xb)
            return nil
        })
        qtChild.Done()
        close(resultsCh)
        doneCh <- err
    }()
} else {
    qtChild := qt.NewChild("background export format=%s", format)
    go func() {
        err := netstorage.ExportBlocks(qtChild, sq, cp.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
        err := netstorage.ExportBlocks(qtChild, sq, cp.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange, workerID uint) error {
            if err := bw.Error(); err != nil {
                return err
            }
@@ -376,26 +377,30 @@ func exportHandler(qt *querytracer.Tracer, w http.ResponseWriter, cp *commonPara
            xb.mn = mn
            xb.timestamps, xb.values = b.AppendRowsWithTimeRangeFilter(xb.timestamps[:0], xb.values[:0], tr)
            if len(xb.timestamps) > 0 {
                writeLineFunc(xb, resultsCh)
                if err := writeLineFunc(xb, workerID); err != nil {
                    return err
                }
            }
            xb.reset()
            exportBlockPool.Put(xb)
            return nil
        })
        qtChild.Done()
        close(resultsCh)
        doneCh <- err
    }()
}

// writeResponseFunc must consume all the data from resultsCh.
writeResponseFunc(bw, resultsCh, qt)
if err := bw.Flush(); err != nil {
    return err
}
err := <-doneCh
if err != nil {
    return fmt.Errorf("error during sending the data to remote client: %w", err)
    return fmt.Errorf("cannot send data to remote client: %w", err)
}
if err := sw.flush(); err != nil {
    return fmt.Errorf("cannot send data to remote client: %w", err)
}
if format == "promapi" {
    WriteExportPromAPIFooter(bw, qt)
}
if err := bw.Flush(); err != nil {
    return err
}
return nil
}
@@ -1123,3 +1128,41 @@ func getCommonParams(r *http.Request, startTime time.Time, requireNonEmptyMatch
    }
    return cp, nil
}

type scalableWriter struct {
    bw *bufferedwriter.Writer
    m sync.Map
}

func newScalableWriter(bw *bufferedwriter.Writer) *scalableWriter {
    return &scalableWriter{
        bw: bw,
    }
}

func (sw *scalableWriter) getBuffer(workerID uint) *bytesutil.ByteBuffer {
    v, ok := sw.m.Load(workerID)
    if !ok {
        v = &bytesutil.ByteBuffer{}
        sw.m.Store(workerID, v)
    }
    return v.(*bytesutil.ByteBuffer)
}

func (sw *scalableWriter) maybeFlushBuffer(bb *bytesutil.ByteBuffer) error {
    if len(bb.B) < 1024*1024 {
        return nil
    }
    _, err := sw.bw.Write(bb.B)
    bb.Reset()
    return err
}

func (sw *scalableWriter) flush() error {
    sw.m.Range(func(k, v interface{}) bool {
        bb := v.(*bytesutil.ByteBuffer)
        _, err := sw.bw.Write(bb.B)
        return err == nil
    })
    return sw.bw.Flush()
}
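
The hunks above replace the old `resultsCh`-based pipeline with per-worker buffers that are flushed through a shared writer once they grow large enough. Below is a minimal, self-contained sketch of that pattern for readers who want to run it in isolation; `bufferedwriter.Writer` and `bytesutil.ByteBuffer` are stood in for by a mutex-guarded `io.Writer` and `bytes.Buffer`, and the 1MiB flush threshold is shrunk to 1KiB, so this is an illustration of the technique rather than the actual VictoriaMetrics code.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"sync"
)

type scalableWriter struct {
	mu sync.Mutex // guards w; the real bufferedwriter.Writer synchronizes internally
	w  io.Writer
	m  sync.Map // workerID -> *bytes.Buffer
}

func (sw *scalableWriter) getBuffer(workerID uint) *bytes.Buffer {
	v, ok := sw.m.Load(workerID)
	if !ok {
		// Safe without extra locking: each workerID is used by a single goroutine.
		v = &bytes.Buffer{}
		sw.m.Store(workerID, v)
	}
	return v.(*bytes.Buffer)
}

func (sw *scalableWriter) maybeFlush(bb *bytes.Buffer) error {
	if bb.Len() < 1024 { // 1MiB in the real code; 1KiB keeps the sketch small
		return nil
	}
	sw.mu.Lock()
	defer sw.mu.Unlock()
	_, err := sw.w.Write(bb.Bytes())
	bb.Reset()
	return err
}

// flush drains all the per-worker buffers; call it after the workers stop.
func (sw *scalableWriter) flush() error {
	var firstErr error
	sw.m.Range(func(_, v interface{}) bool {
		bb := v.(*bytes.Buffer)
		if _, err := sw.w.Write(bb.Bytes()); err != nil && firstErr == nil {
			firstErr = err
		}
		return firstErr == nil
	})
	return firstErr
}

func main() {
	sw := &scalableWriter{w: os.Stdout}
	var wg sync.WaitGroup
	for workerID := uint(0); workerID < 4; workerID++ {
		wg.Add(1)
		go func(id uint) {
			defer wg.Done()
			bb := sw.getBuffer(id)
			fmt.Fprintf(bb, "line from worker %d\n", id)
			_ = sw.maybeFlush(bb)
		}(workerID)
	}
	wg.Wait()
	_ = sw.flush() // drain whatever the workers did not flush themselves
}
```

The per-worker buffers avoid channel contention on many-core machines, which is what the "improve performance scalability on systems with many CPU cores" changelog entry later in this commit refers to.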
@@ -65,8 +65,7 @@ var incrementalAggrFuncCallbacksMap = map[string]*incrementalAggrFuncCallbacks{
type incrementalAggrFuncContext struct {
    ae *metricsql.AggrFuncExpr

    mLock sync.Mutex
    m map[uint]map[string]*incrementalAggrContext
    m sync.Map

    callbacks *incrementalAggrFuncCallbacks
}
@@ -74,19 +73,20 @@ type incrementalAggrFuncContext struct {
func newIncrementalAggrFuncContext(ae *metricsql.AggrFuncExpr, callbacks *incrementalAggrFuncCallbacks) *incrementalAggrFuncContext {
    return &incrementalAggrFuncContext{
        ae: ae,
        m: make(map[uint]map[string]*incrementalAggrContext),
        callbacks: callbacks,
    }
}

func (iafc *incrementalAggrFuncContext) updateTimeseries(tsOrig *timeseries, workerID uint) {
    iafc.mLock.Lock()
    m := iafc.m[workerID]
    if m == nil {
        m = make(map[string]*incrementalAggrContext, 1)
        iafc.m[workerID] = m
    v, ok := iafc.m.Load(workerID)
    if !ok {
        // It is safe creating and storing m in iafc.m without locking,
        // since it is guaranteed that only a single goroutine can execute
        // code for the given workerID at a time.
        v = make(map[string]*incrementalAggrContext, 1)
        iafc.m.Store(workerID, v)
    }
    iafc.mLock.Unlock()
    m := v.(map[string]*incrementalAggrContext)

    ts := tsOrig
    keepOriginal := iafc.callbacks.keepOriginal
@@ -124,11 +124,10 @@ func (iafc *incrementalAggrFuncContext) updateTimeseries(tsOrig *timeseries, wor
}

func (iafc *incrementalAggrFuncContext) finalizeTimeseries() []*timeseries {
    // There is no need in iafc.mLock.Lock here, since finalizeTimeseries must be called
    // without concurrent goroutines touching iafc.
    mGlobal := make(map[string]*incrementalAggrContext)
    mergeAggrFunc := iafc.callbacks.mergeAggrFunc
    for _, m := range iafc.m {
    iafc.m.Range(func(k, v interface{}) bool {
        m := v.(map[string]*incrementalAggrContext)
        for k, iac := range m {
            iacGlobal := mGlobal[k]
            if iacGlobal == nil {
@@ -141,7 +140,8 @@ func (iafc *incrementalAggrFuncContext) finalizeTimeseries() []*timeseries {
            }
            mergeAggrFunc(iacGlobal, iac)
        }
    }
        return true
    })
    tss := make([]*timeseries, 0, len(mGlobal))
    finalizeAggrFunc := iafc.callbacks.finalizeAggrFunc
    for _, iac := range mGlobal {
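
The switch from a mutex-guarded `map[uint]map[string]*incrementalAggrContext` to a `sync.Map` works because each `workerID` is only ever touched by one goroutine at a time, as the new comment states. A runnable toy version of the same pattern (illustrative names, not the VictoriaMetrics types):

```go
package main

import (
	"fmt"
	"sync"
)

type perWorkerCounters struct {
	m sync.Map // workerID -> map[string]int
}

func (p *perWorkerCounters) inc(workerID uint, key string) {
	v, ok := p.m.Load(workerID)
	if !ok {
		// No lock needed: only the goroutine owning this workerID gets here.
		v = map[string]int{}
		p.m.Store(workerID, v)
	}
	v.(map[string]int)[key]++
}

// merge must be called after all the workers have stopped,
// mirroring the finalizeTimeseries contract above.
func (p *perWorkerCounters) merge() map[string]int {
	total := map[string]int{}
	p.m.Range(func(_, v interface{}) bool {
		for k, n := range v.(map[string]int) {
			total[k] += n
		}
		return true
	})
	return total
}

func main() {
	var p perWorkerCounters
	var wg sync.WaitGroup
	for id := uint(0); id < 4; id++ {
		wg.Add(1)
		go func(id uint) {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				p.inc(id, "requests")
			}
		}(id)
	}
	wg.Wait()
	fmt.Println(p.merge()["requests"]) // 4000
}
```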
@@ -5100,7 +5100,40 @@ func TestExecSuccess(t *testing.T) {
        resultExpected := []netstorage.Result{r}
        f(q, resultExpected)
    })
    t.Run(`quantiles_over_time`, func(t *testing.T) {
    t.Run(`quantiles_over_time(single_sample)`, func(t *testing.T) {
        t.Parallel()
        q := `sort_by_label(
            quantiles_over_time("phi", 0.5, 0.9,
                time()[100s:100s]
            ),
            "phi",
        )`
        r1 := netstorage.Result{
            MetricName: metricNameExpected,
            Values: []float64{1000, 1200, 1400, 1600, 1800, 2000},
            Timestamps: timestampsExpected,
        }
        r1.MetricName.Tags = []storage.Tag{
            {
                Key: []byte("phi"),
                Value: []byte("0.5"),
            },
        }
        r2 := netstorage.Result{
            MetricName: metricNameExpected,
            Values: []float64{1000, 1200, 1400, 1600, 1800, 2000},
            Timestamps: timestampsExpected,
        }
        r2.MetricName.Tags = []storage.Tag{
            {
                Key: []byte("phi"),
                Value: []byte("0.9"),
            },
        }
        resultExpected := []netstorage.Result{r1, r2}
        f(q, resultExpected)
    })
    t.Run(`quantiles_over_time(multiple_samples)`, func(t *testing.T) {
        t.Parallel()
        q := `sort_by_label(
            quantiles_over_time("phi", 0.5, 0.9,
@@ -167,6 +167,8 @@ var rollupFuncsCanAdjustWindow = map[string]bool{
    "timestamp": true,
}

// rollupFuncsRemoveCounterResets contains functions, which need to call removeCounterResets
// over input samples before calling the corresponding rollup functions.
var rollupFuncsRemoveCounterResets = map[string]bool{
    "increase": true,
    "increase_prometheus": true,
@@ -177,6 +179,36 @@ var rollupFuncsRemoveCounterResets = map[string]bool{
    "rollup_rate": true,
}

// rollupFuncsSamplesScannedPerCall contains functions, which scan lower number of samples
// than is passed to the rollup func.
//
// It is expected that the remaining rollupFuncs scan all the samples passed to them.
var rollupFuncsSamplesScannedPerCall = map[string]int{
    "absent_over_time": 1,
    "count_over_time": 1,
    "default_rollup": 1,
    "delta": 2,
    "delta_prometheus": 2,
    "deriv_fast": 2,
    "first_over_time": 1,
    "idelta": 2,
    "ideriv": 2,
    "increase": 2,
    "increase_prometheus": 2,
    "increase_pure": 2,
    "irate": 2,
    "lag": 1,
    "last_over_time": 1,
    "lifetime": 2,
    "present_over_time": 1,
    "rate": 2,
    "scrape_interval": 2,
    "tfirst_over_time": 1,
    "timestamp": 1,
    "timestamp_with_name": 1,
    "tlast_over_time": 1,
}

// These functions don't change physical meaning of input time series,
// so they don't drop metric name
var rollupFuncsKeepMetricName = map[string]bool{
@@ -248,14 +280,17 @@ func getRollupAggrFuncNames(expr metricsql.Expr) ([]string, error) {
    return aggrFuncNames, nil
}

func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, end, step int64, maxPointsPerSeries int, window, lookbackDelta int64, sharedTimestamps []int64) (
func getRollupConfigs(funcName string, rf rollupFunc, expr metricsql.Expr, start, end, step int64, maxPointsPerSeries int,
    window, lookbackDelta int64, sharedTimestamps []int64) (
    func(values []float64, timestamps []int64), []*rollupConfig, error) {
    preFunc := func(values []float64, timestamps []int64) {}
    if rollupFuncsRemoveCounterResets[name] {
    funcName = strings.ToLower(funcName)
    if rollupFuncsRemoveCounterResets[funcName] {
        preFunc = func(values []float64, timestamps []int64) {
            removeCounterResets(values)
        }
    }
    samplesScannedPerCall := rollupFuncsSamplesScannedPerCall[funcName]
    newRollupConfig := func(rf rollupFunc, tagValue string) *rollupConfig {
        return &rollupConfig{
            TagValue: tagValue,
@@ -267,10 +302,11 @@ func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, en

            MaxPointsPerSeries: maxPointsPerSeries,

            MayAdjustWindow: rollupFuncsCanAdjustWindow[name],
            LookbackDelta: lookbackDelta,
            Timestamps: sharedTimestamps,
            isDefaultRollup: name == "default_rollup",
            MayAdjustWindow: rollupFuncsCanAdjustWindow[funcName],
            LookbackDelta: lookbackDelta,
            Timestamps: sharedTimestamps,
            isDefaultRollup: funcName == "default_rollup",
            samplesScannedPerCall: samplesScannedPerCall,
        }
    }
    appendRollupConfigs := func(dst []*rollupConfig) []*rollupConfig {
@@ -280,7 +316,7 @@ func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, en
        return dst
    }
    var rcs []*rollupConfig
    switch name {
    switch funcName {
    case "rollup":
        rcs = appendRollupConfigs(rcs)
    case "rollup_rate", "rollup_deriv":
@@ -420,6 +456,11 @@ type rollupConfig struct {

    // Whether default_rollup is used.
    isDefaultRollup bool

    // The estimated number of samples scanned per Func call.
    //
    // If zero, then it is considered that Func scans all the samples passed to it.
    samplesScannedPerCall int
}

func (rc *rollupConfig) getTimestamps() []int64 {
@@ -562,7 +603,8 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
    ni := 0
    nj := 0
    f := rc.Func
    var samplesScanned uint64
    samplesScanned := uint64(len(values))
    samplesScannedPerCall := uint64(rc.samplesScannedPerCall)
    for _, tEnd := range rc.Timestamps {
        tStart := tEnd - window
        ni = seekFirstTimestampIdxAfter(timestamps[i:], tStart, ni)
@@ -594,7 +636,11 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
        rfa.currTimestamp = tEnd
        value := f(rfa)
        rfa.idx++
        samplesScanned += uint64(len(rfa.values))
        if samplesScannedPerCall > 0 {
            samplesScanned += samplesScannedPerCall
        } else {
            samplesScanned += uint64(len(rfa.values))
        }
        dstValues = append(dstValues, value)
    }
    putRollupFuncArg(rfa)
@@ -1122,11 +1168,7 @@ func newRollupQuantiles(args []interface{}) (rollupFunc, error) {
        // before calling rollup funcs.
        values := rfa.values
        if len(values) == 0 {
            return rfa.prevValue
        }
        if len(values) == 1 {
            // Fast path - only a single value.
            return values[0]
            return nan
        }
        qs := getFloat64s()
        qs.A = quantiles(qs.A[:0], phis, values)
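
The accounting change above can be summarized as: every sample is counted once up front (`samplesScanned := uint64(len(values))`), and then each output window charges either the fixed `samplesScannedPerCall` estimate or the full window length. A standalone sketch of that arithmetic (the helper and its parameters are illustrative, not part of the real `rollupConfig`):

```go
package main

import "fmt"

// estimateSamplesScanned mirrors the doInternal accounting: windows is the
// number of points on the output timestamp grid; valuesPerWindow stands in
// for len(rfa.values), assumed constant across windows for simplicity.
func estimateSamplesScanned(totalValues, windows, valuesPerWindow, perCall int) uint64 {
	scanned := uint64(totalValues) // every raw sample is read at least once
	for i := 0; i < windows; i++ {
		if perCall > 0 {
			scanned += uint64(perCall) // e.g. rate() is charged 2 samples per call
		} else {
			scanned += uint64(valuesPerWindow) // functions that scan the whole window
		}
	}
	return scanned
}

func main() {
	// A rate()-like function over 12 samples and 5 output points: 12 + 5*2 = 22.
	fmt.Println(estimateSamplesScanned(12, 5, 12, 2))
	// A full-window function over the same data: 12 + 5*12 = 72.
	fmt.Println(estimateSamplesScanned(12, 5, 12, 0))
}
```

This is why the test expectations below change from "zero"/"non-zero" assertions to exact counts: the scanned-samples figure is now deterministic for a given query shape.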
@@ -587,8 +587,8 @@ func TestRollupNoWindowNoPoints(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned != 0 {
        t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    if samplesScanned != 12 {
        t.Fatalf("expecting 12 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, nan, nan, nan, nan}
    timestampsExpected := []int64{0, 1, 2, 3, 4}
@@ -626,8 +626,8 @@ func TestRollupWindowNoPoints(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned != 0 {
        t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    if samplesScanned != 12 {
        t.Fatalf("expecting 12 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, nan, nan, nan, nan}
    timestampsExpected := []int64{0, 1, 2, 3, 4}
@@ -644,8 +644,8 @@ func TestRollupWindowNoPoints(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned != 0 {
        t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    if samplesScanned != 12 {
        t.Fatalf("expecting 12 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, nan, nan, nan}
    timestampsExpected := []int64{161, 171, 181, 191}
@@ -665,8 +665,8 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 15 {
        t.Fatalf("expecting 15 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 123, nan, 34, nan, 44}
    timestampsExpected := []int64{0, 5, 10, 15, 20, 25}
@@ -683,8 +683,8 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 16 {
        t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{44, 32, 34, nan}
    timestampsExpected := []int64{100, 120, 140, 160}
@@ -701,8 +701,8 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, nan, 123, 34, 32}
    timestampsExpected := []int64{-50, 0, 50, 100, 150}
@@ -722,8 +722,8 @@ func TestRollupWindowPartialPoints(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 16 {
        t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 123, 123, 34, 34}
    timestampsExpected := []int64{0, 5, 10, 15, 20}
@@ -740,8 +740,8 @@ func TestRollupWindowPartialPoints(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 16 {
        t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{44, 34, 34, nan}
    timestampsExpected := []int64{100, 120, 140, 160}
@@ -758,8 +758,8 @@ func TestRollupWindowPartialPoints(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 15 {
        t.Fatalf("expecting 15 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 54, 44, nan}
    timestampsExpected := []int64{0, 50, 100, 150}
@@ -779,8 +779,8 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 18 {
        t.Fatalf("expecting 18 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{99, nan, 44, nan, 32, 34, nan}
    timestampsExpected := []int64{80, 90, 100, 110, 120, 130, 140}
@@ -797,8 +797,8 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 18 {
        t.Fatalf("expecting 18 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{99, nan, 44, nan, 32, 34, nan}
    timestampsExpected := []int64{80, 90, 100, 110, 120, 130, 140}
@@ -815,8 +815,8 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 18 {
        t.Fatalf("expecting 18 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{99, nan, 44, nan, 32, 34, nan}
    timestampsExpected := []int64{80, 90, 100, 110, 120, 130, 140}
@@ -836,8 +836,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 123, 54, 44, 34}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -854,8 +854,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 4, 4, 3, 1}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -872,8 +872,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 21, 12, 32, 34}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -890,8 +890,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 123, 99, 44, 34}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -908,8 +908,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 222, 199, 110, 34}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -926,8 +926,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 21, -9, 22, 0}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -944,8 +944,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, -102, -42, -10, nan}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -962,8 +962,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{123, 33, -87, 0}
    timestampsExpected := []int64{10, 50, 90, 130}
@@ -980,8 +980,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 0.004, 0, 0, 0.03}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -998,8 +998,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 0.031, 0.044, 0.04, 0.01}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1016,8 +1016,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 47 {
        t.Fatalf("expecting 47 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 0.031, 0.075, 0.115, 0.125}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1034,8 +1034,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 0.010333333333333333, 0.011, 0.013333333333333334, 0.01}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1052,8 +1052,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 35 {
        t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 0.010333333333333333, 0.010714285714285714, 0.012, 0.0125}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1070,8 +1070,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 4, 4, 3, 0}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1088,8 +1088,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 3, 3, 2, 0}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1106,8 +1106,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 16 {
        t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 1, 1, 1, 1, 0}
    timestampsExpected := []int64{0, 9, 18, 27, 36, 45}
@@ -1124,8 +1124,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 2, 2, 1, 0}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1142,8 +1142,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 55.5, 49.75, 36.666666666666664, 34}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1160,8 +1160,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, -2879.310344827588, 127.87627310448904, -496.5831435079728, 0}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1178,8 +1178,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 14 {
        t.Fatalf("expecting 14 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, nan, nan, 0, -8900, 0}
    timestampsExpected := []int64{0, 4, 8, 12, 16, 20}
@@ -1196,8 +1196,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, -1916.6666666666665, -43500, 400, 0}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1214,8 +1214,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 39.81519810323691, 32.080952292598795, 5.2493385826745405, 0}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1232,8 +1232,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 2.148, 1.593, 1.156, 1.36}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1250,8 +1250,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 24 {
        t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 4, 4, 3, 1}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1268,8 +1268,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 35 {
        t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 4, 7, 6, 3}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1286,8 +1286,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 35 {
        t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 21, 34, 34, 34}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1304,8 +1304,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 35 {
        t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, 2775, 5262.5, 3862.5, 1800}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1322,8 +1322,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    }
    rc.Timestamps = rc.getTimestamps()
    values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 35 {
        t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{nan, -0.86650328627136, -1.1200838283548589, -0.40035755084856683, nan}
    timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -1348,8 +1348,8 @@ func TestRollupBigNumberOfValues(t *testing.T) {
        srcTimestamps[i] = int64(i / 2)
    }
    values, samplesScanned := rc.Do(nil, srcValues, srcTimestamps)
    if samplesScanned == 0 {
        t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
    if samplesScanned != 22002 {
        t.Fatalf("expecting 22002 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
    }
    valuesExpected := []float64{1, 4001, 8001, 9999, nan, nan}
    timestampsExpected := []int64{0, 2000, 4000, 6000, 8000, 10000}
@@ -46,7 +46,7 @@ const AdditionalSettings: FC = () => {
      control={<BasicSwitch checked={isTracingEnabled} onChange={onChangeQueryTracing} />}
    />
  </Box>
  <Box ml={2}>
  <Box ml={2} mr={2}>
    <StepConfigurator defaultStep={step} customStepEnable={customStep.enable}
      setStep={(value) => {
        graphDispatch({type: "SET_CUSTOM_STEP", payload: value});
@@ -4,17 +4,21 @@ import IconButton from "@mui/material/IconButton";
import Tooltip from "@mui/material/Tooltip";
import QueryEditor from "./QueryEditor";
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
import HighlightOffIcon from "@mui/icons-material/HighlightOff";
import AddCircleOutlineIcon from "@mui/icons-material/AddCircleOutline";
import PlayCircleOutlineIcon from "@mui/icons-material/PlayCircleOutline";
import DeleteIcon from "@mui/icons-material/Delete";
import AddIcon from "@mui/icons-material/Add";
import PlayArrowIcon from "@mui/icons-material/PlayArrow";
import AdditionalSettings from "./AdditionalSettings";
import {ErrorTypes} from "../../../../types";
import Button from "@mui/material/Button";
import Typography from "@mui/material/Typography";

export interface QueryConfiguratorProps {
  error?: ErrorTypes | string;
  queryOptions: string[]
}

export const MAX_QUERY_FIELDS = 4;

const QueryConfigurator: FC<QueryConfiguratorProps> = ({error, queryOptions}) => {

  const {query, queryHistory, queryControls: {autocomplete}} = useAppState();

@@ -65,31 +69,31 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({error, queryOptions}) =>
  return <Box boxShadow="rgba(99, 99, 99, 0.2) 0px 2px 8px 0px;" p={4} pb={2} m={-4} mb={2}>
    <Box>
      {stateQuery.map((q, i) =>
        <Box key={i} display="grid" gridTemplateColumns="1fr auto auto" gap="4px" width="100%"
          mb={i === stateQuery.length - 1 ? 0 : 2.5}>
        <Box key={i} display="grid" gridTemplateColumns="1fr auto" gap="4px" width="100%" position="relative"
          mb={i === stateQuery.length - 1 ? 0 : 2}>
          <QueryEditor
            query={stateQuery[i]} index={i} autocomplete={autocomplete} queryOptions={queryOptions}
            error={error} setHistoryIndex={setHistoryIndex} runQuery={onRunQuery} setQuery={onSetQuery}
            label={`Query ${i + 1}`}/>
          {i === 0 && <Tooltip title="Execute Query">
            <IconButton onClick={onRunQuery} sx={{height: "49px", width: "49px"}}>
              <PlayCircleOutlineIcon/>
            </IconButton>
          </Tooltip>}
          {stateQuery.length < 2 && <Tooltip title="Add Query">
            <IconButton onClick={onAddQuery} sx={{height: "49px", width: "49px"}}>
              <AddCircleOutlineIcon/>
            </IconButton>
          </Tooltip>}
          {i > 0 && <Tooltip title="Remove Query">
            <IconButton onClick={() => onRemoveQuery(i)} sx={{height: "49px", width: "49px"}}>
              <HighlightOffIcon/>
            label={`Query ${i + 1}`} size={"small"}/>
          {stateQuery.length > 1 && <Tooltip title="Remove Query">
            <IconButton onClick={() => onRemoveQuery(i)} sx={{height: "33px", width: "33px", padding: 0}} color={"error"}>
              <DeleteIcon fontSize={"small"}/>
            </IconButton>
          </Tooltip>}
        </Box>)}
    </Box>
    <Box mt={3}>
    <Box mt={3} display="grid" gridTemplateColumns="1fr auto" alignItems="center">
      <AdditionalSettings/>
      <Box>
        {stateQuery.length < MAX_QUERY_FIELDS && (
          <Button variant="outlined" onClick={onAddQuery} startIcon={<AddIcon/>} sx={{mr: 2}}>
            <Typography lineHeight={"20px"} fontWeight="500">Add Query</Typography>
          </Button>
        )}
        <Button variant="contained" onClick={onRunQuery} startIcon={<PlayArrowIcon/>}>
          <Typography lineHeight={"20px"} fontWeight="500">Execute Query</Typography>
        </Button>
      </Box>
    </Box>
  </Box>;
};
@@ -20,6 +20,7 @@ export interface QueryEditorProps {
  error?: ErrorTypes | string;
  queryOptions: string[];
  label: string;
  size?: "small" | "medium" | undefined;
}

const QueryEditor: FC<QueryEditorProps> = ({
@@ -32,6 +33,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
  error,
  queryOptions,
  label,
  size = "medium"
}) => {

  const [focusField, setFocusField] = useState(false);
@@ -112,6 +114,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
      onFocus={() => setFocusField(true)}
      onKeyDown={handleKeyDown}
      onChange={(e) => setQuery(e.target.value, index)}
      size={size}
    />
    <Popper open={openAutocomplete} anchorEl={autocompleteAnchorEl.current} placement="bottom-start" sx={{zIndex: 3}}>
      <ClickAwayListener onClickAway={() => setOpenAutocomplete(false)}>
@@ -66,7 +66,7 @@ const CustomPanel: FC = () => {
  <Box height="100%">
    {isLoading && <Spinner isLoading={isLoading} height={"500px"}/>}
    {<Box height={"100%"} bgcolor={"#fff"}>
      <Box display="grid" gridTemplateColumns="1fr auto" alignItems="center" mx={-4} px={4} mb={2}
      <Box display="grid" gridTemplateColumns="1fr auto" alignItems="center" mb={2}
        borderBottom={1} borderColor="divider">
        <DisplayTypeSwitch/>
        <Box display={"flex"}>
@@ -1,8 +1,7 @@
import React, {FC, useMemo, useState} from "preact/compat";
import {hexToRGB} from "../../utils/color";
import {LegendItem} from "../../utils/uplot/types";
import "./legend.css";
import {getDashLine} from "../../utils/uplot/helpers";
import {getLegendLabel} from "../../utils/uplot/helpers";
import Tooltip from "@mui/material/Tooltip";

export interface LegendProps {
@@ -28,31 +27,22 @@ const Legend: FC<LegendProps> = ({labels, query, onChange}) => {
  <div className="legendWrapper">
    {groups.map((group) => <div className="legendGroup" key={group}>
      <div className="legendGroupTitle">
        <svg className="legendGroupLine" width="33" height="3" version="1.1" xmlns="http://www.w3.org/2000/svg">
          <line strokeWidth="3" x1="0" y1="0" x2="33" y2="0" stroke="#363636"
            strokeDasharray={getDashLine(group).join(",")}
          />
        </svg>
        <span className="legendGroupQuery">Query {group}</span>
        <span>("{query[group - 1]}")</span>
      </div>
      <div>
        {labels.filter(l => l.group === group).map((legendItem: LegendItem) =>
          <div className={legendItem.checked ? "legendItem" : "legendItem legendItemHide"}
            key={`${legendItem.group}.${legendItem.label}`}
            key={legendItem.label}
            onClick={(e) => onChange(legendItem, e.ctrlKey || e.metaKey)}>
            <div className="legendMarker"
              style={{
                borderColor: legendItem.color,
                backgroundColor: `rgba(${hexToRGB(legendItem.color)}, 0.1)`
              }}/>
            <div className="legendMarker" style={{backgroundColor: legendItem.color}}/>
            <div className="legendLabel">
              {legendItem.label.replace(/{.+}/gmi, "")}
              {getLegendLabel(legendItem.label)}
              {!!Object.keys(legendItem.freeFormFields).length && <>
                 {
                {Object.keys(legendItem.freeFormFields).filter(f => f !== "__name__").map((f) => {
                  const freeField = `${f}="${legendItem.freeFormFields[f]}"`;
                  const fieldId = `${legendItem.group}.${legendItem.label}.${freeField}`;
                  const fieldId = `${legendItem.label}.${freeField}`;
                  return <Tooltip arrow key={f} open={copiedValue === fieldId} title={"Copied!"}>
                    <span className="legendFreeFields" onClick={(e) => {
                      e.stopPropagation();
@@ -53,8 +53,6 @@
.legendMarker {
  width: 12px;
  height: 12px;
  border-width: 2px;
  border-style: solid;
  box-sizing: border-box;
  transition: 0.2s ease;
}
@@ -130,7 +130,7 @@ const LineChart: FC<LineChartProps> = ({data, series, metrics = [],
  const options: uPlotOptions = {
    ...defaultOptions,
    series,
    axes: getAxes(series.length > 1 ? series : [{}, {scale: "1"}], unit),
    axes: getAxes([{}, {scale: "1"}], unit),
    scales: {...getScales()},
    width: layoutSize.width || 400,
    plugins: [{hooks: {ready: onReadyChart, setCursor, setSeries: seriesFocus}}],
@@ -1,6 +1,7 @@
import qs from "qs";
import get from "lodash.get";
import router from "../router";
import {MAX_QUERY_FIELDS} from "../components/CustomPanel/Configurator/Query/QueryConfigurator";

const graphStateToUrlParams = {
  "time.duration": "range_input",
@@ -105,7 +106,7 @@ export const getQueryStringValue = (

export const getQueryArray = (): string[] => {
  const queryLength = window.location.search.match(/g\d+.expr/gmi)?.length || 1;
  return new Array(queryLength).fill(1).map((q, i) => {
    return getQueryStringValue(`g${i}.expr`, "") as string;
  });
  return new Array(queryLength > MAX_QUERY_FIELDS ? MAX_QUERY_FIELDS : queryLength)
    .fill(1)
    .map((q, i) => getQueryStringValue(`g${i}.expr`, "") as string);
};
@@ -48,16 +48,15 @@ export const getMinMaxBuffer = (min: number | null, max: number | null): [number
  }
  const valueRange = Math.abs(max - min) || Math.abs(min) || 1;
  const padding = 0.02*valueRange;
  return [min - padding, max + padding];
  return [Math.floor(min - padding), Math.ceil(max + padding)];
};

export const getLimitsYAxis = (values: { [key: string]: number[] }): AxisRange => {
  const result: AxisRange = {};
  for (const key in values) {
    const numbers = values[key];
    const min = getMinFromArray(numbers);
    const max = getMaxFromArray(numbers);
    result[key] = getMinMaxBuffer(min, max);
  }
  const numbers = Object.values(values).flat();
  const key = "1";
  const min = getMinFromArray(numbers);
  const max = getMaxFromArray(numbers);
  result[key] = getMinMaxBuffer(min, max);
  return result;
};
@@ -63,6 +63,10 @@ export const sizeAxis = (u: uPlot, values: string[], axisIdx: number, cycleNum:
  return Math.ceil(axisSize);
};

export const getColorLine = (scale: number, label: string): string => getColorFromString(`${scale}${label}`);
export const getColorLine = (label: string): string => getColorFromString(label);

export const getDashLine = (group: number): number[] => group <= 1 ? [] : [group*4, group*1.2];

export const getLegendLabel = (label: string): string => {
  return label.replace(/^\[\d+]/, "").replace(/{.+}/gmi, "");
};
@@ -2,7 +2,7 @@ import {MetricResult} from "../../api/types";
import {Series} from "uplot";
import {getNameForMetric} from "../metric";
import {BarSeriesItem, Disp, Fill, LegendItem, Stroke} from "./types";
import {getColorLine, getDashLine} from "./helpers";
import {getColorLine} from "./helpers";
import {HideSeriesArgs} from "./types";

interface SeriesItem extends Series {
@@ -10,15 +10,15 @@ interface SeriesItem extends Series {
}

export const getSeriesItem = (d: MetricResult, hideSeries: string[], alias: string[]): SeriesItem => {
  const label = getNameForMetric(d, alias[d.group - 1]);
  const name = getNameForMetric(d, alias[d.group - 1]);
  const label = `[${d.group}]${name}`;
  return {
    label,
    dash: getDashLine(d.group),
    freeFormFields: d.metric,
    width: 1.4,
    stroke: getColorLine(d.group, label),
    show: !includesHideSeries(label, d.group, hideSeries),
    scale: String(d.group),
    stroke: getColorLine(label),
    show: !includesHideSeries(label, hideSeries),
    scale: "1",
    points: {
      size: 4.2,
      width: 1.4
@@ -35,9 +35,9 @@ export const getLegendItem = (s: SeriesItem, group: number): LegendItem => ({
});

export const getHideSeries = ({hideSeries, legend, metaKey, series}: HideSeriesArgs): string[] => {
  const label = `${legend.group}.${legend.label}`;
  const include = includesHideSeries(legend.label, legend.group, hideSeries);
  const labels = series.map(s => `${s.scale}.${s.label}`);
  const {label} = legend;
  const include = includesHideSeries(label, hideSeries);
  const labels = series.map(s => s.label || "");
  if (metaKey) {
    return include ? hideSeries.filter(l => l !== label) : [...hideSeries, label];
  } else if (hideSeries.length) {
@@ -47,8 +47,8 @@ export const getHideSeries = ({hideSeries, legend, metaKey, series}: HideSeriesA
  }
};

export const includesHideSeries = (label: string, group: string | number, hideSeries: string[]): boolean => {
  return hideSeries.includes(`${group}.${label}`);
export const includesHideSeries = (label: string, hideSeries: string[]): boolean => {
  return hideSeries.includes(`${label}`);
};

export const getBarSeries = (
@@ -1,6 +1,6 @@
import dayjs from "dayjs";
import {SetupTooltip} from "./types";
import {getColorLine, formatPrettyNumber} from "./helpers";
import {getColorLine, formatPrettyNumber, getLegendLabel} from "./helpers";

export const setTooltip = ({u, tooltipIdx, metrics, series, tooltip, tooltipOffset, unit = ""}: SetupTooltip): void => {
  const {seriesIdx, dataIdx} = tooltipIdx;
@@ -9,7 +9,7 @@ export const setTooltip = ({u, tooltipIdx, metrics, series, tooltip, tooltipOffs
  const dataTime = u.data[0][dataIdx];
  const metric = metrics[seriesIdx - 1]?.metric || {};
  const selectedSeries = series[seriesIdx];
  const color = getColorLine(Number(selectedSeries.scale || 0), selectedSeries.label || "");
  const color = getColorLine(selectedSeries.label || "");

  const {width, height} = u.over.getBoundingClientRect();
  const top = u.valToPos((dataSeries || 0), series[seriesIdx]?.scale || "1");
@@ -22,8 +22,7 @@ export const setTooltip = ({u, tooltipIdx, metrics, series, tooltip, tooltipOffs
  tooltip.style.top = `${tooltipOffset.top + top + 10 - (overflowY ? tooltipHeight + 10 : 0)}px`;
  tooltip.style.left = `${tooltipOffset.left + lft + 10 - (overflowX ? tooltipWidth + 20 : 0)}px`;
  const metricName = (selectedSeries.label || "").replace(/{.+}/gmi, "").trim();
  const groupName = `Query ${selectedSeries.scale}`;
  const name = metricName || groupName;
  const name = getLegendLabel(metricName);
  const date = dayjs(new Date(dataTime * 1000)).format("YYYY-MM-DD HH:mm:ss:SSS (Z)");
  const info = Object.keys(metric).filter(k => k !== "__name__").map(k => `<div><b>${k}</b>: ${metric[k]}</div>`).join("");
  const marker = `<div class="u-tooltip__marker" style="background: ${color}"></div>`;
@@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics

ROOT_IMAGE ?= alpine:3.16.2
CERTS_IMAGE := alpine:3.16.2
GO_BUILDER_IMAGE := golang:1.19.1-alpine
GO_BUILDER_IMAGE := golang:1.19.2-alpine
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)

@@ -189,4 +189,4 @@ docker-cluster-up:
	docker-compose -f deployment/docker/docker-compose-cluster.yml up

docker-cluster-down:
	docker-compose -f deployment/docker/docker-compose-cluster.yml down -v
	docker-compose -f deployment/docker/docker-compose-cluster.yml down -v
@@ -89,17 +89,6 @@ groups:
          description: "The limit of concurrent flushes on instance {{ $labels.instance }} is equal to number of CPUs.\n
            When vmstorage constantly hits the limit it means that storage is overloaded and requires more CPU."

      - alert: TooManyLogs
        expr: sum(increase(vm_log_messages_total{level="error"}[5m])) by (job, instance) > 0
        for: 15m
        labels:
          severity: warning
        annotations:
          dashboard: "http://localhost:3000/d/oS7Bi_0Wz?viewPanel=104&var-instance={{ $labels.instance }}"
          summary: "Too many logs printed for job \"{{ $labels.job }}\" ({{ $labels.instance }})"
          description: "Logging rate for job \"{{ $labels.job }}\" ({{ $labels.instance }}) is {{ $value }} for last 15m.\n
            Worth to check logs for specific error messages."

      - alert: RowsRejectedOnIngestion
        expr: sum(rate(vm_rows_ignored_total[5m])) by (instance, reason) > 0
        for: 15m

@@ -51,4 +51,14 @@ groups:
        annotations:
          summary: "More than 90% of CPU is used by \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") during the last 5m"
          description: "Too high CPU usage may be a sign of insufficient resources and make process unstable.
            Consider to either increase available CPU resources or decrease the load on the process."
            Consider to either increase available CPU resources or decrease the load on the process."

      - alert: TooManyLogs
        expr: sum(increase(vm_log_messages_total{level="error"}[5m])) by (job, instance) > 0
        for: 15m
        labels:
          severity: warning
        annotations:
          summary: "Too many logs printed for job \"{{ $labels.job }}\" ({{ $labels.instance }})"
          description: "Logging rate for job \"{{ $labels.job }}\" ({{ $labels.instance }}) is {{ $value }} for last 15m.\n
            Worth to check logs for specific error messages."

@@ -71,17 +71,6 @@ groups:
          description: "The limit of concurrent flushes on instance {{ $labels.instance }} is equal to number of CPUs.\n
            When VictoriaMetrics constantly hits the limit it means that storage is overloaded and requires more CPU."

      - alert: TooManyLogs
        expr: sum(increase(vm_log_messages_total{level="error"}[5m])) by (job, instance) > 0
        for: 15m
        labels:
          severity: warning
        annotations:
          dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=67&var-instance={{ $labels.instance }}"
          summary: "Too many logs printed for job \"{{ $labels.job }}\" ({{ $labels.instance }})"
          description: "Logging rate for job \"{{ $labels.job }}\" ({{ $labels.instance }}) is {{ $value }} for last 15m.\n
            Worth to check logs for specific error messages."

      - alert: RowsRejectedOnIngestion
        expr: sum(rate(vm_rows_ignored_total[5m])) by (instance, reason) > 0
        for: 15m
@@ -154,4 +143,4 @@ groups:
          summary: "Metrics ingested in ({{ $labels.instance }}) are exceeding labels limit"
          description: "VictoriaMetrics limits the number of labels per each metric with `-maxLabelsPerTimeseries` command-line flag.\n
            This prevents from ingesting metrics with too many labels. Please verify that `-maxLabelsPerTimeseries` is configured
            correctly or that clients which send these metrics aren't misbehaving."
            correctly or that clients which send these metrics aren't misbehaving."
@@ -104,6 +104,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
* [How to use relabeling in Prometheus and VictoriaMetrics](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2)
* [How to monitor Go applications with VictoriaMetrics](https://victoriametrics.medium.com/how-to-monitor-go-applications-with-victoriametrics-c04703110870)
* [Prometheus storage: tech terms for humans](https://valyala.medium.com/prometheus-storage-technical-terms-for-humans-4ab4de6c3d48)
* [Cardinality explorer](https://victoriametrics.com/blog/cardinality-explorer/)

### Other articles
@@ -25,7 +25,9 @@ at [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMe
See [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy-via-labels) for details.

* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): support specifying tenant ids via `vm_account_id` and `vm_project_id` labels. See [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy-via-labels) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2970).
* FEATURE: improve [relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling) performance by up to 3x if non-trivial `regex` values are used.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): improve [relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling) performance by up to 3x for non-trivial `regex` values such as `([^:]+):.+`, which can be used for extracting a `host` part from `host:port` label value.
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): improve performance by up to 4x for queries containing non-trivial `regex` filters such as `{path=~"/foo/.+|/bar"}`.
* FEATURE: improve performance scalability on systems with many CPU cores for [/federate](https://docs.victoriametrics.com/#federation) and [/api/v1/export/...](https://docs.victoriametrics.com/#how-to-export-time-series) endpoints.
* FEATURE: sanitize metric names for data ingested via [DataDog protocol](https://docs.victoriametrics.com/#how-to-send-data-from-datadog-agent) according to [DataDog metric naming](https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics). The behaviour can be disabled by passing `-datadog.sanitizeMetricName=false` command-line flag. Thanks to @PerGon for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3105).
* FEATURE: add `-usePromCompatibleNaming` command-line flag to [vmagent](https://docs.victoriametrics.com/vmagent.html), to single-node VictoriaMetrics and to `vminsert` component of VictoriaMetrics cluster. This flag can be used for normalizing the ingested metric names and label names to [Prometheus-compatible form](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). If this flag is set, then all the chars unsupported by Prometheus are replaced with `_` chars in metric names and labels of the ingested samples (see the sketch after this list). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3113).
* FEATURE: accept whitespace in metric names and tags ingested via [Graphite plaintext protocol](https://docs.victoriametrics.com/#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) according to [the specs](https://graphite.readthedocs.io/en/latest/tags.html). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3102).
@ -35,26 +37,41 @@ See [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#m
* FEATURE: allow defining the minimum TLS version to use when accepting https requests to VictoriaMetrics components if the `-tls` command-line flag is set. The minimum TLS version can be set via the `-tlsMinVersion` command-line flag. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3090).
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add `vm-native-step-interval` command-line flag for `vm-native` mode. The new option allows splitting the import process into chunks by time interval. This helps to migrate data sets with high churn rate and provides better control over the process. See [feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2733).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add `top queries` tab, which shows various stats for recently executed queries. See [these docs](https://docs.victoriametrics.com/#top-queries) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2707).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): move the "Execute Query" and "Add Query" buttons below the query fields, change the icon for removing a query. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3101).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): set the maximum number of queries to 4, remove multiple Y-axes in favor of a single one for all queries, and use dotted lines to distinguish queries on the graph. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3169).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `debug` mode to the alerting rule settings for printing additional information into logs during evaluation. See the `debug` param in [alerting rule config](https://docs.victoriametrics.com/vmalert.html#alerting-rules).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add an experimental feature for displaying the last 10 states of the rule (recording or alerting) evaluation. The state is available on the Rule page, which can be opened by clicking on the `Details` link next to the Rule's name on the `/groups` page.
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow using extra labels in annotations. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3013).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow configuring authorization params per list of targets in vmalert's notifier config for `static_configs`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2690).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow using `{{$labels}}` for templating in the `-external.alert.source` command-line flag. The change is supposed to provide additional flexibility for generating the alert's source link based on label values.
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `vm_account_id` and `vm_project_id` labels to results of alerting and recording rules if `-clusterMode` is enabled. This improves [multitenant support in vmalert](https://docs.victoriametrics.com/vmalert.html#multitenancy).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): minimize the time needed for reading large responses from scrape targets in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). This should reduce scrape durations for targets such as [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) running in a big Kubernetes cluster.
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add [sort_by_label_numeric](https://docs.victoriametrics.com/MetricsQL.html#sort_by_label_numeric) and [sort_by_label_numeric_desc](https://docs.victoriametrics.com/MetricsQL.html#sort_by_label_numeric_desc) functions for [numeric sort](https://www.gnu.org/software/coreutils/manual/html_node/Version-sort-is-not-the-same-as-numeric-sort.html) of input time series by the specified labels. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2938).
* FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html) and [vmrestore](https://docs.victoriametrics.com/vmrestore.html): retry GCS operations for up to 3 minutes on temporary failures. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3147).
* FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): add support for saving / restoring backups to / from [Azure blob storage](https://azure.microsoft.com/en-us/products/storage/blobs/). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1029).
* FEATURE: [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html): expose the `vm_backup_in_flight` metric, which can be used for determining which backup types - latest, hourly, daily, weekly or monthly - are currently being executed.
* FEATURE: [vmgateway](https://docs.victoriametrics.com/vmgateway.html): add the ability to extract the JWT authorization token from a non-standard HTTP header by passing it via the `-auth.httpHeader` command-line flag. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3054).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): expose the `__meta_ec2_region` label for [ec2_sd_config](https://docs.victoriametrics.com/sd_configs.html#ec2_sd_configs) in the same way as [Prometheus 2.39 does](https://github.com/prometheus/prometheus/pull/11326).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): accept data ingestion requests via paths starting with the `/prometheus` prefix in the same way as [VictoriaMetrics does](https://docs.victoriametrics.com/#how-to-import-time-series-data). For example, `vmagent` now accepts Prometheus `remote_write` data via both `/api/v1/write` and `/prometheus/api/v1/write`. This simplifies switching between single-node VictoriaMetrics and `vmagent`.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `external_labels` from the `global` section at `-promscrape.config` after the [relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling) is applied to scraped metrics. This aligns with Prometheus behaviour. Previously the `external_labels` were added to scrape targets, so they could be modified during relabeling. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3137).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow specifying per-`-remoteWrite.url` limits for the on-disk size of pending data via the `-remoteWrite.maxDiskUsagePerURL` command-line flag. Thanks to @rbizos for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3071).

* BUGFIX: do not export stale metrics via [/federate api](https://docs.victoriametrics.com/#federation) after the staleness markers. Previously such metrics were exported with `NaN` values. This could break some setups. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3185).
* BUGFIX: export infinity numbers as `"Infinity"` strings at [/api/v1/export](https://docs.victoriametrics.com/#how-to-export-data-in-json-line-format), so they can be parsed by standard JSON parsers. Previously infinity numbers were exported as `Inf` values, which couldn't be parsed by standard JSON parsers. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3161).
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): properly handle request paths ending with `/`, such as `/vmui/`. Previously `vmui` was dropping the trailing `/`, which could prevent using `vmui` via `vmauth`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1752).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly encode query params for AWS signed requests, using `%20` instead of `+` as the API requires. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3171).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly parse relabel configs when the regex ends with an escaped `$`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3131).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate `rate_over_sum(m[d])` as `sum_over_time(m[d])/d`. Previously `sum_over_time(m[d])` could be improperly divided by a time range smaller than `d`. See [rate_over_sum() docs](https://docs.victoriametrics.com/MetricsQL.html#rate_over_sum) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3045).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate `increase(m[d])` over slow-changing counters with values smaller than 100. Previously [increase](https://docs.victoriametrics.com/MetricsQL.html#increase) could return unexpectedly big results in this case. See [the related issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/962) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3163).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): ignore empty series when applying [limit_offset](https://docs.victoriametrics.com/MetricsQL.html#limit_offset). This should improve queries with additional filters by value in expressions like `limit_offset(1, 1, foo > 1)`.
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): properly calculate query results at `vmselect`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3067). The issue was introduced in [v1.81.0](https://docs.victoriametrics.com/CHANGELOG.html#v1810).
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): log a clear error when multiple identical `-storageNode` command-line flags are passed to `vmselect` or to `vminsert`. Previously these components crashed with the cryptic panic `metric ... is already registered` in this case. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3076).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate [quantiles_over_time](https://docs.victoriametrics.com/MetricsQL.html#quantiles_over_time) when the lookbehind window contains only a single sample. Previously an empty result was incorrectly returned in this case.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the `RangeError: Maximum call stack size exceeded` error when the query returns too many data points in the `Table` view. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3092/files).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the workaround for adding more queries via URL. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3169).
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): re-evaluate annotations on each alert evaluation. Previously, annotations were evaluated only on the alert's value change. This could result in stale annotations in some cases described in [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3119).
* BUGFIX: prevent excessive CPU usage when the storage enters [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode). The previous fix in [v1.81.0](https://docs.victoriametrics.com/CHANGELOG.html#v1810) wasn't complete.
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): change the default value for the `-datasource.queryStep` command-line flag from `0s` to `5m`. The `step` param is added by vmalert to every rule evaluation request sent to the datasource. Before this change, `step` was equal to the group's evaluation interval by default. The `step` param for instant queries defines how far back VM can look for the last written data point. The change is supposed to improve reliability of rule evaluation when the evaluation interval is lower than the scraping interval.
* BUGFIX: properly calculate the `vm_rows_scanned_per_query` histogram exported at the `/metrics` page of `vmselect` and single-node VictoriaMetrics. Previously it could return misleadingly high numbers for [rollup functions](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions), which scan only a few samples in the provided lookbehind window in square brackets. For example, `increase(m[1d])` always scans only 2 rows (aka `raw samples`) per returned time series.
## [v1.81.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.81.2)
@ -84,6 +84,9 @@ while the `http_requests_total{path="/bar"} 34` would be stored in the tenant `a
The `vm_account_id` and `vm_project_id` labels are extracted after applying the [relabeling](https://docs.victoriametrics.com/relabeling.html)
set via the `-relabelConfig` command-line flag, so these labels can be set at this stage.

**Security considerations:** it is recommended to restrict access to `multitenant` endpoints to trusted sources only,
since an untrusted source may break per-tenant data by writing unwanted samples to arbitrary tenants.
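For illustration, a sketch of writing a single sample into a specific tenant through the multitenant `vminsert` endpoint (hostname and account id are hypothetical):

```console
# the vm_account_id label routes the sample to tenant accountID=42
echo 'http_requests_total{path="/foo",vm_account_id="42"} 12' | \
  curl -X POST --data-binary @- 'http://vminsert:8480/insert/multitenant/prometheus/api/v1/import/prometheus'
```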
## Binaries
1434	docs/MetricsQL.md
File diff suppressed because it is too large
127	docs/README.md
@ -1,3 +1,4 @@
# VictoriaMetrics

[![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
@ -279,7 +280,7 @@ When querying the [backfilled data](https://docs.victoriametrics.com/#backfillin
VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by clicking the `Override step value` checkbox.

VMUI allows investigating correlations between multiple queries on the same graph. Just click the `Add Query` button, enter an additional query in the newly appeared input field and press `Ctrl+Enter`. Results for all the queries should be displayed simultaneously on the same graph.

See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
@ -311,7 +312,7 @@ matching the specified [series selector](https://prometheus.io/docs/prometheus/l
Cardinality explorer is built on top of [/api/v1/status/tsdb](#tsdb-stats).

See [cardinality explorer playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/cardinality).
See the example of using the cardinality explorer [here](https://victoriametrics.com/blog/cardinality-explorer/).

## How to apply new config to VictoriaMetrics
@ -337,100 +338,86 @@ See also [vmagent](https://docs.victoriametrics.com/vmagent.html), which can be
## How to send data from DataDog agent

VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/)
or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/)
via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics)
at the `/datadog/api/v1/series` path.

### Sending metrics to VictoriaMetrics

DataDog agent allows configuring destinations for sending metrics via the `DD_DD_URL` ENV variable
or via the `dd_url` section of the [configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/).

<p align="center">
  <img src="Single-server-VictoriaMetrics-sending_DD_metrics_to_VM.png" width="800">
</p>

To configure DataDog agent via ENV variable, add the following prefix:

<div class="with-copy" markdown="1">

```
DD_DD_URL=http://victoriametrics:8428/datadog
```

</div>

_Choose the correct URL for VictoriaMetrics [here](https://docs.victoriametrics.com/url-examples.html#datadog)._

To configure DataDog agent via [configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files),
add the following line:

<div class="with-copy" markdown="1">

```
dd_url: http://victoriametrics:8428/datadog
```

</div>

VictoriaMetrics doesn't check the `DD_API_KEY` param, so it can be set to an arbitrary value.

vmagent also can accept the DataDog metrics format. Depending on where vmagent will forward data,
pick the [single-node or cluster URL](https://docs.victoriametrics.com/url-examples.html#datadog) format.
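For instance, a minimal sketch of pointing the agent at a `vmagent` instance instead (assuming `vmagent` listens on its default `8429` port):

```
DD_DD_URL=http://vmagent-host:8429/datadog
```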
### Sending metrics to DataDog and VictoriaMetrics

DataDog allows configuring [Dual Shipping](https://docs.datadoghq.com/agent/guide/dual-shipping/) for sending metrics
via the `DD_ADDITIONAL_ENDPOINTS` ENV variable or via the `additional_endpoints` section of the configuration file.

<p align="center">
  <img src="Single-server-VictoriaMetrics-sending_DD_metrics_to_VM_and_DD.png" width="800">
</p>

Run DataDog with the following ENV variable to use VictoriaMetrics as an additional metrics receiver:

<div class="with-copy" markdown="1">

```
DD_ADDITIONAL_ENDPOINTS='{\"http://victoriametrics:8428/datadog\"}'
```

</div>

_Choose the correct URL for VictoriaMetrics [here](https://docs.victoriametrics.com/url-examples.html#datadog)._

To configure DataDog Dual Shipping via [configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files),
add the following line:

<div class="with-copy" markdown="1">

```
additional_endpoints: http://victoriametrics:8428/datadog
```

</div>

### Send via cURL

See how to send data to VictoriaMetrics via
[DataDog "submit metrics"](https://docs.victoriametrics.com/url-examples.html#datadogapiv1series) from the command line. For example:

```console
echo '
{
  "series": [
    {
      "host": "test.example.com",
      "interval": 20,
      "metric": "system.load.1",
      "points": [[
        0,
        0.5
      ]],
      "tags": [
        "environment:test"
      ],
      "type": "rate"
    }
  ]
}
' | curl -X POST --data-binary @- http://victoriametrics-host:8428/datadog/api/v1/series
```

For the cluster version of VictoriaMetrics, send the same payload to `http://vminsert-host:8480/insert/0/datadog/api/v1/series`.

The imported data can be read via the [export API](https://docs.victoriametrics.com/url-examples.html#apiv1export):

```console
curl http://victoriametrics-host:8428/api/v1/export -d 'match[]=system.load.1'
```

For the cluster version, read it back via `vmselect`:

```console
curl http://vmselect-host:8481/select/0/prometheus/api/v1/export -d 'match[]=system.load.1'
```

This command should return the following output if everything is OK:

```json
{"metric":{"__name__":"system.load.1","environment":"test","host":"test.example.com"},"values":[0.5],"timestamps":[1632833641000]}
```

### Additional details

VictoriaMetrics automatically sanitizes metric names for the data ingested via DataDog protocol
according to [DataDog metric naming recommendations](https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics).
@ -451,7 +438,7 @@ See [these docs](https://docs.victoriametrics.com/vmagent.html#adding-labels-to-
## How to send data from InfluxDB-compatible agents such as [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)

Use `http://<victoriametrics-addr>:8428` url instead of the InfluxDB url in the agents' configs.
For instance, put the following lines into the `Telegraf` config, so it sends data to VictoriaMetrics instead of InfluxDB:

```toml
[[outputs.influxdb]]
  urls = ["http://<victoriametrics-addr>:8428"]
```
BIN	docs/Single-server-VictoriaMetrics-sending_DD_metrics_to_VM.png (new file, 60 KiB; binary not shown)
BIN	docs/Single-server-VictoriaMetrics-sending_DD_metrics_to_VM_and_DD.png (new file, 80 KiB; binary not shown)
@ -473,6 +473,7 @@ The following meta labels are available on discovered targets during [relabeling
* `__meta_ec2_private_ip`: the private IP address of the instance, if present
* `__meta_ec2_public_dns_name`: the public DNS name of the instance, if available
* `__meta_ec2_public_ip`: the public IP address of the instance, if available
* `__meta_ec2_region`: the EC2 region of the discovered instance
* `__meta_ec2_subnet_id`: comma-separated list of subnet IDs in which the instance is running, if available
* `__meta_ec2_tag_<tagkey>`: each tag value of the instance
* `__meta_ec2_vpc_id`: the ID of the VPC in which the instance is running, if available
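The discovered meta labels can be turned into persistent labels via relabeling. A minimal sketch (job name and region are illustrative) that stores the newly added `__meta_ec2_region` in a `region` label:

```yaml
scrape_configs:
- job_name: ec2  # illustrative job name
  ec2_sd_configs:
  - region: us-east-1
  relabel_configs:
  - source_labels: [__meta_ec2_region]
    target_label: region
```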
@ -437,6 +437,28 @@ Additional information:
* [TSDB Stats](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats)
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)

## /datadog

**DataDog URL for Single-node VictoriaMetrics**

<div class="with-copy" markdown="1">

```
http://victoriametrics:8428/datadog
```

</div>

**DataDog URL for Cluster version of VictoriaMetrics**

<div class="with-copy" markdown="1">

```
http://vminsert:8480/insert/0/datadog
```

</div>

## /datadog/api/v1/series

**Imports data in DataDog format into VictoriaMetrics**
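A compact sketch of a request to this endpoint (single-node address assumed; the payload mirrors the DataDog example above):

```console
echo '{"series":[{"host":"test.example.com","interval":20,"metric":"system.load.1","points":[[0,0.5]],"tags":["environment:test"],"type":"rate"}]}' |
  curl -X POST --data-binary @- http://victoriametrics:8428/datadog/api/v1/series
```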
426	docs/vmagent.md
@ -4,9 +4,12 @@ sort: 3
# vmagent

`vmagent` is a tiny agent which helps you collect metrics from various sources,
[relabel and filter the collected metrics](#relabeling)
and store them in [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
or any other storage systems via Prometheus `remote_write` protocol.

See [Quick Start](#quick-start) for details.

<img alt="vmagent" src="vmagent.png">
@ -20,27 +23,40 @@ additionally to [discovering Prometheus-compatible targets and scraping metrics
## Features

* Can be used as a drop-in replacement for Prometheus for discovering and scraping targets such as [node_exporter](https://github.com/prometheus/node_exporter).
  Note that single-node VictoriaMetrics can also discover and scrape Prometheus-compatible targets in the same way as `vmagent` does -
  see [these docs](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter).
* Can add, remove and modify labels (aka tags) via Prometheus relabeling. Can filter data before sending it to remote storage. See [these docs](#relabeling) for details.
* Can accept data via all the ingestion protocols supported by VictoriaMetrics - see [these docs](#how-to-push-data-to-vmagent).
* Can replicate collected metrics simultaneously to multiple remote storage systems -
  see [these docs](#replication-and-high-availability).
* Works smoothly in environments with unstable connections to remote storage. If the remote storage is unavailable, the collected metrics
  are buffered at `-remoteWrite.tmpDataPath`. The buffered metrics are sent to remote storage as soon as the connection
  to the remote storage is repaired. The maximum disk usage for the buffer can be limited with `-remoteWrite.maxDiskUsagePerURL`.
* Uses lower amounts of RAM, CPU, disk IO and network bandwidth than Prometheus.
* Scrape targets can be spread among multiple `vmagent` instances when a big number of targets must be scraped. See [these docs](#scraping-big-number-of-targets).
* Can efficiently scrape targets that expose millions of time series such as the [/federate endpoint in Prometheus](https://prometheus.io/docs/prometheus/latest/federation/).
  See [these docs](#stream-parsing-mode).
* Can deal with [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality)
  and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues by limiting the number of unique time series at scrape time
  and before sending them to remote storage systems. See [these docs](#cardinality-limiter).
* Can load scrape configs from multiple files. See [these docs](#loading-scrape-configs-from-multiple-files).
* Can write collected metrics to multiple tenants. See [these docs](#multitenancy).
* Can read data from Kafka. See [these docs](#reading-metrics-from-kafka).
* Can write data to Kafka. See [these docs](#writing-metrics-to-kafka).
## Quick Start
Please download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
(`vmagent` is also available in [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags)),
unpack it and pass the following flags to the `vmagent` binary in order to start scraping Prometheus-compatible targets:

* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`).
  The path can point either to a local file or to an http url. `vmagent` doesn't support some sections of Prometheus config file,
  so you may need either to delete these sections or to run `vmagent` with the `-promscrape.config.strictParse=false` command-line flag.
  In this case `vmagent` ignores unsupported sections. See [the list of unsupported sections](#unsupported-prometheus-config-sections).
* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics. The `-remoteWrite.url` argument can be specified
  multiple times to replicate data concurrently to an arbitrary number of remote storage systems. See [various use cases](#use-cases).

Example command line:
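A sketch assembled from the two flags above (paths and the remote storage URL are placeholders):

```console
/path/to/vmagent -promscrape.config=/path/to/prometheus.yml -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
```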
@ -50,7 +66,12 @@ Example command line:
See [how to scrape Prometheus-compatible targets](#how-to-collect-metrics-in-prometheus-format) for more details.

If you use single-node VictoriaMetrics, then you can discover and scrape Prometheus-compatible targets directly from VictoriaMetrics
without the need to use `vmagent` - see [these docs](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter).

If you don't need to scrape Prometheus-compatible targets, then the `-promscrape.config` option isn't needed.
For example, the following command is sufficient for accepting data via [supported push-based protocols](#how-to-push-data-to-vmagent)
and sending it to the provided `-remoteWrite.url`:

```console
/path/to/vmagent -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write
```
@ -62,7 +83,8 @@ Pass `-help` to `vmagent` in order to see [the full list of supported command-li
## How to push data to vmagent

`vmagent` supports [the same set of push-based data ingestion protocols as VictoriaMetrics does](https://docs.victoriametrics.com/#how-to-import-time-series-data)
in addition to pull-based Prometheus-compatible targets' scraping:

* DataDog "submit metrics" API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent).
* InfluxDB line protocol via `http://<vmagent>:8429/write`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf).
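For instance, a sketch of pushing a single InfluxDB line to `vmagent` (default `8429` port assumed; the measurement is hypothetical):

```console
curl -d 'measurement,tag1=value1 field1=123' -X POST http://vmagent-host:8429/write
```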
@ -77,10 +99,10 @@ Pass `-help` to `vmagent` in order to see [the full list of supported command-li
## Configuration update

`vmagent` should be restarted in order to update config options set via command-line args.

`vmagent` supports multiple approaches for reloading configs from updated config files such as
`-promscrape.config`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`:

* Sending `SIGHUP` signal to `vmagent` process:

```console
kill -SIGHUP `pidof vmagent`
```
@ -110,13 +132,16 @@ See [these docs](#how-to-collect-metrics-in-prometheus-format) for details.
### Flexible metrics relay

`vmagent` can accept metrics in [various popular data ingestion protocols](#how-to-push-data-to-vmagent), apply [relabeling](#relabeling)
to the accepted metrics (for example, change metric names/labels or drop unneeded metrics) and then forward the relabeled metrics
to other remote storage systems, which support Prometheus `remote_write` protocol (including other `vmagent` instances).

### Replication and high availability

`vmagent` replicates the collected metrics among multiple remote storage instances configured via `-remoteWrite.url` args.
If a single remote storage instance is temporarily out of service, then the collected data remains available in another remote storage instance.
`vmagent` buffers the collected data in files at `-remoteWrite.tmpDataPath` until the remote storage becomes available again
and then it sends the buffered data to the remote storage in order to prevent data gaps.
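A minimal sketch of such a setup (hostnames are hypothetical), where every collected sample is written to both storages:

```console
/path/to/vmagent -promscrape.config=/path/to/prometheus.yml \
  -remoteWrite.url=https://vm-first:8428/api/v1/write \
  -remoteWrite.url=https://vm-second:8428/api/v1/write
```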
### Relabeling and filtering
@ -140,7 +165,11 @@ Also, Basic Auth can be enabled for the incoming `remote_write` requests with `-
### remote_write for clustered version

While `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets,
writes are always performed in Prometheus remote_write protocol. Therefore for the [clustered version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html),
the `-remoteWrite.url` command-line flag should be configured as `<schema>://<vminsert-host>:8480/insert/<accountID>/prometheus/api/v1/write`
according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format).
There is also support for multitenant writes. See [these docs](#multitenancy).
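For instance, a sketch of writing scraped data to tenant `0` of a cluster (the `vminsert-host` name is a placeholder):

```console
/path/to/vmagent -promscrape.config=/path/to/prometheus.yml \
  -remoteWrite.url=http://vminsert-host:8480/insert/0/prometheus/api/v1/write
```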
## Multitenancy
@ -148,12 +177,24 @@ By default `vmagent` collects the data without tenant identifiers and routes it
[VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) supports writing data to multiple tenants
specified via special labels - see [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy-via-labels).
This allows specifying tenant ids via [relabeling](#relabeling) and writing multitenant data
to a single `-remoteWrite.url=http://<vminsert-addr>/insert/multitenant/prometheus/api/v1/write`.

`vmagent` can accept data from the same multitenant endpoints as `vminsert` from [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html)
does according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format) and route the accepted data
to the corresponding [tenants](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy) in the VictoriaMetrics cluster
pointed to by the `-remoteWrite.multitenantURL` command-line flag. For example, if `-remoteWrite.multitenantURL` is set to `http://vminsert-service`,
then `vmagent` would accept multitenant data at `http://vmagent:8429/insert/<accountID>/...` endpoints in the same way
as [VictoriaMetrics cluster does](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format) and route
it to `http://vminsert-service/insert/<accountID>/prometheus/api/v1/write`.

If multiple `-remoteWrite.multitenantURL` command-line options are set, then `vmagent` replicates the collected data across all the configured urls.
This allows using a single `vmagent` instance in front of multiple VictoriaMetrics clusters.

If the `-remoteWrite.multitenantURL` command-line flag is set and `vmagent` is configured to scrape Prometheus-compatible targets
(e.g. if the `-promscrape.config` command-line flag is set), then `vmagent` reads the tenantID from the `__tenant_id__` label
of the discovered targets and routes all the metrics from such a target to the given `__tenant_id__`,
e.g. to the url `<-remoteWrite.multitenantURL>/insert/<__tenant_id__>/prometheus/api/v1/write`.

For example, the following relabeling rule instructs sending metrics to the tenantID defined in the `prometheus.io/tenant` annotation of the Kubernetes pod deployment:
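A sketch of such a rule, relying on the standard Prometheus meta label for pod annotations (adjust to the actual discovery setup):

```yaml
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_tenant]
  target_label: __tenant_id__
```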
@ -188,7 +229,8 @@ See [the list of supported service discovery types for Prometheus scrape targets
`vmagent` supports the following additional options in the [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) section:

* `headers` - a list of HTTP headers to send to the scrape target with each scrape request. This can be used when the scrape target
  needs custom authorization and authentication. For example:

```yaml
scrape_configs:
- job_name: custom_headers  # illustrative job name; `headers` is set per job
  headers:
  - "My-Auth: TopSecret"
```
* `disable_compression: true` for disabling response compression on a per-job basis. By default `vmagent` requests compressed responses
  from scrape targets in order to save network bandwidth.
* `disable_keepalive: true` for disabling [HTTP keep-alive connections](https://en.wikipedia.org/wiki/HTTP_persistent_connection)
  on a per-job basis. By default `vmagent` uses keep-alive connections to scrape targets in order to reduce overhead on connection re-establishing.
* `series_limit: N` for limiting the number of unique time series a single scrape target can expose. See [these docs](#cardinality-limiter).
* `stream_parse: true` for scraping targets in a streaming manner. This may be useful when targets export a big number of metrics. See [these docs](#stream-parsing-mode).
* `scrape_align_interval: duration` for aligning scrapes to the given interval instead of using a random offset
  in the range `[0 ... scrape_interval]` for scraping each target. The random offset helps to spread scrapes evenly in time.
* `scrape_offset: duration` for specifying the exact offset for scraping instead of using a random offset in the range `[0 ... scrape_interval]`.
* `relabel_debug: true` for enabling debug logging during relabeling of the discovered targets. See [these docs](#relabeling).
* `metric_relabel_debug: true` for enabling debug logging during relabeling of the scraped metrics. See [these docs](#relabeling).
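To illustrate, a sketch combining a couple of these per-job options (job name and target are hypothetical):

```yaml
scrape_configs:
- job_name: kube_state_metrics  # illustrative
  stream_parse: true    # parse the scrape response in a streaming manner
  series_limit: 10000   # cap on unique series this target may expose
  static_configs:
  - targets: ["kube-state-metrics:8080"]
```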
@ -212,7 +257,10 @@ See [scrape_configs docs](https://docs.victoriametrics.com/sd_configs.html#scrap
|
|||
|
||||
## Loading scrape configs from multiple files
|
||||
|
||||
`vmagent` supports loading [scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file and from `https://config-server/scrape_config.yml` url:
|
||||
`vmagent` supports loading [scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) from multiple files specified
|
||||
in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent`
|
||||
loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file
|
||||
and from `https://config-server/scrape_config.yml` url:
|
||||
|
||||

```yml
scrape_config_files:
@@ -221,7 +269,8 @@ scrape_config_files:
- https://config-server/scrape_config.yml
```
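
For reference, a complete `scrape_config_files` section matching the description above might look like this sketch:

```yml
scrape_config_files:
- configs/*.yml
- single_scrape_config.yml
- https://config-server/scrape_config.yml
```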

Every referred file can contain arbitrary number of [supported scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
There is no need to specify the top-level `scrape_configs` section in these files. For example:

```yml
- job_name: foo
```

@@ -238,24 +287,34 @@ Every referred file can contain arbitrary number of [supported scrape configs](h

`vmagent` doesn't support the following sections in the Prometheus config file passed to the `-promscrape.config` command-line flag:

* [remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). This section is substituted
  with various `-remoteWrite*` command-line flags. See [the full list of flags](#advanced-usage). The `remote_write` section isn't supported
  in order to reduce possible confusion when `vmagent` is used for accepting incoming metrics via [supported push protocols](#how-to-push-data-to-vmagent).
  In this case the `-promscrape.config` file isn't needed.
* `remote_read`. This section isn't supported at all, since `vmagent` doesn't provide a Prometheus querying API.
  It is expected that the querying API is provided by the remote storage specified via `-remoteWrite.url`, such as VictoriaMetrics.
  See [Prometheus querying API docs for VictoriaMetrics](https://docs.victoriametrics.com/#prometheus-querying-api-usage).
* `rule_files` and `alerting`. These sections are supported by [vmalert](https://docs.victoriametrics.com/vmalert.html).

The list of supported service discovery types is available [here](#how-to-collect-metrics-in-prometheus-format).

Additionally `vmagent` doesn't support the `refresh_interval` option in service discovery sections.
This option is substituted with `-promscrape.*CheckInterval` command-line options, which are specific to each service discovery type.
See [the full list of command-line flags for vmagent](#advanced-usage).

## Adding labels to metrics

Extra labels can be added to metrics collected by `vmagent` via the following mechanisms:

* The `global -> external_labels` section in `-promscrape.config` file. These labels are added only to metrics scraped from targets configured
  in the `-promscrape.config` file. They aren't added to metrics collected via other [data ingestion protocols](#how-to-push-data-to-vmagent).
* The `-remoteWrite.label` command-line flag. These labels are added to all the collected metrics before sending them to `-remoteWrite.url`.
  For example, the following command starts `vmagent`, which adds `{datacenter="foobar"}` label to all the metrics pushed
  to all the configured remote storage systems (all the `-remoteWrite.url` flag values):

  ```
  /path/to/vmagent -remoteWrite.label=datacenter=foobar ...
  ```

* Via relabeling. See [these docs](#relabeling).
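
For example, a minimal sketch of the `global -> external_labels` mechanism; the label names and values here are hypothetical:

```yaml
global:
  external_labels:
    datacenter: dc-east   # added to every metric scraped via -promscrape.config
    team: team-a
scrape_configs:
- job_name: foo
  static_configs:
  - targets: ["host1:8080"]
```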

@@ -264,59 +323,87 @@ Extra labels can be added to metrics collected by `vmagent` via the following me

`vmagent` automatically generates the following metrics per each scrape of every [Prometheus-compatible target](#how-to-collect-metrics-in-prometheus-format):

* `up` - this metric exposes `1` value on successful scrape and `0` value on unsuccessful scrape. This allows monitoring
  failing scrapes with the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html):

  ```metricsql
  up == 0
  ```

* `scrape_duration_seconds` - the duration of the scrape for the given target. This allows monitoring slow scrapes.
  For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns scrapes,
  which take more than 1.5 seconds to complete:

  ```metricsql
  scrape_duration_seconds > 1.5
  ```

* `scrape_timeout_seconds` - the configured timeout for the current scrape target (aka `scrape_timeout`).
  This allows detecting targets with scrape durations close to the configured scrape timeout.
  For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns targets (identified by `instance` label),
  which take more than 80% of the configured `scrape_timeout` during scrapes:

  ```metricsql
  scrape_duration_seconds / scrape_timeout_seconds > 0.8
  ```

* `scrape_samples_scraped` - the number of samples (aka metrics) parsed per each scrape. This allows detecting targets,
  which expose too many metrics. For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html)
  returns targets, which expose more than 10000 metrics:

  ```metricsql
  scrape_samples_scraped > 10000
  ```

* `scrape_samples_limit` - the configured limit on the number of metrics the given target can expose.
  The limit can be set via `sample_limit` option at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
  This metric is exposed only if the `sample_limit` is set. This allows detecting targets,
  which expose too many metrics compared to the configured `sample_limit`. For example, the following query
  returns targets (identified by `instance` label), which expose more than 80% metrics compared to the configured `sample_limit`:

  ```metricsql
  scrape_samples_scraped / scrape_samples_limit > 0.8
  ```

* `scrape_samples_post_metric_relabeling` - the number of samples (aka metrics) left after applying metric-level relabeling
  from `metric_relabel_configs` section (see [relabeling docs](#relabeling) for more details).
  This allows detecting targets with too many metrics after the relabeling.
  For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns targets
  with more than 10000 metrics after the relabeling:

  ```metricsql
  scrape_samples_post_metric_relabeling > 10000
  ```

* `scrape_series_added` - **an approximate** number of new series the given target generates during the current scrape.
  This metric allows detecting targets (identified by `instance` label),
  which lead to [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
  For example, the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html) returns targets,
  which generate more than 1000 new series during the last hour:

  ```metricsql
  sum_over_time(scrape_series_added[1h]) > 1000
  ```

  `vmagent` sets `scrape_series_added` to zero when it runs with `-promscrape.noStaleMarkers` command-line option
  (e.g. when [staleness markers](#prometheus-staleness-markers) are disabled).

* `scrape_series_limit` - the limit on the number of unique time series the given target can expose according to [these docs](#cardinality-limiter).
  This metric is exposed only if the series limit is set.

* `scrape_series_current` - the number of unique series the given target exposed so far.
  This metric is exposed only if the series limit is set according to [these docs](#cardinality-limiter).
  This metric allows alerting when the number of series exposed by the given target approaches the limit.
  For example, the following query would alert when the target exposes more than 90% of unique series compared to the configured limit:

  ```metricsql
  scrape_series_current / scrape_series_limit > 0.9
  ```

* `scrape_series_limit_samples_dropped` - exposes the number of dropped samples during the scrape because of the exceeded limit
  on the number of unique series. This metric is exposed only if the series limit is set according to [these docs](#cardinality-limiter).
  This metric allows alerting when scraped samples are dropped because of the exceeded limit.
  For example, the following query alerts when at least a single sample is dropped because of the exceeded limit during the last hour:

  ```metricsql
  sum_over_time(scrape_series_limit_samples_dropped[1h]) > 0
  ```

@@ -325,14 +412,36 @@ Extra labels can be added to metrics collected by `vmagent` via the following me

## Relabeling

VictoriaMetrics components support [Prometheus-compatible relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
with [additional enhancements](#relabeling-enhancements). The relabeling can be defined in the following places processed by `vmagent`:

* At the `scrape_config -> relabel_configs` section in `-promscrape.config` file.
  This relabeling is used for modifying labels in discovered targets and for dropping unneeded targets.
  See [relabeling cookbook](https://docs.victoriametrics.com/relabeling.html) for details.

  This relabeling can be debugged by passing `relabel_debug: true` option to the corresponding `scrape_config` section.
  In this case `vmagent` logs target labels before and after the relabeling and then drops the logged target.

* At the `scrape_config -> metric_relabel_configs` section in `-promscrape.config` file.
  This relabeling is used for modifying labels in scraped metrics and for dropping unneeded metrics.
  See [relabeling cookbook](https://docs.victoriametrics.com/relabeling.html) for details.

  This relabeling can be debugged by passing `metric_relabel_debug: true` option to the corresponding `scrape_config` section.
  In this case `vmagent` logs metrics before and after the relabeling and then drops the logged metrics.

* At the `-remoteWrite.relabelConfig` file. This relabeling is used for modifying labels for all the collected metrics
  (including [metrics obtained via push-based protocols](#how-to-push-data-to-vmagent)) and for dropping unneeded metrics
  before sending them to all the configured `-remoteWrite.url` addresses.
  This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`.
  In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.

* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is used for modifying labels for metrics
  and for dropping unneeded metrics before sending them to a particular `-remoteWrite.url`.
  This relabeling can be debugged by passing `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`.
  In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.

All the files with relabeling configs can contain special placeholders in the form `%{ENV_VAR}`,
which are replaced by the corresponding environment variable values.
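
For example, a minimal sketch of a file that could be passed to `-remoteWrite.relabelConfig`; the metric names and the `ENV_NAME` variable are hypothetical:

```yaml
# Drop overly verbose metrics before sending them to every -remoteWrite.url.
- action: drop_metrics
  regex: "go_gc_.*|go_memstats_.*"
# Add an `env` label taken from the ENV_NAME environment variable
# via the %{ENV_VAR} placeholder support described above.
- target_label: env
  replacement: "%{ENV_NAME}"
```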

The following articles contain useful information about Prometheus relabeling:

@@ -349,7 +458,11 @@ The following articles contain useful information about Prometheus relabeling:

## Relabeling enhancements

`vmagent` provides the following enhancements on top of Prometheus-compatible relabeling:

* The `replacement` option can refer arbitrary labels via {% raw %}`{{label_name}}`{% endraw %} placeholders.
  Such placeholders are substituted with the corresponding label value. For example, the following relabeling rule
  sets `instance-job` label value to `host123-foo` when applied to the metric with `{instance="host123",job="foo"}` labels:

  {% raw %}
  ```yaml
@@ -358,11 +471,13 @@ The following articles contain useful information about Prometheus relabeling:
  ```
  {% endraw %}

* An optional `if` filter can be used for conditional relabeling. The `if` filter may contain
  an arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors).
  For example, the following relabeling rule drops metrics, which don't match the `foo{bar="baz"}` series selector, while leaving the rest of metrics:

  ```yaml
  - if: 'foo{bar="baz"}'
    action: keep
  ```

  This is equivalent to the following less clear Prometheus-compatible relabeling rule:

@@ -373,7 +488,8 @@ The following articles contain useful information about Prometheus relabeling:
    regex: 'foo;baz'
  ```

* The `regex` value can be split into multiple lines for improved readability and maintainability.
  These lines are automatically joined with the `|` char when parsed. For example, the following configs are equivalent:

  ```yaml
  - action: keep_metrics
@@ -388,9 +504,12 @@ The following articles contain useful information about Prometheus relabeling:
    - "foo_.+"
  ```

* VictoriaMetrics provides the following additional relabeling actions on top of the standard actions
  from the [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config):

* `replace_all` replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement`
  and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurrences
  of the `-` char in metric names with the `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`):

  ```yaml
  - action: replace_all
@@ -400,7 +519,9 @@ The following articles contain useful information about Prometheus relabeling:
    replacement: "_"
  ```

* `labelmap_all` replaces all of the occurrences of `regex` in all the label names with the `replacement`.
  For example, the following relabeling config replaces all the occurrences of the `-` char in all the label names
  with the `_` char (e.g. `foo-bar-baz` label name is transformed into `foo_bar_baz`):

  ```yaml
  - action: labelmap_all
@@ -408,28 +529,35 @@ The following articles contain useful information about Prometheus relabeling:
    replacement: "_"
  ```

* `keep_if_equal`: keeps the entry if all the label values from `source_labels` are equal,
  while dropping all the other entries. For example, the following relabeling config keeps targets
  if they contain equal values for `instance` and `host` labels, while dropping all the other targets:

  ```yaml
  - action: keep_if_equal
    source_labels: ["instance", "host"]
  ```

* `drop_if_equal`: drops the entry if all the label values from `source_labels` are equal,
  while keeping all the other entries. For example, the following relabeling config drops targets
  if they contain equal values for `instance` and `host` labels, while keeping all the other targets:

  ```yaml
  - action: drop_if_equal
    source_labels: ["instance", "host"]
  ```

* `keep_metrics`: keeps all the metrics with names matching the given `regex`,
  while dropping all the other metrics. For example, the following relabeling config keeps metrics
  with `foo` and `bar` names, while dropping all the other metrics:

  ```yaml
  - action: keep_metrics
    regex: "foo|bar"
  ```

* `drop_metrics`: drops all the metrics with names matching the given `regex`, while keeping all the other metrics.
  For example, the following relabeling config drops metrics with `foo` and `bar` names, while leaving all the other metrics:

  ```yaml
  - action: drop_metrics
  ```

@@ -479,17 +607,32 @@ Additionally, the `action: graphite` relabeling rules usually work much faster t

* If the scrape target becomes temporarily unavailable, then stale markers are sent for all the metrics scraped from this target.
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.

Prometheus staleness markers' tracking needs additional memory, since it must store the previous response body per each scrape target
in order to compare it to the current response body. The memory usage may be reduced by passing `-promscrape.noStaleMarkers`
command-line flag to `vmagent`. This disables staleness tracking. This also disables tracking the number of new time series
per each scrape with the auto-generated `scrape_series_added` metric. See [these docs](#automatically-generated-metrics) for details.

## Stream parsing mode

By default `vmagent` reads the full response body from scrape target into memory, then parses it, applies [relabeling](#relabeling)
and then pushes the resulting metrics to the configured `-remoteWrite.url`. This mode works well for the majority of cases
when the scrape target exposes a small number of metrics (e.g. less than 10 thousand). But this mode may take big amounts of memory
when the scrape target exposes a big number of metrics. In this case it is recommended enabling stream parsing mode.
When this mode is enabled, `vmagent` reads the response from the scrape target in chunks, then immediately processes every chunk
and pushes the processed metrics to remote storage. This allows saving memory when scraping targets that expose millions of metrics.

Stream parsing mode is automatically enabled for scrape targets returning response bodies with sizes bigger than
the `-promscrape.minResponseSizeForStreamParse` command-line flag value. Additionally,
stream parsing mode can be explicitly enabled in the following places:

* Via `-promscrape.streamParse` command-line flag. In this case all the scrape targets defined
  in the file pointed by `-promscrape.config` are scraped in stream parsing mode.
* Via `stream_parse: true` option at `scrape_configs` section. In this case all the scrape targets defined
  in this section are scraped in stream parsing mode.
* Via `__stream_parse__=true` label, which can be set via [relabeling](#relabeling) at `relabel_configs` section.
  In this case stream parsing mode is enabled for the corresponding scrape targets.
  Typical use case: to set the label via [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
  for targets exposing a big number of metrics.

Examples:

@@ -507,7 +650,8 @@ scrape_configs:
      'match[]': ['{__name__!=""}']
```

Note that `sample_limit` and `series_limit` [scrape_config options](https://docs.victoriametrics.com/sd_configs.html#scrape_configs)
cannot be used in stream parsing mode because the parsed data is pushed to remote storage as soon as it is parsed.

## Scraping big number of targets

@@ -523,7 +667,8 @@ spread scrape targets among a cluster of two `vmagent` instances:

```
/path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=1 -promscrape.config=/path/to/config.yml ...
```

The `-promscrape.cluster.memberNum` can be set to a StatefulSet pod name when `vmagent` runs in Kubernetes.
The pod name must end with a number in the range `0 ... promscrape.cluster.memberNum-1`. For example, `-promscrape.cluster.memberNum=vmagent-0`.

By default each scrape target is scraped only by a single `vmagent` instance in the cluster. If there is a need for replicating scrape targets among multiple `vmagent` instances,
then `-promscrape.cluster.replicationFactor` command-line flag must be set to the desired number of replicas. For example, the following commands

@@ -593,9 +738,14 @@ scrape_configs:

By default `vmagent` doesn't limit the number of time series each scrape target can expose. The limit can be enforced in the following places:

* Via `-promscrape.seriesLimitPerTarget` command-line option. This limit is applied individually
  to all the scrape targets defined in the file pointed by `-promscrape.config`.
* Via `series_limit` config option at `scrape_config` section. This limit is applied individually
  to all the scrape targets defined in the given `scrape_config`.
* Via `__series_limit__` label, which can be set with [relabeling](#relabeling) at `relabel_configs` section.
  This limit is applied to the corresponding scrape targets. Typical use case: to set the limit
  via [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) for targets,
  which may expose too many time series.
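
For example, a sketch of setting the limit per `scrape_config` and overriding it per target via the `__series_limit__` label; the job name and the pod annotation are hypothetical:

```yaml
scrape_configs:
- job_name: kubernetes-pods    # hypothetical job name
  series_limit: 5000           # default limit for every target in this job
  kubernetes_sd_configs:
  - role: pod
  relabel_configs:
  # Override the limit per target from a (hypothetical) pod annotation.
  - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_series_limit]
    regex: "(.+)"
    target_label: __series_limit__
```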

See also `sample_limit` option at [scrape_config section](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).

@@ -615,12 +765,16 @@ These metrics allow building the following alerting rules:

- `sum_over_time(scrape_series_limit_samples_dropped[1h]) > 0` - alerts when some samples are dropped because the series limit on a particular target is reached.

By default `vmagent` doesn't limit the number of time series written to remote storage systems specified at `-remoteWrite.url`.
The limit can be enforced by setting the following command-line flags:

* `-remoteWrite.maxHourlySeries` - limits the number of unique time series `vmagent` can write to remote storage systems during the last hour.
  Useful for limiting the number of active time series.
* `-remoteWrite.maxDailySeries` - limits the number of unique time series `vmagent` can write to remote storage systems during the last day.
  Useful for limiting daily churn rate.

Both limits can be set simultaneously. If any of these limits is reached, then samples for new time series are dropped instead of being sent
to remote storage systems. A sample of dropped series is put in the log with `WARNING` level.
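
For example, a sketch of starting `vmagent` with both limits enabled; the values are hypothetical and should be tuned to the expected workload:

```console
/path/to/vmagent -remoteWrite.url=http://localhost:8428/api/v1/write \
  -remoteWrite.maxHourlySeries=100000 \
  -remoteWrite.maxDailySeries=300000 ...
```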

`vmagent` exposes the following metrics at `http://vmagent:8429/metrics` page (see [monitoring docs](#monitoring) for details):

@@ -637,21 +791,25 @@ See also [cardinality explorer docs](https://docs.victoriametrics.com/#cardinali

## Monitoring

`vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page.
We recommend setting up regular scraping of this page either through `vmagent` itself or by Prometheus,
so that the exported metrics may be analyzed later.

Use the official [Grafana dashboard](https://grafana.com/grafana/dashboards/12683) for `vmagent` state overview.
Graphs on this dashboard contain useful hints - hover the `i` icon in the top left corner of each graph in order to read it.
If you have suggestions for improvements or have found a bug - please open an issue on github or add a review to the dashboard.

`vmagent` also exports the status for various targets at the following pages:

* `http://vmagent-host:8429/targets`. This page shows the current status for every active target.
* `http://vmagent-host:8429/service-discovery`. This page shows the list of discovered targets with the discovered `__meta_*` labels
  according to [these docs](https://docs.victoriametrics.com/sd_configs.html).
  This page may help debugging target [relabeling](#relabeling).
* `http://vmagent-host:8429/api/v1/targets`. This handler returns a JSON response
  compatible with [the corresponding page from Prometheus API](https://prometheus.io/docs/prometheus/latest/querying/api/#targets).
* `http://vmagent-host:8429/ready`. This handler returns http 200 status code when `vmagent` finishes
  its initialization for all the [service_discovery configs](https://docs.victoriametrics.com/sd_configs.html).
  It may be useful to perform `vmagent` rolling update without any scrape loss.
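
For example, the target status can be inspected from the command line; a sketch with `curl` and `jq`, where the host name is hypothetical and the field names assume the Prometheus-compatible response shape:

```console
curl -s http://vmagent-host:8429/api/v1/targets | \
  jq '.data.activeTargets[] | {scrapeUrl, health, lastError}'
```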

## Troubleshooting

@@ -664,24 +822,40 @@ It may be useful to perform `vmagent` rolling update without any scrape loss.

* Disabling staleness tracking with `-promscrape.noStaleMarkers` option. See [these docs](#prometheus-staleness-markers).
* Enabling stream parsing mode if `vmagent` scrapes targets with millions of metrics per target. See [these docs](#stream-parsing-mode).
* Reducing the number of output queues with `-remoteWrite.queues` command-line option.
* Reducing the amount of RAM vmagent can use for in-memory buffering with `-memory.allowedPercent` or `-memory.allowedBytes` command-line option.
  Another option is to reduce memory limits in Docker and/or Kubernetes if `vmagent` runs under these systems.
* Reducing the number of CPU cores vmagent can use by passing `GOMAXPROCS=N` environment variable to `vmagent`,
  where `N` is the desired limit on CPU cores. Another option is to reduce CPU limits in Docker or Kubernetes if `vmagent` runs under these systems.
* Passing `-promscrape.dropOriginalLabels` command-line option to `vmagent`, so it drops `"discoveredLabels"` and `"droppedTargets"`
  lists at `/api/v1/targets` page. This reduces memory usage when scraping a big number of targets at the cost
  of reduced debuggability for improperly configured per-target relabeling.

* When `vmagent` scrapes many unreliable targets, it can flood the error log with scrape errors. These errors can be suppressed
  by passing `-promscrape.suppressScrapeErrors` command-line flag to `vmagent`. The most recent scrape error per each target can be observed at `http://vmagent-host:8429/targets`
  and `http://vmagent-host:8429/api/v1/targets`.

* The `/service-discovery` page could be useful for debugging the relabeling process for scrape targets.
  This page contains original labels for targets dropped during relabeling.
  By default the `-promscrape.maxDroppedTargets` targets are shown here. If your setup drops more targets during relabeling,
  then increase the `-promscrape.maxDroppedTargets` command-line flag value to see all the dropped targets.
  Note that tracking each dropped target requires up to 10Kb of RAM. Therefore big values for `-promscrape.maxDroppedTargets`
  may result in increased memory usage if a big number of scrape targets are dropped during relabeling.

* We recommend you increase `-remoteWrite.queues` if the `vmagent_remotewrite_pending_data_bytes` metric exported
  at `http://vmagent-host:8429/metrics` page grows constantly. It is also recommended increasing `-remoteWrite.maxBlockSize`
  and `-remoteWrite.maxRowsPerBlock` command-line options in this case. This can improve data ingestion performance
  to the configured remote storage systems at the cost of higher memory usage.

* If you see gaps in the data pushed by `vmagent` to remote storage when `-remoteWrite.maxDiskUsagePerURL` is set,
  try increasing `-remoteWrite.queues`. Such gaps may appear because `vmagent` cannot keep up with sending the collected data to remote storage.
  Therefore it starts dropping the buffered data if the on-disk buffer size exceeds `-remoteWrite.maxDiskUsagePerURL`.

* `vmagent` drops data blocks if remote storage replies with `400 Bad Request` and `409 Conflict` HTTP responses.
  The number of dropped blocks can be monitored via the `vmagent_remotewrite_packets_dropped_total` metric exported at [/metrics page](#monitoring).

* Use `-remoteWrite.queues=1` when `-remoteWrite.url` points to remote storage, which doesn't accept out-of-order samples (aka data backfilling).
  Such storage systems include Prometheus, Cortex and Thanos, which typically emit `out of order sample` errors.
  The best solution is to use remote storage with [backfilling support](https://docs.victoriametrics.com/#backfilling) such as VictoriaMetrics.

* `vmagent` buffers scraped data at the `-remoteWrite.tmpDataPath` directory until it is sent to `-remoteWrite.url`.
  The directory can grow large when remote storage is unavailable for extended periods of time and if `-remoteWrite.maxDiskUsagePerURL` isn't set.
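
For example, a sketch of bounding the on-disk buffer; the path and the size value are hypothetical:

```console
/path/to/vmagent -remoteWrite.url=http://localhost:8428/api/v1/write \
  -remoteWrite.tmpDataPath=/var/lib/vmagent-buffer \
  -remoteWrite.maxDiskUsagePerURL=1GB ...
```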

@@ -739,25 +913,33 @@ See also [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting

* [Reading metrics from Kafka](#reading-metrics-from-kafka)
* [Writing metrics to Kafka](#writing-metrics-to-kafka)

The enterprise version of vmagent is available for evaluation at the [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page
in `vmutils-...-enterprise.tar.gz` archives and in [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing the `enterprise` suffix.

### Reading metrics from Kafka

[Enterprise version](https://victoriametrics.com/products/enterprise/) of `vmagent` can read metrics in various formats from Kafka messages.
These formats can be configured with `-kafka.consumer.topic.defaultFormat` or `-kafka.consumer.topic.format` command-line options. The following formats are supported:

* `promremotewrite` - [Prometheus remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write).
  Messages in this format can be sent by vmagent - see [these docs](#writing-metrics-to-kafka).
* `influx` - [InfluxDB line protocol format](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/).
* `prometheus` - [Prometheus text exposition format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format)
  and [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/specification/OpenMetrics.md).
* `graphite` - [Graphite plaintext format](https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol).
* `jsonline` - [JSON line format](https://docs.victoriametrics.com/#how-to-import-data-in-json-line-format).

Every Kafka message may contain multiple lines in `influx`, `prometheus`, `graphite` and `jsonline` format delimited by `\n`.

`vmagent` consumes messages from Kafka topics specified by `-kafka.consumer.topic` command-line flag. Multiple topics can be specified
by passing multiple `-kafka.consumer.topic` command-line flags to `vmagent`.

`vmagent` consumes messages from Kafka brokers specified by `-kafka.consumer.topic.brokers` command-line flag.
Multiple brokers can be specified per each `-kafka.consumer.topic` by passing a list of brokers delimited by `;`.
For example, `-kafka.consumer.topic.brokers=host1:9092;host2:9092`.

The following command starts `vmagent`, which reads metrics in InfluxDB line protocol format from the Kafka broker at `localhost:9092`
from the topic `metrics-by-telegraf` and sends them to remote storage at `http://localhost:8428/api/v1/write`:

```console
./bin/vmagent -remoteWrite.url=http://localhost:8428/api/v1/write \
```

@@ -778,7 +960,9 @@ data_format = "influx"

#### Command-line flags for Kafka consumer

These command-line flags are available only in the [enterprise](https://victoriametrics.com/products/enterprise/) version of `vmagent`,
which can be downloaded for evaluation from the [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page
(see `vmutils-...-enterprise.tar.gz` archives) and from [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing the `enterprise` suffix.

```
-kafka.consumer.topic array
```

@@ -811,9 +995,13 @@ These command-line flags are available only in [enterprise](https://victoriametr

### Writing metrics to Kafka

[Enterprise version](https://victoriametrics.com/products/enterprise/) of `vmagent` writes data to Kafka with `at-least-once`
semantics if `-remoteWrite.url` contains a Kafka url. For example, if `vmagent` is started with `-remoteWrite.url=kafka://localhost:9092/?topic=prom-rw`,
then it would send Prometheus remote_write messages to the Kafka bootstrap server at `localhost:9092` with the topic `prom-rw`.
These messages can be read later from Kafka by another `vmagent` - see [these docs](#reading-metrics-from-kafka) for details.

Additional Kafka options can be passed as query params to `-remoteWrite.url`. For instance, `kafka://localhost:9092/?topic=prom-rw&client.id=my-favorite-id`
sets the `client.id` Kafka option to `my-favorite-id`. The full list of Kafka options is available [here](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
|
||||
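A minimal launch sketch combining the options above (the broker address, topic and `client.id` value are illustrative placeholders):

```console
./bin/vmagent -remoteWrite.url='kafka://localhost:9092/?topic=prom-rw&client.id=my-favorite-id'
```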
#### Kafka broker authorization and authentication

@@ -833,7 +1021,9 @@ Two types of auth are supported:
## How to build from sources

We recommend using [official binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmagent` is located in the `vmutils-...` archives.

It may be needed to build `vmagent` from source code when developing or testing a new feature or bugfix.

### Development build
@@ -903,6 +1093,7 @@ curl http://0.0.0.0:8429/debug/pprof/profile > cpu.pprof

The command for collecting CPU profile waits for 30 seconds before returning.

The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).

It is safe to share the collected profiles from a security point of view, since they do not contain sensitive information.
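A typical local analysis session for the collected profile might look like this (`cpu.pprof` is the file saved by the `curl` command above; `top10` is one of pprof's standard interactive commands):

```console
go tool pprof cpu.pprof
(pprof) top10
```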
## Advanced usage

@@ -1192,9 +1383,10 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .

  Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 8388608)
-remoteWrite.maxDailySeries int
  The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
-remoteWrite.maxDiskUsagePerURL array
  The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500MB. Disk usage is unlimited if the value is set to 0
  Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB.
  Supports array of values separated by comma or specified via multiple flags.
-remoteWrite.maxHourlySeries int
  The maximum number of unique series vmagent can send to remote storage systems during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
-remoteWrite.maxRowsPerBlock int
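For illustration, a launch that combines the cardinality-limiter and disk-buffer flags above; the numeric thresholds are assumptions that should be tuned per workload:

```console
./bin/vmagent -remoteWrite.url=http://localhost:8428/api/v1/write \
       -remoteWrite.maxHourlySeries=100000 \
       -remoteWrite.maxDailySeries=300000 \
       -remoteWrite.maxDiskUsagePerURL=500MB
```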
@@ -335,6 +335,10 @@ groups:

  # Rules for accountID=456, projectID=789
```

The results of alerting and recording rules contain `vm_account_id` and `vm_project_id` labels
if `-clusterMode` is enabled. These labels can be used during [templating](https://docs.victoriametrics.com/vmalert.html#templating),
and help to identify to which account or project the triggered alert or produced recording belongs.

If `-clusterMode` is enabled, then `-datasource.url`, `-remoteRead.url` and `-remoteWrite.url` must
contain only the hostname without tenant id. For example: `-datasource.url=http://vmselect:8481`.
`vmalert` automatically adds the specified tenant to the urls for each recording rule in this case.
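A minimal sketch of such a setup; the hostnames and rules path are placeholders, and `-remoteWrite.url` is assumed to point at `vminsert`:

```console
./bin/vmalert -clusterMode \
       -datasource.url=http://vmselect:8481 \
       -remoteRead.url=http://vmselect:8481 \
       -remoteWrite.url=http://vminsert:8480 \
       -rule=/path/to/rules.yml
```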
@@ -814,8 +818,7 @@ The shortlist of configuration flags is the following:

-evaluationInterval duration
  How often to evaluate the rules (default 1m0s)
-external.alert.source string
  External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . For example, link to Grafana: -external.alert.source='explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]' . If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used
-external.label array
  Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
@@ -10,6 +10,7 @@ Supported storage systems for backups:

* [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
* [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<bucket>/<path/to/backup>`
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
* Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents storing the backup in the directory pointed to by the `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.

@@ -183,7 +184,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-

-customS3Endpoint string
  Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO). S3 is used if not set
-dst string
  Where to put the backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup/dir
  -dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded
-enableTCP6
  Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
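For example, a hypothetical run that always uploads to the same `-dst`, so every backup after the first one is incremental (the snapshot URL, bucket and paths are placeholders):

```console
./vmbackup -storageDataPath=/var/lib/victoria-metrics-data \
       -snapshot.createURL=http://localhost:8428/snapshot/create \
       -dst=gs://<bucket>/latest
```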
@@ -5,7 +5,6 @@ sort: 7

# vmrestore

`vmrestore` restores data from backups created by [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
VictoriaMetrics `v1.29.0` and newer versions must be used for working with the restored data.

Restore process can be interrupted at any time. It is automatically resumed from the interruption point
when restarting `vmrestore` with the same args.

@@ -14,19 +13,28 @@ when restarting `vmrestore` with the same args.

VictoriaMetrics must be stopped during the restore process.

Run the following command to restore backup from the given `-src` into the given `-storageDataPath`:

```console
vmrestore -src=<storageType>://<path/to/backup> -storageDataPath=<local/path/to/restore>
```

* `<storageType>://<path/to/backup>` is the path to the backup made with [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
  `vmrestore` can restore backups from the following storage types:
  * [GCS](https://cloud.google.com/storage/). Example: `-src=gs://<bucket>/<path/to/backup>`
  * [S3](https://aws.amazon.com/s3/). Example: `-src=s3://<bucket>/<path/to/backup>`
  * [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `-src=azblob://<bucket>/<path/to/backup>`
  * Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/)
    or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
  * Local filesystem. Example: `-src=fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents storing the backup
    in the directory pointed to by the `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.
* `<local/path/to/restore>` is the path to the folder where data will be restored. This folder must be passed
  to VictoriaMetrics in the `-storageDataPath` command-line flag after the restore process is complete.

The original `-storageDataPath` directory may contain old files. They will be substituted by the files from the backup,
i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/questions/476041/how-do-i-make-rsync-delete-files-that-have-been-deleted-from-the-source-folder).
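For instance, a hypothetical restore of a GCS backup into the default data directory (bucket and paths are placeholders):

```console
vmrestore -src=gs://<bucket>/<path/to/backup> \
       -storageDataPath=/var/lib/victoria-metrics-data
```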
## Troubleshooting

* If `vmrestore` eats all the network bandwidth, then set `-maxBytesPerSecond` to the desired value.
@@ -158,7 +166,7 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q

-skipBackupCompleteCheck
  Whether to skip checking for 'backup complete' file in -src. This may be useful for restoring from old backups, which were created without 'backup complete' file
-src string
  Source path with backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup
-storageDataPath string
  Destination path where backup must be restored. VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. In this case the contents of -storageDataPath dir is synchronized with -src contents, i.e. it works like 'rsync --delete' (default "victoria-metrics-data")
-tls
go.mod

@@ -4,6 +4,7 @@ go 1.19

require (
	cloud.google.com/go/storage v1.27.0
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.0
	github.com/VictoriaMetrics/fastcache v1.12.0

	// Do not use the original github.com/valyala/fasthttp because of issues

@@ -11,7 +12,10 @@ require (

	github.com/VictoriaMetrics/fasthttp v1.1.0
	github.com/VictoriaMetrics/metrics v1.22.2
	github.com/VictoriaMetrics/metricsql v0.45.0
	github.com/aws/aws-sdk-go-v2 v1.16.16
	github.com/aws/aws-sdk-go-v2/config v1.17.8
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.34
	github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11
	github.com/cespare/xxhash/v2 v2.1.2
	github.com/cheggaaa/pb/v3 v3.1.0
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect

@@ -26,24 +30,41 @@ require (

	github.com/oklog/ulid v1.3.1
	github.com/prometheus/common v0.37.0 // indirect
	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
	github.com/urfave/cli/v2 v2.17.1
	github.com/valyala/fastjson v1.6.3
	github.com/valyala/fastrand v1.1.0
	github.com/valyala/fasttemplate v1.2.1
	github.com/valyala/gozstd v1.17.0
	github.com/valyala/quicktemplate v1.7.0
	golang.org/x/net v0.0.0-20221004154528-8021a29435af
	golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1
	golang.org/x/sys v0.0.0-20221006211917-84dc82d7e875
	google.golang.org/api v0.98.0
	gopkg.in/yaml.v2 v2.4.0
)

require (
	cloud.google.com/go v0.104.0 // indirect
	cloud.google.com/go/compute v1.10.0 // indirect
	cloud.google.com/go/iam v0.5.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect
	github.com/VividCortex/ewma v1.2.0 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 // indirect
	github.com/aws/aws-sdk-go-v2/credentials v1.12.21 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 // indirect
	github.com/aws/smithy-go v1.13.3 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/go-kit/log v0.2.1 // indirect
	github.com/go-logfmt/logfmt v0.5.1 // indirect

@@ -51,7 +72,7 @@ require (

	github.com/golang/protobuf v1.5.2 // indirect
	github.com/google/go-cmp v0.5.9 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect
	github.com/mattn/go-isatty v0.0.16 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect

@@ -67,11 +88,11 @@ require (

	go.opencensus.io v0.23.0 // indirect
	go.uber.org/atomic v1.10.0 // indirect
	go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
	golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
	golang.org/x/text v0.3.7 // indirect
	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91 // indirect
	google.golang.org/grpc v1.50.0 // indirect
	google.golang.org/protobuf v1.28.1 // indirect
)
go.sum

@@ -50,8 +50,8 @@ cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOt

cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/iam v0.5.0 h1:fz9X5zyTWBmamZsqvqZqD7khbifcZF/q+Z1J8pfhIUg=
cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=

@@ -66,7 +66,15 @@ cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgy

cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible h1:+t2P1j1r5N6lYgPiiz7ZbEVZFkWjVe9WhHbMm0gg8hw=
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 h1:pqrAR74b6EoR4kcxF7L7Wg2B8Jgil9UUZtMvxhEFqWo=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 h1:XUNQ4mw+zJmaA2KXzP9JlQiecy1SI+Eog7xVkPiqIbg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.0 h1:fe+kSd9btgTTeHeUlMTyEsjoe6L/zd+Q61iWEMPwHmc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.0/go.mod h1:T7nxmZ9i42Dqy7kwnn8AZYNjqxd4TloKXdIbhosHSqo=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=

@@ -88,6 +96,7 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L

github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=

@@ -149,9 +158,45 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ

github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.105 h1:UUwoD1PRKIj3ltrDUYTDQj5fOTK3XsnqolLpRTMmSEM=
github.com/aws/aws-sdk-go v1.44.105/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk=
github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 h1:tcFliCWne+zOuUfKNRn8JdFBuWPDuISDH08wD2ULkhk=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8/go.mod h1:JTnlBSot91steJeti4ryyu/tLd4Sk84O5W22L7O2EQU=
github.com/aws/aws-sdk-go-v2/config v1.17.8 h1:b9LGqNnOdg9vR4Q43tBTVWk4J6F+W774MSchvKJsqnE=
github.com/aws/aws-sdk-go-v2/config v1.17.8/go.mod h1:UkCI3kb0sCdvtjiXYiU4Zx5h07BOpgBTtkPu/49r+kA=
github.com/aws/aws-sdk-go-v2/credentials v1.12.21 h1:4tjlyCD0hRGNQivh5dN8hbP30qQhMLBE/FgQR1vHHWM=
github.com/aws/aws-sdk-go-v2/credentials v1.12.21/go.mod h1:O+4XyAt4e+oBAoIwNUYkRg3CVMscaIJdmZBOcPgJ8D8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 h1:r08j4sbZu/RVi+BNxkBJwPMUYY3P8mgSDuKkZ/ZN1lE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.34 h1:1PNtaCM+2ruo1dfYL2RweUdtbuPvinjAejjNcPa/RQY=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.34/go.mod h1:+Six+CXNHYllXam32j+YW8ixk82+am345ei89kEz8p4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 h1:s4g/wnzMf+qepSNgTvaQQHNxyMLKSawNhKCPNy++2xY=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 h1:/K482T5A3623WJgWT8w1yRAFK4RzGzEl7y39yhtn9eA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 h1:wj5Rwc05hvUSvKuOF29IYb9QrCLjU+rHAy/x/o0DK2c=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14 h1:ZSIPAkAsCCjYrhqfw2+lNzWDzxzHXEckFkTePL5RSWQ=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14/go.mod h1:AyGgqiKv9ECM6IZeNQtdT8NnMvUb3/2wokeq2Fgryto=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9 h1:Lh1AShsuIJTwMkoxVCAYPJgNG5H+eN6SmoUn8nOZ5wE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9/go.mod h1:a9j48l6yL5XINLHLcOKInjdvknN+vWqPBxqeIDw7ktw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18 h1:BBYoNQt2kUZUUK4bIPsKrCcjVPUMNsgQpNAwhznK/zo=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18/go.mod h1:NS55eQ4YixUJPTC+INxi2/jCqe1y2Uw3rnh9wEOVJxY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 h1:Jrd/oMh0PKQc6+BowB+pLEwLIgaQF29eYbe7E1Av9Ug=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17 h1:HfVVR1vItaG6le+Bpw6P4midjBDMKnjMyZnw9MXYUcE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17/go.mod h1:YqMdV+gEKCQ59NrB7rzrJdALeBIsYiVi8Inj3+KcqHI=
github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11 h1:3/gm/JTX9bX8CpzTgIlrtYpB3EVBDxyg/GY/QdcIEZw=
github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11/go.mod h1:fmgDANqTUCxciViKl9hb/zD5LFbvPINFRgWhDbR+vZo=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 h1:pwvCchFUEnlceKIgPUouBJwK81aCkQ8UDMORfeFtW10=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 h1:OwhhKc1P9ElfWbMKPIbMMZBV6hzJlL2JKD76wNNVzgQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 h1:9pPi0PsFNAGILFfPCk8Y0iyEBGc6lu6OQ97U7hmdesg=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM=
github.com/aws/smithy-go v1.13.3 h1:l7LYxGuzK6/K+NzJ2mC+VvLUbae0sL3bXU//04MkmnA=
github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=

@@ -205,6 +250,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm

github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.52.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=

@@ -376,6 +422,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a

github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=

@@ -468,8 +515,9 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+

github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=

@@ -595,6 +643,7 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=

github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=

@@ -712,6 +761,7 @@ github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHu

github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=

@@ -824,8 +874,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV

github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=

@@ -834,8 +884,8 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW

github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.17.1 h1:UzjDEw2dJQUE3iRaiNQ1VrVFbyAtKGH3VdkMoHA58V0=
github.com/urfave/cli/v2 v2.17.1/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=

@@ -917,6 +967,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh

golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=

@@ -1014,8 +1065,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su

golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20221004154528-8021a29435af h1:wv66FM3rLZGPdxpYL+ApnDe2HzHcTFta3z5nsc13wI4=
golang.org/x/net v0.0.0-20221004154528-8021a29435af/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -1037,8 +1088,8 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j

golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1 h1:3VPzK7eqH25j7GYw5w6g/GzNRc0/fYtrxz27z1gD4W0=
golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@@ -1052,8 +1103,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ

golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -1149,8 +1200,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc

golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20221006211917-84dc82d7e875 h1:AzgQNqF+FKwyQ5LbVrVqOcuuFB67N47F9+htZYH0wFM=
golang.org/x/sys v0.0.0-20221006211917-84dc82d7e875/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -1297,8 +1348,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69

google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
google.golang.org/api v0.98.0 h1:yxZrcxXESimy6r6mdL5Q6EnZwmewDJK2dVg3g75s5Dg=
google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@@ -1390,8 +1441,8 @@ google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP

google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91 h1:Ezh2cpcnP5Rq60sLensUsFnxh7P6513NLvNtCm9iyJ4=
google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

@@ -1428,8 +1479,8 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11

google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.50.0 h1:fPVVDxY9w++VjTZsYvXWqEf9Rqar/e+9zYfxKK+W+YU=
google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -53,11 +53,9 @@ func NewConfig(ec2Endpoint, stsEndpoint, region, roleARN, accessKey, secretKey,

		defaultAccessKey: os.Getenv("AWS_ACCESS_KEY_ID"),
		defaultSecretKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
	}
	cfg.service = service
	if cfg.service == "" {
		cfg.service = "aps"
	}
	cfg.region = region
	if cfg.region == "" {
		r, err := getDefaultRegion(cfg.client)
		if err != nil {

@@ -75,8 +73,6 @@ func NewConfig(ec2Endpoint, stsEndpoint, region, roleARN, accessKey, secretKey,

		return nil, fmt.Errorf("roleARN is missing for AWS_WEB_IDENTITY_TOKEN_FILE=%q; set it via env var AWS_ROLE_ARN", cfg.webTokenPath)
	}
	// explicitly set credentials have priority over env variables
	if len(accessKey) > 0 {
		cfg.defaultAccessKey = accessKey
	}

@@ -90,6 +86,11 @@ func NewConfig(ec2Endpoint, stsEndpoint, region, roleARN, accessKey, secretKey,

	return cfg, nil
}

// GetRegion returns region for the given cfg.
func (cfg *Config) GetRegion() string {
	return cfg.region
}

// GetEC2APIResponse performs EC2 API request with the given action.
//
// filtersQueryString must contain an optional percent-encoded query string for aws filters.
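A tiny usage sketch for the accessor added above; it assumes `cfg` is a `*Config` returned by `NewConfig` in this package:

```go
// cfg is assumed to come from NewConfig(...); GetRegion simply exposes
// the region resolved during config construction.
region := cfg.GetRegion()
fmt.Printf("resolved AWS region: %q\n", region)
```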
@@ -7,6 +7,7 @@ import (

	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/azremote"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fsremote"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/gcsremote"

@@ -183,7 +184,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {

	}
	n := strings.Index(path, "://")
	if n < 0 {
		return nil, fmt.Errorf("Missing scheme in path %q. Supported schemes: `gs://`, `s3://`, `azblob://`, `fs://`", path)
	}
	scheme := path[:n]
	dir := path[n+len("://"):]

@@ -212,6 +213,21 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {

			return nil, fmt.Errorf("cannot initialize connection to gcs: %w", err)
		}
		return fs, nil
	case "azblob":
		n := strings.Index(dir, "/")
		if n < 0 {
			return nil, fmt.Errorf("missing directory on the AZBlob container %q", dir)
		}
		bucket := dir[:n]
		dir = dir[n:]
		fs := &azremote.FS{
			Container: bucket,
			Dir:       dir,
		}
		if err := fs.Init(); err != nil {
			return nil, fmt.Errorf("cannot initialize connection to AZBlob: %w", err)
		}
		return fs, nil
	case "s3":
		n := strings.Index(dir, "/")
		if n < 0 {
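A hypothetical caller exercising the new `azblob` scheme through `NewRemoteFS` (the container and directory names are made up; error handling is only sketched):

```go
// assumes this runs in the same package as NewRemoteFS
fs, err := NewRemoteFS("azblob://mycontainer/backups/daily")
if err != nil {
	return fmt.Errorf("cannot initialize remote fs: %w", err)
}
fmt.Printf("using remote fs: %s\n", fs)
```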
lib/backup/azremote/azblob.go (new file)

@@ -0,0 +1,303 @@

package azremote

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fscommon"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

const (
	envStorageAcctName = "AZURE_STORAGE_ACCOUNT_NAME"
	envStorageAccKey   = "AZURE_STORAGE_ACCOUNT_KEY"
	envStorageAccCs    = "AZURE_STORAGE_ACCOUNT_CONNECTION_STRING"

	storageErrorCodeBlobNotFound = "BlobNotFound"
)

// FS represents filesystem for backups in Azure Blob Storage.
//
// Init must be called before calling other FS methods.
type FS struct {
	// Azure Blob Storage container to use.
	Container string

	// Directory in the container to write to.
	Dir string

	client *container.Client
}

// Init initializes fs.
//
// The returned fs must be stopped when no longer needed with MustStop call.
func (fs *FS) Init() error {
	if fs.client != nil {
		logger.Panicf("BUG: fs.Init has been already called")
	}

	for strings.HasPrefix(fs.Dir, "/") {
		fs.Dir = fs.Dir[1:]
	}
	if !strings.HasSuffix(fs.Dir, "/") {
		fs.Dir += "/"
	}

	var sc *service.Client
	var err error
	if cs, ok := os.LookupEnv(envStorageAccCs); ok {
		sc, err = service.NewClientFromConnectionString(cs, nil)
		if err != nil {
			return fmt.Errorf("failed to create AZBlob service client from connection string: %w", err)
		}
	}

	accountName, ok1 := os.LookupEnv(envStorageAcctName)
	accountKey, ok2 := os.LookupEnv(envStorageAccKey)
	if ok1 && ok2 {
		creds, err := azblob.NewSharedKeyCredential(accountName, accountKey)
		if err != nil {
			return fmt.Errorf("failed to create AZBlob credentials from account name and key: %w", err)
		}
		serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)

		sc, err = service.NewClientWithSharedKeyCredential(serviceURL, creds, nil)
		if err != nil {
			return fmt.Errorf("failed to create AZBlob service client from account name and key: %w", err)
		}
	}

	if sc == nil {
		return fmt.Errorf(`failed to detect any credentials type for AZBlob. Ensure there is connection string set at %q, or shared key at %q and %q`, envStorageAccCs, envStorageAcctName, envStorageAccKey)
	}

	containerClient := sc.NewContainerClient(fs.Container)
	fs.client = containerClient

	return nil
}

// MustStop stops fs.
func (fs *FS) MustStop() {
	fs.client = nil
}

// String returns human-readable description for fs.
func (fs *FS) String() string {
	return fmt.Sprintf("AZBlob{container: %q, dir: %q}", fs.Container, fs.Dir)
}

// ListParts returns all the parts for fs.
func (fs *FS) ListParts() ([]common.Part, error) {
	dir := fs.Dir
	ctx := context.Background()

	opts := &azblob.ListBlobsFlatOptions{
		Prefix: &dir,
	}

	pager := fs.client.NewListBlobsFlatPager(opts)
	var parts []common.Part
	for pager.More() {
		resp, err := pager.NextPage(ctx)
		if err != nil {
			return nil, fmt.Errorf("cannot list blobs at %s (remote path %q): %w", fs, fs.Container, err)
		}

		for _, v := range resp.Segment.BlobItems {
			file := *v.Name
			if !strings.HasPrefix(file, dir) {
				return nil, fmt.Errorf("unexpected prefix for AZBlob key %q; want %q", file, dir)
			}
			if fscommon.IgnorePath(file) {
				continue
			}
			var p common.Part
			if !p.ParseFromRemotePath(file[len(dir):]) {
				logger.Errorf("skipping unknown object %q", file)
				continue
			}

			p.ActualSize = uint64(*v.Properties.ContentLength)
			parts = append(parts, p)
		}
	}

	return parts, nil
}

// DeletePart deletes part p from fs.
func (fs *FS) DeletePart(p common.Part) error {
	bc := fs.clientForPart(p)
	ctx := context.Background()
	if _, err := bc.Delete(ctx, &blob.DeleteOptions{}); err != nil {
		return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
	}
	return nil
}

// RemoveEmptyDirs recursively removes empty dirs in fs.
func (fs *FS) RemoveEmptyDirs() error {
	// Blob storage has no directories, so nothing to remove.
	return nil
}

// CopyPart copies p from srcFS to fs.
func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
	src, ok := srcFS.(*FS)
	if !ok {
		return fmt.Errorf("cannot perform server-side copying from %s to %s: both of them must be AZBlob", srcFS, fs)
	}

	sbc := src.client.NewBlobClient(p.RemotePath(src.Dir))
	dbc := fs.clientForPart(p)

	ssCopyPermission := sas.BlobPermissions{
		Read:   true,
		Create: true,
		Write:  true,
	}

	t, err := sbc.GetSASURL(ssCopyPermission, time.Now().Add(-10*time.Minute), time.Now().Add(30*time.Minute))
	if err != nil {
		return fmt.Errorf("failed to generate SAS token of src %q: %w", p.Path, err)
	}

	// Hotfix for SDK issue: https://github.com/Azure/azure-sdk-for-go/issues/19245
	t = strings.Replace(t, "/?", "?", -1)
	ctx := context.Background()
	_, err = dbc.CopyFromURL(ctx, t, &blob.CopyFromURLOptions{})
	if err != nil {
		return fmt.Errorf("cannot copy %q from %s to %s: %w", p.Path, src, fs, err)
	}

	return nil
}

// DownloadPart downloads part p from fs to w.
func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
	bc := fs.clientForPart(p)

	ctx := context.Background()
	r, err := bc.DownloadStream(ctx, &blob.DownloadStreamOptions{})
	if err != nil {
		return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
	}

	body := r.NewRetryReader(ctx, &azblob.RetryReaderOptions{})
	n, err := io.Copy(w, body)
	if err1 := body.Close(); err1 != nil && err == nil {
		err = err1
	}
	if err != nil {
		return fmt.Errorf("cannot download %q from %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
	}
	if uint64(n) != p.Size {
		return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
	}
	return nil
}

// UploadPart uploads part p from r to fs.
func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
	bc := fs.clientForPart(p)

	ctx := context.Background()
	_, err := bc.UploadStream(ctx, r, &blockblob.UploadStreamOptions{})
	if err != nil {
		return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
	}

	return nil
}

func (fs *FS) clientForPart(p common.Part) *blockblob.Client {
	path := p.RemotePath(fs.Dir)
	return fs.clientForPath(path)
}

func (fs *FS) clientForPath(path string) *blockblob.Client {
	bc := fs.client.NewBlockBlobClient(path)
	return bc
}

// DeleteFile deletes filePath at fs if it exists.
//
// The function does nothing if the filePath doesn't exist.
func (fs *FS) DeleteFile(filePath string) error {
	v, err := fs.HasFile(filePath)
	if err != nil {
		return err
	}
	if !v {
		return nil
	}

	path := fs.Dir + filePath
	bc := fs.clientForPath(path)

	ctx := context.Background()
	if _, err := bc.Delete(ctx, nil); err != nil {
		return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, bc.URL(), err)
	}
	return nil
}

// CreateFile creates filePath at fs and puts data into it.
//
// The file is overwritten if it exists.
func (fs *FS) CreateFile(filePath string, data []byte) error {
	path := fs.Dir + filePath
	bc := fs.clientForPath(path)

	ctx := context.Background()
	_, err := bc.UploadBuffer(ctx, data, &blockblob.UploadBufferOptions{
		Concurrency: 1,
	})
	if err != nil {
		return fmt.Errorf("cannot upload %d bytes to %q at %s (remote path %q): %w", len(data), filePath, fs, bc.URL(), err)
	}

	return nil
}

// HasFile returns true if filePath exists at fs.
func (fs *FS) HasFile(filePath string) (bool, error) {
	path := fs.Dir + filePath

	bc := fs.clientForPath(path)

	ctx := context.Background()
	_, err := bc.GetProperties(ctx, nil)
	var azerr *azcore.ResponseError
	if errors.As(err, &azerr) {
		if azerr.ErrorCode == storageErrorCodeBlobNotFound {
			return false, nil
		}
		return false, fmt.Errorf("unexpected error when obtaining properties for %q at %s (remote path %q): %w", filePath, fs, bc.URL(), err)
	}

	return true, nil
}
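Going by the environment variables read in `Init` above, a shared-key setup for an `azblob://` destination could look like this (the account name, key, container and paths are placeholders):

```console
export AZURE_STORAGE_ACCOUNT_NAME=myaccount
export AZURE_STORAGE_ACCOUNT_KEY=<base64-encoded-key>
./vmbackup -storageDataPath=/var/lib/victoria-metrics-data \
       -snapshot.createURL=http://localhost:8428/snapshot/create \
       -dst=azblob://<container>/<path/to/backup>
```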
@@ -3,19 +3,18 @@ package s3remote
import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fscommon"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// FS represents filesystem for backups in S3.
@@ -43,8 +42,8 @@ type FS struct {
	// The name of S3 config profile to use.
	ProfileName string

	s3       *s3.S3
	uploader *s3manager.Uploader
	s3       *s3.Client
	uploader *manager.Uploader
}

// Init initializes fs.
@@ -60,46 +59,46 @@ func (fs *FS) Init() error {
	if !strings.HasSuffix(fs.Dir, "/") {
		fs.Dir += "/"
	}
	opts := session.Options{
		SharedConfigState: session.SharedConfigEnable,
		Profile:           fs.ProfileName,
	configOpts := []func(*config.LoadOptions) error{
		config.WithSharedConfigProfile(fs.ProfileName),
	}

	if len(fs.CredsFilePath) > 0 {
		opts.SharedConfigFiles = []string{
		configOpts = append(configOpts, config.WithSharedConfigFiles([]string{
			fs.ConfigFilePath,
			fs.CredsFilePath,
		}
		}))
	}
	sess, err := session.NewSessionWithOptions(opts)

	cfg, err := config.LoadDefaultConfig(context.TODO(),
		configOpts...,
	)
	if err != nil {
		return fmt.Errorf("cannot create S3 session: %w", err)
		return fmt.Errorf("cannot load S3 config: %w", err)
	}
	var outerErr error
	fs.s3 = s3.NewFromConfig(cfg, func(o *s3.Options) {
		if len(fs.CustomEndpoint) > 0 {
			logger.Infof("Using provided custom S3 endpoint: %q", fs.CustomEndpoint)
			o.UsePathStyle = fs.S3ForcePathStyle
			o.EndpointResolver = s3.EndpointResolverFromURL(fs.CustomEndpoint)
		} else {
			region, err := manager.GetBucketRegion(context.Background(), s3.NewFromConfig(cfg), fs.Bucket)
			if err != nil {
				outerErr = fmt.Errorf("cannot determine region for bucket %q: %w", fs.Bucket, err)
				return
			}

			o.Region = region
			logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)
		}
	})

	if outerErr != nil {
		return outerErr
	}

	if len(fs.CustomEndpoint) > 0 {
		// Use provided custom endpoint for S3
		logger.Infof("Using provided custom S3 endpoint: %q", fs.CustomEndpoint)
		// hack for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1449
		if sess.Config.Region == nil || *sess.Config.Region == "" {
			logger.Infof("Region is not defined for custom S3 endpoint, using `us-east-1` as default")
			sess.Config.WithRegion("us-east-1")
		}
		sess.Config.WithEndpoint(fs.CustomEndpoint)

		// Disable prefixing endpoint with bucket name
		sess.Config.WithS3ForcePathStyle(fs.S3ForcePathStyle)
	} else {
		// Determine bucket region.
		ctx := context.Background()
		region, err := s3manager.GetBucketRegion(ctx, sess, fs.Bucket, "us-west-2")
		if err != nil {
			return fmt.Errorf("cannot determine region for bucket %q: %w", fs.Bucket, err)
		}
		sess.Config.WithRegion(region)
		logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)
	}

	fs.s3 = s3.New(sess)
	fs.uploader = s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
	fs.uploader = manager.NewUploader(fs.s3, func(u *manager.Uploader) {
		// We manage upload concurrency by ourselves.
		u.Concurrency = 1
	})
@@ -120,18 +119,24 @@ func (fs *FS) String() string {
// ListParts returns all the parts for fs.
func (fs *FS) ListParts() ([]common.Part, error) {
	dir := fs.Dir
	input := &s3.ListObjectsV2Input{

	var parts []common.Part

	paginator := s3.NewListObjectsV2Paginator(fs.s3, &s3.ListObjectsV2Input{
		Bucket: aws.String(fs.Bucket),
		Prefix: aws.String(dir),
	}
	var errOuter error
	var parts []common.Part
	err := fs.s3.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
	})

	for paginator.HasMorePages() {
		page, err := paginator.NextPage(context.TODO())
		if err != nil {
			return nil, fmt.Errorf("unexpected pagination error: %w", err)
		}

		for _, o := range page.Contents {
			file := *o.Key
			if !strings.HasPrefix(file, dir) {
				errOuter = fmt.Errorf("unexpected prefix for s3 key %q; want %q", file, dir)
				return false
				return nil, fmt.Errorf("unexpected prefix for s3 key %q; want %q", file, dir)
			}
			if fscommon.IgnorePath(file) {
				continue
@@ -141,17 +146,13 @@ func (fs *FS) ListParts() ([]common.Part, error) {
				logger.Infof("skipping unknown object %q", file)
				continue
			}
			p.ActualSize = uint64(*o.Size)

			p.ActualSize = uint64(o.Size)
			parts = append(parts, p)
		}
		return !lastPage
	})
	if errOuter != nil && err == nil {
		err = errOuter
	}
	if err != nil {
		return nil, fmt.Errorf("error when listing s3 objects inside dir %q: %w", dir, err)

	}

	return parts, nil
}
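The rewritten `ListParts` above also switches pagination style: the v1 `ListObjectsV2Pages` callback, which forced errors out through the `errOuter` variable, becomes an explicit paginator loop that can return errors directly. A minimal sketch of the v2 pattern, assuming only a configured `*s3.Client`:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listKeys returns all object keys under the given prefix,
// fetching pages until the paginator is exhausted.
func listKeys(ctx context.Context, client *s3.Client, bucket, prefix string) ([]string, error) {
	var keys []string
	paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			return nil, fmt.Errorf("cannot list objects in %q: %w", bucket, err)
		}
		for _, o := range page.Contents {
			keys = append(keys, *o.Key)
		}
	}
	return keys, nil
}
```

Being able to return straight from the loop body is what lets the new code drop the `errOuter`/`return false` plumbing.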
@@ -162,7 +163,7 @@ func (fs *FS) DeletePart(p common.Part) error {
		Bucket: aws.String(fs.Bucket),
		Key:    aws.String(path),
	}
	_, err := fs.s3.DeleteObject(input)
	_, err := fs.s3.DeleteObject(context.Background(), input)
	if err != nil {
		return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, path, err)
	}
@@ -190,7 +191,7 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
		CopySource: aws.String(copySource),
		Key:        aws.String(dstPath),
	}
	_, err := fs.s3.CopyObject(input)
	_, err := fs.s3.CopyObject(context.Background(), input)
	if err != nil {
		return fmt.Errorf("cannot copy %q from %s to %s (copySource %q): %w", p.Path, src, fs, copySource, err)
	}
@@ -204,7 +205,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
		Bucket: aws.String(fs.Bucket),
		Key:    aws.String(path),
	}
	o, err := fs.s3.GetObject(input)
	o, err := fs.s3.GetObject(context.Background(), input)
	if err != nil {
		return fmt.Errorf("cannot open %q at %s (remote path %q): %w", p.Path, fs, path, err)
	}
@@ -228,12 +229,12 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
	sr := &statReader{
		r: r,
	}
	input := &s3manager.UploadInput{
	input := &s3.PutObjectInput{
		Bucket: aws.String(fs.Bucket),
		Key:    aws.String(path),
		Body:   sr,
	}
	_, err := fs.uploader.Upload(input)
	_, err := fs.uploader.Upload(context.Background(), input)
	if err != nil {
		return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", p.Path, fs, path, err)
	}
@@ -265,7 +266,7 @@ func (fs *FS) DeleteFile(filePath string) error {
		Bucket: aws.String(fs.Bucket),
		Key:    aws.String(path),
	}
	if _, err := fs.s3.DeleteObject(input); err != nil {
	if _, err := fs.s3.DeleteObject(context.Background(), input); err != nil {
		return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, path, err)
	}
	return nil
@@ -279,12 +280,12 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
	sr := &statReader{
		r: bytes.NewReader(data),
	}
	input := &s3manager.UploadInput{
	input := &s3.PutObjectInput{
		Bucket: aws.String(fs.Bucket),
		Key:    aws.String(path),
		Body:   sr,
	}
	_, err := fs.uploader.Upload(input)
	_, err := fs.uploader.Upload(context.Background(), input)
	if err != nil {
		return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", filePath, fs, path, err)
	}
@@ -302,10 +303,9 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
		Bucket: aws.String(fs.Bucket),
		Key:    aws.String(path),
	}
	o, err := fs.s3.GetObject(input)
	o, err := fs.s3.GetObject(context.Background(), input)
	if err != nil {
		var ae awserr.Error
		if errors.As(err, &ae) && ae.Code() == s3.ErrCodeNoSuchKey {
		if strings.Contains(err.Error(), "NoSuchKey") {
			return false, nil
		}
		return false, fmt.Errorf("cannot open %q at %s (remote path %q): %w", filePath, fs, path, err)
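The whole v1-to-v2 migration in this file reduces to a few building blocks: `config.LoadDefaultConfig` with functional options instead of `session.NewSessionWithOptions`, `s3.NewFromConfig` with per-client overrides, and context-first call signatures on every operation. A condensed sketch of client construction under those APIs (the profile and endpoint values are illustrative placeholders; `s3.EndpointResolverFromURL` is the resolver helper available in the SDK version used by this commit):

```go
func newS3Client(ctx context.Context, profile, customEndpoint string) (*s3.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithSharedConfigProfile(profile),
	)
	if err != nil {
		return nil, fmt.Errorf("cannot load S3 config: %w", err)
	}
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		if customEndpoint != "" {
			// Route all requests to a custom S3-compatible endpoint
			// and keep the bucket name in the path, not the hostname.
			o.UsePathStyle = true
			o.EndpointResolver = s3.EndpointResolverFromURL(customEndpoint)
		}
	})
	return client, nil
}
```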
@@ -8,19 +8,17 @@ import (
	"time"
)

// NewArray returns new Array with the given name and description.
func NewArray(name, description string) *Array {
	description += "\nSupports an `array` of values separated by comma" +
		" or specified via multiple flags."
	var a Array
// NewArrayString returns new ArrayString with the given name and description.
func NewArrayString(name, description string) *ArrayString {
	description += "\nSupports an `array` of values separated by comma or specified via multiple flags."
	var a ArrayString
	flag.Var(&a, name, description)
	return &a
}

// NewArrayDuration returns new ArrayDuration with the given name and description.
func NewArrayDuration(name, description string) *ArrayDuration {
	description += "\nSupports `array` of values separated by comma" +
		" or specified via multiple flags."
	description += "\nSupports `array` of values separated by comma or specified via multiple flags."
	var a ArrayDuration
	flag.Var(&a, name, description)
	return &a
@@ -28,23 +26,30 @@ func NewArrayDuration(name, description string) *ArrayDuration {

// NewArrayBool returns new ArrayBool with the given name and description.
func NewArrayBool(name, description string) *ArrayBool {
	description += "\nSupports `array` of values separated by comma" +
		" or specified via multiple flags."
	description += "\nSupports `array` of values separated by comma or specified via multiple flags."
	var a ArrayBool
	flag.Var(&a, name, description)
	return &a
}

// NewArrayInt returns new ArrayInt with the given name and description.
func NewArrayInt(name string, description string) *ArrayInt {
	description += "\nSupports `array` of values separated by comma" +
		" or specified via multiple flags."
func NewArrayInt(name, description string) *ArrayInt {
	description += "\nSupports `array` of values separated by comma or specified via multiple flags."
	var a ArrayInt
	flag.Var(&a, name, description)
	return &a
}

// Array is a flag that holds an array of values.
// NewArrayBytes returns new ArrayBytes with the given name and description.
func NewArrayBytes(name, description string) *ArrayBytes {
	description += "\nSupports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB."
	description += "\nSupports `array` of values separated by comma or specified via multiple flags."
	var a ArrayBytes
	flag.Var(&a, name, description)
	return &a
}

// ArrayString is a flag that holds an array of strings.
//
// It may be set either by specifying multiple flags with the given name
// passed to NewArray or by joining flag values by comma.
@@ -57,10 +62,10 @@ func NewArrayInt(name string, description string) *ArrayInt {
// Flag values may be quoted. For instance, the following arg creates an array of ("a", "b, c") items:
//
//	-foo='a,"b, c"'
type Array []string
type ArrayString []string

// String implements flag.Value interface
func (a *Array) String() string {
func (a *ArrayString) String() string {
	aEscaped := make([]string, len(*a))
	for i, v := range *a {
		if strings.ContainsAny(v, `", `+"\n") {
@@ -72,7 +77,7 @@ func (a *Array) String() string {
}

// Set implements flag.Value interface
func (a *Array) Set(value string) error {
func (a *ArrayString) Set(value string) error {
	values := parseArrayValues(value)
	*a = append(*a, values...)
	return nil
@@ -141,7 +146,7 @@ func getNextArrayValue(s string) (string, string) {
}

// GetOptionalArg returns optional arg under the given argIdx.
func (a *Array) GetOptionalArg(argIdx int) string {
func (a *ArrayString) GetOptionalArg(argIdx int) string {
	x := *a
	if argIdx >= len(x) {
		if len(x) == 1 {
@@ -153,7 +158,8 @@ func (a *Array) GetOptionalArg(argIdx int) string {
}

// ArrayBool is a flag that holds an array of boolean values.
// have the same api as Array.
//
// Has the same api as ArrayString.
type ArrayBool []bool

// IsBoolFlag implements flag.IsBoolFlag interface
@@ -194,7 +200,8 @@ func (a *ArrayBool) GetOptionalArg(argIdx int) bool {
}

// ArrayDuration is a flag that holds an array of time.Duration values.
// have the same api as Array.
//
// Has the same api as ArrayString.
type ArrayDuration []time.Duration

// String implements flag.Value interface
@@ -233,6 +240,8 @@ func (a *ArrayDuration) GetOptionalArgOrDefault(argIdx int, defaultValue time.Du
}

// ArrayInt is flag that holds an array of ints.
//
// Has the same api as ArrayString.
type ArrayInt []int

// String implements flag.Value interface
@@ -259,7 +268,7 @@ func (a *ArrayInt) Set(value string) error {
}

// GetOptionalArgOrDefault returns optional arg under the given argIdx.
func (a *ArrayInt) GetOptionalArgOrDefault(argIdx int, defaultValue int) int {
func (a *ArrayInt) GetOptionalArgOrDefault(argIdx, defaultValue int) int {
	x := *a
	if argIdx < len(x) {
		return x[argIdx]
@@ -269,3 +278,43 @@ func (a *ArrayInt) GetOptionalArgOrDefault(argIdx int, defaultValue int) int {
	}
	return defaultValue
}

// ArrayBytes is flag that holds an array of Bytes.
//
// Has the same api as ArrayString.
type ArrayBytes []*Bytes

// String implements flag.Value interface
func (a *ArrayBytes) String() string {
	x := *a
	formattedBytes := make([]string, len(x))
	for i, v := range x {
		formattedBytes[i] = v.String()
	}
	return strings.Join(formattedBytes, ",")
}

// Set implements flag.Value interface
func (a *ArrayBytes) Set(value string) error {
	values := parseArrayValues(value)
	for _, v := range values {
		var b Bytes
		if err := b.Set(v); err != nil {
			return err
		}
		*a = append(*a, &b)
	}
	return nil
}

// GetOptionalArgOrDefault returns optional arg under the given argIdx.
func (a *ArrayBytes) GetOptionalArgOrDefault(argIdx, defaultValue int) int {
	x := *a
	if argIdx < len(x) {
		return x[argIdx].N
	}
	if len(x) == 1 {
		return x[0].N
	}
	return defaultValue
}
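Every constructor above follows the same shape: append a standard "comma-separated or repeated flag" note to the description, then register the value via `flag.Var`. A short usage sketch with invented flag names; `-endpoints=a,b` and `-endpoints=a -endpoints=b` both yield the same two-element array:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)

var (
	endpoints = flagutil.NewArrayString("endpoints", "Comma-separated list of endpoints")
	bufSizes  = flagutil.NewArrayBytes("bufSize", "Per-endpoint buffer size")
)

func main() {
	flag.Parse()
	for i, addr := range *endpoints {
		// Falls back to the single configured size, or to 1MiB if unset.
		n := bufSizes.GetOptionalArgOrDefault(i, 1024*1024)
		fmt.Printf("endpoint=%q bufSize=%d\n", addr, n)
	}
}
```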
@@ -9,21 +9,24 @@ import (
)

var (
	fooFlag         Array
	fooFlagString   ArrayString
	fooFlagDuration ArrayDuration
	fooFlagBool     ArrayBool
	fooFlagInt      ArrayInt
	fooFlagBytes    ArrayBytes
)

func init() {
	os.Args = append(os.Args, "--fooFlag=foo", "--fooFlag=bar")
	os.Args = append(os.Args, "--fooFlagDuration=10s", "--fooFlagDuration=5m")
	os.Args = append(os.Args, "--fooFlagBool=true", "--fooFlagBool=false,true", "--fooFlagBool")
	os.Args = append(os.Args, "--fooFlagInt=1", "--fooFlagInt=2,3")
	flag.Var(&fooFlag, "fooFlag", "test")
	os.Args = append(os.Args, "-fooFlagString=foo", "-fooFlagString=bar")
	os.Args = append(os.Args, "-fooFlagDuration=10s", "-fooFlagDuration=5m")
	os.Args = append(os.Args, "-fooFlagBool=true", "-fooFlagBool=false,true", "-fooFlagBool")
	os.Args = append(os.Args, "-fooFlagInt=1", "-fooFlagInt=2,3")
	os.Args = append(os.Args, "-fooFlagBytes=10MB", "-fooFlagBytes=23,10kib")
	flag.Var(&fooFlagString, "fooFlagString", "test")
	flag.Var(&fooFlagDuration, "fooFlagDuration", "test")
	flag.Var(&fooFlagBool, "fooFlagBool", "test")
	flag.Var(&fooFlagInt, "fooFlagInt", "test")
	flag.Var(&fooFlagBytes, "fooFlagBytes", "test")
}

func TestMain(m *testing.M) {
@@ -31,25 +34,20 @@ func TestMain(m *testing.M) {
	os.Exit(m.Run())
}

func TestArray(t *testing.T) {
	expected := []string{
func TestArrayString(t *testing.T) {
	expected := ArrayString{
		"foo",
		"bar",
	}
	if len(expected) != len(fooFlag) {
		t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected))
	}
	for i, v := range fooFlag {
		if v != expected[i] {
			t.Errorf("unexpected item in array %q", v)
		}
	if !reflect.DeepEqual(expected, fooFlagString) {
		t.Fatalf("unexpected flag values; got\n%q\nwant\n%q", fooFlagString, expected)
	}
}

func TestArraySet(t *testing.T) {
func TestArrayString_Set(t *testing.T) {
	f := func(s string, expectedValues []string) {
		t.Helper()
		var a Array
		var a ArrayString
		_ = a.Set(s)
		if !reflect.DeepEqual([]string(a), expectedValues) {
			t.Fatalf("unexpected values parsed;\ngot\n%q\nwant\n%q", a, expectedValues)
@@ -66,10 +64,10 @@ func TestArraySet(t *testing.T) {
	f(`,fo,"\"b, a'\\",,r,`, []string{``, `fo`, `"b, a'\`, ``, `r`, ``})
}

func TestArrayGetOptionalArg(t *testing.T) {
func TestArrayString_GetOptionalArg(t *testing.T) {
	f := func(s string, argIdx int, expectedValue string) {
		t.Helper()
		var a Array
		var a ArrayString
		_ = a.Set(s)
		v := a.GetOptionalArg(argIdx)
		if v != expectedValue {
@@ -85,10 +83,10 @@ func TestArrayGetOptionalArg(t *testing.T) {
	f("foo,bar", 2, "")
}

func TestArrayString(t *testing.T) {
func TestArrayString_String(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var a Array
		var a ArrayString
		_ = a.Set(s)
		result := a.String()
		if result != s {
@@ -105,21 +103,16 @@ func TestArrayString(t *testing.T) {
}

func TestArrayDuration(t *testing.T) {
	expected := []time.Duration{
	expected := ArrayDuration{
		time.Second * 10,
		time.Minute * 5,
	}
	if len(expected) != len(fooFlagDuration) {
		t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected))
	}
	for i, v := range fooFlagDuration {
		if v != expected[i] {
			t.Errorf("unexpected item in array %s", v)
		}
	if !reflect.DeepEqual(expected, fooFlagDuration) {
		t.Fatalf("unexpected flag values; got\n%s\nwant\n%s", fooFlagDuration, expected)
	}
}

func TestArrayDurationSet(t *testing.T) {
func TestArrayDuration_Set(t *testing.T) {
	f := func(s string, expectedValues []time.Duration) {
		t.Helper()
		var a ArrayDuration
@@ -133,8 +126,8 @@ func TestArrayDurationSet(t *testing.T) {
	f(`5m,1s,1h`, []time.Duration{time.Minute * 5, time.Second, time.Hour})
}

func TestArrayDurationGetOptionalArg(t *testing.T) {
	f := func(s string, argIdx int, expectedValue, defaultValue time.Duration) {
func TestArrayDuration_GetOptionalArg(t *testing.T) {
	f := func(s string, argIdx int, defaultValue, expectedValue time.Duration) {
		t.Helper()
		var a ArrayDuration
		_ = a.Set(s)
@@ -146,10 +139,10 @@ func TestArrayDurationGetOptionalArg(t *testing.T) {
	f("", 0, time.Second, time.Second)
	f("", 1, time.Minute, time.Minute)
	f("10s,1m", 1, time.Minute, time.Minute)
	f("10s", 3, time.Second*10, time.Minute)
	f("10s", 3, time.Minute, time.Second*10)
}

func TestArrayDurationString(t *testing.T) {
func TestArrayDuration_String(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var a ArrayDuration
@@ -165,20 +158,15 @@ func TestArrayDurationString(t *testing.T) {
}

func TestArrayBool(t *testing.T) {
	expected := []bool{
	expected := ArrayBool{
		true, false, true, true,
	}
	if len(expected) != len(fooFlagBool) {
		t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected))
	}
	for i, v := range fooFlagBool {
		if v != expected[i] {
			t.Errorf("unexpected item in array index=%v,value=%v,want=%v", i, v, expected[i])
		}
	if !reflect.DeepEqual(expected, fooFlagBool) {
		t.Fatalf("unexpected flag values; got\n%v\nwant\n%v", fooFlagBool, expected)
	}
}

func TestArrayBoolSet(t *testing.T) {
func TestArrayBool_Set(t *testing.T) {
	f := func(s string, expectedValues []bool) {
		t.Helper()
		var a ArrayBool
@@ -192,7 +180,7 @@ func TestArrayBoolSet(t *testing.T) {
	f(`false,True,False`, []bool{false, true, false})
}

func TestArrayBoolGetOptionalArg(t *testing.T) {
func TestArrayBool_GetOptionalArg(t *testing.T) {
	f := func(s string, argIdx int, expectedValue bool) {
		t.Helper()
		var a ArrayBool
@@ -208,7 +196,7 @@ func TestArrayBoolGetOptionalArg(t *testing.T) {
	f("true", 2, true)
}

func TestArrayBoolString(t *testing.T) {
func TestArrayBool_String(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var a ArrayBool
@@ -225,18 +213,13 @@ func TestArrayBoolString(t *testing.T) {
}

func TestArrayInt(t *testing.T) {
	expected := []int{1, 2, 3}
	if len(expected) != len(fooFlagInt) {
		t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected))
	}
	for i, n := range fooFlagInt {
		if n != expected[i] {
			t.Errorf("unexpected item in array %d", n)
		}
	expected := ArrayInt{1, 2, 3}
	if !reflect.DeepEqual(expected, fooFlagInt) {
		t.Fatalf("unexpected flag values; got\n%d\nwant\n%d", fooFlagInt, expected)
	}
}

func TestArrayIntSet(t *testing.T) {
func TestArrayInt_Set(t *testing.T) {
	f := func(s string, expectedValues []int) {
		t.Helper()
		var a ArrayInt
@@ -250,8 +233,8 @@ func TestArrayIntSet(t *testing.T) {
	f(`-2,3,-64`, []int{-2, 3, -64})
}

func TestArrayIntGetOptionalArg(t *testing.T) {
	f := func(s string, argIdx int, expectedValue, defaultValue int) {
func TestArrayInt_GetOptionalArg(t *testing.T) {
	f := func(s string, argIdx, defaultValue, expectedValue int) {
		t.Helper()
		var a ArrayInt
		_ = a.Set(s)
@@ -262,11 +245,11 @@ func TestArrayIntGetOptionalArg(t *testing.T) {
	}
	f("", 0, 123, 123)
	f("", 1, -34, -34)
	f("10,1", 1, 1, 234)
	f("10", 3, 10, -34)
	f("10,1", 1, 234, 1)
	f("10", 3, -34, 10)
}

func TestArrayIntString(t *testing.T) {
func TestArrayInt_String(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var a ArrayInt
@@ -280,3 +263,64 @@ func TestArrayIntString(t *testing.T) {
	f("10,1")
	f("-5,1,123")
}

func TestArrayBytes(t *testing.T) {
	expected := []int{10000000, 23, 10240}
	result := make([]int, len(fooFlagBytes))
	for i, b := range fooFlagBytes {
		result[i] = b.N
	}
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("unexpected flag values; got\n%d\nwant\n%d", result, expected)
	}
}

func TestArrayBytes_Set(t *testing.T) {
	f := func(s string, expectedValues []int) {
		t.Helper()
		var a ArrayBytes
		_ = a.Set(s)
		values := make([]int, len(a))
		for i, v := range a {
			values[i] = v.N
		}
		if !reflect.DeepEqual(values, expectedValues) {
			t.Fatalf("unexpected values parsed;\ngot\n%d\nwant\n%d", values, expectedValues)
		}
	}
	f("", []int{})
	f(`1`, []int{1})
	f(`-2,3,10kb`, []int{-2, 3, 10000})
}

func TestArrayBytes_GetOptionalArg(t *testing.T) {
	f := func(s string, argIdx, defaultValue, expectedValue int) {
		t.Helper()
		var a ArrayBytes
		_ = a.Set(s)
		v := a.GetOptionalArgOrDefault(argIdx, defaultValue)
		if v != expectedValue {
			t.Fatalf("unexpected value; got %d; want %d", v, expectedValue)
		}
	}
	f("", 0, 123, 123)
	f("", 1, -34, -34)
	f("10,1", 1, 234, 1)
	f("10,1", 3, 234, 234)
	f("10Kb", 3, -34, 10000)
}

func TestArrayBytes_String(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var a ArrayBytes
		_ = a.Set(s)
		result := a.String()
		if result != s {
			t.Fatalf("unexpected string;\ngot\n%s\nwant\n%s", result, s)
		}
	}
	f("")
	f("10.5KiB,1")
	f("-5,1,123MB")
}
@@ -34,7 +34,7 @@ var (
	tlsEnable   = flag.Bool("tls", false, "Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set")
	tlsCertFile = flag.String("tlsCertFile", "", "Path to file with TLS certificate if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated")
	tlsKeyFile  = flag.String("tlsKeyFile", "", "Path to file with TLS key if -tls is set. The provided key file is automatically re-read every second, so it can be dynamically updated")
	tlsCipherSuites = flagutil.NewArray("tlsCipherSuites", "Optional list of TLS cipher suites for incoming requests over HTTPS if -tls is set. See the list of supported cipher suites at https://pkg.go.dev/crypto/tls#pkg-constants")
	tlsCipherSuites = flagutil.NewArrayString("tlsCipherSuites", "Optional list of TLS cipher suites for incoming requests over HTTPS if -tls is set. See the list of supported cipher suites at https://pkg.go.dev/crypto/tls#pkg-constants")
	tlsMinVersion   = flag.String("tlsMinVersion", "", "Optional minimum TLS version to use for incoming requests over HTTPS if -tls is set. "+
		"Supported values: TLS10, TLS11, TLS12, TLS13")

@@ -248,7 +248,7 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
			// This is needed for proper handling of relative urls in web browsers.
			// Intentionally ignore query args, since it is expected that the requested url
			// is composed by a human, so it doesn't contain query args.
			RedirectPermanent(w, prefix)
			Redirect(w, prefix)
			return
		}
		if !strings.HasPrefix(path, prefix) {
@@ -681,11 +681,14 @@ func GetRequestURI(r *http.Request) string {
	return requestURI + delimiter + queryArgs
}

// RedirectPermanent redirects to the given url using 301 status code.
func RedirectPermanent(w http.ResponseWriter, url string) {
// Redirect redirects to the given url.
func Redirect(w http.ResponseWriter, url string) {
	// Do not use http.Redirect, since it breaks relative redirects
	// if the http.Request.URL contains unexpected url.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918
	w.Header().Set("Location", url)
	w.WriteHeader(http.StatusMovedPermanently)
	// Use http.StatusFound instead of http.StatusMovedPermanently,
	// since browsers can cache incorrect redirects returned with StatusMovedPermanently.
	// This may require browser cache cleaning after the incorrect redirect is fixed.
	w.WriteHeader(http.StatusFound)
}
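The 301-to-302 switch is easy to pin down in a test. A sketch of the kind of check that could sit next to `Redirect` in the `httpserver` package, using only the standard library's response recorder:

```go
func TestRedirectStatus(t *testing.T) {
	w := httptest.NewRecorder()
	Redirect(w, "/vmui/")
	// 302 responses are not cached by browsers the way 301 responses are.
	if w.Code != http.StatusFound {
		t.Fatalf("unexpected status code; got %d; want %d", w.Code, http.StatusFound)
	}
	if loc := w.Header().Get("Location"); loc != "/vmui/" {
		t.Fatalf("unexpected Location header; got %q; want %q", loc, "/vmui/")
	}
}
```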
@@ -8,7 +8,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)

var influxDatabaseNames = flagutil.NewArray("influx.databaseNames", "Comma-separated list of database names to return from /query and /influx/query API. "+
var influxDatabaseNames = flagutil.NewArrayString("influx.databaseNames", "Comma-separated list of database names to return from /query and /influx/query API. "+
	"This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb")

// WriteDatabaseNames writes influxDatabaseNames to w.
@@ -3,12 +3,8 @@ package promrelabel
import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
	"gopkg.in/yaml.v2"
)

@@ -125,10 +121,7 @@ func TestIfExpressionMatch(t *testing.T) {
		if err := yaml.UnmarshalStrict([]byte(ifExpr), &ie); err != nil {
			t.Fatalf("unexpected error during unmarshal: %s", err)
		}
		labels, err := parseMetricWithLabels(metricWithLabels)
		if err != nil {
			t.Fatalf("cannot parse %s: %s", metricWithLabels, err)
		}
		labels := MustParseMetricWithLabels(metricWithLabels)
		if !ie.Match(labels) {
			t.Fatalf("unexpected mismatch of ifExpr=%s for %s", ifExpr, metricWithLabels)
		}
@@ -162,10 +155,7 @@ func TestIfExpressionMismatch(t *testing.T) {
		if err := yaml.UnmarshalStrict([]byte(ifExpr), &ie); err != nil {
			t.Fatalf("unexpected error during unmarshal: %s", err)
		}
		labels, err := parseMetricWithLabels(metricWithLabels)
		if err != nil {
			t.Fatalf("cannot parse %s: %s", metricWithLabels, err)
		}
		labels := MustParseMetricWithLabels(metricWithLabels)
		if ie.Match(labels) {
			t.Fatalf("unexpected match of ifExpr=%s for %s", ifExpr, metricWithLabels)
		}
@@ -187,40 +177,3 @@ func TestIfExpressionMismatch(t *testing.T) {
	f(`'{foo!~"bar|"}'`, `abc`)
	f(`'{foo!~"bar|"}'`, `abc{foo="bar"}`)
}

func parseMetricWithLabels(metricWithLabels string) ([]prompbmarshal.Label, error) {
	stripDummyMetric := false
	if strings.HasPrefix(metricWithLabels, "{") {
		// Add a dummy metric name, since the parser needs it
		metricWithLabels = "dummy_metric" + metricWithLabels
		stripDummyMetric = true
	}
	// add a value to metricWithLabels, so it could be parsed by prometheus protocol parser.
	s := metricWithLabels + " 123"
	var rows prometheus.Rows
	var err error
	rows.UnmarshalWithErrLogger(s, func(s string) {
		err = fmt.Errorf("error during metric parse: %s", s)
	})
	if err != nil {
		return nil, err
	}
	if len(rows.Rows) != 1 {
		return nil, fmt.Errorf("unexpected number of rows parsed; got %d; want 1", len(rows.Rows))
	}
	r := rows.Rows[0]
	var lfs []prompbmarshal.Label
	if !stripDummyMetric {
		lfs = append(lfs, prompbmarshal.Label{
			Name:  "__name__",
			Value: r.Metric,
		})
	}
	for _, tag := range r.Tags {
		lfs = append(lfs, prompbmarshal.Label{
			Name:  tag.Key,
			Value: tag.Value,
		})
	}
	return lfs, nil
}
@@ -172,7 +172,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
		bb := relabelBufPool.Get()
		for _, gl := range prc.graphiteLabelRules {
			bb.B = gl.grt.Expand(bb.B[:0], gm.a)
			valueStr := string(bb.B)
			valueStr := bytesutil.InternString(bytesutil.ToUnsafeString(bb.B))
			labels = setLabelValue(labels, labelsOffset, gl.targetLabel, valueStr)
		}
		relabelBufPool.Put(bb)
@@ -185,7 +185,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
		if prc.hasLabelReferenceInReplacement {
			// Fill {{labelName}} references in the replacement
			bb.B = fillLabelReferences(bb.B[:0], replacement, labels[labelsOffset:])
			replacement = string(bb.B)
			replacement = bytesutil.InternString(bytesutil.ToUnsafeString(bb.B))
		}
		bb.B = concatLabelValues(bb.B[:0], src, prc.SourceLabels, prc.Separator)
		if prc.RegexAnchored == defaultRegexForRelabelConfig && !prc.hasCaptureGroupInTargetLabel {
@@ -193,7 +193,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
			// Fast path for the rule that copies source label values to destination:
			// - source_labels: [...]
			//   target_label: foobar
			valueStr := string(bb.B)
			valueStr := bytesutil.InternString(bytesutil.ToUnsafeString(bb.B))
			relabelBufPool.Put(bb)
			return setLabelValue(labels, labelsOffset, prc.TargetLabel, valueStr)
		}
@@ -236,7 +236,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
		// and store the result at `target_label`
		bb := relabelBufPool.Get()
		bb.B = concatLabelValues(bb.B[:0], src, prc.SourceLabels, prc.Separator)
		sourceStr := string(bb.B)
		sourceStr := bytesutil.InternString(bytesutil.ToUnsafeString(bb.B))
		relabelBufPool.Put(bb)
		valueStr := prc.replaceStringSubmatchesFast(sourceStr)
		if valueStr != sourceStr {
@@ -350,7 +350,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
	case "uppercase":
		bb := relabelBufPool.Get()
		bb.B = concatLabelValues(bb.B[:0], src, prc.SourceLabels, prc.Separator)
		valueStr := string(bb.B)
		valueStr := bytesutil.InternString(bytesutil.ToUnsafeString(bb.B))
		relabelBufPool.Put(bb)
		valueStr = strings.ToUpper(valueStr)
		labels = setLabelValue(labels, labelsOffset, prc.TargetLabel, valueStr)
@@ -358,7 +358,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
	case "lowercase":
		bb := relabelBufPool.Get()
		bb.B = concatLabelValues(bb.B[:0], src, prc.SourceLabels, prc.Separator)
		valueStr := string(bb.B)
		valueStr := bytesutil.InternString(bytesutil.ToUnsafeString(bb.B))
		relabelBufPool.Put(bb)
		valueStr = strings.ToLower(valueStr)
		labels = setLabelValue(labels, labelsOffset, prc.TargetLabel, valueStr)
@@ -384,7 +384,7 @@ func (prc *parsedRelabelConfig) replaceFullStringFast(s string) string {
		return s
	}
	if !strings.HasPrefix(s, prefix) {
		// Fast path - s doesn't match literl prefix from regex
		// Fast path - s doesn't match literal prefix from regex
		return s
	}
	if replacement == "$1" {
@@ -447,7 +447,7 @@ func (prc *parsedRelabelConfig) replaceStringSubmatchesSlow(s string) string {
func (prc *parsedRelabelConfig) expandCaptureGroups(template, source string, match []int) string {
	bb := relabelBufPool.Get()
	bb.B = prc.RegexAnchored.ExpandString(bb.B[:0], template, source, match)
	s := string(bb.B)
	s := bytesutil.InternString(bytesutil.ToUnsafeString(bb.B))
	relabelBufPool.Put(bb)
	return s
}
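Each replaced `string(bb.B)` above allocated a fresh copy of the label value on every relabeling pass; `bytesutil.InternString` returns one shared immutable copy instead, so hot relabeling paths stop churning the allocator for values that repeat across scrapes. A rough sketch of what such interning does (the real `lib/bytesutil` implementation differs in details such as cache rotation):

```go
var internedStrings sync.Map

// internString returns a canonical shared copy of s.
func internString(s string) string {
	if v, ok := internedStrings.Load(s); ok {
		return v.(string)
	}
	// Clone s, since callers may pass strings backed by reusable buffers
	// (e.g. bytesutil.ToUnsafeString over a pooled byte slice).
	sCopy := strings.Clone(s)
	internedStrings.Store(sCopy, sCopy)
	return sCopy
}
```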
@@ -77,10 +77,7 @@ func TestApplyRelabelConfigs(t *testing.T) {
		if err != nil {
			t.Fatalf("cannot parse %q: %s", config, err)
		}
		labels, err := parseMetricWithLabels(metric)
		if err != nil {
			t.Fatalf("cannot parse %s: %s", metric, err)
		}
		labels := MustParseMetricWithLabels(metric)
		resultLabels := pcs.Apply(labels, 0, isFinalize)
		result := labelsToString(resultLabels)
		if result != resultExpected {
@@ -696,10 +693,7 @@ func TestApplyRelabelConfigs(t *testing.T) {
func TestFinalizeLabels(t *testing.T) {
	f := func(metric, resultExpected string) {
		t.Helper()
		labels, err := parseMetricWithLabels(metric)
		if err != nil {
			t.Fatalf("cannot parse %s: %s", metric, err)
		}
		labels := MustParseMetricWithLabels(metric)
		resultLabels := FinalizeLabels(nil, labels)
		result := labelsToString(resultLabels)
		if result != resultExpected {
@@ -715,10 +709,7 @@ func TestFinalizeLabels(t *testing.T) {
func TestRemoveMetaLabels(t *testing.T) {
	f := func(metric, resultExpected string) {
		t.Helper()
		labels, err := parseMetricWithLabels(metric)
		if err != nil {
			t.Fatalf("cannot parse %s: %s", metric, err)
		}
		labels := MustParseMetricWithLabels(metric)
		resultLabels := RemoveMetaLabels(nil, labels)
		result := labelsToString(resultLabels)
		if result != resultExpected {
@@ -734,10 +725,7 @@ func TestRemoveMetaLabels(t *testing.T) {
func TestFillLabelReferences(t *testing.T) {
	f := func(replacement, metric, resultExpected string) {
		t.Helper()
		labels, err := parseMetricWithLabels(metric)
		if err != nil {
			t.Fatalf("cannot parse %s: %s", metric, err)
		}
		labels := MustParseMetricWithLabels(metric)
		result := fillLabelReferences(nil, replacement, labels)
		if string(result) != resultExpected {
			t.Fatalf("unexpected result; got\n%q\nwant\n%q", result, resultExpected)
@@ -270,22 +270,149 @@ func BenchmarkMatchRegexOrValuesMismatchUnoptimized(b *testing.B) {
}

func BenchmarkApplyRelabelConfigs(b *testing.B) {
	b.Run("kubernetes", func(b *testing.B) {
		// See https://github.com/Arnoways/prometheus/blob/d521933053bdf68d252e365da9376706d04addcc/model/relabel/relabel_test.go#L505
		pcs := mustParseRelabelConfigs(`
- source_labels:
  - __meta_kubernetes_pod_container_port_name
  regex: .*-metrics
  action: keep
- source_labels:
  - __meta_kubernetes_pod_label_name
  action: drop
  regex: ""
- source_labels:
  - __meta_kubernetes_pod_phase
  regex: Succeeded|Failed
  action: drop
- source_labels:
  - __meta_kubernetes_pod_annotation_prometheus_io_scrape
  regex: "false"
  action: drop
- source_labels:
  - __meta_kubernetes_pod_annotation_prometheus_io_scheme
  target_label: __scheme__
  regex: (https?)
  replacement: $1
  action: replace
- source_labels:
  - __meta_kubernetes_pod_annotation_prometheus_io_path
  target_label: __metrics_path__
  regex: (.+)
  replacement: $1
  action: replace
- source_labels:
  - __address__
  - __meta_kubernetes_pod_annotation_prometheus_io_port
  target_label: __address__
  regex: (.+?)(\:\d+)?;(\d+)
  replacement: $1:$3
  action: replace
- regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+)
  replacement: __param_$1
  action: labelmap
- regex: __meta_kubernetes_pod_label_prometheus_io_label_(.+)
  action: labelmap
- regex: __meta_kubernetes_pod_annotation_prometheus_io_label_(.+)
  action: labelmap
- source_labels:
  - __meta_kubernetes_namespace
  - __meta_kubernetes_pod_label_name
  separator: /
  target_label: job
  replacement: $1
  action: replace
- source_labels:
  - __meta_kubernetes_namespace
  target_label: namespace
  action: replace
- source_labels:
  - __meta_kubernetes_pod_name
  target_label: pod
  action: replace
- source_labels:
  - __meta_kubernetes_pod_container_name
  target_label: container
  action: replace
- source_labels:
  - __meta_kubernetes_pod_name
  - __meta_kubernetes_pod_container_name
  - __meta_kubernetes_pod_container_port_name
  separator: ':'
  target_label: instance
  action: replace
- target_label: cluster
  replacement: dev-us-central-0
- source_labels:
  - __meta_kubernetes_namespace
  regex: hosted-grafana
  action: drop
- source_labels:
  - __address__
  target_label: __tmp_hash
  modulus: 3
  action: hashmod
- source_labels:
  - __tmp_hash
  regex: ^0$
  action: keep
- regex: __tmp_hash
  action: labeldrop
`)
		labelsOrig := labelsFromStrings(
			"__address__", "10.132.183.40:80",
			"__meta_kubernetes_namespace", "loki-boltdb-shipper",
			"__meta_kubernetes_pod_annotation_promtail_loki_boltdb_shipper_hash", "50523b9759094a144adcec2eae0aa4ad",
			"__meta_kubernetes_pod_annotationpresent_promtail_loki_boltdb_shipper_hash", "true",
			"__meta_kubernetes_pod_container_init", "false",
			"__meta_kubernetes_pod_container_name", "promtail",
			"__meta_kubernetes_pod_container_port_name", "http-metrics",
			"__meta_kubernetes_pod_container_port_number", "80",
			"__meta_kubernetes_pod_container_port_protocol", "TCP",
			"__meta_kubernetes_pod_controller_kind", "DaemonSet",
			"__meta_kubernetes_pod_controller_name", "promtail-loki-boltdb-shipper",
			"__meta_kubernetes_pod_host_ip", "10.128.0.178",
			"__meta_kubernetes_pod_ip", "10.132.183.40",
			"__meta_kubernetes_pod_label_controller_revision_hash", "555b77cd7d",
			"__meta_kubernetes_pod_label_name", "promtail-loki-boltdb-shipper",
			"__meta_kubernetes_pod_label_pod_template_generation", "45",
			"__meta_kubernetes_pod_labelpresent_controller_revision_hash", "true",
			"__meta_kubernetes_pod_labelpresent_name", "true",
			"__meta_kubernetes_pod_labelpresent_pod_template_generation", "true",
			"__meta_kubernetes_pod_name", "promtail-loki-boltdb-shipper-jgtr7",
			"__meta_kubernetes_pod_node_name", "gke-dev-us-central-0-main-n2s8-2-14d53341-9hkr",
			"__meta_kubernetes_pod_phase", "Running",
			"__meta_kubernetes_pod_ready", "true",
			"__meta_kubernetes_pod_uid", "4c586419-7f6c-448d-aeec-ca4fa5b05e60",
			"__metrics_path__", "/metrics",
			"__scheme__", "http",
			"__scrape_interval__", "15s",
			"__scrape_timeout__", "10s",
			"job", "kubernetes-pods",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
			var labels []prompbmarshal.Label
			for pb.Next() {
				labels = append(labels[:0], labelsOrig...)
				labels = pcs.Apply(labels, 0, false)
				if len(labels) != 0 {
					panic(fmt.Errorf("BUG: expecting empty labels"))
				}
			}
		})
	})
	b.Run("replace-label-copy", func(b *testing.B) {
		pcs := mustParseRelabelConfigs(`
- action: replace
  source_labels: [id]
  target_label: __name__
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -294,7 +421,7 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
				labels = append(labels[:0], labelsOrig...)
				labels = pcs.Apply(labels, 0, true)
				if len(labels) != len(labelsOrig) {
					panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
					panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labelsOrig))
				}
				if labels[0].Name != "__name__" {
					panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__"))
@@ -317,16 +444,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  target_label: __name__
  replacement: foobar
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -358,12 +479,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  target_label: aaa
  replacement: foobar
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -396,16 +514,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  target_label: id
  regex: "(foobar)-.*"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -438,16 +550,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  target_label: id
  regex: "(foobar)-.*"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -479,16 +585,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  source_labels: ["non-existing-label"]
  regex: "(foobar)-.*"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -520,16 +620,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  source_labels: [id]
  regex: "yes"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "yes",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "yes",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -549,16 +643,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  source_labels: [id]
  regex: "(foobar)-.*"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -578,16 +666,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  source_labels: ["non-existing-label"]
  regex: "(foobar)-.*"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -607,16 +689,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  source_labels: [id]
  regex: "yes"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "yes",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "yes",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -648,16 +724,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  source_labels: [id]
  regex: "(foobar)-.*"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -688,16 +758,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labeldrop
  regex: "non-existing-label"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -728,16 +792,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labeldrop
  regex: id
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -762,16 +820,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labeldrop
  regex: "id.*"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -796,16 +848,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labeldrop
  regex: ".*id.*"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -830,16 +876,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labelkeep
  regex: "non-existing-label"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -858,16 +898,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labelkeep
  regex: id
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -892,16 +926,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labelkeep
  regex: "id.*"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -926,16 +954,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labelkeep
  regex: ".*id.*"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -960,12 +982,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labelmap
  regex: "a(.*)"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "foo",
				Value: "bar",
			},
		}
		labelsOrig := labelsFromStrings(
			"foo", "bar",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -990,12 +1009,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labelmap
  regex: "a(.*)"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "aabc",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"aabc", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -1026,12 +1042,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
- action: labelmap
  regex: "(.*)bc"
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "aabc",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"aabc", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -1064,16 +1077,10 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
  target_label: id
  modulus: 23
`)
		labelsOrig := []prompbmarshal.Label{
			{
				Name:  "__name__",
				Value: "metric",
			},
			{
				Name:  "id",
				Value: "foobar-random-string-here",
			},
		}
		labelsOrig := labelsFromStrings(
			"__name__", "metric",
			"id", "foobar-random-string-here",
		)
		b.ReportAllocs()
		b.SetBytes(1)
		b.RunParallel(func(pb *testing.PB) {
@@ -1108,3 +1115,15 @@ func mustParseRelabelConfigs(config string) *ParsedConfigs {
	}
	return pcs
}

func labelsFromStrings(ss ...string) []prompbmarshal.Label {
	labelsLen := len(ss) / 2
	labels := make([]prompbmarshal.Label, 0, labelsLen)
	for i := 0; i < len(ss); i += 2 {
		labels = append(labels, prompbmarshal.Label{
			Name:  ss[i],
			Value: ss[i+1],
		})
	}
	return labels
}
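The new `labelsFromStrings` helper keeps benchmark setup compact: each name/value pair becomes one label. The call below is equivalent to the struct-literal block it replaces throughout this file:

```go
labelsOrig := labelsFromStrings(
	"__name__", "metric",
	"id", "foobar-random-string-here",
)
// Equivalent to:
//
//	[]prompbmarshal.Label{
//		{Name: "__name__", Value: "metric"},
//		{Name: "id", Value: "foobar-random-string-here"},
//	}
```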
50
lib/promrelabel/util.go
Normal file
@@ -0,0 +1,50 @@
package promrelabel

import (
	"fmt"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
)

// MustParseMetricWithLabels parses s, which can have the form `metric{labels}`.
//
// This function is intended mostly for tests.
func MustParseMetricWithLabels(metricWithLabels string) []prompbmarshal.Label {
	stripDummyMetric := false
	if strings.HasPrefix(metricWithLabels, "{") {
		// Add a dummy metric name, since the parser needs it
		metricWithLabels = "dummy_metric" + metricWithLabels
		stripDummyMetric = true
	}
	// add a value to metricWithLabels, so it could be parsed by prometheus protocol parser.
	s := metricWithLabels + " 123"
	var rows prometheus.Rows
	var err error
	rows.UnmarshalWithErrLogger(s, func(s string) {
		err = fmt.Errorf("error during metric parse: %s", s)
	})
	if err != nil {
		logger.Panicf("BUG: cannot parse %q: %s", metricWithLabels, err)
	}
	if len(rows.Rows) != 1 {
		logger.Panicf("BUG: unexpected number of rows parsed; got %d; want 1", len(rows.Rows))
	}
	r := rows.Rows[0]
	var lfs []prompbmarshal.Label
	if !stripDummyMetric {
		lfs = append(lfs, prompbmarshal.Label{
			Name:  "__name__",
			Value: r.Metric,
		})
	}
	for _, tag := range r.Tags {
		lfs = append(lfs, prompbmarshal.Label{
			Name:  tag.Key,
			Value: tag.Value,
		})
	}
	return lfs
}
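Since the helper panics on malformed input instead of returning an error, test call sites shrink to a single line. For example:

```go
labels := MustParseMetricWithLabels(`foo{bar="baz"}`)
// labels: [{__name__ foo} {bar baz}]

// A bare label set works too; the injected dummy metric name is stripped:
labels = MustParseMetricWithLabels(`{job="node",instance="host1"}`)
// labels: [{job node} {instance host1}]
```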
@@ -229,6 +229,22 @@ type GlobalConfig struct {
 	ExternalLabels map[string]string `yaml:"external_labels,omitempty"`
 }
 
+func (gc *GlobalConfig) getExternalLabels() []prompbmarshal.Label {
+	externalLabels := gc.ExternalLabels
+	if len(externalLabels) == 0 {
+		return nil
+	}
+	labels := make([]prompbmarshal.Label, 0, len(externalLabels))
+	for name, value := range externalLabels {
+		labels = append(labels, prompbmarshal.Label{
+			Name:  name,
+			Value: value,
+		})
+	}
+	promrelabel.SortLabels(labels)
+	return labels
+}
+
 // ScrapeConfig represents essential parts for `scrape_config` section of Prometheus config.
 //
 // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
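Since Go randomizes map iteration order, converting `external_labels` to a slice requires an explicit sort to keep the label order stable across config reloads. A standalone sketch of the same idea, with `sort.Slice` standing in for `promrelabel.SortLabels` (an assumption about its behavior):

```go
package main

import (
	"fmt"
	"sort"
)

type label struct{ Name, Value string }

func main() {
	m := map[string]string{"datacenter": "foobar", "env": "prod"}
	labels := make([]label, 0, len(m))
	for name, value := range m {
		labels = append(labels, label{name, value})
	}
	// Sort by name for a deterministic order, regardless of map iteration.
	sort.Slice(labels, func(i, j int) bool { return labels[i].Name < labels[j].Name })
	fmt.Println(labels) // [{datacenter foobar} {env prod}]
}
```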
@@ -933,6 +949,7 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
 	if (*streamParse || sc.StreamParse) && sc.SeriesLimit > 0 {
 		return nil, fmt.Errorf("cannot use stream parsing mode when `series_limit` is set for `job_name` %q", jobName)
 	}
+	externalLabels := globalCfg.getExternalLabels()
 	swc := &scrapeWorkConfig{
 		scrapeInterval:       scrapeInterval,
 		scrapeIntervalString: scrapeInterval.String(),

@@ -948,7 +965,7 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
 		honorLabels:          honorLabels,
 		honorTimestamps:      honorTimestamps,
 		denyRedirects:        denyRedirects,
-		externalLabels:       globalCfg.ExternalLabels,
+		externalLabels:       externalLabels,
 		relabelConfigs:       relabelConfigs,
 		metricRelabelConfigs: metricRelabelConfigs,
 		sampleLimit:          sc.SampleLimit,
@@ -977,7 +994,7 @@ type scrapeWorkConfig struct {
 	honorLabels          bool
 	honorTimestamps      bool
 	denyRedirects        bool
-	externalLabels       map[string]string
+	externalLabels       []prompbmarshal.Label
 	relabelConfigs       *promrelabel.ParsedConfigs
 	metricRelabelConfigs *promrelabel.ParsedConfigs
 	sampleLimit          int
@@ -1308,6 +1325,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
 		DenyRedirects:        swc.denyRedirects,
 		OriginalLabels:       originalLabels,
 		Labels:               labels,
+		ExternalLabels:       swc.externalLabels,
 		ProxyURL:             swc.proxyURL,
 		ProxyAuthConfig:      swc.proxyAuthConfig,
 		AuthConfig:           swc.authConfig,
@@ -1357,9 +1375,6 @@ func mergeLabels(dst []prompbmarshal.Label, swc *scrapeWorkConfig, target string
 		logger.Panicf("BUG: len(dst) must be 0; got %d", len(dst))
 	}
 	// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
-	for k, v := range swc.externalLabels {
-		dst = appendLabel(dst, k, v)
-	}
 	dst = appendLabel(dst, "job", swc.jobName)
 	dst = appendLabel(dst, "__address__", target)
 	dst = appendLabel(dst, "__scheme__", swc.scheme)
@@ -36,19 +36,11 @@ func TestMergeLabels(t *testing.T) {
 		metricsPath:          "/foo/bar",
 		scrapeIntervalString: "15s",
 		scrapeTimeoutString:  "10s",
-		externalLabels: map[string]string{
-			"job": "bar",
-			"a":   "b",
-		},
-	}, "foo", nil, nil, `{__address__="foo",__metrics_path__="/foo/bar",__scheme__="https",__scrape_interval__="15s",__scrape_timeout__="10s",a="b",job="xyz"}`)
+	}, "foo", nil, nil, `{__address__="foo",__metrics_path__="/foo/bar",__scheme__="https",__scrape_interval__="15s",__scrape_timeout__="10s",job="xyz"}`)
 	f(&scrapeWorkConfig{
 		jobName:     "xyz",
 		scheme:      "https",
 		metricsPath: "/foo/bar",
-		externalLabels: map[string]string{
-			"job": "bar",
-			"a":   "b",
-		},
 	}, "foo", map[string]string{
 		"job": "extra_job",
 		"foo": "extra_foo",
@@ -959,10 +951,6 @@ scrape_configs:
 					Name:  "__scrape_timeout__",
 					Value: "10s",
 				},
-				{
-					Name:  "datacenter",
-					Value: "foobar",
-				},
 				{
 					Name:  "instance",
 					Value: "foo.bar:1234",

@@ -971,6 +959,12 @@ scrape_configs:
 					Name:  "job",
 					Value: "foo",
 				},
 			},
+			ExternalLabels: []prompbmarshal.Label{
+				{
+					Name:  "datacenter",
+					Value: "foobar",
+				},
+				{
+					Name:  "jobs",
+					Value: "xxx",

@@ -1604,6 +1598,24 @@ scrape_configs:
 					Name:  "job",
 					Value: "yyy",
 				},
 			},
+			ExternalLabels: []prompbmarshal.Label{
+				{
+					Name:  "__address__",
+					Value: "aaasdf",
+				},
+				{
+					Name:  "__param_a",
+					Value: "jlfd",
+				},
+				{
+					Name:  "foo",
+					Value: "xx",
+				},
+				{
+					Name:  "job",
+					Value: "foobar",
+				},
+				{
+					Name:  "q",
+					Value: "qwe",
@@ -232,7 +232,7 @@ func getRefreshTokenFunc(sdc *SDConfig, ac, proxyAC *promauth.Config, env *cloud
 	}
 	var tr tokenResponse
 	if err := json.Unmarshal(data, &tr); err != nil {
-		return "", 0, fmt.Errorf("cannot parse token auth response %q: %w", string(data), err)
+		return "", 0, fmt.Errorf("cannot parse token auth response %q: %w", data, err)
 	}
 	expiresInSeconds, err := strconv.ParseInt(tr.ExpiresIn, 10, 64)
 	if err != nil {
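The fix works because fmt's `%q` verb quotes a `[]byte` exactly like the equivalent string, so the `string(data)` conversion was a redundant allocation. A quick standalone check:

```go
package main

import "fmt"

func main() {
	data := []byte(`{"access_token":"..."}`)
	// %q quotes []byte exactly like the equivalent string, so the
	// explicit string(data) conversion was unnecessary.
	fmt.Printf("%q\n", data)         // "{\"access_token\":\"...\"}"
	fmt.Printf("%q\n", string(data)) // identical output
}
```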
@@ -16,10 +16,11 @@ func getInstancesLabels(cfg *apiConfig) ([]map[string]string, error) {
 		return nil, err
 	}
 	azMap := getAZMap(cfg)
+	region := cfg.awsConfig.GetRegion()
 	var ms []map[string]string
 	for _, r := range rs {
 		for _, inst := range r.InstanceSet.Items {
-			ms = inst.appendTargetLabels(ms, r.OwnerID, cfg.port, azMap)
+			ms = inst.appendTargetLabels(ms, r.OwnerID, region, cfg.port, azMap)
 		}
 	}
 	return ms, nil
@@ -134,7 +135,7 @@ func parseInstancesResponse(data []byte) (*InstancesResponse, error) {
 	return &v, nil
 }
 
-func (inst *Instance) appendTargetLabels(ms []map[string]string, ownerID string, port int, azMap map[string]string) []map[string]string {
+func (inst *Instance) appendTargetLabels(ms []map[string]string, ownerID, region string, port int, azMap map[string]string) []map[string]string {
 	if len(inst.PrivateIPAddress) == 0 {
 		// Cannot scrape instance without private IP address
 		return ms

@@ -157,6 +158,7 @@ func (inst *Instance) appendTargetLabels(ms []map[string]string, ownerID string,
 		"__meta_ec2_private_ip":       inst.PrivateIPAddress,
 		"__meta_ec2_public_dns_name":  inst.PublicDNSName,
 		"__meta_ec2_public_ip":        inst.PublicIPAddress,
+		"__meta_ec2_region":           region,
 		"__meta_ec2_vpc_id":           inst.VPCID,
 	}
 	if len(inst.VPCID) > 0 {
@@ -238,7 +238,7 @@ func TestParseInstancesResponse(t *testing.T) {
 	ownerID := rs.OwnerID
 	port := 423
 	inst := rs.InstanceSet.Items[0]
-	labelss := inst.appendTargetLabels(nil, ownerID, port, map[string]string{
+	labelss := inst.appendTargetLabels(nil, ownerID, "region-a", port, map[string]string{
 		"eu-west-2c": "foobar-zone",
 	})
 	var sortedLabelss [][]prompbmarshal.Label

@@ -263,6 +263,7 @@ func TestParseInstancesResponse(t *testing.T) {
 		"__meta_ec2_private_ip":       "172.31.11.152",
 		"__meta_ec2_public_dns_name":  "ec2-3-8-232-141.eu-west-2.compute.amazonaws.com",
 		"__meta_ec2_public_ip":        "3.8.232.141",
+		"__meta_ec2_region":           "region-a",
 		"__meta_ec2_subnet_id":        ",subnet-57044c3e,",
 		"__meta_ec2_tag_foo":          "bar",
 		"__meta_ec2_vpc_id":           "vpc-f1eaad99",
@@ -87,6 +87,12 @@ type ScrapeWork struct {
 	// See also https://prometheus.io/docs/concepts/jobs_instances/
 	Labels []prompbmarshal.Label
 
+	// ExternalLabels contains labels from global->external_labels section of -promscrape.config
+	//
+	// These labels are added to scraped metrics after the relabeling.
+	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3137
+	ExternalLabels []prompbmarshal.Label
+
 	// ProxyURL HTTP proxy url
 	ProxyURL *proxy.URL
 
@@ -140,9 +146,11 @@ func (sw *ScrapeWork) key() string {
 	// Do not take into account OriginalLabels, since they can be changed with relabeling.
 	// Take into account JobNameOriginal in order to capture the case when the original job_name is changed via relabeling.
 	key := fmt.Sprintf("JobNameOriginal=%s, ScrapeURL=%s, ScrapeInterval=%s, ScrapeTimeout=%s, HonorLabels=%v, HonorTimestamps=%v, DenyRedirects=%v, Labels=%s, "+
+		"ExternalLabels=%s, "+
 		"ProxyURL=%s, ProxyAuthConfig=%s, AuthConfig=%s, MetricRelabelConfigs=%s, SampleLimit=%d, DisableCompression=%v, DisableKeepAlive=%v, StreamParse=%v, "+
 		"ScrapeAlignInterval=%s, ScrapeOffset=%s, SeriesLimit=%d",
 		sw.jobNameOriginal, sw.ScrapeURL, sw.ScrapeInterval, sw.ScrapeTimeout, sw.HonorLabels, sw.HonorTimestamps, sw.DenyRedirects, sw.LabelsString(),
+		promLabelsString(sw.ExternalLabels),
 		sw.ProxyURL.String(), sw.ProxyAuthConfig.String(),
 		sw.AuthConfig.String(), sw.MetricRelabelConfigs.String(), sw.SampleLimit, sw.DisableCompression, sw.DisableKeepAlive, sw.StreamParse,
 		sw.ScrapeAlignInterval, sw.ScrapeOffset, sw.SeriesLimit)
@@ -835,6 +843,9 @@ func (sw *scrapeWork) addRowToTimeseries(wc *writeRequestCtx, r *parser.Row, tim
 		// Skip row without labels.
 		return
 	}
+	// Add labels from `global->external_labels` section after the relabeling like Prometheus does.
+	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3137
+	wc.labels = appendExtraLabels(wc.labels, sw.Config.ExternalLabels, labelsLen, sw.Config.HonorLabels)
 	sampleTimestamp := r.Timestamp
 	if !sw.Config.HonorTimestamps || sampleTimestamp == 0 {
 		sampleTimestamp = timestamp
@@ -863,36 +874,43 @@ func appendLabels(dst []prompbmarshal.Label, metric string, src []parser.Tag, ex
 			Value: tag.Value,
 		})
 	}
-	dst = append(dst, extraLabels...)
-	labels := dst[dstLen:]
-	if len(labels) <= 1 {
-		// Fast path - only a single label.
+	return appendExtraLabels(dst, extraLabels, dstLen, honorLabels)
+}
+
+func appendExtraLabels(dst, extraLabels []prompbmarshal.Label, offset int, honorLabels bool) []prompbmarshal.Label {
+	// Add extraLabels to labels.
+	// Handle duplicates in the same way as Prometheus does.
+	if len(dst) > offset && dst[offset].Name == "__name__" {
+		offset++
+	}
+	labels := dst[offset:]
+	if len(labels) == 0 {
+		// Fast path - add extraLabels to dst without the need to de-duplicate.
+		dst = append(dst, extraLabels...)
 		return dst
 	}
-
-	// de-duplicate labels
-	dstLabels := labels[:0]
-	for i := range labels {
-		label := &labels[i]
-		prevLabel := promrelabel.GetLabelByName(dstLabels, label.Name)
+	for _, label := range extraLabels {
+		prevLabel := promrelabel.GetLabelByName(labels, label.Name)
 		if prevLabel == nil {
-			dstLabels = append(dstLabels, *label)
+			// Fast path - the label doesn't exist in labels, so just add it to dst.
+			dst = append(dst, label)
 			continue
 		}
 		if honorLabels {
 			// Skip the extra label with the same name.
 			continue
 		}
-		// Rename the prevLabel to "exported_" + label.Name.
+		// Rename the prevLabel to "exported_" + label.Name
 		// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
 		exportedName := "exported_" + label.Name
-		if promrelabel.GetLabelByName(dstLabels, exportedName) != nil {
-			// Override duplicate with the current label.
-			*prevLabel = *label
-			continue
+		exportedLabel := promrelabel.GetLabelByName(labels, exportedName)
+		if exportedLabel == nil {
+			prevLabel.Name = exportedName
+			dst = append(dst, label)
+		} else {
+			exportedLabel.Value = prevLabel.Value
+			prevLabel.Value = label.Value
 		}
-		prevLabel.Name = exportedName
-		dstLabels = append(dstLabels, *label)
 	}
-	return dst[:dstLen+len(dstLabels)]
+	return dst
 }
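The de-duplication rules above are easier to follow outside the diff; below is a standalone sketch of the same honor-labels semantics using simplified local types (it mirrors, rather than reuses, `appendExtraLabels`, and ignores the `__name__`/offset handling):

```go
package main

import "fmt"

type label struct{ Name, Value string }

// attach mimics the post-relabeling merge: with honorLabels the scraped
// label wins and the external one is dropped; without it the scraped
// label is renamed to "exported_"+name and the external one is added.
func attach(scraped, extra []label, honorLabels bool) []label {
	for _, e := range extra {
		idx := -1
		for i := range scraped {
			if scraped[i].Name == e.Name {
				idx = i
				break
			}
		}
		if idx < 0 {
			scraped = append(scraped, e)
			continue
		}
		if honorLabels {
			continue // keep the scraped value
		}
		scraped[idx].Name = "exported_" + scraped[idx].Name
		scraped = append(scraped, e)
	}
	return scraped
}

func main() {
	scraped := []label{{"a", "b"}}
	extra := []label{{"a", "d"}}
	fmt.Println(attach(append([]label{}, scraped...), extra, true))  // [{a b}]
	fmt.Println(attach(append([]label{}, scraped...), extra, false)) // [{exported_a b} {a d}]
}
```

These two outputs correspond to the `f({a="b"}, {a="d"}, ...)` cases in the new test below.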
@@ -12,6 +12,33 @@ import (
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
 )
 
+func TestAppendExtraLabels(t *testing.T) {
+	f := func(sourceLabels, extraLabels string, honorLabels bool, resultExpected string) {
+		t.Helper()
+		src := promrelabel.MustParseMetricWithLabels(sourceLabels)
+		extra := promrelabel.MustParseMetricWithLabels(extraLabels)
+		labels := appendExtraLabels(src, extra, 0, honorLabels)
+		result := promLabelsString(labels)
+		if result != resultExpected {
+			t.Fatalf("unexpected result; got\n%s\nwant\n%s", result, resultExpected)
+		}
+	}
+	f("{}", "{}", true, "{}")
+	f("{}", "{}", false, "{}")
+	f("foo", "{}", true, `{__name__="foo"}`)
+	f("foo", "{}", false, `{__name__="foo"}`)
+	f("foo", "bar", true, `{__name__="foo",__name__="bar"}`)
+	f("foo", "bar", false, `{__name__="foo",__name__="bar"}`)
+	f(`{a="b"}`, `{c="d"}`, true, `{a="b",c="d"}`)
+	f(`{a="b"}`, `{c="d"}`, false, `{a="b",c="d"}`)
+	f(`{a="b"}`, `{a="d"}`, true, `{a="b"}`)
+	f(`{a="b"}`, `{a="d"}`, false, `{exported_a="b",a="d"}`)
+	f(`{a="b",exported_a="x"}`, `{a="d"}`, true, `{a="b",exported_a="x"}`)
+	f(`{a="b",exported_a="x"}`, `{a="d"}`, false, `{a="d",exported_a="b"}`)
+	f(`{a="b"}`, `{a="d",exported_a="x"}`, true, `{a="b",exported_a="x"}`)
+	f(`{a="b"}`, `{a="d",exported_a="x"}`, false, `{exported_a="b",a="d",exported_a="x"}`)
+}
+
 func TestPromLabelsString(t *testing.T) {
 	f := func(labels []prompbmarshal.Label, resultExpected string) {
 		t.Helper()
@@ -187,7 +214,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
 `)
 	f(`
 		foo{job="orig",bar="baz"} 34.45
-		bar{y="2",job="aa",a="b",job="bb",x="1"} -3e4 2345
+		bar{y="2",job="aa",a="b",x="1"} -3e4 2345
 	`, &ScrapeWork{
 		ScrapeTimeout: time.Second * 42,
 		HonorLabels:   false,

@@ -262,7 +289,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
 `)
 	f(`
 		foo{job="orig",bar="baz"} 34.45
-		bar{job="aa",a="b",job="bb"} -3e4 2345
+		bar{job="aa",a="b"} -3e4 2345
 	`, &ScrapeWork{
 		ScrapeTimeout: time.Second * 42,
 		HonorLabels:   true,
@@ -2,8 +2,10 @@ package vmimport
 
 import (
 	"fmt"
+	"math"
+	"strings"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/metrics"
 	"github.com/valyala/fastjson"

@@ -79,7 +81,11 @@ func (r *Row) unmarshal(s string, tu *tagsUnmarshaler) error {
 	for i, v := range values {
 		f, err := v.Float64()
 		if err != nil {
-			return fmt.Errorf("cannot unmarshal value at position %d: %w", i, err)
+			// Fall back to parsing special values
+			f, err = getSpecialFloat64(v)
+			if err != nil {
+				return fmt.Errorf("cannot unmarshal value at position %d: %w", i, err)
+			}
 		}
 		r.Values = append(r.Values, f)
 	}
@@ -103,6 +109,43 @@ func (r *Row) unmarshal(s string, tu *tagsUnmarshaler) error {
 	return nil
 }
 
+var nan = math.NaN()
+
+func getSpecialFloat64(v *fastjson.Value) (float64, error) {
+	vt := v.Type()
+	switch vt {
+	case fastjson.TypeNull:
+		return nan, nil
+	case fastjson.TypeString:
+		b, _ := v.StringBytes()
+		s := bytesutil.ToUnsafeString(b)
+		return getSpecialFloat64FromString(s)
+	default:
+		return 0, fmt.Errorf("unsupported value type: %s; value=%q", vt, v)
+	}
+}
+
+var inf = math.Inf(1)
+
+func getSpecialFloat64FromString(s string) (float64, error) {
+	minus := false
+	if strings.HasPrefix(s, "-") {
+		minus = true
+		s = s[1:]
+	}
+	switch s {
+	case "infinity", "Infinity", "Inf", "inf":
+		if minus {
+			return -inf, nil
+		}
+		return inf, nil
+	case "null", "Null", "nan", "NaN":
+		return nan, nil
+	default:
+		return 0, fmt.Errorf("unsupported string: %q", s)
+	}
+}
+
 // Tag represents `/api/v1/import` tag.
 type Tag struct {
 	Key []byte
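A standalone sketch of the fallback parsing for special JSON values, reduced to plain strings (the committed code reaches this path through fastjson, as shown above):

```go
package main

import (
	"fmt"
	"math"
	"strings"
)

// parseSpecial mirrors the accepted spellings above: Inf/Infinity
// (optionally negated) map to ±Inf, null/NaN map to NaN.
func parseSpecial(s string) (float64, error) {
	minus := strings.HasPrefix(s, "-")
	s = strings.TrimPrefix(s, "-")
	switch s {
	case "infinity", "Infinity", "Inf", "inf":
		if minus {
			return math.Inf(-1), nil
		}
		return math.Inf(1), nil
	case "null", "Null", "nan", "NaN":
		return math.NaN(), nil
	}
	return 0, fmt.Errorf("unsupported string: %q", s)
}

func main() {
	for _, s := range []string{"Infinity", "-Inf", "NaN", "null"} {
		f, err := parseSpecial(s)
		fmt.Println(s, "->", f, err)
	}
}
```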
@@ -1,6 +1,7 @@
 package vmimport
 
 import (
+	"fmt"
 	"math"
 	"reflect"
 	"testing"

@@ -47,6 +48,10 @@ func TestRowsUnmarshalFailure(t *testing.T) {
 	f(`{"metric":{"foo":"bar"},"values":null,"timestamps":[3,4]}`)
 	f(`{"metric":{"foo":"bar"},"timestamps":[3,4]}`)
 	f(`{"metric":{"foo":"bar"},"values":["foo"],"timestamps":[3]}`)
+	f(`{"metric":{"foo":"bar"},"values":null,"timestamps":[3,4]}`)
+	f(`{"metric":{"foo":"bar"},"values":"null","timestamps":[3,4]}`)
+	f(`{"metric":{"foo":"bar"},"values":"NaN","timestamps":[3,4]}`)
+	f(`{"metric":{"foo":"bar"},"values":[["NaN"]],"timestamps":[3,4]}`)
 
 	// Invalid timestamps
 	f(`{"metric":{"foo":"bar"},"values":[1,2],"timestamps":3}`)
@@ -71,14 +76,15 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
 		t.Helper()
 		var rows Rows
 		rows.Unmarshal(s)
-		if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
-			t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
+		if err := compareRows(&rows, rowsExpected); err != nil {
+			t.Fatalf("unexpected rows: %s;\ngot\n%+v;\nwant\n%+v", err, rows.Rows, rowsExpected.Rows)
 		}
 
 		// Try unmarshaling again
 		rows.Unmarshal(s)
-		if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
-			t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
+		if err := compareRows(&rows, rowsExpected); err != nil {
+			t.Fatalf("unexpected rows at second unmarshal: %s;\ngot\n%+v;\nwant\n%+v", err, rows.Rows, rowsExpected.Rows)
 		}
 
 		rows.Reset()
@@ -104,15 +110,15 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
 		}},
 	})
 
-	// Inf and nan values
-	f(`{"metric":{"foo":"bar"},"values":[Inf, -Inf],"timestamps":[456, 789]}`, &Rows{
+	// Inf and nan, null values
+	f(`{"metric":{"foo":"bar"},"values":[Inf, -Inf, "Infinity", "-Infinity", NaN, "NaN", null, "null", 1.2],"timestamps":[456, 789, 123, 0, 1, 42, 2, 3, 7]}`, &Rows{
 		Rows: []Row{{
 			Tags: []Tag{{
 				Key:   []byte("foo"),
 				Value: []byte("bar"),
 			}},
-			Values:     []float64{math.Inf(1), math.Inf(-1)},
-			Timestamps: []int64{456, 789},
+			Values:     []float64{inf, -inf, inf, -inf, nan, nan, nan, nan, 1.2},
+			Timestamps: []int64{456, 789, 123, 0, 1, 42, 2, 3, 7},
 		}},
 	})
@@ -229,3 +235,46 @@ garbage here
 		},
 	})
 }
+
+func compareRows(rows, rowsExpected *Rows) error {
+	if len(rows.Rows) != len(rowsExpected.Rows) {
+		return fmt.Errorf("unexpected number of rows; got %d; want %d", len(rows.Rows), len(rowsExpected.Rows))
+	}
+	for i, row := range rows.Rows {
+		rowExpected := rowsExpected.Rows[i]
+		if err := compareSingleRow(&row, &rowExpected); err != nil {
+			return fmt.Errorf("unexpected row at position #%d: %w", i, err)
+		}
+	}
+	return nil
+}
+
+func compareSingleRow(row, rowExpected *Row) error {
+	if !reflect.DeepEqual(row.Tags, rowExpected.Tags) {
+		return fmt.Errorf("unexpected tags; got %q; want %q", row.Tags, rowExpected.Tags)
+	}
+	if !reflect.DeepEqual(row.Timestamps, rowExpected.Timestamps) {
+		return fmt.Errorf("unexpected timestamps; got %d; want %d", row.Timestamps, rowExpected.Timestamps)
+	}
+	if err := compareValues(row.Values, rowExpected.Values); err != nil {
+		return fmt.Errorf("unexpected values; got %v; want %v", row.Values, rowExpected.Values)
+	}
+	return nil
+}
+
+func compareValues(values, valuesExpected []float64) error {
+	if len(values) != len(valuesExpected) {
+		return fmt.Errorf("unexpected number of values; got %d; want %d", len(values), len(valuesExpected))
+	}
+	for i, v := range values {
+		vExpected := valuesExpected[i]
+		if math.IsNaN(v) {
+			if !math.IsNaN(vExpected) {
+				return fmt.Errorf("expecting NaN at position #%d; got %v", i, v)
+			}
+		} else if v != vExpected {
+			return fmt.Errorf("unexpected value at position #%d; got %v; want %v", i, v, vExpected)
+		}
+	}
+	return nil
+}
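These NaN-aware comparators are needed because `reflect.DeepEqual` follows IEEE-754 semantics, under which NaN is not equal to itself. A quick demonstration:

```go
package main

import (
	"fmt"
	"math"
	"reflect"
)

func main() {
	a := []float64{math.NaN()}
	b := []float64{math.NaN()}
	// DeepEqual follows IEEE-754 float comparison: NaN != NaN,
	// so slices containing NaN never compare equal, even to a copy.
	fmt.Println(reflect.DeepEqual(a, b)) // false
	// The comparator above therefore treats two NaNs as matching.
	fmt.Println(math.IsNaN(a[0]) && math.IsNaN(b[0])) // true
}
```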
@@ -12,10 +12,10 @@ import (
 )
 
 var (
-	pushURL = flagutil.NewArray("pushmetrics.url", "Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . "+
+	pushURL = flagutil.NewArrayString("pushmetrics.url", "Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . "+
 		"By default metrics exposed at /metrics page aren't pushed to any remote storage")
 	pushInterval   = flag.Duration("pushmetrics.interval", 10*time.Second, "Interval for pushing metrics to -pushmetrics.url")
-	pushExtraLabel = flagutil.NewArray("pushmetrics.extraLabel", "Optional labels to add to metrics pushed to -pushmetrics.url . "+
+	pushExtraLabel = flagutil.NewArrayString("pushmetrics.extraLabel", "Optional labels to add to metrics pushed to -pushmetrics.url . "+
 		`For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url`)
 )
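Judging from this hunk, `flagutil.NewArray` was renamed to `flagutil.NewArrayString` for repeated string flags; the flag behavior itself appears unchanged. A standalone sketch of the repeated-flag pattern using only the standard library (the exact `flagutil` semantics are an assumption):

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

// arrayString collects repeated -name=value occurrences, similar in
// spirit to flagutil.NewArrayString (assumed behavior).
type arrayString []string

func (a *arrayString) String() string     { return strings.Join(*a, ",") }
func (a *arrayString) Set(v string) error { *a = append(*a, v); return nil }

func main() {
	var pushURL arrayString
	flag.Var(&pushURL, "pushmetrics.url", "Optional URL(s) to push metrics to; may be repeated")
	flag.Parse()
	fmt.Println("push URLs:", pushURL)
}
```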
@@ -588,13 +588,21 @@ func getOptimizedReMatchFunc(reMatch func(b []byte) bool, expr string) (func(b [
 	if err != nil {
 		logger.Panicf("BUG: unexpected error when parsing verified expr=%q: %s", expr, err)
 	}
-	if matchFunc, literalSuffix, reCost := getOptimizedReMatchFuncExt(reMatch, sre); matchFunc != nil {
+	// Prepare fast string matcher for reMatch.
+	fsm := bytesutil.NewFastStringMatcher(func(s string) bool {
+		return reMatch(bytesutil.ToUnsafeBytes(s))
+	})
+	reMatchFast := func(b []byte) bool {
+		return fsm.Match(bytesutil.ToUnsafeString(b))
+	}
+
+	if matchFunc, literalSuffix, reCost := getOptimizedReMatchFuncExt(reMatchFast, sre); matchFunc != nil {
 		// Found optimized function for matching the expr.
 		suffixUnescaped := tagCharsReverseRegexpEscaper.Replace(literalSuffix)
 		return matchFunc, suffixUnescaped, reCost
 	}
-	// Fall back to un-optimized reMatch.
-	return reMatch, "", reMatchCost
+	// Fall back to reMatchFast.
+	return reMatchFast, "", reMatchCost
 }
 
 // These cost values are used for sorting tag filters in ascending order of the required CPU time for execution.
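The change routes every regexp match through a memoizing string matcher before structural optimizations are attempted, so repeated matches against the same tag value can be served from a cache. A minimal sketch of that caching idea, assuming `bytesutil.NewFastStringMatcher` memoizes results per input string (the real implementation may bound or evict its cache):

```go
package main

import (
	"fmt"
	"regexp"
	"sync"
)

// fastStringMatcher caches match results per input string, in the
// spirit of bytesutil.NewFastStringMatcher (an assumption about its
// behavior).
type fastStringMatcher struct {
	mu    sync.Mutex
	cache map[string]bool
	match func(s string) bool
}

func newFastStringMatcher(match func(s string) bool) *fastStringMatcher {
	return &fastStringMatcher{cache: map[string]bool{}, match: match}
}

func (fsm *fastStringMatcher) Match(s string) bool {
	fsm.mu.Lock()
	defer fsm.mu.Unlock()
	ok, found := fsm.cache[s]
	if !found {
		ok = fsm.match(s)
		fsm.cache[s] = ok
	}
	return ok
}

func main() {
	re := regexp.MustCompile(`foo.+bar`)
	fsm := newFastStringMatcher(re.MatchString)
	fmt.Println(fsm.Match("foo-and-bar")) // true, computed
	fmt.Println(fsm.Match("foo-and-bar")) // true, served from the cache
}
```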
7	vendor/cloud.google.com/go/iam/CHANGES.md	generated vendored

@@ -1,5 +1,12 @@
 # Changes
 
+## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28)
+
+
+### Features
+
+* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7))
+
 ## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06)
486
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
generated
vendored
Normal file
486
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
generated
vendored
Normal file
|
@ -0,0 +1,486 @@
|
|||
# Release History
|
||||
|
||||
## 1.1.4 (2022-10-06)
|
||||
|
||||
### Bugs Fixed
|
||||
* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`.
|
||||
* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string
|
||||
|
||||
### Other Changes
|
||||
* Removed logging URL from retry policy as it's redundant.
|
||||
* Retry policy logs when it exits due to a non-retriable status code.
|
||||
|
||||
## 1.1.3 (2022-09-01)
|
||||
|
||||
### Bugs Fixed
|
||||
* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines.
|
||||
|
||||
## 1.1.2 (2022-08-09)
|
||||
|
||||
### Other Changes
|
||||
* Fixed various doc bugs.
|
||||
|
||||
## 1.1.1 (2022-06-30)
|
||||
|
||||
### Bugs Fixed
|
||||
* Avoid polling when a RELO LRO synchronously terminates.
|
||||
|
||||
## 1.1.0 (2022-06-03)
|
||||
|
||||
### Other Changes
|
||||
* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests.
|
||||
|
||||
## 1.0.0 (2022-05-12)
|
||||
|
||||
### Features Added
|
||||
* Added interface `runtime.PollingHandler` to support custom poller implementations.
|
||||
* Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`.
|
||||
|
||||
### Breaking Changes
|
||||
* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost`
|
||||
* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic`
|
||||
* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions`
|
||||
* Removed `TokenRequestOptions.TenantID`
|
||||
* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration`
|
||||
* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()`
|
||||
* Removed `arm/runtime.FinalStateVia` and related `const` values
|
||||
* Renamed `runtime.PageProcessor` to `runtime.PagingHandler`
|
||||
* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported.
|
||||
* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()`
|
||||
* `TokenCredential.GetToken` now returns `AccessToken` by value.
|
||||
|
||||
### Bugs Fixed
|
||||
* When per-try timeouts are enabled, only cancel the context after the body has been read and closed.
|
||||
* The `Operation-Location` poller now properly handles `final-state-via` values.
|
||||
* Improvements in `runtime.Poller[T]`
|
||||
* `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state.
|
||||
* `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries.
|
||||
|
||||
### Other Changes
|
||||
* Updated to latest `internal` module and absorbed breaking changes.
|
||||
* Use `temporal.Resource` and deleted copy.
|
||||
* The internal poller implementation has been refactored.
|
||||
* The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification.
|
||||
* The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface.
|
||||
* The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it.
|
||||
* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions`
|
||||
* Default User-Agent headers no longer include `azcore` version information
|
||||
|
||||
## 0.23.1 (2022-04-14)
|
||||
|
||||
### Bugs Fixed
|
||||
* Include XML header when marshalling XML content.
|
||||
* Handle XML namespaces when searching for error code.
|
||||
* Handle `odata.error` when searching for error code.
|
||||
|
||||
## 0.23.0 (2022-04-04)
|
||||
|
||||
### Features Added
|
||||
* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations.
|
||||
* Added `cloud` package with a new API for cloud configuration
|
||||
* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type.
|
||||
|
||||
### Breaking Changes
|
||||
* Removed the `Poller` type-alias to the internal poller implementation.
|
||||
* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations.
|
||||
* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter.
|
||||
* Replaced `arm.Endpoint` with `cloud` API
|
||||
* Removed the `endpoint` parameter from `NewRPRegistrationPolicy()`
|
||||
* `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error`
|
||||
* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages.
|
||||
* Removed the `pollerID` parameter as it's no longer required.
|
||||
* Created optional parameter structs and moved optional parameters into them.
|
||||
* Changed `FinalStateVia` field to a `const` type.
|
||||
|
||||
### Other Changes
|
||||
* Converted expiring resource and dependent types to use generics.
|
||||
|
||||
## 0.22.0 (2022-03-03)
|
||||
|
||||
### Features Added
|
||||
* Added header `WWW-Authenticate` to the default allow-list of headers for logging.
|
||||
* Added a pipeline policy that enables the retrieval of HTTP responses from API calls.
|
||||
* Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default).
|
||||
|
||||
### Breaking Changes
|
||||
* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package.
|
||||
|
||||
## 0.21.1 (2022-02-04)
|
||||
|
||||
### Bugs Fixed
|
||||
* Restore response body after reading in `Poller.FinalResponse()`. (#16911)
|
||||
* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969)
|
||||
|
||||
### Other Changes
|
||||
* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789)
|
||||
|
||||
## 0.21.0 (2022-01-11)
|
||||
|
||||
### Features Added
|
||||
* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger.
|
||||
* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received.
|
||||
|
||||
### Breaking Changes
|
||||
* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions`
|
||||
* Renamed `arm/ClientOptions.Host` to `.Endpoint`
|
||||
* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload`
|
||||
* Removed `azcore.HTTPResponse` interface type
|
||||
* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter
|
||||
* `runtime.NewResponseError()` no longer requires an `error` parameter
|
||||
|
||||
## 0.20.0 (2021-10-22)
|
||||
|
||||
### Breaking Changes
|
||||
* Removed `arm.Connection`
|
||||
* Removed `azcore.Credential` and `.NewAnonymousCredential()`
|
||||
* `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential`
|
||||
* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication
|
||||
* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions`
|
||||
* Contents in the `log` package have been slightly renamed.
|
||||
* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions`
|
||||
* Changed parameters for `NewBearerTokenPolicy()`
|
||||
* Moved policy config options out of `arm/runtime` and into `arm/policy`
|
||||
|
||||
### Features Added
|
||||
* Updating Documentation
|
||||
* Added string typdef `arm.Endpoint` to provide a hint toward expected ARM client endpoints
|
||||
* `azcore.ClientOptions` contains common pipeline configuration settings
|
||||
* Added support for multi-tenant authorization in `arm/runtime`
|
||||
* Require one second minimum when calling `PollUntilDone()`
|
||||
|
||||
### Bug Fixes
|
||||
* Fixed a potential panic when creating the default Transporter.
|
||||
* Close LRO initial response body when creating a poller.
|
||||
* Fixed a panic when recursively cloning structs that contain time.Time.
|
||||
|
||||
## 0.19.0 (2021-08-25)
|
||||
|
||||
### Breaking Changes
|
||||
* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors).
|
||||
* `azcore` has all core functionality.
|
||||
* `log` contains facilities for configuring in-box logging.
|
||||
* `policy` is used for configuring pipeline options and creating custom pipeline policies.
|
||||
* `runtime` contains various helpers used by SDK authors and generated content.
|
||||
* `streaming` has helpers for streaming IO operations.
|
||||
* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed.
|
||||
* As a result, the `Request.Telemetry()` method has been removed.
|
||||
* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it.
|
||||
* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it.
|
||||
* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively.
|
||||
|
||||
### Bug Fixes
|
||||
* Fixed an issue in the retry policy where the request body could be overwritten after a rewind.
|
||||
|
||||
### Other Changes
|
||||
* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively.
|
||||
* The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy.
|
||||
* Poller logic has been consolidated across ARM and core implementations.
|
||||
* This required some changes to the internal interfaces for core pollers.
|
||||
* The core poller types have been improved, including more logging and test coverage.
|
||||
|
||||
## 0.18.1 (2021-08-20)
|
||||
|
||||
### Features Added
|
||||
* Adds an `ETag` type for comparing etags and handling etags on requests
|
||||
* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object
|
||||
|
||||
### Bugs Fixed
|
||||
* `JoinPaths` will preserve query parameters encoded in the `root` url.
|
||||
|
||||
### Other Changes
|
||||
* Bumps dependency on `internal` module to the latest version (v0.7.0)
|
||||
|
||||
## 0.18.0 (2021-07-29)
|
||||
### Features Added
|
||||
* Replaces methods from Logger type with two package methods for interacting with the logging functionality.
|
||||
* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications`
|
||||
* `azcore.SetListener` replaces `azcore.Logger().SetListener`
|
||||
|
||||
### Breaking Changes
|
||||
* Removes `Logger` type from `azcore`
|
||||
|
||||
|
||||
## 0.17.0 (2021-07-27)
|
||||
### Features Added
|
||||
* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879)
|
||||
* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123)
|
||||
|
||||
### Breaking Changes
|
||||
* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104)
|
||||
* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103)
|
||||
* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038)
|
||||
|
||||
|
||||
## 0.16.2 (2021-05-26)
|
||||
### Features Added
|
||||
* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715)
|
||||
|
||||
|
||||
## 0.16.1 (2021-05-19)
|
||||
### Features Added
|
||||
* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682)
|
||||
|
||||
|
||||
## 0.16.0 (2021-05-07)
|
||||
### Features Added
|
||||
* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642)
|
||||
|
||||
|
||||
## 0.15.1 (2021-05-06)
|
||||
### Features Added
|
||||
* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634)
|
||||
|
||||
|
||||
## 0.15.0 (2021-05-05)
|
||||
### Features Added
|
||||
* Add support for null map and slice
|
||||
* Export `Response.Payload` method
|
||||
|
||||
### Breaking Changes
|
||||
* remove `Response.UnmarshalError` as it's no longer required
|
||||
|
||||
|
||||
## 0.14.5 (2021-04-23)
|
||||
### Features Added
|
||||
* Add `UnmarshalError()` on `azcore.Response`
|
||||
|
||||
|
||||
## 0.14.4 (2021-04-22)
|
||||
### Features Added
|
||||
* Support for basic LRO polling
|
||||
* Added type `LROPoller` and supporting types for basic polling on long running operations.
|
||||
* rename poller param and added doc comment
|
||||
|
||||
### Bugs Fixed
|
||||
* Fixed content type detection bug in logging.
|
||||
|
||||
|
||||
## 0.14.3 (2021-03-29)
|
||||
### Features Added
|
||||
* Add support for multi-part form data
|
||||
* Added method `WriteMultipartFormData()` to Request.
|
||||
|
||||
|
||||
## 0.14.2 (2021-03-17)
|
||||
### Features Added
|
||||
* Add support for encoding JSON null values
|
||||
* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null.
|
||||
* Documentation fixes
|
||||
|
||||
### Bugs Fixed
|
||||
* Fixed improper error wrapping
|
||||
|
||||
|
||||
## 0.14.1 (2021-02-08)
|
||||
### Features Added
|
||||
* Add `Pager` and `Poller` interfaces to azcore
|
||||
|
||||
|
||||
## 0.14.0 (2021-01-12)
|
||||
### Features Added
|
||||
* Accept zero-value options for default values
|
||||
* Specify zero-value options structs to accept default values.
|
||||
* Remove `DefaultXxxOptions()` methods.
|
||||
* Do not silently change TryTimeout on negative values
|
||||
* make per-try timeout opt-in
|
||||
|
||||
|
||||
## 0.13.4 (2020-11-20)
|
||||
### Features Added
|
||||
* Include telemetry string in User Agent
|
||||
|
||||
|
||||
## 0.13.3 (2020-11-20)
|
||||
### Features Added
|
||||
* Updating response body handling on `azcore.Response`
|
||||
|
||||
|
||||
## 0.13.2 (2020-11-13)
|
||||
### Features Added
|
||||
* Remove implementation of stateless policies as first-class functions.
|
||||
|
||||
|
||||
## 0.13.1 (2020-11-05)
|
||||
### Features Added
|
||||
* Add `Telemetry()` method to `azcore.Request()`
|
||||
|
||||
|
||||
## 0.13.0 (2020-10-14)
|
||||
### Features Added
|
||||
* Rename `log` to `logger` to avoid name collision with the log package.
|
||||
* Documentation improvements
|
||||
* Simplified `DefaultHTTPClientTransport()` implementation
|
||||
|
||||
|
||||
## 0.12.1 (2020-10-13)
|
||||
### Features Added
|
||||
* Update `internal` module dependence to `v0.5.0`
|
||||
|
||||
|
||||
## 0.12.0 (2020-10-08)
|
||||
### Features Added
|
||||
* Removed storage specific content
|
||||
* Removed internal content to prevent API clutter
|
||||
* Refactored various policy options to conform with our options pattern
|
||||
|
||||
|
||||
## 0.11.0 (2020-09-22)
|
||||
### Features Added
|
||||
|
||||
* Removed `LogError` and `LogSlowResponse`.
|
||||
* Renamed `options` in `RequestLogOptions`.
|
||||
* Updated `NewRequestLogPolicy()` to follow standard pattern for options.
|
||||
* Refactored `requestLogPolicy.Do()` per above changes.
|
||||
* Cleaned up/added logging in retry policy.
|
||||
* Export `NewResponseError()`
|
||||
* Fix `RequestLogOptions` comment
|
||||
|
||||
|
||||
## 0.10.1 (2020-09-17)
|
||||
### Features Added
|
||||
* Add default console logger
|
||||
* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'.
|
||||
* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks.
|
||||
* Add `LogLongRunningOperation`
|
||||
|
||||
|
||||
## 0.10.0 (2020-09-10)
|
||||
### Features Added
|
||||
* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library.
|
||||
* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter.
|
||||
* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`.
|
||||
* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request.
|
||||
* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this.
|
||||
* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type.
|
||||
* moved path concatenation into `JoinPaths()` func
|
||||
|
||||
|
||||
## 0.9.6 (2020-08-18)
|
||||
### Features Added
|
||||
* Improvements to body download policy
|
||||
* Always download the response body for error responses, i.e. HTTP status codes >= 400.
|
||||
* Simplify variable declarations
|
||||
|
||||
|
||||
## 0.9.5 (2020-08-11)
|
||||
### Features Added
|
||||
* Set the Content-Length header in `Request.SetBody`
|
||||
|
||||
|
||||
## 0.9.4 (2020-08-03)
|
||||
### Features Added
|
||||
* Fix cancellation of per try timeout
|
||||
* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinant amount of time.
|
||||
* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion.
|
||||
* Do not drain response body if there are no more retries
|
||||
* Do not retry non-idempotent operations when body download fails
|
||||
|
||||
|
||||
## 0.9.3 (2020-07-28)
|
||||
### Features Added
|
||||
* Add support for custom HTTP request headers
|
||||
* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request.
|
||||
* Use `azcore.WithHTTPHeader` to add HTTP headers to a context.
|
||||
* Remove method specific to Go 1.14
|
||||
|
||||
|
||||
## 0.9.2 (2020-07-28)
|
||||
### Features Added
|
||||
* Omit read-only content from request payloads
|
||||
* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation.
|
||||
* Verify no fields were dropped
|
||||
* Handle embedded struct types
|
||||
* Added test for cloning by value
|
||||
* Add messages to failures
|
||||
|
||||
|
||||
## 0.9.1 (2020-07-22)
|
||||
### Features Added
|
||||
* Updated dependency on internal module to fix race condition.
|
||||
|
||||
|
||||
## 0.9.0 (2020-07-09)
|
||||
### Features Added
|
||||
* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure.
|
||||
* Updated `sdk/internal` dependency to latest version.
|
||||
* Rename package alias
|
||||
|
||||
|
||||
## 0.8.2 (2020-06-29)
|
||||
### Features Added
|
||||
* Added missing documentation comments
|
||||
|
||||
### Bugs Fixed
|
||||
* Fixed a bug in body download policy.
|
||||
|
||||
|
||||
## 0.8.1 (2020-06-26)
|
||||
### Features Added
|
||||
* Miscellaneous clean-up reported by linters
|
||||
|
||||
|
||||
## 0.8.0 (2020-06-01)
|
||||
### Features Added
|
||||
* Differentiate between standard and URL encoding.
|
||||
|
||||
|
||||
## 0.7.1 (2020-05-27)
|
||||
### Features Added
|
||||
* Add support for for base64 encoding and decoding of payloads.
|
||||
|
||||
|
||||
## 0.7.0 (2020-05-12)
|
||||
### Features Added
|
||||
* Change `RetryAfter()` to a function.
|
||||
|
||||
|
||||
## 0.6.0 (2020-04-29)
|
||||
### Features Added
|
||||
* Updating `RetryAfter` to only return the detaion in the RetryAfter header
|
||||
|
||||
|
||||
## 0.5.0 (2020-03-23)
|
||||
### Features Added
|
||||
* Export `TransportFunc`
|
||||
|
||||
### Breaking Changes
|
||||
* Removed `IterationDone`
|
||||
|
||||
|
||||
## 0.4.1 (2020-02-25)
|
||||
### Features Added
|
||||
* Ensure per-try timeout is properly cancelled
|
||||
* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy.
|
||||
* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed.
|
||||
* `Logger.Should()` will return false if no listener is set.
|
||||
|
||||
|
||||
## 0.4.0 (2020-02-18)
|
||||
### Features Added
|
||||
* Enable custom `RetryOptions` to be specified per API call
|
||||
* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call.
|
||||
* Remove 429 from the list of default HTTP status codes for retry.
|
||||
* Change StatusCodesForRetry to a slice so consumers can append to it.
|
||||
* Added support for retry-after in HTTP-date format.
|
||||
* Cleaned up some comments specific to storage.
|
||||
* Remove `Request.SetQueryParam()`
|
||||
* Renamed `MaxTries` to `MaxRetries`
|
||||
|
||||
## 0.3.0 (2020-01-16)
|
||||
### Features Added
|
||||
* Added `DefaultRetryOptions` to create initialized default options.
|
||||
|
||||
### Breaking Changes
|
||||
* Removed `Response.CheckStatusCode()`
|
||||
|
||||
|
||||
## 0.2.0 (2020-01-15)
|
||||
### Features Added
|
||||
* Add support for marshalling and unmarshalling JSON
|
||||
* Removed `Response.Payload` field
|
||||
* Exit early when unmarsahlling if there is no payload
|
||||
|
||||
|
||||
## 0.1.0 (2020-01-10)
|
||||
### Features Added
|
||||
* Initial release
|
21
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt
generated
vendored
Normal file
21
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) Microsoft Corporation.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE
|
39
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md
generated
vendored
Normal file
39
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md
generated
vendored
Normal file
|
@ -0,0 +1,39 @@
|
|||
# Azure Core Client Module for Go
|
||||
|
||||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore)
|
||||
[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main)
|
||||
[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)
|
||||
|
||||
The `azcore` module provides a set of common interfaces and types for Go SDK client modules.
|
||||
These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html).
|
||||
|
||||
## Getting started
|
||||
|
||||
This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management.
|
||||
|
||||
Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency.
|
||||
To add the latest version to your `go.mod` file, execute the following command.
|
||||
|
||||
```bash
|
||||
go get github.com/Azure/azure-sdk-for-go/sdk/azcore
|
||||
```
|
||||
|
||||
General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore).
|
||||
|
||||
## Contributing
|
||||
This project welcomes contributions and suggestions. Most contributions require
|
||||
you to agree to a Contributor License Agreement (CLA) declaring that you have
|
||||
the right to, and actually do, grant us the rights to use your contribution.
|
||||
For details, visit [https://cla.microsoft.com](https://cla.microsoft.com).
|
||||
|
||||
When you submit a pull request, a CLA-bot will automatically determine whether
|
||||
you need to provide a CLA and decorate the PR appropriately (e.g., label,
|
||||
comment). Simply follow the instructions provided by the bot. You will only
|
||||
need to do this once across all repos using our CLA.
|
||||
|
||||
This project has adopted the
|
||||
[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
For more information, see the
|
||||
[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
|
||||
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any
|
||||
additional questions or comments.
|
29
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
generated
vendored
Normal file
29
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
|||
# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.
|
||||
trigger:
|
||||
branches:
|
||||
include:
|
||||
- main
|
||||
- feature/*
|
||||
- hotfix/*
|
||||
- release/*
|
||||
paths:
|
||||
include:
|
||||
- sdk/azcore/
|
||||
- eng/
|
||||
|
||||
pr:
|
||||
branches:
|
||||
include:
|
||||
- main
|
||||
- feature/*
|
||||
- hotfix/*
|
||||
- release/*
|
||||
paths:
|
||||
include:
|
||||
- sdk/azcore/
|
||||
- eng/
|
||||
|
||||
stages:
|
||||
- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
|
||||
parameters:
|
||||
ServiceDirectory: azcore
|
44
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go
generated
vendored
Normal file
44
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
|||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
package cloud
|
||||
|
||||
var (
|
||||
// AzureChina contains configuration for Azure China.
|
||||
AzureChina = Configuration{
|
||||
ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{},
|
||||
}
|
||||
// AzureGovernment contains configuration for Azure Government.
|
||||
AzureGovernment = Configuration{
|
||||
ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{},
|
||||
}
|
||||
// AzurePublic contains configuration for Azure Public Cloud.
|
||||
AzurePublic = Configuration{
|
||||
ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{},
|
||||
}
|
||||
)
|
||||
|
||||
// ServiceName identifies a cloud service.
|
||||
type ServiceName string
|
||||
|
||||
// ResourceManager is a global constant identifying Azure Resource Manager.
|
||||
const ResourceManager ServiceName = "resourceManager"
|
||||
|
||||
// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager.
|
||||
type ServiceConfiguration struct {
|
||||
// Audience is the audience the client will request for its access tokens.
|
||||
Audience string
|
||||
// Endpoint is the service's base URL.
|
||||
Endpoint string
|
||||
}
|
||||
|
||||
// Configuration configures a cloud.
|
||||
type Configuration struct {
|
||||
// ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory.
|
||||
ActiveDirectoryAuthorityHost string
|
||||
// Services contains configuration for the cloud's services.
|
||||
Services map[ServiceName]ServiceConfiguration
|
||||
}
|
53
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go
generated
vendored
Normal file
53
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
|||
//go:build go1.16
|
||||
// +build go1.16
|
||||
|
||||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
/*
|
||||
Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds.
|
||||
|
||||
Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as
|
||||
"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other
|
||||
Azure Clouds to configure clients appropriately.
|
||||
|
||||
This package contains predefined configuration for well-known sovereign clouds such as Azure Government and
|
||||
Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For
|
||||
example, configuring a credential and ARM client for Azure Government:
|
||||
|
||||
opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment}
|
||||
cred, err := azidentity.NewDefaultAzureCredential(
|
||||
&azidentity.DefaultAzureCredentialOptions{ClientOptions: opts},
|
||||
)
|
||||
handle(err)
|
||||
|
||||
client, err := armsubscription.NewClient(
|
||||
cred, &arm.ClientOptions{ClientOptions: opts},
|
||||
)
|
||||
handle(err)
|
||||
|
||||
Applications deployed to a private cloud such as Azure Stack create a Configuration object with
|
||||
appropriate values:
|
||||
|
||||
c := cloud.Configuration{
|
||||
ActiveDirectoryAuthorityHost: "https://...",
|
||||
Services: map[cloud.ServiceName]cloud.ServiceConfiguration{
|
||||
cloud.ResourceManager: {
|
||||
Audience: "...",
|
||||
Endpoint: "https://...",
|
||||
},
|
||||
},
|
||||
}
|
||||
opts := azcore.ClientOptions{Cloud: c}
|
||||
|
||||
cred, err := azidentity.NewDefaultAzureCredential(
|
||||
&azidentity.DefaultAzureCredentialOptions{ClientOptions: opts},
|
||||
)
|
||||
handle(err)
|
||||
|
||||
client, err := armsubscription.NewClient(
|
||||
cred, &arm.ClientOptions{ClientOptions: opts},
|
||||
)
|
||||
handle(err)
|
||||
*/
|
||||
package cloud
|
75
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go
generated
vendored
Normal file
75
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package azcore

import (
	"context"
	"reflect"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// AccessToken represents an Azure service bearer access token with expiry information.
type AccessToken struct {
	Token     string
	ExpiresOn time.Time
}

// TokenCredential represents a credential capable of providing an OAuth token.
type TokenCredential interface {
	// GetToken requests an access token for the specified set of scopes.
	GetToken(ctx context.Context, options policy.TokenRequestOptions) (AccessToken, error)
}

// holds sentinel values used to send nulls
var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{}

// NullValue is used to send an explicit 'null' within a request.
// This is typically used in JSON-MERGE-PATCH operations to delete a value.
func NullValue[T any]() T {
	t := shared.TypeOfT[T]()
	v, found := nullables[t]
	if !found {
		var o reflect.Value
		if k := t.Kind(); k == reflect.Map {
			o = reflect.MakeMap(t)
		} else if k == reflect.Slice {
			// empty slices appear to all point to the same data block
			// which causes comparisons to become ambiguous. so we create
			// a slice with len/cap of one which ensures a unique address.
			o = reflect.MakeSlice(t, 1, 1)
		} else {
			o = reflect.New(t.Elem())
		}
		v = o.Interface()
		nullables[t] = v
	}
	// return the sentinel object
	return v.(T)
}

// IsNullValue returns true if the field contains a null sentinel value.
// This is used by custom marshallers to properly encode a null value.
func IsNullValue[T any](v T) bool {
	// see if our map has a sentinel object for this *T
	t := reflect.TypeOf(v)
	if o, found := nullables[t]; found {
		o1 := reflect.ValueOf(o)
		v1 := reflect.ValueOf(v)
		// we found it; return true if v points to the sentinel object.
		// NOTE: maps and slices can only be compared to nil, else you get
		// a runtime panic. so we compare addresses instead.
		return o1.Pointer() == v1.Pointer()
	}
	// no sentinel object for this *t
	return false
}

// ClientOptions contains configuration settings for a client's pipeline.
type ClientOptions = policy.ClientOptions
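
For illustration only, a minimal sketch of how a custom marshaller might consume IsNullValue; the Widget type and its Count field are hypothetical, and error handling is elided:

	func (w Widget) MarshalJSON() ([]byte, error) {
		msg := map[string]interface{}{}
		if azcore.IsNullValue(w.Count) {
			// the sentinel was set via azcore.NullValue[*int](), so encode an explicit null
			msg["count"] = nil
		} else if w.Count != nil {
			msg["count"] = w.Count
		}
		// a nil Count that isn't the sentinel is simply omitted
		return json.Marshal(msg)
	}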

257 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go generated vendored Normal file
@@ -0,0 +1,257 @@
//go:build go1.18
// +build go1.18

// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.

/*
Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients.

The middleware consists of three components.

  - One or more Policy instances.
  - A Transporter instance.
  - A Pipeline instance that combines the Policy and Transporter instances.

# Implementing the Policy Interface

A Policy can be implemented in two ways: as a first-class function for a stateless Policy, or as
a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share
the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to
avoid race conditions.

A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can
perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers,
and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request
work, it must call the Next() method on the *policy.Request instance in order to pass the request to the
next Policy in the chain.

When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance
can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response
body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response
and error instances to its caller.

Template for implementing a stateless Policy:

	type policyFunc func(*policy.Request) (*http.Response, error)

	// Do implements the Policy interface on policyFunc.
	func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
		return pf(req)
	}

	func NewMyStatelessPolicy() policy.Policy {
		return policyFunc(func(req *policy.Request) (*http.Response, error) {
			// TODO: mutate/process Request here

			// forward Request to next Policy & get Response/error
			resp, err := req.Next()

			// TODO: mutate/process Response/error here

			// return Response/error to previous Policy
			return resp, err
		})
	}

Template for implementing a stateful Policy:

	type MyStatefulPolicy struct {
		// TODO: add configuration/setting fields here
	}

	// TODO: add initialization args to NewMyStatefulPolicy()
	func NewMyStatefulPolicy() policy.Policy {
		return &MyStatefulPolicy{
			// TODO: initialize configuration/setting fields here
		}
	}

	func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
		// TODO: mutate/process Request here

		// forward Request to next Policy & get Response/error
		resp, err = req.Next()

		// TODO: mutate/process Response/error here

		// return Response/error to previous Policy
		return resp, err
	}
# Implementing the Transporter Interface

The Transporter interface is responsible for sending the HTTP request and returning the corresponding
HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter
implementation uses a shared http.Client from the standard library.

The same stateful/stateless rules for Policy implementations apply to Transporter implementations.
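
For instance, a minimal custom Transporter sketch (not part of the vendored code; the logging is illustrative) that wraps a stdlib http.Client:

	type loggingTransport struct {
		client *http.Client
	}

	// Do implements the Transporter interface, logging each request before sending it.
	func (t loggingTransport) Do(req *http.Request) (*http.Response, error) {
		log.Printf("sending %s %s", req.Method, req.URL)
		return t.client.Do(req)
	}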
# Using Policy and Transporter Instances Via a Pipeline

To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function.

	func NewPipeline(transport Transporter, policies ...Policy) Pipeline

The specified Policy instances form a chain and are invoked in the order provided to NewPipeline
followed by the Transporter.

Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method.

	func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)

	func (p Pipeline) Do(req *Request) (*http.Response, error)

The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter
instances. The response/error is then sent through the same chain of Policy instances in reverse
order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with
TransportA.

	pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC)

The flow of Request and Response looks like the following:

	policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+
	                                                                   |
	                                                      HTTP(S) endpoint
	                                                                   |
	caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response --+

# Creating a Request Instance

The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also
contains some internal state and provides various convenience methods. You create a Request instance
by calling the runtime.NewRequest function:

	func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)

If the Request should contain a body, call the SetBody method.

	func (req *Request) SetBody(body ReadSeekCloser, contentType string) error

A seekable stream is required so that upon retry, the retry Policy instance can seek the stream
back to the beginning before retrying the network request and re-uploading the body.

# Sending an Explicit Null

Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted.

	{
		"delete-me": null
	}

This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as
a means to resolve the ambiguity between a field to be excluded and its zero-value.

	type Widget struct {
		Name  *string `json:",omitempty"`
		Count *int    `json:",omitempty"`
	}

In the above example, Name and Count are defined as pointer-to-type to disambiguate between
a missing value (nil) and a zero-value (0) which might have semantic differences.

In a PATCH operation, any fields left as nil are to have their values preserved. When updating
a Widget's count, one simply specifies the new value for Count, leaving Name nil.

To fulfill the requirement for sending a JSON null, the NullValue() function can be used.

	w := Widget{
		Count: azcore.NullValue[*int](),
	}

This sends an explicit "null" for Count, indicating that any current value for Count should be deleted.
# Processing the Response

When the HTTP response is received, the *http.Response is returned directly. Each Policy instance
can inspect/mutate the *http.Response.

# Built-in Logging

To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program.

By default the logger writes to stderr. This can be customized by calling log.SetListener, providing
a callback that writes to the desired location. Any custom logging implementation MUST provide its
own synchronization to handle concurrent invocations.

See the docs for the log package for further details.
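
For example, a minimal sketch (assuming the azcore/log package's SetEvents/SetListener API) that routes request and response events to the standard library logger:

	import (
		stdlog "log"

		azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
	)

	func init() {
		// only emit request and response events
		azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)
		// write each event to the standard logger instead of stderr
		azlog.SetListener(func(ev azlog.Event, msg string) {
			stdlog.Printf("[%s] %s", ev, msg)
		})
	}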
# Pageable Operations

Pageable operations return potentially large data sets spread over multiple GET requests. The result of
each GET is a "page" of data consisting of a slice of items.

Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T].

	func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse]

The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages
and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked.

	pager := widgetClient.NewListWidgetsPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.TODO())
		// handle err
		for _, widget := range page.Values {
			// process widget
		}
	}

# Long-Running Operations

Long-running operations (LROs) are operations consisting of an initial request to start the operation followed
by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one
of the following values.

  - Succeeded - the LRO completed successfully
  - Failed - the LRO failed to complete
  - Canceled - the LRO was canceled

LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T].

	func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error)

When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started.
It does _not_ mean that the widget has been created or updated (or failed to be created/updated).

The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete,
call the PollUntilDone() method.

	poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil)
	// handle err
	result, err := poller.PollUntilDone(context.TODO(), nil)
	// handle err
	// use result

The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the
context is canceled/timed out.

Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to
this variable behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation
mechanism as required.

# Resume Tokens

Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to
recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method.

	token, err := poller.ResumeToken()
	// handle error

Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls
to poller.Poll() might change the poller's state. In this case, a new token should be created.

After the token has been obtained, it can be used to recreate an instance of the originating poller.

	poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{
		ResumeToken: token,
	})

When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken.

Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO
BeginA() will result in an error.
*/
package azcore

14 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go generated vendored Normal file
@@ -0,0 +1,14 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package azcore

import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"

// ResponseError is returned when a request is made to a service and
// the service returns a non-success HTTP status code.
// Use errors.As() to access this type in the error chain.
type ResponseError = exported.ResponseError
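
A short usage sketch (the client call is hypothetical): unwrap a failed operation with errors.As to inspect the service's status and error codes:

	_, err := client.Get(context.TODO(), "widget1", nil)
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		// respErr.RawResponse holds the full *http.Response if deeper inspection is needed
		fmt.Printf("request failed: status %d, code %s\n", respErr.StatusCode, respErr.ErrorCode)
	}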

48 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go generated vendored Normal file
@@ -0,0 +1,48 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package azcore

import (
	"strings"
)

// ETag is a property used for optimistic concurrency during updates.
// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2.
// An ETag can be empty ("").
type ETag string

// ETagAny is an ETag that represents everything, the value is "*".
const ETagAny ETag = "*"

// Equals does a strong comparison of two ETags. Equals returns true when both
// ETags are not weak and the values of the underlying strings are equal.
func (e ETag) Equals(other ETag) bool {
	return !e.IsWeak() && !other.IsWeak() && e == other
}

// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match
// character-by-character, regardless of either or both being tagged as "weak".
func (e ETag) WeakEquals(other ETag) bool {
	getStart := func(e1 ETag) int {
		if e1.IsWeak() {
			return 2
		}
		return 0
	}
	aStart := getStart(e)
	bStart := getStart(other)

	aVal := e[aStart:]
	bVal := other[bStart:]

	return aVal == bVal
}

// IsWeak specifies whether the ETag is strong or weak.
func (e ETag) IsWeak() bool {
	return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"")
}
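
A small sketch showing how the strong and weak comparisons differ (values are illustrative):

	strong := azcore.ETag(`"xyzzy"`)
	weak := azcore.ETag(`W/"xyzzy"`)

	fmt.Println(strong.Equals(weak))     // false: a weak ETag never matches strongly
	fmt.Println(strong.WeakEquals(weak)) // true: the opaque-tags are identical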

60 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go generated vendored Normal file
@@ -0,0 +1,60 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package exported

import (
	"io"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)

type nopCloser struct {
	io.ReadSeeker
}

func (n nopCloser) Close() error {
	return nil
}

// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
// Exported as streaming.NopCloser().
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
	return nopCloser{rs}
}

// HasStatusCode returns true if the Response's status code is one of the specified values.
// Exported as runtime.HasStatusCode().
func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
	if resp == nil {
		return false
	}
	for _, sc := range statusCodes {
		if resp.StatusCode == sc {
			return true
		}
	}
	return false
}

// Payload reads and returns the response body or an error.
// On a successful read, the response body is cached.
// Subsequent reads will access the cached value.
// Exported as runtime.Payload().
func Payload(resp *http.Response) ([]byte, error) {
	// r.Body won't be a nopClosingBytesReader if downloading was skipped
	if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
		return buf.Bytes(), nil
	}
	bytesBody, err := io.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		return nil, err
	}
	resp.Body = shared.NewNopClosingBytesReader(bytesBody)
	return bytesBody, nil
}
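
Per the "Exported as" comments above, these helpers surface in the public runtime package. A minimal sketch of a policy's response handling using them (runtime.NewResponseError is assumed to be the public constructor for ResponseError):

	resp, err := req.Next()
	if err != nil {
		return nil, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
		return nil, runtime.NewResponseError(resp)
	}
	// Payload caches the body, so later policies (and the caller) can re-read it
	body, err := runtime.Payload(resp)
	// process body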

97 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go generated vendored Normal file
@@ -0,0 +1,97 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package exported

import (
	"errors"
	"fmt"
	"net/http"

	"golang.org/x/net/http/httpguts"
)

// Policy represents an extensibility point for the Pipeline that can mutate the specified
// Request and react to the received Response.
// Exported as policy.Policy.
type Policy interface {
	// Do applies the policy to the specified Request. When implementing a Policy, mutate the
	// request before calling req.Next() to move on to the next policy, and respond to the result
	// before returning to the caller.
	Do(req *Request) (*http.Response, error)
}

// Pipeline represents a primitive for sending HTTP requests and receiving responses.
// Its behavior can be extended by specifying policies during construction.
// Exported as runtime.Pipeline.
type Pipeline struct {
	policies []Policy
}

// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
// Exported as policy.Transporter.
type Transporter interface {
	// Do sends the HTTP request and returns the HTTP response or error.
	Do(req *http.Request) (*http.Response, error)
}

// transportPolicy adapts a Transporter to the Policy interface.
type transportPolicy struct {
	trans Transporter
}

func (tp transportPolicy) Do(req *Request) (*http.Response, error) {
	if tp.trans == nil {
		return nil, errors.New("missing transporter")
	}
	resp, err := tp.trans.Do(req.Raw())
	if err != nil {
		return nil, err
	} else if resp == nil {
		// there was no response and no error (rare but can happen)
		// this ensures the retry policy will retry the request
		return nil, errors.New("received nil response")
	}
	return resp, nil
}

// NewPipeline creates a new Pipeline object from the specified Policies.
// Not directly exported, but used as part of runtime.NewPipeline().
func NewPipeline(transport Transporter, policies ...Policy) Pipeline {
	// transport policy must always be the last in the slice
	policies = append(policies, transportPolicy{trans: transport})
	return Pipeline{
		policies: policies,
	}
}

// Do is called for each and every HTTP request. It passes the request through all
// the Policy objects (which can transform the Request's URL/query parameters/headers)
// and ultimately sends the transformed HTTP request over the network.
func (p Pipeline) Do(req *Request) (*http.Response, error) {
	if req == nil {
		return nil, errors.New("request cannot be nil")
	}
	// check copied from Transport.roundTrip()
	for k, vv := range req.Raw().Header {
		if !httpguts.ValidHeaderFieldName(k) {
			if req.Raw().Body != nil {
				req.Raw().Body.Close()
			}
			return nil, fmt.Errorf("invalid header field name %q", k)
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				if req.Raw().Body != nil {
					req.Raw().Body.Close()
				}
				return nil, fmt.Errorf("invalid header field value %q for key %v", v, k)
			}
		}
	}
	req.policies = p.policies
	return req.Next()
}

156 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go generated vendored Normal file
@@ -0,0 +1,156 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package exported

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"reflect"
	"strconv"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)

// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
// Don't use this type directly, use NewRequest() instead.
// Exported as policy.Request.
type Request struct {
	req      *http.Request
	body     io.ReadSeekCloser
	policies []Policy
	values   opValues
}

type opValues map[reflect.Type]interface{}

// set adds/changes a value
func (ov opValues) set(value interface{}) {
	ov[reflect.TypeOf(value)] = value
}

// get retrieves a value previously stored via set()
func (ov opValues) get(value interface{}) bool {
	v, ok := ov[reflect.ValueOf(value).Elem().Type()]
	if ok {
		reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v))
	}
	return ok
}

// NewRequest creates a new Request with the specified input.
// Exported as runtime.NewRequest().
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) {
	req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil)
	if err != nil {
		return nil, err
	}
	if req.URL.Host == "" {
		return nil, errors.New("no Host in request URL")
	}
	if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") {
		return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme)
	}
	return &Request{req: req}, nil
}

// Body returns the original body specified when the Request was created.
func (req *Request) Body() io.ReadSeekCloser {
	return req.body
}

// Raw returns the underlying HTTP request.
func (req *Request) Raw() *http.Request {
	return req.req
}

// Next calls the next policy in the pipeline.
// If there are no more policies, nil and an error are returned.
// This method is intended to be called from pipeline policies.
// To send a request through a pipeline call Pipeline.Do().
func (req *Request) Next() (*http.Response, error) {
	if len(req.policies) == 0 {
		return nil, errors.New("no more policies")
	}
	nextPolicy := req.policies[0]
	nextReq := *req
	nextReq.policies = nextReq.policies[1:]
	return nextPolicy.Do(&nextReq)
}

// SetOperationValue adds/changes a mutable key/value associated with a single operation.
func (req *Request) SetOperationValue(value interface{}) {
	if req.values == nil {
		req.values = opValues{}
	}
	req.values.set(value)
}

// OperationValue looks for a value set by SetOperationValue().
func (req *Request) OperationValue(value interface{}) bool {
	if req.values == nil {
		return false
	}
	return req.values.get(value)
}

// SetBody sets the specified ReadSeekCloser as the HTTP request body.
func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error {
	// Set the body and content length.
	size, err := body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
	if err != nil {
		return err
	}
	if size == 0 {
		body.Close()
		return nil
	}
	_, err = body.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	req.Raw().GetBody = func() (io.ReadCloser, error) {
		_, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
		return body, err
	}
	// keep a copy of the original body. this is to handle cases
	// where req.Body is replaced, e.g. httputil.DumpRequest and friends.
	req.body = body
	req.req.Body = body
	req.req.ContentLength = size
	req.req.Header.Set(shared.HeaderContentType, contentType)
	req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
	return nil
}

// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
func (req *Request) RewindBody() error {
	if req.body != nil {
		// Reset the stream back to the beginning and restore the body
		_, err := req.body.Seek(0, io.SeekStart)
		req.req.Body = req.body
		return err
	}
	return nil
}

// Close closes the request body.
func (req *Request) Close() error {
	if req.body == nil {
		return nil
	}
	return req.body.Close()
}

// Clone returns a deep copy of the request with its context changed to ctx.
func (req *Request) Clone(ctx context.Context) *Request {
	r2 := *req
	r2.req = req.req.Clone(ctx)
	return &r2
}
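
As with the other internal types, Request is consumed through the public runtime and streaming packages. A minimal sketch of building a request with a JSON body and sending it through a pipeline (the endpoint and pipeline variable are placeholders):

	req, err := runtime.NewRequest(ctx, http.MethodPut, "https://example.com/widgets/w1")
	if err != nil {
		return err
	}
	// streaming.NopCloser wraps the seekable reader so retries can rewind the body
	body := streaming.NopCloser(strings.NewReader(`{"count":42}`))
	if err := req.SetBody(body, "application/json"); err != nil {
		return err
	}
	resp, err := pipeline.Do(req)
	// handle resp/err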