Merge branch 'master' into vmui/issue-5678/add-filters-logs

# Conflicts:
#	app/vmui/packages/vmui/src/pages/ExploreLogs/ExploreLogsBody/ExploreLogsBody.tsx
#	docs/CHANGELOG.md

Commit cc63b9cbcf
859 changed files with 34247 additions and 13798 deletions

Makefile (2 changes)
@@ -466,7 +466,7 @@ benchmark-pure:
 vendor-update:
 	go get -u -d ./lib/...
 	go get -u -d ./app/...
-	go mod tidy -compat=1.22
+	go mod tidy -compat=1.21
 	go mod vendor
 
 app-local:

README.md (169 changes)
@@ -23,7 +23,8 @@ Documentation for the cluster version of VictoriaMetrics is available [here](htt
 Learn more about [key concepts](https://docs.victoriametrics.com/keyConcepts.html) of VictoriaMetrics and follow the
 [quick start guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.
 
-If you have questions about VictoriaMetrics, then feel free asking them in the [VictoriaMetrics community Slack chat](https://slack.victoriametrics.com/).
+If you have questions about VictoriaMetrics, then feel free asking them in the [VictoriaMetrics community Slack chat](https://victoriametrics.slack.com/),
+you can join it via [Slack Inviter](https://slack.victoriametrics.com/).
 
 [Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
 See [features available in enterprise package](https://docs.victoriametrics.com/enterprise.html).
@@ -35,7 +36,7 @@ VictoriaMetrics is developed at a fast pace, so it is recommended to check the [
 and to perform [regular upgrades](#how-to-upgrade-victoriametrics).
 
 [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise/) provides long-term support lines of releases (LTS releases) -
-see [these docs](https://docs.victoriametrics.com/LTS-releases.md).
+see [these docs](https://docs.victoriametrics.com/lts-releases/).
 
 VictoriaMetrics has achieved security certifications for Database Software Development and Software-Based Monitoring Services.
 We apply strict security measures in everything we do. See [Security page](https://victoriametrics.com/security/) for more details.
@@ -364,7 +365,8 @@ Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](
 
 ## vmui
 
-VictoriaMetrics provides UI for query troubleshooting and exploration. The UI is available at `http://victoriametrics:8428/vmui`.
+VictoriaMetrics provides UI for query troubleshooting and exploration. The UI is available at `http://victoriametrics:8428/vmui`
+(or at `http://<vmselect>:8481/select/<accountID>/vmui/` in [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/cluster-victoriametrics/)).
 The UI allows exploring query results via graphs and tables. It also provides the following features:
 
 - Explore:
@@ -910,9 +912,9 @@ VictoriaMetrics supports the following handlers from [Prometheus querying API](h
 
 * [/api/v1/query](https://docs.victoriametrics.com/keyConcepts.html#instant-query)
 * [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)
-* [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers)
-* [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names)
-* [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values)
+* [/api/v1/series](https://docs.victoriametrics.com/url-examples/#apiv1series)
+* [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels)
+* [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples/#apiv1labelvalues)
 * [/api/v1/status/tsdb](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats). See [these docs](#tsdb-stats) for details.
 * [/api/v1/targets](https://prometheus.io/docs/prometheus/latest/querying/api/#targets) - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter) for more details.
 * [/federate](https://prometheus.io/docs/prometheus/latest/federation/) - see [these docs](#federation) for more details.
@@ -1128,17 +1130,16 @@ as a service for your OS. A [snap package](https://snapcraft.io/victoriametrics)
 
 ## How to work with snapshots
 
-VictoriaMetrics can create [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282)
-for all the data stored under `-storageDataPath` directory.
-Navigate to `http://<victoriametrics-addr>:8428/snapshot/create` in order to create an instant snapshot.
-The page will return the following JSON response:
+Send a request to `http://<victoriametrics-addr>:8428/snapshot/create` endpoint in order to create
+an [instant snapshot](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
+The page returns the following JSON response on successful creation of snapshot:
 
 ```json
 {"status":"ok","snapshot":"<snapshot-name>"}
 ```
 
 Snapshots are created under `<-storageDataPath>/snapshots` directory, where `<-storageDataPath>`
-is the command-line flag value. Snapshots can be archived to backup storage at any time
+is the corresponding command-line flag value. Snapshots can be archived to backup storage at any time
 with [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
 
 Snapshots consist of a mix of hard-links and soft-links to various files and directories inside `-storageDataPath`.
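
The snapshot endpoint documented in this hunk is easy to exercise programmatically. Below is a minimal Go sketch (not part of the diff above) that calls the documented `/snapshot/create` endpoint and decodes the documented JSON response; the address is a placeholder for a real single-node VictoriaMetrics instance.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// snapshotResponse mirrors the JSON documented above:
// {"status":"ok","snapshot":"<snapshot-name>"}
type snapshotResponse struct {
	Status   string `json:"status"`
	Snapshot string `json:"snapshot"`
}

func main() {
	const addr = "http://localhost:8428" // placeholder address

	resp, err := http.Get(addr + "/snapshot/create")
	if err != nil {
		log.Fatalf("cannot create snapshot: %s", err)
	}
	defer resp.Body.Close()

	var sr snapshotResponse
	if err := json.NewDecoder(resp.Body).Decode(&sr); err != nil {
		log.Fatalf("cannot parse response: %s", err)
	}
	fmt.Printf("status=%q snapshot=%q\n", sr.Status, sr.Snapshot)
}
```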
@@ -1150,20 +1151,32 @@ for more details. This adds some restrictions on what can be done with the conte
 - Do not copy subdirectories inside `<-storageDataPath>/snapshot` with `cp`, `rsync` or similar commands, since there are high chances
   that these commands won't copy some data stored in the snapshot. Prefer using [vmbackup](https://docs.victoriametrics.com/vmbackup.html) for making copies of snapshot data.
 
-The `http://<victoriametrics-addr>:8428/snapshot/list` page contains the list of available snapshots.
+See also [snapshot troubleshooting](#snapshot-troubleshooting).
 
-Navigate to `http://<victoriametrics-addr>:8428/snapshot/delete?snapshot=<snapshot-name>` in order
-to delete `<snapshot-name>` snapshot.
+The `http://<victoriametrics-addr>:8428/snapshot/list` endpoint returns the list of available snapshots.
+
+Send a query to `http://<victoriametrics-addr>:8428/snapshot/delete?snapshot=<snapshot-name>` in order
+to delete the snapshot with `<snapshot-name>` name.
 
 Navigate to `http://<victoriametrics-addr>:8428/snapshot/delete_all` in order to delete all the snapshots.
 
-Steps for restoring from a snapshot:
+### How to restore from a snapshot
 
 1. Stop VictoriaMetrics with `kill -INT`.
 1. Restore snapshot contents from backup with [vmrestore](https://docs.victoriametrics.com/vmrestore.html)
    to the directory pointed by `-storageDataPath`.
 1. Start VictoriaMetrics.
 
+### Snapshot troubleshooting
+
+Snapshot doesn't occupy disk space just after its' creation thanks to the [used approach](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
+Old snapshots may start occupying additional disk space if they refer to old parts, which were already deleted during [background merge](#storage).
+That's why it is recommended deleting old snapshots after they are no longer needed in order to free up disk space used by old snapshots.
+This can be done either manually or automatically if the `-snapshotsMaxAge` command-line flag is set. Make sure that the backup process has enough time to complete
+when setting `-snapshotsMaxAge` command-line flag.
+
+VictoriaMetrics exposes the current number of available snapshots via `vm_snapshots` metric at [`/metrics`](#monitoring) page.
+
 ## How to delete time series
 
 Send a request to `http://<victoriametrics-addr>:8428/api/v1/admin/tsdb/delete_series?match[]=<timeseries_selector_for_delete>`,
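
The list/delete endpoints from the hunk above can be driven the same way. A hedged Go sketch follows; the snapshot name is a made-up placeholder and error handling is kept minimal.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
)

func main() {
	const addr = "http://localhost:8428" // placeholder address

	// List available snapshots.
	resp, err := http.Get(addr + "/snapshot/list")
	if err != nil {
		log.Fatal(err)
	}
	body, err := io.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("snapshots: %s\n", body)

	// Delete one snapshot by name (placeholder name).
	name := url.QueryEscape("20240101120000-16AB23CD")
	resp, err = http.Get(addr + "/snapshot/delete?snapshot=" + name)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```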
@@ -1279,7 +1292,7 @@ where:
 * `unix_s` - unix seconds
 * `unix_ms` - unix milliseconds
 * `unix_ns` - unix nanoseconds
-* `rfc3339` - [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) time
+* `rfc3339` - [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) time (in the timezone of the server)
 * `custom:<layout>` - custom layout for time that is supported by [time.Format](https://golang.org/pkg/time/#Time.Format) function from Go.
 
 * `<timeseries_selector_for_export>` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)
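
Since `custom:<layout>` is interpreted by Go's `time.Format`, the following standalone snippet (an illustrative sketch, not part of the diff) shows how a custom layout relates to the `rfc3339` option:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Go layouts are written against the reference time
	// Mon Jan 2 15:04:05 MST 2006. A layout passed to the export API as
	// custom:2006-01-02 15:04:05 formats timestamps like the first line below.
	ts := time.Unix(1700000000, 0).UTC()
	fmt.Println(ts.Format("2006-01-02 15:04:05")) // custom layout
	fmt.Println(ts.Format(time.RFC3339))          // what the rfc3339 option produces
}
```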
@@ -1523,10 +1536,10 @@ VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-sc
 
 ### Sending data via OpenTelemetry
 
-VictoriaMetrics supports data ingestion via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at `/opentelemetry/api/v1/push` path.
+VictoriaMetrics supports data ingestion via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at `/opentelemetry/v1/metrics` path.
 
-VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`.
-Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed data to `/opentelemetry/api/v1/push`.
+VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/v1/metrics`.
+Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed data to `/opentelemetry/v1/metrics`.
 
 ## JSON line format
 
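
A hedged Go sketch of a client for the renamed path follows. It only demonstrates the transport details named above (protobuf body, optional `Content-Encoding: gzip`); building the actual OTLP payload is out of scope, and the `Content-Type` value shown is an assumption rather than something this commit specifies.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"log"
	"net/http"
)

// pushOTLP sends a gzip-compressed, protobuf-encoded OTLP metrics payload
// to VictoriaMetrics. Producing the payload (a marshaled
// ExportMetricsServiceRequest) is not shown here.
func pushOTLP(addr string, payload []byte) error {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(payload); err != nil {
		return err
	}
	if err := zw.Close(); err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPost, addr+"/opentelemetry/v1/metrics", &buf)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-protobuf") // assumed header value
	req.Header.Set("Content-Encoding", "gzip")               // matches the docs above
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	if err := pushOTLP("http://localhost:8428", nil); err != nil {
		log.Fatal(err)
	}
}
```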
@@ -1651,21 +1664,68 @@ See also [resource usage limits docs](#resource-usage-limits).
 
 By default, VictoriaMetrics is tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
 
-- `-memory.allowedPercent` and `-memory.allowedBytes` limit the amounts of memory, which may be used for various internal caches at VictoriaMetrics. Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
-- `-search.maxMemoryPerQuery` limits the amounts of memory, which can be used for processing a single query. Queries, which need more memory, are rejected. Heavy queries, which select big number of time series, may exceed the per-query memory limit by a small percent. The total memory limit for concurrently executed queries can be estimated as `-search.maxMemoryPerQuery` multiplied by `-search.maxConcurrentRequests`.
-- `-search.maxUniqueTimeseries` limits the number of unique time series a single query can find and process. VictoriaMetrics keeps in memory some metainformation about the time series located by each query and spends some CPU time for processing the found time series. This means that the maximum memory usage and CPU usage a single query can use is proportional to `-search.maxUniqueTimeseries`.
-- `-search.maxQueryDuration` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled. This allows saving CPU and RAM when executing unexpected heavy queries.
-- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries. See also `-search.maxMemoryPerQuery` command-line flag.
-- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
+- `-memory.allowedPercent` and `-memory.allowedBytes` limit the amounts of memory, which may be used for various internal caches at VictoriaMetrics.
+  Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
+- `-search.maxMemoryPerQuery` limits the amounts of memory, which can be used for processing a single query. Queries, which need more memory, are rejected.
+  Heavy queries, which select big number of time series, may exceed the per-query memory limit by a small percent. The total memory limit
+  for concurrently executed queries can be estimated as `-search.maxMemoryPerQuery` multiplied by `-search.maxConcurrentRequests`.
+- `-search.maxUniqueTimeseries` limits the number of unique time series a single query can find and process. VictoriaMetrics keeps in memory
+  some metainformation about the time series located by each query and spends some CPU time for processing the found time series.
+  This means that the maximum memory usage and CPU usage a single query can use is proportional to `-search.maxUniqueTimeseries`.
+- `-search.maxQueryDuration` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled.
+  This allows saving CPU and RAM when executing unexpected heavy queries.
+- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means
+  bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB`
+  of additional memory. So it is better to limit the number of concurrent queries, while pausing additional incoming queries if the concurrency limit is reached.
+  VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for paused queries. See also `-search.maxMemoryPerQuery` command-line flag.
+- `-search.maxQueueDuration` limits the maximum duration queries may wait for execution when `-search.maxConcurrentRequests` concurrent queries are executed.
+- `-search.ignoreExtraFiltersAtLabelsAPI` enables ignoring of `match[]`, [`extra_filters[]` and `extra_label`](https://docs.victoriametrics.com/#prometheus-querying-api-enhancements)
+  query args at [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels) and
+  [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples/#apiv1labelvalues).
+  This may be useful for reducing the load on VictoriaMetrics if the provided extra filters match too many time series.
+  The downside is that the endpoints can return labels and series, which do not match the provided extra filters.
+- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes
+  raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory
+  and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag
+  allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
 - `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
 - `-search.maxResponseSeries` limits the number of time series a single query can return from [`/api/v1/query`](https://docs.victoriametrics.com/keyConcepts.html#instant-query)
   and [`/api/v1/query_range`](https://docs.victoriametrics.com/keyConcepts.html#range-query).
-- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
-- `-search.maxPointsSubqueryPerTimeseries` limits the number of calculated points, which can be generated per each matching time series during [subquery](https://docs.victoriametrics.com/MetricsQL.html#subqueries) evaluation.
-- `-search.maxSeriesPerAggrFunc` limits the number of time series, which can be generated by [MetricsQL aggregate functions](https://docs.victoriametrics.com/MetricsQL.html#aggregate-functions) in a single query.
-- `-search.maxSeries` limits the number of time series, which may be returned from [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers). This endpoint is used mostly by Grafana for auto-completion of metric names, label names and label values. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxSeries` to quite low value in order limit CPU and memory usage.
-- `-search.maxTagKeys` limits the number of items, which may be returned from [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names). This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagKeys` to quite low value in order to limit CPU and memory usage.
-- `-search.maxTagValues` limits the number of items, which may be returned from [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values). This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage.
+- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series
+  from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
+- `-search.maxPointsSubqueryPerTimeseries` limits the number of calculated points, which can be generated per each matching time series
+  during [subquery](https://docs.victoriametrics.com/MetricsQL.html#subqueries) evaluation.
+- `-search.maxSeriesPerAggrFunc` limits the number of time series, which can be generated by [MetricsQL aggregate functions](https://docs.victoriametrics.com/MetricsQL.html#aggregate-functions)
+  in a single query.
+- `-search.maxSeries` limits the number of time series, which may be returned from [/api/v1/series](https://docs.victoriametrics.com/url-examples/#apiv1series).
+  This endpoint is used mostly by Grafana for auto-completion of metric names, label names and label values. Queries to this endpoint may take big amounts
+  of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+  In this case it might be useful to set the `-search.maxSeries` to quite low value in order limit CPU and memory usage.
+  See also `-search.maxLabelsAPIDuration` and `-search.maxLabelsAPISeries`.
+- `-search.maxTagKeys` limits the number of items, which may be returned from [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels).
+  This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory
+  when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+  In this case it might be useful to set the `-search.maxTagKeys` to quite low value in order to limit CPU and memory usage.
+  See also `-search.maxLabelsAPIDuration` and `-search.maxLabelsAPISeries`.
+- `-search.maxTagValues` limits the number of items, which may be returned from [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples/#apiv1labelvalues).
+  This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory
+  when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+  In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage.
+  See also `-search.maxLabelsAPIDuration` and `-search.maxLabelsAPISeries`.
+- `-search.maxLabelsAPISeries` limits the number of time series, which can be scanned when performing [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels),
+  [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples/#apiv1labelvalues)
+  or [/api/v1/series](https://docs.victoriametrics.com/url-examples/#apiv1series) requests.
+  These endpoints are used mostly by Grafana for auto-completion of label names and label values. Queries to these endpoints may take big amounts of CPU time and memory
+  when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+  In this case it might be useful to set the `-search.maxLabelsAPISeries` to quite low value in order to limit CPU and memory usage.
+  See also `-search.maxLabelsAPIDuration` and `-search.ignoreExtraFiltersAtLabelsAPI`.
+- `-search.maxLabelsAPIDuration` limits the duration for requests to [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels),
+  [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples/#apiv1labelvalues)
+  or [/api/v1/series](https://docs.victoriametrics.com/url-examples/#apiv1series).
+  These endpoints are used mostly by Grafana for auto-completion of label names and label values. Queries to these endpoints may take big amounts of CPU time and memory
+  when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+  In this case it might be useful to set the `-search.maxLabelsAPIDuration` to quite low value in order to limit CPU and memory usage.
+  See also `-search.maxLabelsAPISeries` and `-search.ignoreExtraFiltersAtLabelsAPI`.
 - `-search.maxTagValueSuffixesPerSearch` limits the number of entries, which may be returned from `/metrics/find` endpoint. See [Graphite Metrics API usage docs](#graphite-metrics-api-usage).
 
 See also [resource usage limits at VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#resource-usage-limits),
@@ -1726,7 +1786,7 @@ This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/
 If multiple raw samples have **the same timestamp** on the given `-dedup.minScrapeInterval` discrete interval,
 then the sample with **the biggest value** is kept.
 
-[Prometheus stalenes markers](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) are processed as any other value during de-duplication.
+[Prometheus staleness markers](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) are processed as any other value during de-duplication.
 If raw sample with the biggest timestamp on `-dedup.minScrapeInterval` contains a stale marker, then it is kept after the deduplication.
 This allows properly preserving staleness markers during the de-duplication.
 
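
The deduplication rule described above (keep the sample with the biggest timestamp per discrete interval; on equal timestamps keep the biggest value) can be illustrated with a small standalone Go sketch. This is an illustration of the documented behaviour only, not the actual VictoriaMetrics implementation.

```go
package main

import "fmt"

type sample struct {
	ts    int64 // unix milliseconds
	value float64
}

// deduplicate keeps one sample per discrete interval d (in ms) from the
// time-ordered input: the one with the biggest timestamp, and on equal
// timestamps the one with the biggest value.
func deduplicate(samples []sample, d int64) []sample {
	var out []sample
	for _, s := range samples {
		if len(out) > 0 && out[len(out)-1].ts/d == s.ts/d {
			last := &out[len(out)-1]
			if s.ts > last.ts || (s.ts == last.ts && s.value > last.value) {
				*last = s
			}
			continue
		}
		out = append(out, s)
	}
	return out
}

func main() {
	in := []sample{{1000, 1}, {15000, 2}, {29000, 3}, {29000, 5}, {31000, 4}}
	fmt.Println(deduplicate(in, 30000)) // -> [{29000 5} {31000 4}]
}
```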
@@ -1752,6 +1812,12 @@ so the de-duplication consistently leaves samples for one `vmagent` instance and
 from other `vmagent` instances.
 See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.
 
+VictoriaMetrics stores all the ingested samples to disk even if `-dedup.minScrapeInterval` command-line flag is set.
+The ingested samples are de-duplicated during [background merges](#storage) and during query execution.
+VictoriaMetrics also supports de-duplication during data ingestion before the data is stored to disk, via `-streamAggr.dedupInterval` command-line flag -
+see [these docs](https://docs.victoriametrics.com/stream-aggregation/#deduplication).
+
+
 ## Storage
 
 VictoriaMetrics buffers the ingested data in memory for up to a second. Then the buffered data is written to in-memory `parts`,
@@ -2259,7 +2325,10 @@ and [cardinality explorer docs](#cardinality-explorer).
 
 * VictoriaMetrics ignores `NaN` values during data ingestion.
 
-See also [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html).
+See also:
+
+- [Snapshot troubleshooting](#snapshot-troubleshooting).
+- [General troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html).
 
 ## Push metrics
 
@@ -2488,7 +2557,7 @@ Contact us with any questions regarding VictoriaMetrics at [info@victoriametrics
 
 Feel free asking any questions regarding VictoriaMetrics:
 
-* [Slack](https://slack.victoriametrics.com/)
+* [Slack Inviter](https://slack.victoriametrics.com/) and [Slack channel](https://victoriametrics.slack.com/)
 * [Twitter](https://twitter.com/VictoriaMetrics/)
 * [Linkedin](https://www.linkedin.com/company/victoriametrics/)
 * [Reddit](https://www.reddit.com/r/VictoriaMetrics/)
@@ -2584,7 +2653,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -datadog.sanitizeMetricName
      Sanitize metric names for the ingested DataDog data to comply with DataDog behaviour described at https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics (default true)
   -dedup.minScrapeInterval duration
-     Leave only the last sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling
+     Leave only the last sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/#deduplication
   -deleteAuthKey value
      authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries
      Flag value can be read from the given file when using -deleteAuthKey=file:///abs/path/to/file or -deleteAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -deleteAuthKey=http://host/path or -deleteAuthKey=https://host/path
@@ -2628,15 +2697,15 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -graphiteTrimTimestamp duration
      Trim timestamps for Graphite data to this duration. Minimum practical duration is 1s. Higher duration (i.e. 1m) may be used for reducing disk space usage for timestamp data (default 1s)
   -http.connTimeout duration
-     Incoming http connections are closed after the configured timeout. This may help to spread the incoming load among a cluster of services behind a load balancer. Please note that the real timeout may be bigger by up to 10% as a protection against the thundering herd problem
+     Incoming connections to -httpListenAddr are closed after the configured timeout. This may help evenly spreading load among a cluster of services behind TCP-level load balancer. Zero value disables closing of incoming connections (default 2m0s)
   -http.disableResponseCompression
      Disable compression of HTTP responses to save CPU resources. By default, compression is enabled to save network bandwidth
-  -http.header.csp default-src 'self'
-     Value for 'Content-Security-Policy' header, recommended: default-src 'self'
+  -http.header.csp string
+     Value for 'Content-Security-Policy' header, recommended: "default-src 'self'"
   -http.header.frameOptions string
      Value for 'X-Frame-Options' header
-  -http.header.hsts max-age=31536000; includeSubDomains
-     Value for 'Strict-Transport-Security' header, recommended: max-age=31536000; includeSubDomains
+  -http.header.hsts string
+     Value for 'Strict-Transport-Security' header, recommended: 'max-age=31536000; includeSubDomains'
   -http.idleConnTimeout duration
      Timeout for incoming idle http connections (default 1m0s)
   -http.maxGracefulShutdownDuration duration
@@ -2719,7 +2788,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -loggerWarnsPerSecondLimit int
      Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
   -maxConcurrentInserts int
-     The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration (default 32)
+     The maximum number of concurrent insert requests. Default value depends on the number of CPU cores and should work for most cases since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration
   -maxInsertRequestSize size
      The maximum size in bytes of a single Prometheus remote_write API request
      Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
@@ -2907,6 +2976,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
      The maximum number of points per series Graphite render API can return (default 1000000)
   -search.graphiteStorageStep duration
      The interval between datapoints stored in the database. It is used at Graphite Render API handler for normalizing the interval between datapoints in case it isn't normalized. It can be overridden by sending 'storage_step' query arg to /render API or by sending the desired interval via 'Storage-Step' http header during querying /render API (default 10s)
+  -search.ignoreExtraFiltersAtLabelsAPI
+     Whether to ignore match[], extra_filters[] and extra_label query args at /api/v1/labels and /api/v1/label/.../values . This may be useful for decreasing load on VictoriaMetrics when extra filters match too many time series. The downside is that superfluous labels or series could be returned, which do not match the extra filters. See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration
   -search.latencyOffset duration
      The time when data points become visible in query results after the collection. It can be overridden on per-query basis via latency_offset arg. Too small value can result in incomplete last points for query results (default 30s)
   -search.logQueryMemoryUsage size
@@ -2928,6 +2999,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
      The maximum number of tag keys returned from Graphite API, which returns tags. See https://docs.victoriametrics.com/#graphite-tags-api-usage (default 100000)
   -search.maxGraphiteTagValues int
      The maximum number of tag values returned from Graphite API, which returns tag values. See https://docs.victoriametrics.com/#graphite-tags-api-usage (default 100000)
+  -search.maxLabelsAPIDuration duration
+     The maximum duration for /api/v1/labels, /api/v1/label/.../values and /api/v1/series requests. See also -search.maxLabelsAPISeries and -search.ignoreExtraFiltersAtLabelsAPI (default 5s)
+  -search.maxLabelsAPISeries int
+     The maximum number of time series, which could be scanned when searching for the matching time series at /api/v1/labels and /api/v1/label/.../values. This option allows limiting memory usage and CPU usage. See also -search.maxLabelsAPIDuration, -search.maxTagKeys, -search.maxTagValues and -search.ignoreExtraFiltersAtLabelsAPI (default 1000000)
   -search.maxLookback duration
      Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
   -search.maxMemoryPerQuery size
@@ -2963,11 +3038,11 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -search.maxTSDBStatusSeries int
      The maximum number of time series, which can be processed during the call to /api/v1/status/tsdb. This option allows limiting memory usage (default 10000000)
   -search.maxTagKeys int
-     The maximum number of tag keys returned from /api/v1/labels (default 100000)
+     The maximum number of tag keys returned from /api/v1/labels . See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration (default 100000)
   -search.maxTagValueSuffixesPerSearch int
      The maximum number of tag value suffixes returned from /metrics/find (default 100000)
   -search.maxTagValues int
-     The maximum number of tag values returned from /api/v1/label/<label_name>/values (default 100000)
+     The maximum number of tag values returned from /api/v1/label/<label_name>/values . See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration (default 100000)
   -search.maxUniqueTimeseries int
      The maximum number of unique time series, which can be selected during /api/v1/query and /api/v1/query_range queries. This option allows limiting memory usage (default 300000)
   -search.maxWorkersPerQuery int
@@ -3004,7 +3079,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
      authKey, which must be passed in query string to /snapshot* pages
      Flag value can be read from the given file when using -snapshotAuthKey=file:///abs/path/to/file or -snapshotAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -snapshotAuthKey=http://host/path or -snapshotAuthKey=https://host/path
   -snapshotCreateTimeout duration
-     The timeout for creating new snapshot. If set, make sure that timeout is lower than backup period
+     Deprecated: this flag does nothing
   -snapshotsMaxAge value
      Automatically delete snapshots older than -snapshotsMaxAge if it is set to non-zero duration. Make sure that backup process has enough time to finish the backup before the corresponding snapshot is automatically deleted
      The following optional suffixes are supported: s (second), m (minute), h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 0)
@@ -3034,9 +3109,13 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -streamAggr.config string
      Optional path to file with stream aggregation config. See https://docs.victoriametrics.com/stream-aggregation.html . See also -streamAggr.keepInput, -streamAggr.dropInput and -streamAggr.dedupInterval
   -streamAggr.dedupInterval duration
-     Input samples are de-duplicated with this interval before being aggregated. Only the last sample per each time series per each interval is aggregated if the interval is greater than zero
+     Input samples are de-duplicated with this interval before optional aggregation with -streamAggr.config . See also -streamAggr.dropInputLabels and -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation.html#deduplication
   -streamAggr.dropInput
      Whether to drop all the input samples after the aggregation with -streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples are stored in the database. See also -streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation.html
+  -streamAggr.dropInputLabels array
+     An optional list of labels to drop from samples before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation.html#dropping-unneeded-labels
+     Supports an array of values separated by comma or specified via multiple flags.
+     Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
   -streamAggr.keepInput
      Whether to keep all the input samples after the aggregation with -streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples are stored in the database. See also -streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation.html
   -tls array

SECURITY.md
@@ -7,8 +7,8 @@ The following versions of VictoriaMetrics receive regular security fixes:
 | Version | Supported          |
 |---------|--------------------|
 | [latest release](https://docs.victoriametrics.com/CHANGELOG.html) | :white_check_mark: |
-| v1.97.x [LTS line](https://docs.victoriametrics.com/LTS-releases.html) | :white_check_mark: |
-| v1.93.x [LTS line](https://docs.victoriametrics.com/LTS-releases.html) | :white_check_mark: |
+| v1.97.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
+| v1.93.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
 | other releases | :x: |
 
 See [this page](https://victoriametrics.com/security/) for more details.

app/victoria-metrics/main.go
@@ -31,7 +31,7 @@ var (
 		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
 		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
 	minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Leave only the last sample in every time series per each discrete interval "+
-		"equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling")
+		"equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/#deduplication")
 	dryRun = flag.Bool("dryRun", false, "Whether to check config files without running VictoriaMetrics. The following config files are checked: "+
 		"-promscrape.config, -relabelConfig and -streamAggr.config. Unknown config entries aren't allowed in -promscrape.config by default. "+
 		"This can be changed with -promscrape.config.strictParse=false command-line flag")

app/victoria-metrics/main_test.go
@@ -39,11 +39,13 @@
 )
 
 const (
-	testReadHTTPPath          = "http://127.0.0.1" + testHTTPListenAddr
-	testWriteHTTPPath         = "http://127.0.0.1" + testHTTPListenAddr + "/write"
-	testOpenTSDBWriteHTTPPath = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
-	testPromWriteHTTPPath     = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
-	testHealthHTTPPath        = "http://127.0.0.1" + testHTTPListenAddr + "/health"
+	testReadHTTPPath           = "http://127.0.0.1" + testHTTPListenAddr
+	testWriteHTTPPath          = "http://127.0.0.1" + testHTTPListenAddr + "/write"
+	testOpenTSDBWriteHTTPPath  = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
+	testPromWriteHTTPPath      = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
+	testImportCSVWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/import/csv"
+
+	testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
 )
 
 const (
@@ -56,14 +58,15 @@ var (
 )
 
 type test struct {
-	Name          string   `json:"name"`
-	Data          []string `json:"data"`
-	InsertQuery   string   `json:"insert_query"`
-	Query         []string `json:"query"`
-	ResultMetrics []Metric `json:"result_metrics"`
-	ResultSeries  Series   `json:"result_series"`
-	ResultQuery   Query    `json:"result_query"`
-	Issue         string   `json:"issue"`
+	Name                     string   `json:"name"`
+	Data                     []string `json:"data"`
+	InsertQuery              string   `json:"insert_query"`
+	Query                    []string `json:"query"`
+	ResultMetrics            []Metric `json:"result_metrics"`
+	ResultSeries             Series   `json:"result_series"`
+	ResultQuery              Query    `json:"result_query"`
+	Issue                    string   `json:"issue"`
+	ExpectedResultLinesCount int      `json:"expected_result_lines_count"`
 }
 
 type Metric struct {
@@ -261,6 +264,14 @@ func testWrite(t *testing.T) {
 			httpWrite(t, testPromWriteHTTPPath, test.InsertQuery, bytes.NewBuffer(data))
 		}
 	})
+	t.Run("csv", func(t *testing.T) {
+		for _, test := range readIn("csv", t, insertionTime) {
+			if test.Data == nil {
+				continue
+			}
+			httpWrite(t, testImportCSVWriteHTTPPath, test.InsertQuery, bytes.NewBuffer([]byte(strings.Join(test.Data, "\n"))))
+		}
+	})
 
 	t.Run("influxdb", func(t *testing.T) {
 		for _, x := range readIn("influxdb", t, insertionTime) {
@@ -302,7 +313,7 @@
 }
 
 func testRead(t *testing.T) {
-	for _, engine := range []string{"prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
+	for _, engine := range []string{"csv", "prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
 		t.Run(engine, func(t *testing.T) {
 			for _, x := range readIn(engine, t, insertionTime) {
 				test := x
@@ -313,7 +324,12 @@
 			if test.Issue != "" {
 				test.Issue = "\nRegression in " + test.Issue
 			}
-			switch true {
+			switch {
+			case strings.HasPrefix(q, "/api/v1/export/csv"):
+				data := strings.Split(string(httpReadData(t, testReadHTTPPath, q)), "\n")
+				if len(data) != test.ExpectedResultLinesCount {
+					t.Fatalf("unexpected number of csv lines: want=%d, got=%d, test=%s.%s\nresponse=%q", test.ExpectedResultLinesCount, len(data), q, test.Issue, strings.Join(data, "\n"))
+				}
 			case strings.HasPrefix(q, "/api/v1/export"):
 				if err := checkMetricsResult(httpReadMetrics(t, testReadHTTPPath, q), test.ResultMetrics); err != nil {
 					t.Fatalf("Export. %s fails with error %s.%s", q, err, test.Issue)
@@ -427,6 +443,20 @@ func httpReadStruct(t *testing.T, address, query string, dst interface{}) {
 	s.noError(json.NewDecoder(resp.Body).Decode(dst))
 }
 
+func httpReadData(t *testing.T, address, query string) []byte {
+	t.Helper()
+	s := newSuite(t)
+	resp, err := http.Get(address + query)
+	s.noError(err)
+	defer func() {
+		_ = resp.Body.Close()
+	}()
+	s.equalInt(resp.StatusCode, 200)
+	data, err := io.ReadAll(resp.Body)
+	s.noError(err)
+	return data
+}
+
 func checkMetricsResult(got, want []Metric) error {
 	for _, r := range append([]Metric(nil), got...) {
 		want = removeIfFoundMetrics(r, want)

app/victoria-metrics/testdata/csv/basic.json (new file, vendored, 14 lines)
@@ -0,0 +1,14 @@
+{
+  "name": "csv export",
+  "data": [
+    "rfc3339,4,{TIME_MS}",
+    "rfc3339milli,6,{TIME_MS}",
+    "ts,8,{TIME_MS}",
+    "tsms,10,{TIME_MS},"
+  ],
+  "insert_query": "?format=1:label:tfmt,2:metric:test_csv,3:time:unix_ms",
+  "query": [
+    "/api/v1/export/csv?format=__name__,tfmt,__value__,__timestamp__:rfc3339&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
+  ],
+  "expected_result_lines_count": 4
+}

app/victoria-metrics/testdata/csv/with_extra_labels.json (new file, vendored, 14 lines)
@@ -0,0 +1,14 @@
+{
+  "name": "csv export with extra_labels",
+  "data": [
+    "location-1,4,{TIME_MS}",
+    "location-2,6,{TIME_MS}",
+    "location-3,8,{TIME_MS}",
+    "location-4,10,{TIME_MS},"
+  ],
+  "insert_query": "?format=1:label:location,2:metric:test_csv_labels,3:time:unix_ms&extra_label=location=location-1",
+  "query": [
+    "/api/v1/export/csv?format=__name__,location,__value__,__timestamp__:unix_ms&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
+  ],
+  "expected_result_lines_count": 4
+}
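
These fixtures exercise the CSV import/export round trip. For reference, a hedged Go sketch of a CSV import request follows; the address and timestamps are placeholders, and the column mapping mirrors the fixture's `insert_query` field.

```go
package main

import (
	"log"
	"net/http"
	"strings"
)

func main() {
	// Column mapping as in the fixture above: column 1 is the "tfmt" label,
	// column 2 the value for the test_csv metric, column 3 a unix-ms timestamp.
	const format = "1:label:tfmt,2:metric:test_csv,3:time:unix_ms"
	body := strings.Join([]string{
		"rfc3339,4,1700000000000",
		"ts,8,1700000000000",
	}, "\n")

	resp, err := http.Post(
		"http://localhost:8428/api/v1/import/csv?format="+format,
		"text/csv", strings.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
```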

app/vlselect/logsql/logsql.go
@@ -7,6 +7,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
 )
 
@@ -17,13 +18,18 @@ var (
 )
 
 // ProcessQueryRequest handles /select/logsql/query request
-func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan struct{}) {
+func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan struct{}, cancel func()) {
 	// Extract tenantID
 	tenantID, err := logstorage.GetTenantIDFromRequest(r)
 	if err != nil {
 		httpserver.Errorf(w, r, "%s", err)
 		return
 	}
+	limit, err := httputils.GetInt(r, "limit")
+	if err != nil {
+		httpserver.Errorf(w, r, "%s", err)
+		return
+	}
 
 	qStr := r.FormValue("query")
 	q, err := logstorage.ParseQuery(qStr)
@@ -34,7 +40,7 @@ func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan s
 	w.Header().Set("Content-Type", "application/stream+json; charset=utf-8")
 
 	sw := getSortWriter()
-	sw.Init(w, maxSortBufferSize.IntN())
+	sw.Init(w, maxSortBufferSize.IntN(), limit)
 	tenantIDs := []logstorage.TenantID{tenantID}
 	vlstorage.RunQuery(tenantIDs, q, stopCh, func(columns []logstorage.BlockColumn) {
 		if len(columns) == 0 {
@@ -46,7 +52,11 @@ func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan s
 		for rowIdx := 0; rowIdx < rowsCount; rowIdx++ {
 			WriteJSONRow(bb, columns, rowIdx)
 		}
-		sw.MustWrite(bb.B)
+
+		if !sw.TryWrite(bb.B) {
+			cancel()
+		}
+
 		blockResultPool.Put(bb)
 	})
 	sw.FinalFlush()
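
With this change, `/select/logsql/query` accepts a `limit` query arg and the server stops the query once that many lines were written. A hedged Go client sketch follows; the address and query string are placeholders.

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Placeholder VictoriaLogs address; limit caps the number of returned
	// lines, mirroring the sortWriter changes in this commit.
	q := url.Values{}
	q.Set("query", "error")
	q.Set("limit", "10")

	resp, err := http.Get("http://localhost:9428/select/logsql/query?" + q.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		fmt.Println(sc.Text()) // one JSON-encoded log entry per line
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}
```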

app/vlselect/logsql/sort_writer.go
@@ -36,8 +36,12 @@ var sortWriterPool sync.Pool
 // If the buf isn't empty at FinalFlush() call, then the buffered data
 // is sorted by _time field.
 type sortWriter struct {
-	mu         sync.Mutex
-	w          io.Writer
+	mu sync.Mutex
+	w  io.Writer
+
+	maxLines     int
+	linesWritten int
 
 	maxBufLen  int
 	buf        []byte
 	bufFlushed bool
@@ -47,58 +51,121 @@ type sortWriter struct {
 
 func (sw *sortWriter) reset() {
 	sw.w = nil
+
+	sw.maxLines = 0
+	sw.linesWritten = 0
+
 	sw.maxBufLen = 0
 	sw.buf = sw.buf[:0]
 	sw.bufFlushed = false
 	sw.hasErr = false
 }
 
-func (sw *sortWriter) Init(w io.Writer, maxBufLen int) {
+// Init initializes sw.
+//
+// If maxLines is set to positive value, then sw accepts up to maxLines
+// and then rejects all the other lines by returning false from TryWrite.
+func (sw *sortWriter) Init(w io.Writer, maxBufLen, maxLines int) {
 	sw.reset()
+
 	sw.w = w
 	sw.maxBufLen = maxBufLen
+	sw.maxLines = maxLines
 }
 
-func (sw *sortWriter) MustWrite(p []byte) {
+// TryWrite writes p to sw.
+//
+// True is returned on successful write, false otherwise.
+//
+// Unsuccessful write may occur on underlying write error or when maxLines lines are already written to sw.
+func (sw *sortWriter) TryWrite(p []byte) bool {
 	sw.mu.Lock()
 	defer sw.mu.Unlock()
 
 	if sw.hasErr {
-		return
+		return false
 	}
 
 	if sw.bufFlushed {
-		if _, err := sw.w.Write(p); err != nil {
+		if !sw.writeToUnderlyingWriterLocked(p) {
 			sw.hasErr = true
+			return false
 		}
-		return
+		return true
 	}
 
 	if len(sw.buf)+len(p) < sw.maxBufLen {
 		sw.buf = append(sw.buf, p...)
-		return
+		return true
 	}
 
 	sw.bufFlushed = true
 	if len(sw.buf) > 0 {
-		if _, err := sw.w.Write(sw.buf); err != nil {
+		if !sw.writeToUnderlyingWriterLocked(sw.buf) {
 			sw.hasErr = true
-			return
+			return false
 		}
 		sw.buf = sw.buf[:0]
 	}
 
-	if _, err := sw.w.Write(p); err != nil {
+	if !sw.writeToUnderlyingWriterLocked(p) {
 		sw.hasErr = true
+		return false
 	}
+	return true
 }
 
+func (sw *sortWriter) writeToUnderlyingWriterLocked(p []byte) bool {
+	if len(p) == 0 {
+		return true
+	}
+	if sw.maxLines > 0 {
+		if sw.linesWritten >= sw.maxLines {
+			return false
+		}
+		var linesLeft int
+		p, linesLeft = trimLines(p, sw.maxLines-sw.linesWritten)
+		println("DEBUG: end trimLines", string(p), linesLeft)
+		sw.linesWritten += linesLeft
+	}
+	if _, err := sw.w.Write(p); err != nil {
+		sw.hasErr = true
+		return false
+	}
+	return true
+}
+
+func trimLines(p []byte, maxLines int) ([]byte, int) {
+	println("DEBUG: start trimLines", string(p), maxLines)
+	if maxLines <= 0 {
+		return nil, 0
+	}
+	n := bytes.Count(p, newline)
+	if n < maxLines {
+		return p, n
+	}
+	for n >= maxLines {
+		idx := bytes.LastIndexByte(p, '\n')
+		p = p[:idx]
+		n--
+	}
+	return p[:len(p)+1], maxLines
+}
+
+var newline = []byte("\n")
+
 func (sw *sortWriter) FinalFlush() {
 	if sw.hasErr || sw.bufFlushed {
 		return
 	}
 
 	rs := getRowsSorter()
 	rs.parseRows(sw.buf)
	 rs.sort()
-	WriteJSONRows(sw.w, rs.rows)
+
+	rows := rs.rows
+	if sw.maxLines > 0 && len(rows) > sw.maxLines {
+		rows = rows[:sw.maxLines]
+	}
+	WriteJSONRows(sw.w, rows)
+
 	putRowsSorter(rs)
 }
 
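
The contract of `trimLines` is easiest to see in isolation. The following standalone program copies the function from the patch above (minus the leftover debug print) and exercises it:

```go
package main

import (
	"bytes"
	"fmt"
)

var newline = []byte("\n")

// trimLines truncates p to at most maxLines newline-terminated lines and
// reports how many lines remain. Copied from the patch for illustration.
func trimLines(p []byte, maxLines int) ([]byte, int) {
	if maxLines <= 0 {
		return nil, 0
	}
	n := bytes.Count(p, newline)
	if n < maxLines {
		return p, n
	}
	for n >= maxLines {
		idx := bytes.LastIndexByte(p, '\n')
		p = p[:idx]
		n--
	}
	// Re-extend by one byte to keep the trailing newline of the last kept line.
	return p[:len(p)+1], maxLines
}

func main() {
	p := []byte("a\nb\nc\n")
	out, n := trimLines(p, 2)
	fmt.Printf("%q %d\n", out, n) // "a\nb\n" 2
}
```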

app/vlselect/logsql/sort_writer_test.go
@@ -7,15 +7,16 @@
 )
 
 func TestSortWriter(t *testing.T) {
-	f := func(maxBufLen int, data string, expectedResult string) {
+	f := func(maxBufLen, maxLines int, data string, expectedResult string) {
 		t.Helper()
 
 		var bb bytes.Buffer
 		sw := getSortWriter()
-		sw.Init(&bb, maxBufLen)
-
+		sw.Init(&bb, maxBufLen, maxLines)
 		for _, s := range strings.Split(data, "\n") {
-			sw.MustWrite([]byte(s + "\n"))
+			if !sw.TryWrite([]byte(s + "\n")) {
+				break
+			}
 		}
 		sw.FinalFlush()
 		putSortWriter(sw)
@@ -26,14 +27,20 @@
 		}
 	}
 
-	f(100, "", "")
-	f(100, "{}", "{}\n")
+	f(100, 0, "", "")
+	f(100, 0, "{}", "{}\n")
 
 	data := `{"_time":"def","_msg":"xxx"}
 {"_time":"abc","_msg":"foo"}`
 	resultExpected := `{"_time":"abc","_msg":"foo"}
 {"_time":"def","_msg":"xxx"}
 `
-	f(100, data, resultExpected)
-	f(10, data, data+"\n")
+	f(100, 0, data, resultExpected)
+	f(10, 0, data, data+"\n")
+
+	// Test with the maxLines
+	f(100, 1, data, `{"_time":"abc","_msg":"foo"}`+"\n")
+	f(10, 1, data, `{"_time":"def","_msg":"xxx"}`+"\n")
+	f(10, 2, data, data+"\n")
+	f(100, 2, data, resultExpected)
 }

app/vlselect/main.go
@@ -1,6 +1,7 @@
 package vlselect
 
 import (
+	"context"
 	"embed"
 	"flag"
 	"fmt"
@@ -101,7 +102,8 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 
 	// Limit the number of concurrent queries, which can consume big amounts of CPU.
 	startTime := time.Now()
-	stopCh := r.Context().Done()
+	ctx := r.Context()
+	stopCh := ctx.Done()
 	select {
 	case concurrencyLimitCh <- struct{}{}:
 		defer func() { <-concurrencyLimitCh }()
@@ -139,11 +141,15 @@
 		}
 	}
 
+	ctxWithCancel, cancel := context.WithCancel(ctx)
+	defer cancel()
+	stopCh = ctxWithCancel.Done()
+
 	switch {
 	case path == "/logsql/query":
 		logsqlQueryRequests.Inc()
 		httpserver.EnableCORS(w, r)
-		logsql.ProcessQueryRequest(w, r, stopCh)
+		logsql.ProcessQueryRequest(w, r, stopCh, cancel)
 		return true
 	default:
 		return false
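
The handler now derives a cancellable context so that the `logsql` response writer can abort the running query once the line limit is reached. The pattern in isolation, as a minimal sketch (not the actual `RunQuery` API):

```go
package main

import (
	"context"
	"fmt"
)

// run streams items until stopCh is closed, mimicking how a producer
// consumes stopCh; the callback calls cancel() once it has enough data.
func run(stopCh <-chan struct{}, emit func(int)) {
	for i := 0; ; i++ {
		select {
		case <-stopCh:
			return
		default:
			emit(i)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	const limit = 3
	got := 0
	run(ctx.Done(), func(i int) {
		fmt.Println("item", i)
		got++
		if got >= limit {
			cancel() // stop the producer, as ProcessQueryRequest does
		}
	})
	fmt.Println("stopped after", got, "items")
}
```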

app/vlselect/vmui/asset-manifest.json
@@ -1,13 +1,13 @@
 {
   "files": {
-    "main.css": "./static/css/main.1e22ee10.css",
-    "main.js": "./static/js/main.92cf3903.js",
-    "static/js/522.da77e7b3.chunk.js": "./static/js/522.da77e7b3.chunk.js",
-    "static/media/MetricsQL.md": "./static/media/MetricsQL.48b7b7105a48d7775f01.md",
+    "main.css": "./static/css/main.bc07cc78.css",
+    "main.js": "./static/js/main.53048302.js",
+    "static/js/685.bebe1265.chunk.js": "./static/js/685.bebe1265.chunk.js",
+    "static/media/MetricsQL.md": "./static/media/MetricsQL.61a686c0661a23e4f2eb.md",
     "index.html": "./index.html"
   },
   "entrypoints": [
-    "static/css/main.1e22ee10.css",
-    "static/js/main.92cf3903.js"
+    "static/css/main.bc07cc78.css",
+    "static/js/main.53048302.js"
   ]
 }

app/vlselect/vmui/index.html
@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.92cf3903.js"></script><link href="./static/css/main.1e22ee10.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.53048302.js"></script><link href="./static/css/main.bc07cc78.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
File diff suppressed because one or more lines are too long

1 app/vlselect/vmui/static/css/main.bc07cc78.css Normal file
File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

1 app/vlselect/vmui/static/js/685.bebe1265.chunk.js Normal file
File diff suppressed because one or more lines are too long

2 app/vlselect/vmui/static/js/main.53048302.js Normal file
File diff suppressed because one or more lines are too long
@@ -4,10 +4,8 @@
  http://jedwatson.github.io/classnames
*/

/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */

/**
 * @remix-run/router v1.10.0
 * @remix-run/router v1.15.1
 *
 * Copyright (c) Remix Software Inc.
 *

@@ -18,7 +16,7 @@
 */

/**
 * React Router DOM v6.17.0
 * React Router DOM v6.22.1
 *
 * Copyright (c) Remix Software Inc.
 *

@@ -29,7 +27,7 @@
 */

/**
 * React Router v6.17.0
 * React Router v6.22.1
 *
 * Copyright (c) Remix Software Inc.
 *

File diff suppressed because one or more lines are too long
@@ -26,12 +26,18 @@ and introduction into [basic querying via MetricsQL](https://docs.victoriametric

The following functionality is implemented differently in MetricsQL compared to PromQL. This improves user experience:

* MetricsQL takes into account the previous point before the window in square brackets for range functions such as [rate](#rate) and [increase](#increase).
  This allows returning the exact results users expect for `increase(metric[$__interval])` queries instead of incomplete results Prometheus returns for such queries.
* MetricsQL doesn't extrapolate range function results. This addresses [this issue from Prometheus](https://github.com/prometheus/prometheus/issues/3746).
* MetricsQL takes into account the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples) before the lookbehind window
  in square brackets for [increase](#increase) and [rate](#rate) functions. This allows returning the exact results users expect for `increase(metric[$__interval])` queries
  instead of incomplete results Prometheus returns for such queries. Prometheus misses the increase between the last sample before the lookbehind window
  and the first sample inside the lookbehind window.
* MetricsQL doesn't extrapolate [rate](#rate) and [increase](#increase) function results, so it always returns the expected results. For example, it returns
  integer results from `increase()` over slow-changing integer counter. Prometheus in this case returns unexpected fractional results,
  which may significantly differ from the expected results. This addresses [this issue from Prometheus](https://github.com/prometheus/prometheus/issues/3746).
  See technical details about VictoriaMetrics and Prometheus calculations for [rate](#rate)
  and [increase](#increase) [in this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1215#issuecomment-850305711).
* MetricsQL returns the expected non-empty responses for [rate](#rate) with `step` values smaller than scrape interval.
* MetricsQL returns the expected non-empty responses for [rate](#rate) function when Grafana or [vmui](https://docs.victoriametrics.com/#vmui)
  passes `step` values smaller than the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
  to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
  This addresses [this issue from Grafana](https://github.com/grafana/grafana/issues/11451).
  See also [this blog post](https://www.percona.com/blog/2020/02/28/better-prometheus-rate-function-with-victoriametrics/).
* MetricsQL treats `scalar` type the same as `instant vector` without labels, since subtle differences between these types usually confuse users.
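As a hedged illustration of the `rate()` behavior described above — this example is not part of the commit and assumes a single-node VictoriaMetrics at `localhost:8428` scraping `node_exporter` — a small Go client that sends a range query with a `step` smaller than a typical scrape interval:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"
)

func main() {
	end := time.Now().Unix()
	start := end - 3600 // query the last hour

	params := url.Values{}
	params.Set("query", `rate(node_network_receive_bytes_total)`)
	params.Set("start", fmt.Sprintf("%d", start))
	params.Set("end", fmt.Sprintf("%d", end))
	params.Set("step", "5s") // smaller than a typical 15s-60s scrape interval

	resp, err := http.Get("http://localhost:8428/api/v1/query_range?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // MetricsQL still returns non-empty rate() results here
}
```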
@@ -61,13 +67,14 @@ The list of MetricsQL features on top of PromQL:

* Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax.
  See [these docs](https://docs.victoriametrics.com/#selecting-graphite-metrics).
  VictoriaMetrics also can be used as Graphite datasource in Grafana.
  See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
  VictoriaMetrics can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
  See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
* Lookbehind window in square brackets may be omitted. VictoriaMetrics automatically selects the lookbehind window
  depending on the current step used for building the graph (e.g. `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)).
* Lookbehind window in square brackets for [rollup functions](#rollup-functions) may be omitted. VictoriaMetrics automatically selects the lookbehind window
  depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)
  and the real interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) (aka `scrape_interval`).
  For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`.
  It is equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
  It is roughly equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
  The difference is documented in [rate() docs](#rate).
* Numeric values can contain `_` delimiters for better readability. For example, `1_234_567_890` can be used in queries instead of `1234567890`.
* [Series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
  selects series with `{env="prod",job="a"}` or `{env="dev",job="b"}` labels.

@@ -117,7 +124,8 @@ The list of MetricsQL features on top of PromQL:
  Go to [WITH templates playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/expand-with-exprs) and try it.
* String literals may be concatenated. This is useful with `WITH` templates:
  `WITH (commonPrefix="long_metric_prefix_") {__name__=commonPrefix+"suffix1"} / {__name__=commonPrefix+"suffix2"}`.
* `keep_metric_names` modifier can be applied to all the [rollup functions](#rollup-functions), [transform functions](#transform-functions) and [binary operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operators).
* `keep_metric_names` modifier can be applied to all the [rollup functions](#rollup-functions), [transform functions](#transform-functions)
  and [binary operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operators).
  This modifier prevents from dropping metric names in function results. See [these docs](#keep_metric_names).

## keep_metric_names
@@ -155,14 +163,15 @@ Additional details:
  The interval between points is set as `step` query arg passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
* If the given [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) returns multiple time series,
  then rollups are calculated individually per each returned series.
* If lookbehind window in square brackets is missing, then MetricsQL automatically sets the lookbehind window
  to the interval between points on the graph (aka `step` query arg at [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query),
  `$__interval` value from Grafana or `1i` duration in MetricsQL).
  For example, `rate(http_requests_total)` is equivalent to `rate(http_requests_total[$__interval])` in Grafana.
  It is also equivalent to `rate(http_requests_total[1i])`.
* If lookbehind window in square brackets is missing, then it is automatically set to the following value:
  - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
    for all the [rollup functions](#rollup-functions) except of [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
    For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
  - To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
    for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
* Every [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) in MetricsQL must be wrapped into a rollup function.
  Otherwise, it is automatically wrapped into [default_rollup](#default_rollup). For example, `foo{bar="baz"}`
  is automatically converted to `default_rollup(foo{bar="baz"}[1i])` before performing the calculations.
  is automatically converted to `default_rollup(foo{bar="baz"})` before performing the calculations.
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) is passed to rollup function,
  then the inner arg is automatically converted to a [subquery](#subqueries).
* All the rollup functions accept optional `keep_metric_names` modifier. If it is set, then the function keeps metric names in results.
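A short sketch (illustrative only, not engine code) of the lookbehind-window defaulting rule spelled out above:

```go
package main

import (
	"fmt"
	"time"
)

// effectiveLookbehind illustrates the defaulting rule described above:
// default_rollup and rate fall back to max(step, scrape_interval), while
// other rollup functions fall back to step alone (the `1i` / $__interval value).
func effectiveLookbehind(step, scrapeInterval time.Duration, isDefaultRollupOrRate bool) time.Duration {
	if isDefaultRollupOrRate && scrapeInterval > step {
		return scrapeInterval
	}
	return step
}

func main() {
	// step=5s, scrape_interval=30s: rate() uses 30s, avoiding gaps on the graph.
	fmt.Println(effectiveLookbehind(5*time.Second, 30*time.Second, true)) // 30s
	// avg_over_time() keeps the 5s step as its lookbehind window.
	fmt.Println(effectiveLookbehind(5*time.Second, 30*time.Second, false)) // 5s
}
```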
@@ -177,7 +186,9 @@ The list of supported rollup functions:
`absent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1
if the given lookbehind window `d` doesn't contain raw samples. Otherwise, it returns an empty result.

This function is supported by PromQL. See also [present_over_time](#present_over_time).
This function is supported by PromQL.

See also [present_over_time](#present_over_time).

#### aggr_over_time

@@ -207,7 +218,9 @@ See also [descent_over_time](#descent_over_time).
over raw samples on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL. See also [median_over_time](#median_over_time).
This function is supported by PromQL.

See also [median_over_time](#median_over_time).

#### changes

@@ -220,7 +233,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [changes_prometheus](#changes_prometheus).
This function is supported by PromQL.

See also [changes_prometheus](#changes_prometheus).

#### changes_prometheus

@@ -233,7 +248,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [changes](#changes).
This function is supported by PromQL.

See also [changes](#changes).

#### count_eq_over_time

@@ -243,7 +260,7 @@ from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.ht
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_over_time](#count_over_time) and [share_eq_over_time](#share_eq_over_time).
See also [count_over_time](#count_over_time), [share_eq_over_time](#share_eq_over_time) and [count_values_over_time](#count_values_over_time).

#### count_gt_over_time
@@ -282,8 +299,19 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_gt_over_time),
[count_eq_over_time](#count_eq_over_time) and [count_ne_over_time](#count_ne_over_time).
This function is supported by PromQL.

See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_gt_over_time), [count_eq_over_time](#count_eq_over_time) and [count_ne_over_time](#count_ne_over_time).

#### count_values_over_time

`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of raw samples
with the same value over the given lookbehind window and stores the counts in a time series with an additional `label`, which contains each initial value.
The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_eq_over_time](#count_eq_over_time), [count_values](#count_values), [distinct_over_time](#distinct_over_time) and [label_match](#label_match).
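A hedged sketch of the semantics the new `count_values_over_time` function implements per series — illustrative only, not the actual rollup implementation:

```go
package main

import "fmt"

// countValuesOverTime illustrates the semantics described above: count how
// many raw samples in the lookbehind window share each distinct value. The
// engine exposes each distinct value via the extra `label`.
func countValuesOverTime(samples []float64) map[float64]int {
	counts := make(map[float64]int)
	for _, v := range samples {
		counts[v]++
	}
	return counts
}

func main() {
	fmt.Println(countValuesOverTime([]float64{1, 1, 2, 3, 3, 3}))
	// map[1:2 2:1 3:3] -> series {label="1"}=2, {label="2"}=1, {label="3"}=3
}
```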
#### decreases_over_time

@@ -299,6 +327,11 @@ See also [increases_over_time](#increases_over_time).
`default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
while `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the selected time series.
This allows avoiding unexpected gaps on the graph when `step` is smaller than the `scrape_interval`.

#### delta

`delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between

@@ -310,7 +343,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [increase](#increase) and [delta_prometheus](#delta_prometheus).
This function is supported by PromQL.

See also [increase](#increase) and [delta_prometheus](#delta_prometheus).

#### delta_prometheus

@@ -333,7 +368,9 @@ The derivative is calculated using linear regression.
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).
This function is supported by PromQL.

See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).

#### deriv_fast
@@ -364,6 +401,8 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_values_over_time](#count_values_over_time).

#### duration_over_time

`duration_over_time(series_selector[d], max_interval)` is a [rollup function](#rollup-functions), which returns the duration in seconds

@@ -423,7 +462,9 @@ over the given lookbehind window `d` using the given smoothing factor `sf` and t
Both `sf` and `tf` must be in the range `[0...1]`. It is expected that the [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
returns time series of [gauge type](https://docs.victoriametrics.com/keyConcepts.html#gauge).

This function is supported by PromQL. See also [range_linear_regression](#range_linear_regression).
This function is supported by PromQL.

See also [range_linear_regression](#range_linear_regression).

#### idelta

@@ -432,7 +473,9 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [delta](#delta).
This function is supported by PromQL.

See also [delta](#delta).

#### ideriv

@@ -455,7 +498,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [increase_pure](#increase_pure), [increase_prometheus](#increase_prometheus) and [delta](#delta).
This function is supported by PromQL.

See also [increase_pure](#increase_pure), [increase_prometheus](#increase_prometheus) and [delta](#delta).

#### increase_prometheus

@@ -499,7 +544,9 @@ It is expected that the `series_selector` returns time series of [counter type](
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [rate](#rate) and [rollup_rate](#rollup_rate).
This function is supported by PromQL.

See also [rate](#rate) and [rollup_rate](#rollup_rate).

#### lag
@@ -516,7 +563,9 @@ See also [lifetime](#lifetime) and [duration_over_time](#duration_over_time).
`last_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL. See also [first_over_time](#first_over_time) and [tlast_over_time](#tlast_over_time).
This function is supported by PromQL.

See also [first_over_time](#first_over_time) and [tlast_over_time](#tlast_over_time).

#### lifetime

@@ -539,7 +588,9 @@ See also [mad](#mad), [range_mad](#range_mad) and [outlier_iqr_over_time](#outli
`max_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the maximum value over raw samples
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL. See also [tmax_over_time](#tmax_over_time).
This function is supported by PromQL.

See also [tmax_over_time](#tmax_over_time).

#### median_over_time

@@ -554,7 +605,9 @@ See also [avg_over_time](#avg_over_time).
`min_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the minimum value over raw samples
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL. See also [tmin_over_time](#tmin_over_time).
This function is supported by PromQL.

See also [tmin_over_time](#tmin_over_time).

#### mode_over_time

@@ -580,7 +633,9 @@ See also [outliers_iqr](#outliers_iqr).
linear interpolation over raw samples on the given lookbehind window `d`. The predicted value is calculated individually per each time series
returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL. See also [range_linear_regression](#range_linear_regression).
This function is supported by PromQL.

See also [range_linear_regression](#range_linear_regression).

#### present_over_time

@@ -597,7 +652,9 @@ This function is supported by PromQL.
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
The `phi` value must be in the range `[0...1]`.

This function is supported by PromQL. See also [quantiles_over_time](#quantiles_over_time).
This function is supported by PromQL.

See also [quantiles_over_time](#quantiles_over_time).

#### quantiles_over_time
@@ -622,9 +679,16 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).

If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
while `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the selected time series.
This allows avoiding unexpected gaps on the graph when `step` is smaller than the `scrape_interval`.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [irate](#irate) and [rollup_rate](#rollup_rate).
This function is supported by PromQL.

See also [irate](#irate) and [rollup_rate](#rollup_rate).

#### rate_over_sum

@@ -652,6 +716,7 @@ on the given lookbehind window `d` and returns them in time series with `rollup=
These values are calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

#### rollup_candlestick

@@ -660,7 +725,8 @@ over raw samples on the given lookbehind window `d` and returns them in time ser
The calculations are performed individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering). This function is useful for financial applications.

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
Optional 2nd argument `"open"`, `"high"`, `"low"` or `"close"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).
#### rollup_delta

@@ -670,6 +736,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -683,6 +750,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -694,6 +762,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names. See also [rollup_delta](#rollup_delta).

@@ -707,10 +776,10 @@ See [this article](https://valyala.medium.com/why-irate-from-prometheus-doesnt-c
when to use `rollup_rate()`.

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### rollup_scrape_interval

@@ -721,6 +790,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names. See also [scrape_interval](#scrape_interval).
@@ -783,7 +853,9 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [stdvar_over_time](#stdvar_over_time).
This function is supported by PromQL.

See also [stdvar_over_time](#stdvar_over_time).

#### stdvar_over_time

@@ -792,7 +864,9 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [stddev_over_time](#stddev_over_time).
This function is supported by PromQL.

See also [stddev_over_time](#stddev_over_time).

#### sum_eq_over_time

@@ -844,7 +918,9 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [timestamp_with_name](#timestamp_with_name).
This function is supported by PromQL.

See also [time](#time) and [now](#now).

#### timestamp_with_name

@@ -853,7 +929,7 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are preserved in the resulting rollups.

See also [timestamp](#timestamp).
See also [timestamp](#timestamp) and [keep_metric_names](#keep_metric_names) modifier.

#### tfirst_over_time
@@ -920,7 +996,7 @@ Additional details:

* If transform function is applied directly to a [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
  then the [default_rollup()](#default_rollup) function is automatically applied before calculating the transformations.
  For example, `abs(temperature)` is implicitly transformed to `abs(default_rollup(temperature[1i]))`.
  For example, `abs(temperature)` is implicitly transformed to `abs(default_rollup(temperature))`.
* All the transform functions accept optional `keep_metric_names` modifier. If it is set,
  then the function doesn't drop metric names from the resulting time series. See [these docs](#keep_metric_names).
@@ -938,7 +1014,9 @@ This function is supported by PromQL.

`absent(q)` is a [transform function](#transform-functions), which returns 1 if `q` has no points. Otherwise, returns an empty result.

This function is supported by PromQL. See also [absent_over_time](#absent_over_time).
This function is supported by PromQL.

See also [absent_over_time](#absent_over_time).

#### acos

@@ -947,7 +1025,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [asin](#asin) and [cos](#cos).
This function is supported by PromQL.

See also [asin](#asin) and [cos](#cos).

#### acosh

@@ -956,7 +1036,9 @@ This function is supported by PromQL. See also [asin](#asin) and [cos](#cos).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [sinh](#cosh).
This function is supported by PromQL.

See also [sinh](#cosh).

#### asin

@@ -965,7 +1047,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [acos](#acos) and [sin](#sin).
This function is supported by PromQL.

See also [acos](#acos) and [sin](#sin).

#### asinh

@@ -974,7 +1058,9 @@ This function is supported by PromQL. See also [acos](#acos) and [sin](#sin).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [sinh](#sinh).
This function is supported by PromQL.

See also [sinh](#sinh).

#### atan

@@ -983,7 +1069,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [tan](#tan).
This function is supported by PromQL.

See also [tan](#tan).

#### atanh

@@ -992,7 +1080,9 @@ This function is supported by PromQL. See also [tan](#tan).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [tanh](#tanh).
This function is supported by PromQL.

See also [tanh](#tanh).

#### bitmap_and
@@ -1023,25 +1113,33 @@ See also [prometheus_buckets](#prometheus_buckets) and [histogram_quantile](#his

`ceil(q)` is a [transform function](#transform-functions), which rounds every point for every time series returned by `q` to the upper nearest integer.

This function is supported by PromQL. See also [floor](#floor) and [round](#round).
This function is supported by PromQL.

See also [floor](#floor) and [round](#round).

#### clamp

`clamp(q, min, max)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `min` and `max` values.

This function is supported by PromQL. See also [clamp_min](#clamp_min) and [clamp_max](#clamp_max).
This function is supported by PromQL.

See also [clamp_min](#clamp_min) and [clamp_max](#clamp_max).

#### clamp_max

`clamp_max(q, max)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `max` value.

This function is supported by PromQL. See also [clamp](#clamp) and [clamp_min](#clamp_min).
This function is supported by PromQL.

See also [clamp](#clamp) and [clamp_min](#clamp_min).

#### clamp_min

`clamp_min(q, min)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `min` value.

This function is supported by PromQL. See also [clamp](#clamp) and [clamp_max](#clamp_max).
This function is supported by PromQL.

See also [clamp](#clamp) and [clamp_max](#clamp_max).

#### cos

@@ -1049,7 +1147,9 @@ This function is supported by PromQL. See also [clamp](#clamp) and [clamp_max](#

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [sin](#sin).
This function is supported by PromQL.

See also [sin](#sin).

#### cosh

@@ -1058,7 +1158,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [acosh](#acosh).
This function is supported by PromQL.

See also [acosh](#acosh).

#### day_of_month
@@ -1069,6 +1171,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke

This function is supported by PromQL.

See also [day_of_week](#day_of_week) and [day_of_year](#day_of_year).

#### day_of_week

`day_of_week(q)` is a [transform function](#transform-functions), which returns the day of week for every point of every time series returned by `q`.

@@ -1078,6 +1182,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke

This function is supported by PromQL.

See also [day_of_month](#day_of_month) and [day_of_year](#day_of_year).

#### day_of_year

`day_of_year(q)` is a [transform function](#transform-functions), which returns the day of year for every point of every time series returned by `q`.

@@ -1087,6 +1193,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke

This function is supported by PromQL.

See also [day_of_week](#day_of_week) and [day_of_month](#day_of_month).

#### days_in_month

`days_in_month(q)` is a [transform function](#transform-functions), which returns the number of days in the month identified

@@ -1104,7 +1212,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [rad](#rad).
This function is supported by PromQL.

See also [rad](#rad).

#### drop_empty_series

@@ -1130,13 +1240,17 @@ See also [start](#start), [time](#time) and [now](#now).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [ln](#ln).
This function is supported by PromQL.

See also [ln](#ln).

#### floor

`floor(q)` is a [transform function](#transform-functions), which rounds every point for every time series returned by `q` to the lower nearest integer.

This function is supported by PromQL. See also [ceil](#ceil) and [round](#round).
This function is supported by PromQL.

See also [ceil](#ceil) and [round](#round).

#### histogram_avg
@@ -1159,8 +1273,9 @@ When the [percentile](https://en.wikipedia.org/wiki/Percentile) is calculated ov
then all the input histograms **must** have buckets with identical boundaries, e.g. they must have the same set of `le` or `vmrange` labels.
Otherwise, the returned result may be invalid. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3231) for details.

This function is supported by PromQL (except of the `boundLabel` arg). See also [histogram_quantiles](#histogram_quantiles), [histogram_share](#histogram_share)
and [quantile](#quantile).
This function is supported by PromQL (except of the `boundLabel` arg).

See also [histogram_quantiles](#histogram_quantiles), [histogram_share](#histogram_share) and [quantile](#quantile).

#### histogram_quantiles

@@ -1232,7 +1347,9 @@ This allows implementing simple paging for `q` time series. See also [limitk](#l

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [exp](#exp) and [log2](#log2).
This function is supported by PromQL.

See also [exp](#exp) and [log2](#log2).

#### log2

@@ -1240,7 +1357,9 @@ This function is supported by PromQL. See also [exp](#exp) and [log2](#log2).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [log10](#log10) and [ln](#ln).
This function is supported by PromQL.

See also [log10](#log10) and [ln](#ln).

#### log10

@@ -1248,7 +1367,9 @@ This function is supported by PromQL. See also [log10](#log10) and [ln](#ln).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [log2](#log2) and [ln](#ln).
This function is supported by PromQL.

See also [log2](#log2) and [ln](#ln).

#### minute
@@ -1287,7 +1408,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [deg](#deg).
This function is supported by PromQL.

See also [deg](#deg).

#### prometheus_buckets

@@ -1415,7 +1538,9 @@ for points returned by `q`, e.g. it is equivalent to the following query: `(q -

`round(q, nearest)` is a [transform function](#transform-functions), which rounds every point of every time series returned by `q` to the `nearest` multiple.
If `nearest` is missing then the rounding is performed to the nearest integer.

This function is supported by PromQL. See also [floor](#floor) and [ceil](#ceil).
This function is supported by PromQL.

See also [floor](#floor) and [ceil](#ceil).

#### ru

@@ -1459,7 +1584,9 @@ This function is supported by PromQL.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL. See also [cos](#cos).
This function is supported by MetricsQL.

See also [cos](#cos).

#### sinh

@@ -1468,7 +1595,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL. See also [cosh](#cosh).
This function is supported by MetricsQL.

See also [cosh](#cosh).

#### tan

@@ -1476,7 +1605,9 @@ This function is supported by MetricsQL. See also [cosh](#cosh).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL. See also [atan](#atan).
This function is supported by MetricsQL.

See also [atan](#atan).

#### tanh

@@ -1485,7 +1616,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL. See also [atanh](#atanh).
This function is supported by MetricsQL.

See also [atanh](#atanh).

#### smooth_exponential
@@ -1496,13 +1629,17 @@ by `q` using [exponential moving average](https://en.wikipedia.org/wiki/Moving_a

`sort(q)` is a [transform function](#transform-functions), which sorts series in ascending order by the last point in every time series returned by `q`.

This function is supported by PromQL. See also [sort_desc](#sort_desc) and [sort_by_label](#sort_by_label).
This function is supported by PromQL.

See also [sort_desc](#sort_desc) and [sort_by_label](#sort_by_label).

#### sort_desc

`sort_desc(q)` is a [transform function](#transform-functions), which sorts series in descending order by the last point in every time series returned by `q`.

This function is supported by PromQL. See also [sort](#sort) and [sort_by_label](#sort_by_label_desc).
This function is supported by PromQL.

See also [sort](#sort) and [sort_by_label](#sort_by_label_desc).

#### sqrt

@@ -1531,7 +1668,9 @@ See also [start](#start) and [end](#end).

`time()` is a [transform function](#transform-functions), which returns unix timestamp for every returned point.

This function is supported by PromQL. See also [now](#now), [start](#start) and [end](#end).
This function is supported by PromQL.

See also [timestamp](#timestamp), [now](#now), [start](#start) and [end](#end).

#### timezone_offset

@@ -1580,7 +1719,7 @@ Additional details:

* If label manipulation function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
  then the [default_rollup()](#default_rollup) function is automatically applied before performing the label transformation.
  For example, `alias(temperature, "foo")` is implicitly transformed to `alias(default_rollup(temperature[1i]), "foo")`.
  For example, `alias(temperature, "foo")` is implicitly transformed to `alias(default_rollup(temperature), "foo")`.

See also [implicit query conversions](#implicit-query-conversions).
@@ -1757,7 +1896,7 @@ Additional details:
  Multiple labels can be put in `by` and `without` modifiers.
* If the aggregate function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
  then the [default_rollup()](#default_rollup) function is automatically applied before calculating the aggregate.
  For example, `count(up)` is implicitly transformed to `count(default_rollup(up[1i]))`.
  For example, `count(up)` is implicitly transformed to `count(default_rollup(up))`.
* Aggregate functions accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point
  across time series returned by `q1`, `q2` and `q3`.
* Aggregate functions support optional `limit N` suffix, which can be used for limiting the number of output groups.

@@ -1785,7 +1924,9 @@ This function is supported by PromQL.

`bottomk(k, q)` is [aggregate function](#aggregate-functions), which returns up to `k` points with the smallest values across all the time series returned by `q`.
The aggregate is calculated individually per each group of points with the same timestamp.

This function is supported by PromQL. See also [topk](#topk).
This function is supported by PromQL.

See also [topk](#topk), [bottomk_min](#bottomk_min) and [bottomk_last](#bottomk_last).

#### bottomk_avg

@@ -1847,10 +1988,14 @@ The aggregate is calculated individually per each group of points with the same

This function is supported by PromQL.

See also [count_values_over_time](#count_values_over_time) and [label_match](#label_match).

#### distinct

`distinct(q)` is [aggregate function](#aggregate-functions), which calculates the number of unique values per each group of points with the same timestamp.

See also [distinct_over_time](#distinct_over_time).

#### geomean

`geomean(q)` is [aggregate function](#aggregate-functions), which calculates geometric mean per each group of points with the same timestamp.
|
|||
for all the time series returned by `q`. `phi` must be in the range `[0...1]`.
|
||||
The aggregate is calculated individually per each group of points with the same timestamp.
|
||||
|
||||
This function is supported by PromQL. See also [quantiles](#quantiles) and [histogram_quantile](#histogram_quantile).
|
||||
This function is supported by PromQL.
|
||||
|
||||
See also [quantiles](#quantiles) and [histogram_quantile](#histogram_quantile).
|
||||
|
||||
#### quantiles
|
||||
|
||||
|
@ -2001,7 +2148,9 @@ for all the time series returned by `q`. The aggregate is calculated individuall
|
|||
`topk(k, q)` is [aggregate function](#aggregate-functions), which returns up to `k` points with the biggest values across all the time series returned by `q`.
|
||||
The aggregate is calculated individually per each group of points with the same timestamp.
|
||||
|
||||
This function is supported by PromQL. See also [bottomk](#bottomk).
|
||||
This function is supported by PromQL.
|
||||
|
||||
See also [bottomk](#bottomk), [topk_max](#topk_max) and [topk_last](#topk_last).
|
||||
|
||||
#### topk_avg
|
||||
|
||||
|
@ -2061,7 +2210,7 @@ See also [zscore_over_time](#zscore_over_time), [range_trim_zscore](#range_trim_
|
|||
MetricsQL supports and extends PromQL subqueries. See [this article](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3) for details.
|
||||
Any [rollup function](#rollup-functions) for something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) form a subquery.
|
||||
Nested rollup functions can be implicit thanks to the [implicit query conversions](#implicit-query-conversions).
|
||||
For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m[1i]))[1i:1i])`, so it becomes a subquery,
|
||||
For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m))[1i:1i])`, so it becomes a subquery,
|
||||
since it contains [default_rollup](#default_rollup) nested into [delta](#delta).
|
||||
|
||||
VictoriaMetrics performs subqueries in the following way:
|
||||
|
@@ -2076,21 +2225,23 @@ VictoriaMetrics performs subqueries in the following way:

VictoriaMetrics performs the following implicit conversions for incoming queries before starting the calculations:

* If lookbehind window in square brackets is missing inside [rollup function](#rollup-functions),
  then `[1i]` is automatically added there. The `[1i]` means one `step` value, which is passed
  to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
  It is also known as `$__interval` in Grafana. For example, `rate(http_requests_count)` is automatically transformed to `rate(http_requests_count[1i])`.
* If lookbehind window in square brackets is missing inside [rollup function](#rollup-functions), then it is automatically set to the following value:
  - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
    for all the [rollup functions](#rollup-functions) except of [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
    For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
  - To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
    for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
* All the [series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering),
  which aren't wrapped into [rollup functions](#rollup-functions), are automatically wrapped into [default_rollup](#default_rollup) function.
  Examples:
  * `foo` is transformed to `default_rollup(foo[1i])`
  * `foo + bar` is transformed to `default_rollup(foo[1i]) + default_rollup(bar[1i])`
  * `count(up)` is transformed to `count(default_rollup(up[1i]))`, because [count](#count) isn't a [rollup function](#rollup-functions) -
  * `foo` is transformed to `default_rollup(foo)`
  * `foo + bar` is transformed to `default_rollup(foo) + default_rollup(bar)`
  * `count(up)` is transformed to `count(default_rollup(up))`, because [count](#count) isn't a [rollup function](#rollup-functions) -
    it is [aggregate function](#aggregate-functions)
  * `abs(temperature)` is transformed to `abs(default_rollup(temperature[1i]))`, because [abs](#abs) isn't a [rollup function](#rollup-functions) -
  * `abs(temperature)` is transformed to `abs(default_rollup(temperature))`, because [abs](#abs) isn't a [rollup function](#rollup-functions) -
    it is [transform function](#transform-functions)
* If `step` in square brackets is missing inside [subquery](#subqueries), then `1i` step is automatically added there.
  For example, `avg_over_time(rate(http_requests_total[5m])[1h])` is automatically converted to `avg_over_time(rate(http_requests_total[5m])[1h:1i])`.
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
  is passed to [rollup function](#rollup-functions), then a [subquery](#subqueries) with `1i` lookbehind window and `1i` step is automatically formed.
  For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up[1i])))[1i:1i])`.
  For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up)))[1i:1i])`.
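The conversion examples above can be restated in a small runnable snippet; the mapping below merely echoes the documented transformations and is not the real MetricsQL parser:

```go
package main

import "fmt"

// Illustrative only: the textual effect of MetricsQL's implicit conversions
// on the queries documented above. This is NOT the actual parser logic.
func main() {
	conversions := map[string]string{
		"foo":              "default_rollup(foo)",
		"foo + bar":        "default_rollup(foo) + default_rollup(bar)",
		"count(up)":        "count(default_rollup(up))",
		"abs(temperature)": "abs(default_rollup(temperature))",
		"rate(sum(up))":    "rate((sum(default_rollup(up)))[1i:1i])",
	}
	for in, out := range conversions {
		fmt.Printf("%-20s -> %s\n", in, out)
	}
}
```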
@@ -8,7 +8,6 @@ import (
	"net/http"
	"os"
	"strings"
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/csvimport"

@@ -314,7 +313,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		influxQueryRequests.Inc()
		influxutils.WriteDatabaseNames(w)
		return true
	case "/opentelemetry/api/v1/push":
	case "/opentelemetry/api/v1/push", "/opentelemetry/v1/metrics":
		opentelemetryPushRequests.Inc()
		if err := opentelemetry.InsertHandler(nil, r); err != nil {
			opentelemetryPushErrors.Inc()
@ -459,7 +458,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
w.WriteHeader(http.StatusOK)
|
||||
return true
|
||||
case "/ready":
|
||||
if rdy := atomic.LoadInt32(&promscrape.PendingScrapeConfigs); rdy > 0 {
|
||||
if rdy := promscrape.PendingScrapeConfigs.Load(); rdy > 0 {
|
||||
errMsg := fmt.Sprintf("waiting for scrapes to init, left: %d", rdy)
|
||||
http.Error(w, errMsg, http.StatusTooEarly)
|
||||
} else {
|
||||
|
@ -560,7 +559,7 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
|
|||
influxQueryRequests.Inc()
|
||||
influxutils.WriteDatabaseNames(w)
|
||||
return true
|
||||
case "opentelemetry/api/v1/push":
|
||||
case "opentelemetry/api/v1/push", "opentelemetry/v1/metrics":
|
||||
opentelemetryPushRequests.Inc()
|
||||
if err := opentelemetry.InsertHandler(at, r); err != nil {
|
||||
opentelemetryPushErrors.Inc()
|
||||
|
@ -686,8 +685,8 @@ var (
|
|||
datadogIntakeRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/intake", protocol="datadog"}`)
|
||||
datadogMetadataRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`)
|
||||
|
||||
opentelemetryPushRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
|
||||
opentelemetryPushErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
|
||||
opentelemetryPushRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
|
||||
opentelemetryPushErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
|
||||
|
||||
newrelicWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
|
||||
newrelicWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
|
||||
|
|
|
@@ -9,6 +9,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
 	"github.com/VictoriaMetrics/metrics"

@@ -27,10 +28,15 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
 		return err
 	}
 	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
+	var processBody func([]byte) ([]byte, error)
 	if req.Header.Get("Content-Type") == "application/json" {
-		return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
+		if req.Header.Get("X-Amz-Firehouse-Protocol-Version") != "" {
+			processBody = firehose.ProcessRequestBody
+		} else {
+			return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
+		}
 	}
-	return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
+	return stream.ParseStream(req.Body, isGzipped, processBody, func(tss []prompbmarshal.TimeSeries) error {
 		return insertRows(at, tss, extraLabels)
 	})
 }
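The hunk above threads an optional `processBody` preprocessor into the OpenTelemetry stream parser, so Firehose-wrapped payloads can be unwrapped before parsing while the common protobuf path pays no extra cost. A minimal standalone sketch of the same nil-able hook pattern (the `parse` function and the `envelope:` format are hypothetical, not code from this repository):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// parse reads the whole body and, when a preprocessor is configured,
// rewrites the raw payload before the real parser would see it.
func parse(body io.Reader, processBody func([]byte) ([]byte, error)) ([]byte, error) {
	data, err := io.ReadAll(body)
	if err != nil {
		return nil, err
	}
	if processBody != nil {
		if data, err = processBody(data); err != nil {
			return nil, err
		}
	}
	return data, nil
}

func main() {
	// A toy preprocessor which strips a hypothetical envelope prefix.
	unwrap := func(b []byte) ([]byte, error) {
		return bytes.TrimPrefix(b, []byte("envelope:")), nil
	}
	out, _ := parse(bytes.NewReader([]byte("envelope:payload")), unwrap)
	fmt.Printf("%s\n", out) // payload
}
```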
@@ -82,7 +82,7 @@ func (ps *pendingSeries) periodicFlusher() {
 			ps.mu.Unlock()
 			return
 		case <-ticker.C:
-			if fasttime.UnixTimestamp()-atomic.LoadUint64(&ps.wr.lastFlushTime) < uint64(flushSeconds) {
+			if fasttime.UnixTimestamp()-ps.wr.lastFlushTime.Load() < uint64(flushSeconds) {
 				continue
 			}
 		}

@@ -93,8 +93,7 @@ func (ps *pendingSeries) periodicFlusher() {
 }
 
 type writeRequest struct {
-	// Move lastFlushTime to the top of the struct in order to guarantee atomic access on 32-bit architectures.
-	lastFlushTime uint64
+	lastFlushTime atomic.Uint64
 
 	// The queue to send blocks to.
 	fq *persistentqueue.FastQueue

@@ -155,7 +154,7 @@ func (wr *writeRequest) mustWriteBlock(block []byte) bool {
 
 func (wr *writeRequest) tryFlush() bool {
 	wr.wr.Timeseries = wr.tss
-	atomic.StoreUint64(&wr.lastFlushTime, fasttime.UnixTimestamp())
+	wr.lastFlushTime.Store(fasttime.UnixTimestamp())
 	if !tryPushWriteRequest(&wr.wr, wr.fq.TryWriteBlock, wr.isVMRemoteWrite) {
 		return false
 	}
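This hunk is one instance of a migration that recurs throughout the commit: plain integer fields accessed via `atomic.AddUint64`/`atomic.LoadUint64` become the typed `atomic.Uint64` values available since Go 1.19, which encode the atomicity requirement in the type and remove the 64-bit alignment concern mentioned in the deleted comment. A minimal sketch contrasting the two styles (the counter types are illustrative, not from this repository):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Old style: a plain uint64 manipulated via atomic helper functions.
// The field must stay 64-bit aligned for atomic access on 32-bit platforms.
type oldCounter struct {
	n uint64
}

func (c *oldCounter) inc() uint64 { return atomic.AddUint64(&c.n, 1) }

// New style: atomic.Uint64 guarantees correct alignment and makes the
// atomicity requirement explicit in the type.
type newCounter struct {
	n atomic.Uint64
}

func (c *newCounter) inc() uint64 { return c.n.Add(1) }

func main() {
	var a oldCounter
	var b newCounter
	fmt.Println(a.inc(), b.inc()) // 1 1
}
```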
@@ -56,7 +56,8 @@ var (
 	keepDanglingQueues = flag.Bool("remoteWrite.keepDanglingQueues", false, "Keep persistent queues contents at -remoteWrite.tmpDataPath in case there are no matching -remoteWrite.url. "+
 		"Useful when -remoteWrite.url is changed temporarily and persistent queue files will be needed later on.")
 	queues = flag.Int("remoteWrite.queues", cgroup.AvailableCPUs()*2, "The number of concurrent queues to each -remoteWrite.url. Set more queues if default number of queues "+
-		"isn't enough for sending high volume of collected data to remote storage. Default value is 2 * numberOfAvailableCPUs")
+		"isn't enough for sending high volume of collected data to remote storage. "+
+		"Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage")
 	showRemoteWriteURL = flag.Bool("remoteWrite.showURL", false, "Whether to show -remoteWrite.url in the exported metrics. "+
 		"It is hidden by default, since it can contain sensitive info such as auth key")
 	maxPendingBytesPerURL = flagutil.NewArrayBytes("remoteWrite.maxDiskUsagePerURL", 0, "The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath "+

@@ -89,8 +90,11 @@ var (
 	streamAggrDropInput = flagutil.NewArrayBool("remoteWrite.streamAggr.dropInput", "Whether to drop all the input samples after the aggregation "+
 		"with -remoteWrite.streamAggr.config. By default, only aggregates samples are dropped, while the remaining samples "+
 		"are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation.html")
-	streamAggrDedupInterval = flagutil.NewArrayDuration("remoteWrite.streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before being aggregated. "+
-		"Only the last sample per each time series per each interval is aggregated if the interval is greater than zero")
+	streamAggrDedupInterval = flagutil.NewArrayDuration("remoteWrite.streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation "+
+		"with -remoteWrite.streamAggr.config . See also -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation.html#deduplication")
+	streamAggrDropInputLabels = flagutil.NewArrayString("streamAggr.dropInputLabels", "An optional list of labels to drop from samples "+
+		"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation.html#dropping-unneeded-labels")
 
 	disableOnDiskQueue = flag.Bool("remoteWrite.disableOnDiskQueue", false, "Whether to disable storing pending data to -remoteWrite.tmpDataPath "+
 		"when the configured remote storage systems cannot keep up with the data ingestion rate. See https://docs.victoriametrics.com/vmagent.html#disabling-on-disk-persistence ."+
 		"See also -remoteWrite.dropSamplesOnOverload")
@@ -536,22 +540,22 @@ func tryPushBlockToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmar
 	// Push sharded data to remote storages in parallel in order to reduce
 	// the time needed for sending the data to multiple remote storage systems.
 	var wg sync.WaitGroup
-	wg.Add(len(rwctxs))
-	var anyPushFailed uint64
+	var anyPushFailed atomic.Bool
 	for i, rwctx := range rwctxs {
 		tssShard := tssByURL[i]
 		if len(tssShard) == 0 {
 			continue
 		}
+		wg.Add(1)
 		go func(rwctx *remoteWriteCtx, tss []prompbmarshal.TimeSeries) {
 			defer wg.Done()
 			if !rwctx.TryPush(tss) {
-				atomic.StoreUint64(&anyPushFailed, 1)
+				anyPushFailed.Store(true)
 			}
 		}(rwctx, tssShard)
 	}
 	wg.Wait()
-	return atomic.LoadUint64(&anyPushFailed) == 0
+	return !anyPushFailed.Load()
 }
 
 // Replicate data among rwctxs.

@@ -559,17 +563,17 @@ func tryPushBlockToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmar
 	// the time needed for sending the data to multiple remote storage systems.
 	var wg sync.WaitGroup
 	wg.Add(len(rwctxs))
-	var anyPushFailed uint64
+	var anyPushFailed atomic.Bool
 	for _, rwctx := range rwctxs {
 		go func(rwctx *remoteWriteCtx) {
 			defer wg.Done()
 			if !rwctx.TryPush(tssBlock) {
-				atomic.StoreUint64(&anyPushFailed, 1)
+				anyPushFailed.Store(true)
 			}
 		}(rwctx)
 	}
 	wg.Wait()
-	return atomic.LoadUint64(&anyPushFailed) == 0
+	return !anyPushFailed.Load()
 }
 
 // sortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set.
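Both functions above share a fan-out shape: spawn one goroutine per destination, record any failure in a shared `atomic.Bool`, and report overall success after `wg.Wait()`. A self-contained sketch of that shape under hypothetical names (`pushAll` and `push` are stand-ins, not repository code):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// pushAll fans a block out to every destination concurrently and
// reports whether all pushes succeeded; push stands in for rwctx.TryPush.
func pushAll(dests []string, push func(dest string) bool) bool {
	var wg sync.WaitGroup
	var anyPushFailed atomic.Bool
	for _, d := range dests {
		wg.Add(1)
		go func(d string) {
			defer wg.Done()
			if !push(d) {
				anyPushFailed.Store(true)
			}
		}(d)
	}
	wg.Wait()
	return !anyPushFailed.Load()
}

func main() {
	ok := pushAll([]string{"a", "b"}, func(d string) bool { return d != "b" })
	fmt.Println(ok) // false, since the push to "b" failed
}
```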
@@ -665,12 +669,14 @@ type remoteWriteCtx struct {
 	fq *persistentqueue.FastQueue
 	c  *client
 
-	sas atomic.Pointer[streamaggr.Aggregators]
+	sas          atomic.Pointer[streamaggr.Aggregators]
+	deduplicator *streamaggr.Deduplicator
 
 	streamAggrKeepInput bool
 	streamAggrDropInput bool
 
 	pss        []*pendingSeries
-	pssNextIdx uint64
+	pssNextIdx atomic.Uint64
 
 	rowsPushedAfterRelabel *metrics.Counter
 	rowsDroppedByRelabel   *metrics.Counter

@@ -738,9 +744,13 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
 
 	// Initialize sas
 	sasFile := streamAggrConfig.GetOptionalArg(argIdx)
+	dedupInterval := streamAggrDedupInterval.GetOptionalArg(argIdx)
 	if sasFile != "" {
-		dedupInterval := streamAggrDedupInterval.GetOptionalArg(argIdx)
-		sas, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, dedupInterval)
+		opts := &streamaggr.Options{
+			DedupInterval:   dedupInterval,
+			DropInputLabels: *streamAggrDropInputLabels,
+		}
+		sas, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, opts)
 		if err != nil {
 			logger.Fatalf("cannot initialize stream aggregators from -remoteWrite.streamAggr.config=%q: %s", sasFile, err)
 		}

@@ -749,17 +759,24 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
 		rwctx.streamAggrDropInput = streamAggrDropInput.GetOptionalArg(argIdx)
 		metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, sasFile)).Set(1)
 		metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_success_timestamp_seconds{path=%q}`, sasFile)).Set(fasttime.UnixTimestamp())
+	} else if dedupInterval > 0 {
+		rwctx.deduplicator = streamaggr.NewDeduplicator(rwctx.pushInternalTrackDropped, dedupInterval, *streamAggrDropInputLabels)
 	}
 
 	return rwctx
 }
 
 func (rwctx *remoteWriteCtx) MustStop() {
-	// sas must be stopped before rwctx is closed
+	// sas and deduplicator must be stopped before rwctx is closed
 	// because sas can write pending series to rwctx.pss if there are any
 	sas := rwctx.sas.Swap(nil)
 	sas.MustStop()
 
+	if rwctx.deduplicator != nil {
+		rwctx.deduplicator.MustStop()
+		rwctx.deduplicator = nil
+	}
+
 	for _, ps := range rwctx.pss {
 		ps.MustStop()
 	}
@@ -798,7 +815,7 @@ func (rwctx *remoteWriteCtx) TryPush(tss []prompbmarshal.TimeSeries) bool {
 	rowsCount := getRowsCount(tss)
 	rwctx.rowsPushedAfterRelabel.Add(rowsCount)
 
-	// Apply stream aggregation if any
+	// Apply stream aggregation or deduplication if they are configured
 	sas := rwctx.sas.Load()
 	if sas != nil {
 		matchIdxs := matchIdxsPool.Get()

@@ -813,6 +830,10 @@ func (rwctx *remoteWriteCtx) TryPush(tss []prompbmarshal.TimeSeries) bool {
 			tss = dropAggregatedSeries(tss, matchIdxs.B, rwctx.streamAggrDropInput)
 		}
 		matchIdxsPool.Put(matchIdxs)
+	} else if rwctx.deduplicator != nil {
+		rwctx.deduplicator.Push(tss)
+		clear(tss)
+		tss = tss[:0]
 	}
 
 	// Try pushing the data to remote storage

@@ -841,7 +862,7 @@ func dropAggregatedSeries(src []prompbmarshal.TimeSeries, matchIdxs []byte, drop
 		}
 	}
 	tail := src[len(dst):]
-	_ = prompbmarshal.ResetTimeSeries(tail)
+	clear(tail)
 	return dst
 }
 

@@ -872,7 +893,7 @@ func (rwctx *remoteWriteCtx) tryPushInternal(tss []prompbmarshal.TimeSeries) boo
 	}
 
 	pss := rwctx.pss
-	idx := atomic.AddUint64(&rwctx.pssNextIdx, 1) % uint64(len(pss))
+	idx := rwctx.pssNextIdx.Add(1) % uint64(len(pss))
 
 	ok := pss[idx].TryPush(tss)
 

@@ -894,8 +915,10 @@ func (rwctx *remoteWriteCtx) reinitStreamAggr() {
 
 	logger.Infof("reloading stream aggregation configs pointed by -remoteWrite.streamAggr.config=%q", sasFile)
 	metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reloads_total{path=%q}`, sasFile)).Inc()
-	dedupInterval := streamAggrDedupInterval.GetOptionalArg(rwctx.idx)
-	sasNew, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, dedupInterval)
+	opts := &streamaggr.Options{
+		DedupInterval: streamAggrDedupInterval.GetOptionalArg(rwctx.idx),
+	}
+	sasNew, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, opts)
 	if err != nil {
 		metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reloads_errors_total{path=%q}`, sasFile)).Inc()
 		metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, sasFile)).Set(0)

@@ -937,8 +960,10 @@ func CheckStreamAggrConfigs() error {
 		if sasFile == "" {
 			continue
 		}
-		dedupInterval := streamAggrDedupInterval.GetOptionalArg(idx)
-		sas, err := streamaggr.LoadFromFile(sasFile, pushNoop, dedupInterval)
+		opts := &streamaggr.Options{
+			DedupInterval: streamAggrDedupInterval.GetOptionalArg(idx),
+		}
+		sas, err := streamaggr.LoadFromFile(sasFile, pushNoop, opts)
 		if err != nil {
 			return fmt.Errorf("cannot load -remoteWrite.streamAggr.config=%q: %w", sasFile, err)
 		}
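The `streamaggr.LoadFromFile` call sites above move from a positional `dedupInterval` argument to an `Options` struct, which is why three separate callers change in lockstep here. A generic sketch of why that refactor helps (names are hypothetical, not the streamaggr API):

```go
package main

import "fmt"

// Before: each new knob forces a signature change at every call site.
func loadOld(path string, dedupIntervalSeconds int) error {
	fmt.Printf("loading %s with dedup=%ds\n", path, dedupIntervalSeconds)
	return nil
}

// After: an Options struct lets new fields (such as DropInputLabels)
// be added later without breaking existing callers.
type Options struct {
	DedupIntervalSeconds int
	DropInputLabels      []string
}

func load(path string, opts *Options) error {
	if opts == nil {
		opts = &Options{}
	}
	fmt.Printf("loading %s with %+v\n", path, *opts)
	return nil
}

func main() {
	_ = load("aggr.yaml", &Options{DedupIntervalSeconds: 30})
}
```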
@@ -50,7 +50,7 @@ var (
 )
 
 type statConn struct {
-	closed uint64
+	closed atomic.Int32
 	net.Conn
 }
 

@@ -76,7 +76,7 @@ func (sc *statConn) Write(p []byte) (int, error) {
 
 func (sc *statConn) Close() error {
 	err := sc.Conn.Close()
-	if atomic.AddUint64(&sc.closed, 1) == 1 {
+	if sc.closed.Add(1) == 1 {
 		conns.Dec()
 	}
 	return err
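The `closed.Add(1) == 1` check above makes Close idempotent for accounting purposes: however many times the connection is closed, the `conns` gauge is decremented exactly once. A tiny sketch of the same close-once pattern (the `conn` type is a stand-in):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type conn struct {
	closed atomic.Int32
}

// Close may be called multiple times, possibly concurrently; only the
// caller that observes the counter transition to 1 updates shared state.
func (c *conn) Close() {
	if c.closed.Add(1) == 1 {
		fmt.Println("gauge decremented")
	}
}

func main() {
	c := &conn{}
	c.Close() // gauge decremented
	c.Close() // no-op
}
```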
@@ -1158,9 +1158,9 @@
           $labels.pod }}.'
         runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh
         expr: |
-          sum(increase(container_cpu_cfs_throttled_periods_total{container!="", }[5m])) by (container, pod, namespace)
+          sum(increase(container_cpu_cfs_throttled_periods_total{container!="", }[5m])) by (cluster, container, pod, namespace)
             /
-          sum(increase(container_cpu_cfs_periods_total{}[5m])) by (container, pod, namespace)
+          sum(increase(container_cpu_cfs_periods_total{}[5m])) by (cluster, container, pod, namespace)
             > ( 25 / 100 )
         for: 15m
         labels:
@@ -46,7 +46,7 @@ var (
 	oauth2TokenURL = flag.String("datasource.oauth2.tokenUrl", "", "Optional OAuth2 tokenURL to use for -datasource.url")
 	oauth2Scopes   = flag.String("datasource.oauth2.scopes", "", "Optional OAuth2 scopes to use for -datasource.url. Scopes must be delimited by ';'")
 
-	lookBack = flag.Duration("datasource.lookback", 0, `Will be deprecated soon, please adjust "-search.latencyOffset" at datasource side `+
+	lookBack = flag.Duration("datasource.lookback", 0, `Deprecated: please adjust "-search.latencyOffset" at datasource side `+
 		`or specify "latency_offset" in rule group's params. Lookback defines how far into the past to look when evaluating queries. `+
 		`For example, if the datasource.lookback=5m then param "time" with value now()-5m will be added to every query.`)
 	queryStep = flag.Duration("datasource.queryStep", 5*time.Minute, "How far a value can fallback to when evaluating queries. "+

@@ -91,7 +91,7 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
 		logger.Warnf("flag `-datasource.queryTimeAlignment` is deprecated and will be removed in next releases. Please use `eval_alignment` in rule group instead.")
 	}
 	if *lookBack != 0 {
-		logger.Warnf("flag `-datasource.lookback` will be deprecated soon. Please use `-rule.evalDelay` command-line flag instead. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155 for details.")
+		logger.Warnf("flag `-datasource.lookback` is deprecated and will be removed in next releases. Please adjust `-search.latencyOffset` at datasource side or specify `latency_offset` in rule group's params. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155 for details.")
 	}
 
 	tr, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)

@@ -133,7 +133,6 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
 		authCfg:          authCfg,
 		datasourceURL:    strings.TrimSuffix(*addr, "/"),
 		appendTypePrefix: *appendTypePrefix,
-		lookBack:         *lookBack,
 		queryStep:        *queryStep,
 		dataSourceType:   datasourcePrometheus,
 		extraParams:      extraParams,
@@ -35,7 +35,6 @@ type VMStorage struct {
 	authCfg          *promauth.Config
 	datasourceURL    string
 	appendTypePrefix bool
-	lookBack         time.Duration
 	queryStep        time.Duration
 	dataSourceType   datasourceType
 

@@ -63,7 +62,6 @@ func (s *VMStorage) Clone() *VMStorage {
 		authCfg:          s.authCfg,
 		datasourceURL:    s.datasourceURL,
 		appendTypePrefix: s.appendTypePrefix,
-		lookBack:         s.lookBack,
 		queryStep:        s.queryStep,
 
 		dataSourceType: s.dataSourceType,

@@ -122,13 +120,12 @@ func (s *VMStorage) BuildWithParams(params QuerierParams) Querier {
 }
 
 // NewVMStorage is a constructor for VMStorage
-func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Duration, queryStep time.Duration, appendTypePrefix bool, c *http.Client) *VMStorage {
+func NewVMStorage(baseURL string, authCfg *promauth.Config, queryStep time.Duration, appendTypePrefix bool, c *http.Client) *VMStorage {
 	return &VMStorage{
 		c:                c,
 		authCfg:          authCfg,
 		datasourceURL:    strings.TrimSuffix(baseURL, "/"),
 		appendTypePrefix: appendTypePrefix,
-		lookBack:         lookBack,
 		queryStep:        queryStep,
 		dataSourceType:   datasourcePrometheus,
 		extraParams:      url.Values{},
@@ -137,11 +134,11 @@ func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Durati
 
 // Query executes the given query and returns parsed response
 func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) (Result, *http.Request, error) {
-	req, err := s.newQueryRequest(query, ts)
+	req, err := s.newQueryRequest(ctx, query, ts)
 	if err != nil {
 		return Result{}, nil, err
 	}
-	resp, err := s.do(ctx, req)
+	resp, err := s.do(req)
 	if err != nil {
 		if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
 			// Return unexpected error to the caller.

@@ -149,11 +146,11 @@ func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) (Resu
 		}
 		// Something in the middle between client and datasource might be closing
 		// the connection. So we do a one more attempt in hope request will succeed.
-		req, err = s.newQueryRequest(query, ts)
+		req, err = s.newQueryRequest(ctx, query, ts)
 		if err != nil {
 			return Result{}, nil, fmt.Errorf("second attempt: %w", err)
 		}
-		resp, err = s.do(ctx, req)
+		resp, err = s.do(req)
 		if err != nil {
 			return Result{}, nil, fmt.Errorf("second attempt: %w", err)
 		}

@@ -182,11 +179,11 @@ func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end tim
 	if end.IsZero() {
 		return res, fmt.Errorf("end param is missing")
 	}
-	req, err := s.newQueryRangeRequest(query, start, end)
+	req, err := s.newQueryRangeRequest(ctx, query, start, end)
 	if err != nil {
 		return res, err
 	}
-	resp, err := s.do(ctx, req)
+	resp, err := s.do(req)
 	if err != nil {
 		if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
 			// Return unexpected error to the caller.

@@ -194,11 +191,11 @@ func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end tim
 	}
 	// Something in the middle between client and datasource might be closing
 	// the connection. So we do a one more attempt in hope request will succeed.
-	req, err = s.newQueryRangeRequest(query, start, end)
+	req, err = s.newQueryRangeRequest(ctx, query, start, end)
 	if err != nil {
 		return res, fmt.Errorf("second attempt: %w", err)
 	}
-	resp, err = s.do(ctx, req)
+	resp, err = s.do(req)
 	if err != nil {
 		return res, fmt.Errorf("second attempt: %w", err)
 	}

@@ -210,7 +207,7 @@ func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end tim
 	return res, err
 }
 
-func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response, error) {
+func (s *VMStorage) do(req *http.Request) (*http.Response, error) {
 	ru := req.URL.Redacted()
 	if *showDatasourceURL {
 		ru = req.URL.String()

@@ -218,7 +215,7 @@ func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response,
 	if s.debug {
 		logger.Infof("DEBUG datasource request: executing %s request with params %q", req.Method, ru)
 	}
-	resp, err := s.c.Do(req.WithContext(ctx))
+	resp, err := s.c.Do(req)
 	if err != nil {
 		return nil, fmt.Errorf("error getting response from %s: %w", ru, err)
 	}

@@ -230,8 +227,8 @@ func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response,
 	return resp, nil
 }
 
-func (s *VMStorage) newQueryRangeRequest(query string, start, end time.Time) (*http.Request, error) {
-	req, err := s.newRequest()
+func (s *VMStorage) newQueryRangeRequest(ctx context.Context, query string, start, end time.Time) (*http.Request, error) {
+	req, err := s.newRequest(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("cannot create query_range request to datasource %q: %w", s.datasourceURL, err)
 	}

@@ -239,8 +236,8 @@ func (s *VMStorage) newQueryRangeRequest(query string, start, end time.Time) (*h
 	return req, nil
 }
 
-func (s *VMStorage) newQueryRequest(query string, ts time.Time) (*http.Request, error) {
-	req, err := s.newRequest()
+func (s *VMStorage) newQueryRequest(ctx context.Context, query string, ts time.Time) (*http.Request, error) {
+	req, err := s.newRequest(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("cannot create query request to datasource %q: %w", s.datasourceURL, err)
 	}

@@ -248,15 +245,15 @@ func (s *VMStorage) newQueryRequest(query string, ts time.Time) (*http.Request,
 	case "", datasourcePrometheus:
 		s.setPrometheusInstantReqParams(req, query, ts)
 	case datasourceGraphite:
-		s.setGraphiteReqParams(req, query, ts)
+		s.setGraphiteReqParams(req, query)
 	default:
 		logger.Panicf("BUG: engine not found: %q", s.dataSourceType)
 	}
 	return req, nil
 }
 
-func (s *VMStorage) newRequest() (*http.Request, error) {
-	req, err := http.NewRequest(http.MethodPost, s.datasourceURL, nil)
+func (s *VMStorage) newRequest(ctx context.Context) (*http.Request, error) {
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.datasourceURL, nil)
 	if err != nil {
 		logger.Panicf("BUG: unexpected error from http.NewRequest(%q): %s", s.datasourceURL, err)
 	}
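A recurring change in this file is building requests with `http.NewRequestWithContext` instead of attaching the context via `req.WithContext(ctx)` at send time, so a request that is rebuilt for the retry attempt automatically carries the caller's deadline. A minimal sketch of that approach (the URL is only an example):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// fetch ties cancellation to the request at construction time; any code
// path that rebuilds the request with the same ctx inherits the deadline
// without a separate WithContext call before Do.
func fetch(ctx context.Context, url string) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = fetch(ctx, "http://localhost:8428/api/v1/query") // example endpoint
}
```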
@@ -4,8 +4,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"strconv"
-	"time"
 )
 
 type graphiteResponse []graphiteResponseTarget

@@ -48,17 +46,13 @@ const (
 	graphitePrefix = "/graphite"
 )
 
-func (s *VMStorage) setGraphiteReqParams(r *http.Request, query string, timestamp time.Time) {
+func (s *VMStorage) setGraphiteReqParams(r *http.Request, query string) {
 	if s.appendTypePrefix {
 		r.URL.Path += graphitePrefix
 	}
 	r.URL.Path += graphitePath
 	q := r.URL.Query()
 	from := "-5min"
-	if s.lookBack > 0 {
-		lookBack := timestamp.Add(-s.lookBack)
-		from = strconv.FormatInt(lookBack.Unix(), 10)
-	}
 	q.Set("from", from)
 	q.Set("format", "json")
 	q.Set("target", query)
@@ -161,9 +161,6 @@ func (s *VMStorage) setPrometheusInstantReqParams(r *http.Request, query string,
 		r.URL.Path += "/api/v1/query"
 	}
 	q := r.URL.Query()
-	if s.lookBack > 0 {
-		timestamp = timestamp.Add(-s.lookBack)
-	}
 	q.Set("time", timestamp.Format(time.RFC3339))
 	if !*disableStepParam && s.evaluationInterval > 0 { // set step as evaluationInterval by default
 		// always convert to seconds to keep compatibility with older
@@ -86,7 +86,7 @@ func TestVMInstantQuery(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected: %s", err)
 	}
-	s := NewVMStorage(srv.URL, authCfg, time.Minute, 0, false, srv.Client())
+	s := NewVMStorage(srv.URL, authCfg, 0, false, srv.Client())
 
 	p := datasourcePrometheus
 	pq := s.BuildWithParams(QuerierParams{DataSourceType: string(p), EvaluationInterval: 15 * time.Second})

@@ -225,7 +225,7 @@ func TestVMInstantQueryWithRetry(t *testing.T) {
 	srv := httptest.NewServer(mux)
 	defer srv.Close()
 
-	s := NewVMStorage(srv.URL, nil, time.Minute, 0, false, srv.Client())
+	s := NewVMStorage(srv.URL, nil, 0, false, srv.Client())
 	pq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourcePrometheus)})
 
 	expErr := func(err string) {

@@ -334,7 +334,7 @@ func TestVMRangeQuery(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected: %s", err)
 	}
-	s := NewVMStorage(srv.URL, authCfg, time.Minute, *queryStep, false, srv.Client())
+	s := NewVMStorage(srv.URL, authCfg, *queryStep, false, srv.Client())
 
 	pq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourcePrometheus), EvaluationInterval: 15 * time.Second})
 
@@ -487,17 +487,6 @@ func TestRequestParams(t *testing.T) {
 			checkEqualString(t, "bar", p)
 		},
 	},
-	{
-		"lookback",
-		false,
-		&VMStorage{
-			lookBack: time.Minute,
-		},
-		func(t *testing.T, r *http.Request) {
-			exp := url.Values{"query": {query}, "time": {timestamp.Add(-time.Minute).Format(time.RFC3339)}}
-			checkEqualString(t, exp.Encode(), r.URL.RawQuery)
-		},
-	},
 	{
 		"evaluation interval",
 		false,

@@ -510,20 +499,6 @@ func TestRequestParams(t *testing.T) {
 			checkEqualString(t, exp.Encode(), r.URL.RawQuery)
 		},
 	},
-	{
-		"lookback + evaluation interval",
-		false,
-		&VMStorage{
-			lookBack:           time.Minute,
-			evaluationInterval: 15 * time.Second,
-		},
-		func(t *testing.T, r *http.Request) {
-			evalInterval := 15 * time.Second
-			tt := timestamp.Add(-time.Minute)
-			exp := url.Values{"query": {query}, "step": {evalInterval.String()}, "time": {tt.Format(time.RFC3339)}}
-			checkEqualString(t, exp.Encode(), r.URL.RawQuery)
-		},
-	},
 	{
 		"step override",
 		false,

@@ -637,7 +612,7 @@ func TestRequestParams(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			req, err := tc.vm.newRequest()
+			req, err := tc.vm.newRequest(ctx)
 			if err != nil {
 				t.Fatal(err)
 			}

@@ -649,7 +624,7 @@ func TestRequestParams(t *testing.T) {
 				tc.vm.setPrometheusInstantReqParams(req, query, timestamp)
 			}
 		case datasourceGraphite:
-			tc.vm.setGraphiteReqParams(req, query, timestamp)
+			tc.vm.setGraphiteReqParams(req, query)
 		}
 		tc.checkFn(t, req)
 	})

@@ -735,7 +710,7 @@ func TestHeaders(t *testing.T) {
 	for _, tt := range testCases {
 		t.Run(tt.name, func(t *testing.T) {
 			vm := tt.vmFn()
-			req, err := vm.newQueryRequest("foo", time.Now())
+			req, err := vm.newQueryRequest(ctx, "foo", time.Now())
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -79,5 +79,5 @@ func Init() (datasource.QuerierBuilder, error) {
 		return nil, fmt.Errorf("failed to configure auth: %w", err)
 	}
 	c := &http.Client{Transport: tr}
-	return datasource.NewVMStorage(*addr, authCfg, 0, 0, false, c), nil
+	return datasource.NewVMStorage(*addr, authCfg, 0, false, c), nil
 }
@@ -279,7 +279,7 @@ L:
 
 func (c *Client) send(ctx context.Context, data []byte) error {
 	r := bytes.NewReader(data)
-	req, err := http.NewRequest(http.MethodPost, c.addr, r)
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.addr, r)
 	if err != nil {
 		return fmt.Errorf("failed to create new HTTP request: %w", err)
 	}

@@ -302,7 +302,7 @@ func (c *Client) send(ctx context.Context, data []byte) error {
 	if !*disablePathAppend {
 		req.URL.Path = path.Join(req.URL.Path, "/api/v1/write")
 	}
-	resp, err := c.c.Do(req.WithContext(ctx))
+	resp, err := c.c.Do(req)
 	if err != nil {
 		return fmt.Errorf("error while sending request to %s: %w; Data len %d(%d)",
 			req.URL.Redacted(), err, len(data), r.Size())
@@ -91,14 +91,12 @@ func newRWServer() *rwServer {
 }
 
 type rwServer struct {
-	// WARN: ordering of fields is important for alignment!
-	// see https://golang.org/pkg/sync/atomic/#pkg-note-BUG
-	acceptedRows uint64
+	acceptedRows atomic.Uint64
 	*httptest.Server
 }
 
 func (rw *rwServer) accepted() int {
-	return int(atomic.LoadUint64(&rw.acceptedRows))
+	return int(rw.acceptedRows.Load())
 }
 
 func (rw *rwServer) err(w http.ResponseWriter, err error) {

@@ -144,7 +142,7 @@ func (rw *rwServer) handler(w http.ResponseWriter, r *http.Request) {
 		rw.err(w, fmt.Errorf("unmarhsal err: %w", err))
 		return
 	}
-	atomic.AddUint64(&rw.acceptedRows, uint64(len(wr.Timeseries)))
+	rw.acceptedRows.Add(uint64(len(wr.Timeseries)))
 	w.WriteHeader(http.StatusNoContent)
 }
 
@@ -1,5 +1,12 @@
 function expandAll() {
-    $('.collapse').addClass('show');
+    $('.group-heading').each(function () {
+        let style = $(this).attr("style")
+        // display only elements that are currently visible
+        if (style === "display: none;") {
+            return
+        }
+        $(this).next().addClass('show')
+    });
 }
 
 function collapseAll() {

@@ -15,6 +22,100 @@ function toggleByID(id) {
     }
 }
 
+function debounce(func, delay) {
+    let timer;
+    return function (...args) {
+        clearTimeout(timer);
+        timer = setTimeout(() => {
+            func.apply(this, args);
+        }, delay);
+    };
+}
+
+$('#search').on("keyup", debounce(search, 500));
+
+// search shows or hides groups&rules that satisfy the search phrase.
+// case-insensitive, respects GET param `search`.
+function search() {
+    $(".rule").show();
+
+    let groupHeader = $(".group-heading")
+    let searchPhrase = $("#search").val().toLowerCase()
+    if (searchPhrase.length === 0) {
+        groupHeader.show()
+        setParamURL('search', '')
+        return
+    }
+
+    $(".rule-table").removeClass('show');
+    groupHeader.hide()
+
+    searchPhrase = searchPhrase.toLowerCase()
+    filterRuleByName(searchPhrase);
+    filterRuleByLabels(searchPhrase);
+    filterGroupsByName(searchPhrase);
+
+    setParamURL('search', searchPhrase)
+}
+
+function setParamURL(key, value) {
+    let url = new URL(location.href)
+    url.searchParams.set(key, value);
+    window.history.replaceState(null, null, `?${url.searchParams.toString()}`);
+}
+
+function getParamURL(key) {
+    let url = new URL(location.href)
+    return url.searchParams.get(key)
+}
+
+function filterGroupsByName(searchPhrase) {
+    $(".group-heading").each(function () {
+        const groupName = $(this).attr('data-group-name').toLowerCase();
+        const hasValue = groupName.indexOf(searchPhrase) >= 0
+
+        if (!hasValue) {
+            return
+        }
+
+        const target = $(this).attr("data-bs-target");
+        $(`div[id="${target}"] .rule`).show();
+        $(this).show();
+    });
+}
+
+function filterRuleByName(searchPhrase) {
+    $(".rule").each(function () {
+        const ruleName = $(this).attr("data-rule-name").toLowerCase();
+        const hasValue = ruleName.indexOf(searchPhrase) >= 0
+        if (!hasValue) {
+            $(this).hide();
+            return
+        }
+
+        const target = $(this).attr('data-bs-target')
+        $(`#rules-${target}`).addClass('show');
+        $(`div[data-bs-target='rules-${target}']`).show();
+        $(this).show();
+    });
+}
+
+function filterRuleByLabels(searchPhrase) {
+    $(".rule").each(function () {
+        const matches = $(".label", this).filter(function () {
+            const label = $(this).text().toLowerCase();
+            return label.indexOf(searchPhrase) >= 0;
+        }).length;
+
+        if (matches > 0) {
+            const target = $(this).attr('data-bs-target')
+            $(`#rules-${target}`).addClass('show');
+            $(`div[data-bs-target='rules-${target}']`).show();
+            $(this).show();
+        }
+    });
+}
+
 $(document).ready(function () {
     $(".group-heading a").click(function (e) {
         e.stopPropagation(); // prevent collapse logic on link click

@@ -32,6 +133,13 @@ $(document).ready(function () {
         });
     });
 
+    // update search element with value from URL, if any
+    let searchPhrase = getParamURL('search')
+    $("#search").val(searchPhrase)
+
+    // apply filtering by search phrase
+    search()
+
     let hash = window.location.hash.substr(1);
     toggleByID(hash);
 });
@@ -296,7 +296,11 @@ func (rh *requestHandler) groups(rf rulesFilter) []apiGroup {
 	}
 	// sort list of groups for deterministic output
 	sort.Slice(groups, func(i, j int) bool {
-		return groups[i].Name < groups[j].Name
+		a, b := groups[i], groups[j]
+		if a.Name != b.Name {
+			return a.Name < b.Name
+		}
+		return a.File < b.File
 	})
 	return groups
 }
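The sort above gains a tie-breaker so groups with equal names are ordered by file, keeping the API output deterministic when the same group name appears in several rule files. A standalone sketch of multi-key comparison in `sort.Slice` (the `group` type here is a stand-in):

```go
package main

import (
	"fmt"
	"sort"
)

type group struct{ Name, File string }

func main() {
	groups := []group{{"g", "b.yaml"}, {"a", "x.yaml"}, {"g", "a.yaml"}}
	// Compare by Name first and fall back to File on ties, so the order
	// is fully determined by the (Name, File) pair.
	sort.Slice(groups, func(i, j int) bool {
		a, b := groups[i], groups[j]
		if a.Name != b.Name {
			return a.Name < b.Name
		}
		return a.File < b.File
	})
	fmt.Println(groups) // [{a x.yaml} {g a.yaml} {g b.yaml}]
}
```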
@@ -70,15 +70,29 @@ btn-primary
 }
 }
 %}
-<a class="btn {%= buttonActive(filter, "") %}" role="button" onclick="window.location = window.location.pathname">All</a>
-<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
-<a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
-<a class="btn {%= buttonActive(filter, "unhealthy") %}" role="button" onclick="location.href='?filter=unhealthy'" title="Show only rules with errors">Unhealthy</a>
-<a class="btn {%= buttonActive(filter, "noMatch") %}" role="button" onclick="location.href='?filter=noMatch'" title="Show only rules matching no time series during last evaluation">NoMatch</a>
+<div class="btn-toolbar mb-3" role="toolbar">
+    <div>
+        <a class="btn {%= buttonActive(filter, "") %}" role="button" onclick="window.location = window.location.pathname">All</a>
+        <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
+        <a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
+        <a class="btn {%= buttonActive(filter, "unhealthy") %}" role="button" onclick="location.href='?filter=unhealthy'" title="Show only rules with errors">Unhealthy</a>
+        <a class="btn {%= buttonActive(filter, "noMatch") %}" role="button" onclick="location.href='?filter=noMatch'" title="Show only rules matching no time series during last evaluation">NoMatch</a>
+    </div>
+    <div class="col-md-4 col-lg-5">
+        <div class="px-3 input-group">
+            <div class="input-group-prepend">
+                <span class="input-group-text">
+                    <svg fill="#000000" height="25px" width="20px" version="1.1" id="Capa_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 490.4 490.4" xml:space="preserve"><g id="SVGRepo_bgCarrier" stroke-width="0"></g><g id="SVGRepo_tracerCarrier" stroke-linecap="round" stroke-linejoin="round"></g><g id="SVGRepo_iconCarrier"> <g> <path d="M484.1,454.796l-110.5-110.6c29.8-36.3,47.6-82.8,47.6-133.4c0-116.3-94.3-210.6-210.6-210.6S0,94.496,0,210.796 s94.3,210.6,210.6,210.6c50.8,0,97.4-18,133.8-48l110.5,110.5c12.9,11.8,25,4.2,29.2,0C492.5,475.596,492.5,463.096,484.1,454.796z M41.1,210.796c0-93.6,75.9-169.5,169.5-169.5s169.6,75.9,169.6,169.5s-75.9,169.5-169.5,169.5S41.1,304.396,41.1,210.796z"></path> </g> </g></svg>
+                </span>
+            </div>
+            <input id="search" placeholder="Filter by group, rule or labels" type="text" class="form-control"/>
+        </div>
+    </div>
+</div>
 {% if len(groups) > 0 %}
 {% for _, g := range groups %}
 <div
-    class="group-heading{% if rNotOk[g.ID] > 0 %} alert-danger{%endif%}" data-bs-target="rules-{%s g.ID %}">
+    class="group-heading{% if rNotOk[g.ID] > 0 %} alert-danger{%endif%}" data-bs-target="rules-{%s g.ID %}" data-group-name="{%s g.Name %}">
     <span class="anchor" id="group-{%s g.ID %}"></span>
     <a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %} (every {%f.0 g.Interval %}s) #</a>
     {% if rNotOk[g.ID] > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d rNotOk[g.ID] %}</span> {% endif %}

@@ -100,7 +114,7 @@ btn-primary
     </div>
     {% endif %}
 </div>
-<div class="collapse" id="rules-{%s g.ID %}">
+<div class="collapse rule-table" id="rules-{%s g.ID %}">
     <table class="table table-striped table-hover table-sm">
         <thead>
             <tr>

@@ -111,7 +125,7 @@ btn-primary
         </thead>
         <tbody>
         {% for _, r := range g.Rules %}
-            <tr{% if r.LastError != "" %} class="alert-danger"{% endif %}>
+            <tr class="rule{% if r.LastError != "" %} alert-danger{% endif %}" data-rule-name="{%s r.Name %}" data-bs-target="{%s g.ID %}">
                 <td>
                     <div class="row">
                         <div class="col-12 mb-2">

@@ -134,7 +148,7 @@ btn-primary
                         <div class="col-12 mb-2">
                             {% if len(r.Labels) > 0 %} <b>Labels:</b>{% endif %}
                             {% for k, v := range r.Labels %}
-                                <span class="ms-1 badge bg-primary">{%s k %}={%s v %}</span>
+                                <span class="ms-1 badge bg-primary label">{%s k %}={%s v %}</span>
                             {% endfor %}
                         </div>
                         {% if r.LastError != "" %}

@@ -170,11 +184,25 @@ btn-primary
 {%code prefix := utils.Prefix(r.URL.Path) %}
 {%= tpl.Header(r, navItems, "Alerts", getLastConfigError()) %}
 {% if len(groupAlerts) > 0 %}
-    <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
-    <a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
+    <div class="btn-toolbar mb-3" role="toolbar">
+        <div>
+            <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
+            <a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
+        </div>
+        <div class="col-md-4 col-lg-5">
+            <div class="px-3 input-group">
+                <div class="input-group-prepend">
+                    <span class="input-group-text">
+                        <svg fill="#000000" height="25px" width="20px" version="1.1" id="Capa_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 490.4 490.4" xml:space="preserve"><g id="SVGRepo_bgCarrier" stroke-width="0"></g><g id="SVGRepo_tracerCarrier" stroke-linecap="round" stroke-linejoin="round"></g><g id="SVGRepo_iconCarrier"> <g> <path d="M484.1,454.796l-110.5-110.6c29.8-36.3,47.6-82.8,47.6-133.4c0-116.3-94.3-210.6-210.6-210.6S0,94.496,0,210.796 s94.3,210.6,210.6,210.6c50.8,0,97.4-18,133.8-48l110.5,110.5c12.9,11.8,25,4.2,29.2,0C492.5,475.596,492.5,463.096,484.1,454.796z M41.1,210.796c0-93.6,75.9-169.5,169.5-169.5s169.6,75.9,169.6,169.5s-75.9,169.5-169.5,169.5S41.1,304.396,41.1,210.796z"></path> </g> </g></svg>
+                    </span>
+                </div>
+                <input id="search" placeholder="Filter by group, rule or labels" type="text" class="form-control"/>
+            </div>
+        </div>
+    </div>
     {% for _, ga := range groupAlerts %}
     {%code g := ga.Group %}
-    <div class="group-heading alert-danger" data-bs-target="rules-{%s g.ID %}">
+    <div class="group-heading alert-danger" data-bs-target="rules-{%s g.ID %}" data-group-name="{%s g.Name %}">
         <span class="anchor" id="group-{%s g.ID %}"></span>
         <a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %}</a>
         <span class="badge bg-danger" title="Number of active alerts">{%d len(ga.Alerts) %}</span>

@@ -192,7 +220,7 @@ btn-primary
     }
     sort.Strings(keys)
     %}
-    <div class="collapse" id="rules-{%s g.ID %}">
+    <div class="collapse rule-table" id="rules-{%s g.ID %}">
     {% for _, ruleID := range keys %}
     {%code
         defaultAR := alertsByRule[ruleID][0]

@@ -203,45 +231,46 @@ btn-primary
         sort.Strings(labelKeys)
     %}
     <br>
-    <b>alert:</b> {%s defaultAR.Name %} ({%d len(alertsByRule[ruleID]) %})
-    | <span><a target="_blank" href="{%s defaultAR.SourceLink %}">Source</a></span>
-    <br>
-    <b>expr:</b><code><pre>{%s defaultAR.Expression %}</pre></code>
-    <table class="table table-striped table-hover table-sm">
-        <thead>
-            <tr>
-                <th scope="col">Labels</th>
-                <th scope="col">State</th>
-                <th scope="col">Active at</th>
-                <th scope="col">Value</th>
-                <th scope="col">Link</th>
-            </tr>
-        </thead>
-        <tbody>
-        {% for _, ar := range alertsByRule[ruleID] %}
-            <tr>
-                <td>
-                    {% for _, k := range labelKeys %}
-                        <span class="ms-1 badge bg-primary">{%s k %}={%s ar.Labels[k] %}</span>
-                    {% endfor %}
-                </td>
-                <td>{%= badgeState(ar.State) %}</td>
-                <td>
-                    {%s ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00") %}
-                    {% if ar.Restored %}{%= badgeRestored() %}{% endif %}
-                    {% if ar.Stabilizing %}{%= badgeStabilizing() %}{% endif %}
-                </td>
-                <td>{%s ar.Value %}</td>
-                <td>
-                    <a href="{%s prefix+ar.WebLink() %}">Details</a>
-                </td>
-            </tr>
-        {% endfor %}
-        </tbody>
-    </table>
+    <div class="rule" data-rule-name="{%s defaultAR.Name %}" data-bs-target="{%s g.ID %}">
+        <b>alert:</b> {%s defaultAR.Name %} ({%d len(alertsByRule[ruleID]) %})
+        | <span><a target="_blank" href="{%s defaultAR.SourceLink %}">Source</a></span>
+        <br>
+        <b>expr:</b><code><pre>{%s defaultAR.Expression %}</pre></code>
+        <table class="table table-striped table-hover table-sm">
+            <thead>
+                <tr>
+                    <th scope="col">Labels</th>
+                    <th scope="col">State</th>
+                    <th scope="col">Active at</th>
+                    <th scope="col">Value</th>
+                    <th scope="col">Link</th>
+                </tr>
+            </thead>
+            <tbody>
+            {% for _, ar := range alertsByRule[ruleID] %}
+                <tr>
+                    <td>
+                        {% for _, k := range labelKeys %}
+                            <span class="ms-1 badge bg-primary label">{%s k %}={%s ar.Labels[k] %}</span>
+                        {% endfor %}
+                    </td>
+                    <td>{%= badgeState(ar.State) %}</td>
+                    <td>
+                        {%s ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00") %}
+                        {% if ar.Restored %}{%= badgeRestored() %}{% endif %}
+                        {% if ar.Stabilizing %}{%= badgeStabilizing() %}{% endif %}
+                    </td>
+                    <td>{%s ar.Value %}</td>
+                    <td>
+                        <a href="{%s prefix+ar.WebLink() %}">Details</a>
+                    </td>
+                </tr>
+            {% endfor %}
+            </tbody>
+        </table>
+    </div>
     {% endfor %}
     </div>
     <br>
 {% endfor %}
 
 {% else %}
File diff suppressed because it is too large
@@ -2,15 +2,17 @@ package main
 
 import (
 	"bytes"
 	"context"
 	"encoding/base64"
 	"flag"
 	"fmt"
 	"math"
 	"net"
 	"net/http"
 	"net/url"
 	"os"
 	"regexp"
 	"sort"
 	"strconv"
 	"strings"
 	"sync"
 	"sync/atomic"

@@ -36,7 +38,11 @@ var (
 	defaultRetryStatusCodes = flagutil.NewArrayInt("retryStatusCodes", 0, "Comma-separated list of default HTTP response status codes when vmauth re-tries the request on other backends. "+
 		"See https://docs.victoriametrics.com/vmauth.html#load-balancing for details")
 	defaultLoadBalancingPolicy = flag.String("loadBalancingPolicy", "least_loaded", "The default load balancing policy to use for backend urls specified inside url_prefix section. "+
-		"Supported policies: least_loaded, first_available. See https://docs.victoriametrics.com/vmauth.html#load-balancing for more details")
+		"Supported policies: least_loaded, first_available. See https://docs.victoriametrics.com/vmauth.html#load-balancing")
+	discoverBackendIPsGlobal = flag.Bool("discoverBackendIPs", false, "Whether to discover backend IPs via periodic DNS queries to hostnames specified in url_prefix. "+
+		"This may be useful when url_prefix points to a hostname with dynamically scaled instances behind it. See https://docs.victoriametrics.com/vmauth.html#discovering-backend-ips")
+	discoverBackendIPsInterval = flag.Duration("discoverBackendIPsInterval", 10*time.Second, "The interval for re-discovering backend IPs if -discoverBackendIPs command-line flag is set. "+
+		"Too low value may lead to DNS errors")
 )
 
 // AuthConfig represents auth config.

@@ -50,11 +56,14 @@ type AuthConfig struct {
 
 // UserInfo is user information read from authConfigPath
 type UserInfo struct {
-	Name        string `yaml:"name,omitempty"`
-	BearerToken string `yaml:"bearer_token,omitempty"`
-	Username    string `yaml:"username,omitempty"`
-	Password    string `yaml:"password,omitempty"`
+	Name string `yaml:"name,omitempty"`
+
+	BearerToken string `yaml:"bearer_token,omitempty"`
+	Username    string `yaml:"username,omitempty"`
+	Password    string `yaml:"password,omitempty"`
+
 	URLPrefix             *URLPrefix  `yaml:"url_prefix,omitempty"`
+	DiscoverBackendIPs    *bool       `yaml:"discover_backend_ips,omitempty"`
 	URLMaps               []URLMap    `yaml:"url_map,omitempty"`
 	HeadersConf           HeadersConf `yaml:",inline"`
 	MaxConcurrentRequests int         `yaml:"max_concurrent_requests,omitempty"`

@@ -109,6 +118,8 @@ func (ui *UserInfo) getMaxConcurrentRequests() int {
 type Header struct {
 	Name  string
 	Value string
+
+	sOriginal string
 }
 
 // UnmarshalYAML unmarshals h from f.

@@ -117,6 +128,8 @@ func (h *Header) UnmarshalYAML(f func(interface{}) error) error {
 	if err := f(&s); err != nil {
 		return err
 	}
+	h.sOriginal = s
+
 	n := strings.IndexByte(s, ':')
 	if n < 0 {
 		return fmt.Errorf("missing speparator char ':' between Name and Value in the header %q; expected format - 'Name: Value'", s)

@@ -128,21 +141,29 @@ func (h *Header) UnmarshalYAML(f func(interface{}) error) error {
 
 // MarshalYAML marshals h to yaml.
 func (h *Header) MarshalYAML() (interface{}, error) {
-	s := fmt.Sprintf("%s: %s", h.Name, h.Value)
-	return s, nil
+	return h.sOriginal, nil
 }
 
 // URLMap is a mapping from source paths to target urls.
 type URLMap struct {
-	// SrcHosts is the list of regular expressions, which match the request hostname.
+	// SrcPaths is an optional list of regular expressions, which must match the request path.
+	SrcPaths []*Regex `yaml:"src_paths,omitempty"`
+
+	// SrcHosts is an optional list of regular expressions, which must match the request hostname.
 	SrcHosts []*Regex `yaml:"src_hosts,omitempty"`
 
-	// SrcPaths is the list of regular expressions, which match the request path.
-	SrcPaths []*Regex `yaml:"src_paths,omitempty"`
+	// SrcQueryArgs is an optional list of query args, which must match request URL query args.
+	SrcQueryArgs []QueryArg `yaml:"src_query_args,omitempty"`
+
+	// SrcHeaders is an optional list of headers, which must match request headers.
+	SrcHeaders []Header `yaml:"src_headers,omitempty"`
 
 	// UrlPrefix contains backend url prefixes for the proxied request url.
 	URLPrefix *URLPrefix `yaml:"url_prefix,omitempty"`
 
+	// DiscoverBackendIPs instructs discovering URLPrefix backend IPs via DNS.
+	DiscoverBackendIPs *bool `yaml:"discover_backend_ips,omitempty"`
+
 	// HeadersConf is the config for augumenting request and response headers.
 	HeadersConf HeadersConf `yaml:",inline"`

@@ -158,25 +179,70 @@ type URLMap struct {
 
 // Regex represents a regex
 type Regex struct {
-	re *regexp.Regexp
+	sOriginal string
+	re        *regexp.Regexp
 }
 
+// QueryArg represents HTTP query arg
+type QueryArg struct {
+	Name  string
+	Value string
+
+	sOriginal string
+}
+
+// UnmarshalYAML unmarshals up from yaml.
+func (qa *QueryArg) UnmarshalYAML(f func(interface{}) error) error {
+	var s string
+	if err := f(&s); err != nil {
+		return err
+	}
+	qa.sOriginal = s
+
+	n := strings.IndexByte(s, '=')
+	if n >= 0 {
+		qa.Name = s[:n]
+		qa.Value = s[n+1:]
+	}
+	return nil
+}
+
+// MarshalYAML marshals up to yaml.
+func (qa *QueryArg) MarshalYAML() (interface{}, error) {
+	return qa.sOriginal, nil
+}
+
 // URLPrefix represents passed `url_prefix`
 type URLPrefix struct {
-	n uint32
-
-	// the list of backend urls
-	bus []*backendURL
-
 	// requests are re-tried on other backend urls for these http response status codes
 	retryStatusCodes []int
 
 	// load balancing policy used
 	loadBalancingPolicy string
 
-	// how many request path prefix parts to drop before routing the request to backendURL.
+	// how many request path prefix parts to drop before routing the request to backendURL
 	dropSrcPathPrefixParts int
+
+	// busOriginal contains the original list of backends specified in yaml config.
+	busOriginal []*url.URL
+
+	// n is an atomic counter, which is used for balancing load among available backends.
+	n atomic.Uint32
+
+	// the list of backend urls
+	//
+	// the list can be dynamically updated if `discover_backend_ips` option is set.
+	bus atomic.Pointer[[]*backendURL]
+
+	// if this option is set, then backend ips for busOriginal are periodically re-discovered and put to bus.
+	discoverBackendIPs bool
+
+	// The next deadline for DNS-based discovery of backend IPs
+	nextDiscoveryDeadline atomic.Uint64
+
+	// vOriginal contains the original yaml value for URLPrefix.
+	vOriginal interface{}
 }
 
 func (up *URLPrefix) setLoadBalancingPolicy(loadBalancingPolicy string) error {
@ -192,49 +258,146 @@ func (up *URLPrefix) setLoadBalancingPolicy(loadBalancingPolicy string) error {
|
|||
}
|
||||
|
||||
type backendURL struct {
|
||||
brokenDeadline uint64
|
||||
concurrentRequests int32
|
||||
url *url.URL
|
||||
brokenDeadline atomic.Uint64
|
||||
concurrentRequests atomic.Int32
|
||||
|
||||
url *url.URL
|
||||
}
|
||||
|
||||
func (bu *backendURL) isBroken() bool {
|
||||
ct := fasttime.UnixTimestamp()
|
||||
return ct < atomic.LoadUint64(&bu.brokenDeadline)
|
||||
return ct < bu.brokenDeadline.Load()
|
||||
}
|
||||
|
||||
func (bu *backendURL) setBroken() {
|
||||
deadline := fasttime.UnixTimestamp() + uint64((*failTimeout).Seconds())
|
||||
atomic.StoreUint64(&bu.brokenDeadline, deadline)
|
||||
bu.brokenDeadline.Store(deadline)
|
||||
}
|
||||
|
||||
func (bu *backendURL) get() {
|
||||
atomic.AddInt32(&bu.concurrentRequests, 1)
|
||||
bu.concurrentRequests.Add(1)
|
||||
}
|
||||
|
||||
func (bu *backendURL) put() {
|
||||
atomic.AddInt32(&bu.concurrentRequests, -1)
|
||||
bu.concurrentRequests.Add(-1)
|
||||
}
func (up *URLPrefix) getBackendsCount() int {
	return len(up.bus)
	pbus := up.bus.Load()
	return len(*pbus)
}

// getBackendURL returns the backendURL depending on the load balance policy.
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
func (up *URLPrefix) getBackendURL() *backendURL {
	up.discoverBackendIPsIfNeeded()

	pbus := up.bus.Load()
	bus := *pbus
	if up.loadBalancingPolicy == "first_available" {
		return up.getFirstAvailableBackendURL()
		return getFirstAvailableBackendURL(bus)
	}
	return up.getLeastLoadedBackendURL()
	return getLeastLoadedBackendURL(bus, &up.n)
}

func (up *URLPrefix) discoverBackendIPsIfNeeded() {
	if !up.discoverBackendIPs {
		// The discovery is disabled.
		return
	}

	ct := fasttime.UnixTimestamp()
	deadline := up.nextDiscoveryDeadline.Load()
	if ct < deadline {
		// There is no need in discovering backends.
		return
	}

	intervalSec := math.Ceil(discoverBackendIPsInterval.Seconds())
	if intervalSec <= 0 {
		intervalSec = 1
	}
	nextDeadline := ct + uint64(intervalSec)
	if !up.nextDiscoveryDeadline.CompareAndSwap(deadline, nextDeadline) {
		// Concurrent goroutine already started the discovery.
		return
	}

	// Discover ips for all the backendURLs
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(intervalSec))
	hostToIPs := make(map[string][]string)
	for _, bu := range up.busOriginal {
		host := bu.Hostname()
		if hostToIPs[host] != nil {
			// ips for the given host have been already discovered
			continue
		}
		addrs, err := resolver.LookupIPAddr(ctx, host)
		var ips []string
		if err != nil {
			logger.Warnf("cannot discover backend IPs for %s: %s; use it literally", bu, err)
			ips = []string{host}
		} else {
			ips = make([]string, len(addrs))
			for i, addr := range addrs {
				ips[i] = addr.String()
			}
			// sort ips, so they could be compared below in areEqualBackendURLs()
			sort.Strings(ips)
		}
		hostToIPs[host] = ips
	}
	cancel()

	// generate new backendURLs for the resolved IPs
	var busNew []*backendURL
	for _, bu := range up.busOriginal {
		host := bu.Hostname()
		port := bu.Port()
		for _, ip := range hostToIPs[host] {
			buCopy := *bu
			buCopy.Host = ip
			if port != "" {
				buCopy.Host += ":" + port
			}
			busNew = append(busNew, &backendURL{
				url: &buCopy,
			})
		}
	}

	pbus := up.bus.Load()
	if areEqualBackendURLs(*pbus, busNew) {
		return
	}

	// Store new backend urls
	up.bus.Store(&busNew)
}

func areEqualBackendURLs(a, b []*backendURL) bool {
	if len(a) != len(b) {
		return false
	}
	for i, aURL := range a {
		bURL := b[i]
		if aURL.url.String() != bURL.url.String() {
			return false
		}
	}
	return true
}

var resolver = &net.Resolver{
	PreferGo:     true,
	StrictErrors: true,
}
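The discovery routine combines two standard techniques: a CompareAndSwap on the next deadline so that only one goroutine per interval pays for the refresh, and a context-bounded DNS lookup so a slow resolver cannot stall request handling. A self-contained sketch of the lookup half, using only the standard library (host name and timeout are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	r := &net.Resolver{PreferGo: true, StrictErrors: true}

	// Bound the DNS lookup so a slow resolver cannot stall discovery.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	addrs, err := r.LookupIPAddr(ctx, "example.com")
	if err != nil {
		// Fall back to using the host name literally, as the diff does.
		fmt.Println("lookup failed:", err)
		return
	}
	for _, a := range addrs {
		fmt.Println(a.String())
	}
}
```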
// getFirstAvailableBackendURL returns the first available backendURL, which isn't broken.
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
func (up *URLPrefix) getFirstAvailableBackendURL() *backendURL {
	bus := up.bus

func getFirstAvailableBackendURL(bus []*backendURL) *backendURL {
	bu := bus[0]
	if !bu.isBroken() {
		// Fast path - send the request to the first url.
@@ -256,8 +419,7 @@ func (up *URLPrefix) getFirstAvailableBackendURL() *backendURL {
// getLeastLoadedBackendURL returns the backendURL with the minimum number of concurrent requests.
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
func (up *URLPrefix) getLeastLoadedBackendURL() *backendURL {
	bus := up.bus
func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *backendURL {
	if len(bus) == 1 {
		// Fast path - return the only backend url.
		bu := bus[0]
@@ -266,7 +428,7 @@ func (up *URLPrefix) getLeastLoadedBackendURL() *backendURL {
	}

	// Slow path - select other backend urls.
	n := atomic.AddUint32(&up.n, 1)
	n := atomicCounter.Add(1)

	for i := uint32(0); i < uint32(len(bus)); i++ {
		idx := (n + i) % uint32(len(bus))
@@ -274,22 +436,22 @@ func (up *URLPrefix) getLeastLoadedBackendURL() *backendURL {
		if bu.isBroken() {
			continue
		}
		if atomic.LoadInt32(&bu.concurrentRequests) == 0 {
		if bu.concurrentRequests.Load() == 0 {
			// Fast path - return the backend with zero concurrently executed requests.
			// Do not use atomic.CompareAndSwapInt32(), since it is much slower on systems with many CPU cores.
			atomic.AddInt32(&bu.concurrentRequests, 1)
			// Do not use CompareAndSwap() instead of Load(), since it is much slower on systems with many CPU cores.
			bu.concurrentRequests.Add(1)
			return bu
		}
	}

	// Slow path - return the backend with the minimum number of concurrently executed requests.
	buMin := bus[n%uint32(len(bus))]
	minRequests := atomic.LoadInt32(&buMin.concurrentRequests)
	minRequests := buMin.concurrentRequests.Load()
	for _, bu := range bus {
		if bu.isBroken() {
			continue
		}
		if n := atomic.LoadInt32(&bu.concurrentRequests); n < minRequests {
		if n := bu.concurrentRequests.Load(); n < minRequests {
			buMin = bu
			minRequests = n
		}
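Note how the shared atomic counter gives every call a different scan offset, so concurrent callers do not all probe bus[0] first. A stripped-down illustration of that rotation, with plain strings standing in for backends:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// pick rotates the scan start across calls so load spreads evenly.
func pick(backends []string, counter *atomic.Uint32) string {
	n := counter.Add(1)
	idx := n % uint32(len(backends))
	return backends[idx]
}

func main() {
	var counter atomic.Uint32
	backends := []string{"b0", "b1", "b2"}
	for i := 0; i < 5; i++ {
		fmt.Println(pick(backends, &counter)) // b1 b2 b0 b1 b2
	}
}
```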
@@ -304,6 +466,7 @@ func (up *URLPrefix) UnmarshalYAML(f func(interface{}) error) error {
	if err := f(&v); err != nil {
		return err
	}
	up.vOriginal = v

	var urls []string
	switch x := v.(type) {
@@ -326,38 +489,21 @@ func (up *URLPrefix) UnmarshalYAML(f func(interface{}) error) error {
		return fmt.Errorf("unexpected type for `url_prefix`: %T; want string or []string", v)
	}

	bus := make([]*backendURL, len(urls))
	bus := make([]*url.URL, len(urls))
	for i, u := range urls {
		pu, err := url.Parse(u)
		if err != nil {
			return fmt.Errorf("cannot unmarshal %q into url: %w", u, err)
		}
		bus[i] = &backendURL{
			url: pu,
		}
		bus[i] = pu
	}
	up.bus = bus
	up.busOriginal = bus
	return nil
}

// MarshalYAML marshals up to yaml.
func (up *URLPrefix) MarshalYAML() (interface{}, error) {
	var b []byte
	if len(up.bus) == 1 {
		u := up.bus[0].url.String()
		b = strconv.AppendQuote(b, u)
		return string(b), nil
	}
	b = append(b, '[')
	for i, bu := range up.bus {
		u := bu.url.String()
		b = strconv.AppendQuote(b, u)
		if i+1 < len(up.bus) {
			b = append(b, ',')
		}
	}
	b = append(b, ']')
	return string(b), nil
	return up.vOriginal, nil
}

func (r *Regex) match(s string) bool {
@@ -378,12 +524,13 @@ func (r *Regex) UnmarshalYAML(f func(interface{}) error) error {
	if err := f(&s); err != nil {
		return err
	}
	r.sOriginal = s

	sAnchored := "^(?:" + s + ")$"
	re, err := regexp.Compile(sAnchored)
	if err != nil {
		return fmt.Errorf("cannot build regexp from %q: %w", s, err)
	}
	r.sOriginal = s
	r.re = re
	return nil
}
@@ -581,17 +728,9 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
	byAuthToken := make(map[string]*UserInfo, len(uis))
	for i := range uis {
		ui := &uis[i]
		if ui.Username != "" && ui.Password == "" {
			// Do not allow setting username without password if there are other auth configs exist.
			// This should prevent from typical mis-configuration when access by username without password
			// remains open if other authorization schemes are defined.
			if ui.BearerToken != "" {
				return nil, fmt.Errorf("bearer_token=%q and username=%q cannot be set simultaneously", ui.BearerToken, ui.Username)
			}
		}
		ats := getAuthTokens(ui.BearerToken, ui.Username, ui.Password)
		if len(ats) == 0 {
			return nil, fmt.Errorf("one of bearer_token, username or mtls must be set")
		ats, err := getAuthTokens(ui.BearerToken, ui.Username, ui.Password)
		if err != nil {
			return nil, err
		}
		for _, at := range ats {
			if uiOld := byAuthToken[at]; uiOld != nil {
@@ -599,15 +738,10 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
					at, ui.Username, ui.Name, uiOld.Username, uiOld.Name)
			}
		}

		if err := ui.initURLs(); err != nil {
			return nil, err
		}

		if ui.BearerToken != "" && ui.Password != "" {
			return nil, fmt.Errorf("password shouldn't be set for bearer_token %q", ui.BearerToken)
		}

		metricLabels, err := ui.getMetricLabels()
		if err != nil {
			return nil, fmt.Errorf("cannot parse metric_labels: %w", err)
@@ -665,8 +799,9 @@ func (ui *UserInfo) initURLs() error {
	retryStatusCodes := defaultRetryStatusCodes.Values()
	loadBalancingPolicy := *defaultLoadBalancingPolicy
	dropSrcPathPrefixParts := 0
	discoverBackendIPs := *discoverBackendIPsGlobal
	if ui.URLPrefix != nil {
		if err := ui.URLPrefix.sanitize(); err != nil {
		if err := ui.URLPrefix.sanitizeAndInitialize(); err != nil {
			return err
		}
		if ui.RetryStatusCodes != nil {
@@ -678,30 +813,35 @@ func (ui *UserInfo) initURLs() error {
		if ui.DropSrcPathPrefixParts != nil {
			dropSrcPathPrefixParts = *ui.DropSrcPathPrefixParts
		}
		if ui.DiscoverBackendIPs != nil {
			discoverBackendIPs = *ui.DiscoverBackendIPs
		}
		ui.URLPrefix.retryStatusCodes = retryStatusCodes
		ui.URLPrefix.dropSrcPathPrefixParts = dropSrcPathPrefixParts
		ui.URLPrefix.discoverBackendIPs = discoverBackendIPs
		if err := ui.URLPrefix.setLoadBalancingPolicy(loadBalancingPolicy); err != nil {
			return err
		}
	}
	if ui.DefaultURL != nil {
		if err := ui.DefaultURL.sanitize(); err != nil {
		if err := ui.DefaultURL.sanitizeAndInitialize(); err != nil {
			return err
		}
	}
	for _, e := range ui.URLMaps {
		if len(e.SrcPaths) == 0 && len(e.SrcHosts) == 0 {
			return fmt.Errorf("missing `src_paths` and `src_hosts` in `url_map`")
		if len(e.SrcPaths) == 0 && len(e.SrcHosts) == 0 && len(e.SrcQueryArgs) == 0 && len(e.SrcHeaders) == 0 {
			return fmt.Errorf("missing `src_paths`, `src_hosts`, `src_query_args` and `src_headers` in `url_map`")
		}
		if e.URLPrefix == nil {
			return fmt.Errorf("missing `url_prefix` in `url_map`")
		}
		if err := e.URLPrefix.sanitize(); err != nil {
		if err := e.URLPrefix.sanitizeAndInitialize(); err != nil {
			return err
		}
		rscs := retryStatusCodes
		lbp := loadBalancingPolicy
		dsp := dropSrcPathPrefixParts
		dbd := discoverBackendIPs
		if e.RetryStatusCodes != nil {
			rscs = e.RetryStatusCodes
		}
@@ -711,14 +851,18 @@ func (ui *UserInfo) initURLs() error {
		if e.DropSrcPathPrefixParts != nil {
			dsp = *e.DropSrcPathPrefixParts
		}
		if e.DiscoverBackendIPs != nil {
			dbd = *e.DiscoverBackendIPs
		}
		e.URLPrefix.retryStatusCodes = rscs
		if err := e.URLPrefix.setLoadBalancingPolicy(lbp); err != nil {
			return err
		}
		e.URLPrefix.dropSrcPathPrefixParts = dsp
		e.URLPrefix.discoverBackendIPs = dbd
	}
	if len(ui.URLMaps) == 0 && ui.URLPrefix == nil {
		return fmt.Errorf("missing `url_prefix`")
		return fmt.Errorf("missing `url_prefix` or `url_map`")
	}
	return nil
}
@@ -737,18 +881,21 @@ func (ui *UserInfo) name() string {
	return ""
}

func getAuthTokens(bearerToken, username, password string) []string {
	var ats []string
func getAuthTokens(bearerToken, username, password string) ([]string, error) {
	if bearerToken != "" {
		if username != "" || password != "" {
			return nil, fmt.Errorf("username and password cannot be specified if bearer_token is set")
		}
		// Accept the bearerToken as Basic Auth username with empty password
		at1 := getHTTPAuthBearerToken(bearerToken)
		at2 := getHTTPAuthBasicToken(bearerToken, "")
		ats = append(ats, at1, at2)
	} else if username != "" {
		at := getHTTPAuthBasicToken(username, password)
		ats = append(ats, at)
		return []string{at1, at2}, nil
	}
	return ats
	if username != "" {
		at := getHTTPAuthBasicToken(username, password)
		return []string{at}, nil
	}
	return nil, fmt.Errorf("missing authorization options; bearer_token or username must be set")
}
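getAuthTokens now returns explicit errors instead of an empty slice, and each credential maps to one or two precomputed lookup keys. Assuming getHTTPAuthBasicToken follows the standard Basic scheme (base64 of user:password — an assumption, since that helper's body is not part of this diff), the key computation looks roughly like this:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// basicAuthKey sketches a lookup key in the same shape that
// getAuthTokensFromRequest derives from the Authorization header below.
// The exact internal key format is an assumption for illustration.
func basicAuthKey(username, password string) string {
	creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
	return "http_auth:Basic " + creds
}

func main() {
	fmt.Println(basicAuthKey("foo", "bar")) // http_auth:Basic Zm9vOmJhcg==
}
```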
func getHTTPAuthBearerToken(bearerToken string) string {
@@ -764,28 +911,38 @@ func getHTTPAuthBasicToken(username, password string) string {
func getAuthTokensFromRequest(r *http.Request) []string {
	var ats []string

	ah := r.Header.Get("Authorization")
	if ah == "" {
		return ats
	// Obtain possible auth tokens from Authorization header
	if ah := r.Header.Get("Authorization"); ah != "" {
		if strings.HasPrefix(ah, "Token ") {
			// Handle InfluxDB's proprietary token authentication scheme as a bearer token authentication
			// See https://docs.influxdata.com/influxdb/v2.0/api/
			ah = strings.Replace(ah, "Token", "Bearer", 1)
		}
		at := "http_auth:" + ah
		ats = append(ats, at)
	}
	if strings.HasPrefix(ah, "Token ") {
		// Handle InfluxDB's proprietary token authentication scheme as a bearer token authentication
		// See https://docs.influxdata.com/influxdb/v2.0/api/
		ah = strings.Replace(ah, "Token", "Bearer", 1)
	}
	at := "http_auth:" + ah
	ats = append(ats, at)

	return ats
}

func (up *URLPrefix) sanitize() error {
	for _, bu := range up.bus {
		puNew, err := sanitizeURLPrefix(bu.url)
func (up *URLPrefix) sanitizeAndInitialize() error {
	for i, bu := range up.busOriginal {
		puNew, err := sanitizeURLPrefix(bu)
		if err != nil {
			return err
		}
		bu.url = puNew
		up.busOriginal[i] = puNew
	}

	// Initialize up.bus
	bus := make([]*backendURL, len(up.busOriginal))
	for i, bu := range up.busOriginal {
		bus[i] = &backendURL{
			url: bu,
		}
	}
	up.bus.Store(&bus)

	return nil
}
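sanitizeAndInitialize seeds up.bus, which the DNS discovery later replaces wholesale — a copy-on-write snapshot behind an atomic pointer, so readers never observe a half-updated backend list. A minimal standalone sketch of the same pattern (type and field names are illustrative):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type backend struct{ addr string }

func main() {
	var bus atomic.Pointer[[]*backend]

	// Writers build a fresh slice and swap the pointer in one step.
	initial := []*backend{{addr: "10.0.0.1:8428"}}
	bus.Store(&initial)

	// Readers load the pointer once and iterate a stable snapshot.
	snapshot := *bus.Load()
	for _, b := range snapshot {
		fmt.Println(b.addr)
	}
}
```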

@@ -17,9 +17,9 @@ func TestParseAuthConfigFailure(t *testing.T) {
	if err != nil {
		return
	}
	_, err = parseAuthConfigUsers(ac)
	users, err := parseAuthConfigUsers(ac)
	if err == nil {
		t.Fatalf("expecting non-nil error")
		t.Fatalf("expecting non-nil error; got %v", users)
	}
}

@@ -192,7 +192,7 @@ users:
  - url_prefix: http://foobar
`)

	// Invalid regexp in src_path.
	// Invalid regexp in src_paths
	f(`
users:
- username: a
@@ -210,6 +210,24 @@ users:
    url_prefix: http://foobar
`)

	// Invalid src_query_args
	f(`
users:
- username: a
  url_map:
  - src_query_args: abc
    url_prefix: http://foobar
`)

	// Invalid src_headers
	f(`
users:
- username: a
  url_map:
  - src_headers: abc
    url_prefix: http://foobar
`)

	// Invalid headers in url_map (missing ':')
	f(`
users:
@@ -310,7 +328,7 @@ users:
- username: foo
  url_prefix: http://foo
- username: bar
  url_prefix: https://bar/x///
  url_prefix: https://bar/x/
`, map[string]*UserInfo{
		getHTTPAuthBasicToken("foo", ""): {
			Username: "foo",
@@ -318,11 +336,52 @@ users:
		},
		getHTTPAuthBasicToken("bar", ""): {
			Username:  "bar",
			URLPrefix: mustParseURL("https://bar/x"),
			URLPrefix: mustParseURL("https://bar/x/"),
		},
	})

	// non-empty URLMap
	sharedUserInfo := &UserInfo{
		BearerToken: "foo",
		URLMaps: []URLMap{
			{
				SrcPaths:  getRegexs([]string{"/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^./]+/.+"}),
				URLPrefix: mustParseURL("http://vmselect/select/0/prometheus"),
			},
			{
				SrcHosts: getRegexs([]string{"foo\\.bar", "baz:1234"}),
				SrcPaths: getRegexs([]string{"/api/v1/write"}),
				SrcQueryArgs: []QueryArg{
					{
						Name:  "foo",
						Value: "bar",
					},
				},
				SrcHeaders: []Header{
					{
						Name:  "TenantID",
						Value: "345",
					},
				},
				URLPrefix: mustParseURLs([]string{
					"http://vminsert1/insert/0/prometheus",
					"http://vminsert2/insert/0/prometheus",
				}),
				HeadersConf: HeadersConf{
					RequestHeaders: []Header{
						{
							Name:  "foo",
							Value: "bar",
						},
						{
							Name:  "xxx",
							Value: "y",
						},
					},
				},
			},
		},
	}
	f(`
users:
- bearer_token: foo
@@ -331,70 +390,17 @@ users:
    url_prefix: http://vmselect/select/0/prometheus
  - src_paths: ["/api/v1/write"]
    src_hosts: ["foo\\.bar", "baz:1234"]
    src_query_args: ['foo=bar']
    src_headers: ['TenantID: 345']
    url_prefix: ["http://vminsert1/insert/0/prometheus","http://vminsert2/insert/0/prometheus"]
    headers:
    - "foo: bar"
    - "xxx: y"
`, map[string]*UserInfo{
		getHTTPAuthBearerToken("foo"): {
			BearerToken: "foo",
			URLMaps: []URLMap{
				{
					SrcPaths:  getRegexs([]string{"/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^./]+/.+"}),
					URLPrefix: mustParseURL("http://vmselect/select/0/prometheus"),
				},
				{
					SrcHosts: getRegexs([]string{"foo\\.bar", "baz:1234"}),
					SrcPaths: getRegexs([]string{"/api/v1/write"}),
					URLPrefix: mustParseURLs([]string{
						"http://vminsert1/insert/0/prometheus",
						"http://vminsert2/insert/0/prometheus",
					}),
					HeadersConf: HeadersConf{
						RequestHeaders: []Header{
							{
								Name:  "foo",
								Value: "bar",
							},
							{
								Name:  "xxx",
								Value: "y",
							},
						},
					},
				},
			},
		},
		getHTTPAuthBasicToken("foo", ""): {
			BearerToken: "foo",
			URLMaps: []URLMap{
				{
					SrcPaths:  getRegexs([]string{"/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^./]+/.+"}),
					URLPrefix: mustParseURL("http://vmselect/select/0/prometheus"),
				},
				{
					SrcHosts: getRegexs([]string{"foo\\.bar", "baz:1234"}),
					SrcPaths: getRegexs([]string{"/api/v1/write"}),
					URLPrefix: mustParseURLs([]string{
						"http://vminsert1/insert/0/prometheus",
						"http://vminsert2/insert/0/prometheus",
					}),
					HeadersConf: HeadersConf{
						RequestHeaders: []Header{
							{
								Name:  "foo",
								Value: "bar",
							},
							{
								Name:  "xxx",
								Value: "y",
							},
						},
					},
				},
			},
		},
		getHTTPAuthBearerToken("foo"):    sharedUserInfo,
		getHTTPAuthBasicToken("foo", ""): sharedUserInfo,
	})

	// Multiple users with the same name - this should work, since these users have different passwords
	f(`
users:
@@ -403,7 +409,7 @@ users:
    url_prefix: http://foo
- username: foo-same
  password: bar
  url_prefix: https://bar/x///
  url_prefix: https://bar/x
`, map[string]*UserInfo{
		getHTTPAuthBasicToken("foo-same", "baz"): {
			Username: "foo-same",
@@ -498,6 +504,7 @@ users:
			}),
		},
	})

	// With metric_labels
	f(`
users:
@@ -509,7 +516,7 @@ users:
      team: dev
- username: foo-same
  password: bar
  url_prefix: https://bar/x///
  url_prefix: https://bar/x
  metric_labels:
    backend_env: test
    team: accounting
@@ -694,6 +701,7 @@ func mustParseURL(u string) *URLPrefix {

func mustParseURLs(us []string) *URLPrefix {
	bus := make([]*backendURL, len(us))
	urls := make([]*url.URL, len(us))
	for i, u := range us {
		pu, err := url.Parse(u)
		if err != nil {
@@ -702,10 +710,17 @@ func mustParseURLs(us []string) *URLPrefix {
		bus[i] = &backendURL{
			url: pu,
		}
		urls[i] = pu
	}
	return &URLPrefix{
		bus: bus,
	up := &URLPrefix{}
	if len(us) == 1 {
		up.vOriginal = us[0]
	} else {
		up.vOriginal = us
	}
	up.bus.Store(&bus)
	up.busOriginal = urls
	return up
}

func intp(n int) *int {

@@ -13,6 +13,7 @@ import (
	"net/textproto"
	"net/url"
	"os"
	"slices"
	"strings"
	"sync"
	"time"
@@ -183,7 +184,7 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {

func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
	u := normalizeURL(r.URL)
	up, hc := ui.getURLPrefixAndHeaders(u)
	up, hc := ui.getURLPrefixAndHeaders(u, r.Header)
	isDefault := false
	if up == nil {
		if ui.DefaultURL == nil {
@@ -238,7 +239,14 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
	// This code has been copied from net/http/httputil/reverseproxy.go
	req := sanitizeRequestHeaders(r)
	req.URL = targetURL
	req.Host = targetURL.Host

	if req.URL.Scheme == "https" {
		// Override req.Host only for https requests, since https server verifies hostnames during TLS handshake,
		// so it expects the targetURL.Host in the request.
		// There is no need in overriding the req.Host for http requests, since it is expected that backend server
		// may properly process queries with the original req.Host.
		req.Host = targetURL.Host
	}
	updateHeadersByConfig(req.Header, hc.RequestHeaders)
	res, err := ui.httpTransport.RoundTrip(req)
	rtb, rtbOK := req.Body.(*readTrackingBody)
@@ -271,7 +279,7 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
		logger.Warnf("remoteAddr: %s; requestURI: %s; retrying the request to %s because of response error: %s", remoteAddr, req.URL, targetURL, err)
		return false
	}
	if hasInt(retryStatusCodes, res.StatusCode) {
	if slices.Contains(retryStatusCodes, res.StatusCode) {
		_ = res.Body.Close()
		if !rtbOK || !rtb.canRetry() {
			// If we get an error from the retry_status_codes list, but cannot execute retry,
@@ -313,15 +321,6 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
	return true
}

func hasInt(a []int, n int) bool {
	for _, x := range a {
		if x == n {
			return true
		}
	}
	return false
}
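hasInt could be dropped because the generic slices package entered the Go standard library in Go 1.21. A quick demonstration of the one-for-one replacement:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	retryStatusCodes := []int{500, 502, 503}

	// slices.Contains replaces the hand-rolled hasInt loop one-for-one.
	fmt.Println(slices.Contains(retryStatusCodes, 502)) // true
	fmt.Println(slices.Contains(retryStatusCodes, 200)) // false
}
```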
var copyBufPool bytesutil.ByteBufferPool

func copyHeader(dst, src http.Header) {

@@ -1,8 +1,10 @@
package main

import (
	"net/http"
	"net/url"
	"path"
	"slices"
	"strings"
)

@@ -49,11 +51,22 @@ func dropPrefixParts(path string, parts int) string {
	return path
}

func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL) (*URLPrefix, HeadersConf) {
func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL, h http.Header) (*URLPrefix, HeadersConf) {
	for _, e := range ui.URLMaps {
		if matchAnyRegex(e.SrcHosts, u.Host) && matchAnyRegex(e.SrcPaths, u.Path) {
			return e.URLPrefix, e.HeadersConf
		if !matchAnyRegex(e.SrcHosts, u.Host) {
			continue
		}
		if !matchAnyRegex(e.SrcPaths, u.Path) {
			continue
		}
		if !matchAnyQueryArg(e.SrcQueryArgs, u.Query()) {
			continue
		}
		if !matchAnyHeader(e.SrcHeaders, h) {
			continue
		}

		return e.URLPrefix, e.HeadersConf
	}
	if ui.URLPrefix != nil {
		return ui.URLPrefix, ui.HeadersConf
@@ -73,6 +86,30 @@ func matchAnyRegex(rs []*Regex, s string) bool {
	return false
}

func matchAnyQueryArg(qas []QueryArg, args url.Values) bool {
	if len(qas) == 0 {
		return true
	}
	for _, qa := range qas {
		if slices.Contains(args[qa.Name], qa.Value) {
			return true
		}
	}
	return false
}

func matchAnyHeader(headers []Header, h http.Header) bool {
	if len(headers) == 0 {
		return true
	}
	for _, header := range headers {
		if slices.Contains(h.Values(header.Name), header.Value) {
			return true
		}
	}
	return false
}
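Both matchers rely on the multi-value nature of url.Values and http.Header: a request may repeat the same query arg or header, and matching any single value is enough. A standalone check of that behavior, including the case-insensitive header lookup:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"slices"
)

func main() {
	args, _ := url.ParseQuery("db=foo&db=bar")
	fmt.Println(slices.Contains(args["db"], "bar")) // true: any value matches

	h := http.Header{}
	h.Add("TenantID", "123")
	h.Add("TenantID", "345")
	// h.Values canonicalizes the header name, so lookups are case-insensitive.
	fmt.Println(slices.Contains(h.Values("tenantid"), "345")) // true
}
```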
func normalizeURL(uOrig *url.URL) *url.URL {
	u := *uOrig
	// Prevent from attacks with using `..` in r.URL.Path

@@ -4,6 +4,7 @@ import (
	"fmt"
	"net/url"
	"reflect"
	"strings"
	"testing"
)

@@ -89,19 +90,21 @@ func TestCreateTargetURLSuccess(t *testing.T) {
			t.Fatalf("cannot parse %q: %s", requestURI, err)
		}
		u = normalizeURL(u)
		up, hc := ui.getURLPrefixAndHeaders(u)
		up, hc := ui.getURLPrefixAndHeaders(u, nil)
		if up == nil {
			t.Fatalf("cannot determine backend: %s", err)
		}
		bu := up.getLeastLoadedBackendURL()
		bu := up.getBackendURL()
		target := mergeURLs(bu.url, u, up.dropSrcPathPrefixParts)
		bu.put()
		if target.String() != expectedTarget {
			t.Fatalf("unexpected target; got %q; want %q", target, expectedTarget)
		}
		headersStr := fmt.Sprintf("%q", hc.RequestHeaders)
		if headersStr != expectedRequestHeaders {
			t.Fatalf("unexpected request headers; got %s; want %s", headersStr, expectedRequestHeaders)
		if s := headersToString(hc.RequestHeaders); s != expectedRequestHeaders {
			t.Fatalf("unexpected request headers; got %q; want %q", s, expectedRequestHeaders)
		}
		if s := headersToString(hc.ResponseHeaders); s != expectedResponseHeaders {
			t.Fatalf("unexpected response headers; got %q; want %q", s, expectedResponseHeaders)
		}
		if !reflect.DeepEqual(up.retryStatusCodes, expectedRetryStatusCodes) {
			t.Fatalf("unexpected retryStatusCodes; got %d; want %d", up.retryStatusCodes, expectedRetryStatusCodes)
@@ -116,41 +119,55 @@ func TestCreateTargetURLSuccess(t *testing.T) {
	// Simple routing with `url_prefix`
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar"),
	}, "", "http://foo.bar/.", "[]", "[]", nil, "least_loaded", 0)
	}, "", "http://foo.bar/.", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar"),
		HeadersConf: HeadersConf{
			RequestHeaders: []Header{{
				Name:  "bb",
				Value: "aaa",
			}},
			RequestHeaders: []Header{
				{
					Name:  "bb",
					Value: "aaa",
				},
			},
			ResponseHeaders: []Header{
				{
					Name:  "x",
					Value: "y",
				},
			},
		},
		RetryStatusCodes:       []int{503, 501},
		LoadBalancingPolicy:    "first_available",
		DropSrcPathPrefixParts: intp(2),
	}, "/a/b/c", "http://foo.bar/c", `[{"bb" "aaa"}]`, `[]`, []int{503, 501}, "first_available", 2)
	}, "/a/b/c", "http://foo.bar/c", `bb: aaa`, `x: y`, []int{503, 501}, "first_available", 2)
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar/federate"),
	}, "/", "http://foo.bar/federate", "[]", "[]", nil, "least_loaded", 0)
	}, "/", "http://foo.bar/federate", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar"),
	}, "a/b?c=d", "http://foo.bar/a/b?c=d", "[]", "[]", nil, "least_loaded", 0)
	}, "a/b?c=d", "http://foo.bar/a/b?c=d", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("https://sss:3894/x/y"),
	}, "/z", "https://sss:3894/x/y/z", "[]", "[]", nil, "least_loaded", 0)
	}, "/z", "https://sss:3894/x/y/z", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("https://sss:3894/x/y"),
	}, "/../../aaa", "https://sss:3894/x/y/aaa", "[]", "[]", nil, "least_loaded", 0)
	}, "/../../aaa", "https://sss:3894/x/y/aaa", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("https://sss:3894/x/y"),
	}, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s%2F..%2Fd", "[]", "[]", nil, "least_loaded", 0)
	}, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s%2F..%2Fd", "", "", nil, "least_loaded", 0)

	// Complex routing with `url_map`
	ui := &UserInfo{
		URLMaps: []URLMap{
			{
				SrcHosts: getRegexs([]string{"host42"}),
				SrcPaths: getRegexs([]string{"/vmsingle/api/v1/query"}),
				SrcHosts: getRegexs([]string{"host42"}),
				SrcPaths: getRegexs([]string{"/vmsingle/api/v1/query"}),
				SrcQueryArgs: []QueryArg{
					{
						Name:  "db",
						Value: "foo",
					},
				},
				URLPrefix: mustParseURL("http://vmselect/0/prometheus"),
				HeadersConf: HeadersConf{
					RequestHeaders: []Header{
@@ -195,12 +212,12 @@ func TestCreateTargetURLSuccess(t *testing.T) {
		RetryStatusCodes:       []int{502},
		DropSrcPathPrefixParts: intp(2),
	}
	f(ui, "http://host42/vmsingle/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up",
		`[{"xx" "aa"} {"yy" "asdf"}]`, `[{"qwe" "rty"}]`, []int{503, 500, 501}, "first_available", 1)
	f(ui, "http://host42/vmsingle/api/v1/query?query=up&db=foo", "http://vmselect/0/prometheus/api/v1/query?db=foo&query=up",
		"xx: aa\nyy: asdf", "qwe: rty", []int{503, 500, 501}, "first_available", 1)
	f(ui, "http://host123/vmsingle/api/v1/query?query=up", "http://default-server/v1/query?query=up",
		`[{"bb" "aaa"}]`, `[{"x" "y"}]`, []int{502}, "least_loaded", 2)
	f(ui, "https://foo-host/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]", "[]", []int{}, "least_loaded", 0)
	f(ui, "https://foo-host/foo/bar/api/v1/query_range", "http://default-server/api/v1/query_range", `[{"bb" "aaa"}]`, `[{"x" "y"}]`, []int{502}, "least_loaded", 2)
		"bb: aaa", "x: y", []int{502}, "least_loaded", 2)
	f(ui, "https://foo-host/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "", "", []int{}, "least_loaded", 0)
	f(ui, "https://foo-host/foo/bar/api/v1/query_range", "http://default-server/api/v1/query_range", "bb: aaa", "x: y", []int{502}, "least_loaded", 2)

	// Complex routing regexp paths in `url_map`
	ui = &UserInfo{
@@ -220,19 +237,19 @@ func TestCreateTargetURLSuccess(t *testing.T) {
		},
		URLPrefix: mustParseURL("http://default-server"),
	}
	f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", "[]", "[]", nil, "least_loaded", 0)
	f(ui, "/api/v1/query_range?query=up", "http://vmselect/0/prometheus/api/v1/query_range?query=up", "[]", "[]", nil, "least_loaded", 0)
	f(ui, "/api/v1/label/foo/values", "http://vmselect/0/prometheus/api/v1/label/foo/values", "[]", "[]", nil, "least_loaded", 0)
	f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]", "[]", nil, "least_loaded", 0)
	f(ui, "/api/v1/foo/bar", "http://default-server/api/v1/foo/bar", "[]", "[]", nil, "least_loaded", 0)
	f(ui, "https://vmui.foobar.com/a/b?c=d", "http://vmui.host:1234/vmui/a/b?c=d", "[]", "[]", nil, "least_loaded", 0)
	f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", "", "", nil, "least_loaded", 0)
	f(ui, "/api/v1/query_range?query=up", "http://vmselect/0/prometheus/api/v1/query_range?query=up", "", "", nil, "least_loaded", 0)
	f(ui, "/api/v1/label/foo/values", "http://vmselect/0/prometheus/api/v1/label/foo/values", "", "", nil, "least_loaded", 0)
	f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "", "", nil, "least_loaded", 0)
	f(ui, "/api/v1/foo/bar", "http://default-server/api/v1/foo/bar", "", "", nil, "least_loaded", 0)
	f(ui, "https://vmui.foobar.com/a/b?c=d", "http://vmui.host:1234/vmui/a/b?c=d", "", "", nil, "least_loaded", 0)

	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar?extra_label=team=dev"),
	}, "/api/v1/query", "http://foo.bar/api/v1/query?extra_label=team=dev", "[]", "[]", nil, "least_loaded", 0)
	}, "/api/v1/query", "http://foo.bar/api/v1/query?extra_label=team=dev", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar?extra_label=team=mobile"),
	}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "[]", "[]", nil, "least_loaded", 0)
	}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "", "", nil, "least_loaded", 0)
}

func TestCreateTargetURLFailure(t *testing.T) {
@@ -243,7 +260,7 @@ func TestCreateTargetURLFailure(t *testing.T) {
		t.Fatalf("cannot parse %q: %s", requestURI, err)
	}
	u = normalizeURL(u)
	up, hc := ui.getURLPrefixAndHeaders(u)
	up, hc := ui.getURLPrefixAndHeaders(u, nil)
	if up != nil {
		t.Fatalf("unexpected non-empty up=%#v", up)
	}
@@ -264,3 +281,11 @@ func TestCreateTargetURLFailure(t *testing.T) {
		},
	}, "/api/v1/write")
}

func headersToString(hs []Header) string {
	a := make([]string, len(hs))
	for i, h := range hs {
		a[i] = fmt.Sprintf("%s: %s", h.Name, h.Value)
	}
	return strings.Join(a, "\n")
}

@@ -40,6 +40,11 @@ const (
	vmSignificantFigures = "vm-significant-figures"
	vmRoundDigits        = "vm-round-digits"
	vmDisableProgressBar = "vm-disable-progress-bar"
	vmCertFile           = "vm-cert-file"
	vmKeyFile            = "vm-key-file"
	vmCAFile             = "vm-CA-file"
	vmServerName         = "vm-server-name"
	vmInsecureSkipVerify = "vm-insecure-skip-verify"

	// also used in vm-native
	vmExtraLabel = "vm-extra-label"
@@ -119,19 +124,45 @@ var (
			Name:  vmDisableProgressBar,
			Usage: "Whether to disable progress bar per each worker during the import.",
		},
		&cli.StringFlag{
			Name:  vmCertFile,
			Usage: "Optional path to client-side TLS certificate file to use when connecting to '--vmAddr'",
		},
		&cli.StringFlag{
			Name:  vmKeyFile,
			Usage: "Optional path to client-side TLS key to use when connecting to '--vmAddr'",
		},
		&cli.StringFlag{
			Name:  vmCAFile,
			Usage: "Optional path to TLS CA file to use for verifying connections to '--vmAddr'. By default, system CA is used",
		},
		&cli.StringFlag{
			Name:  vmServerName,
			Usage: "Optional TLS server name to use for connections to '--vmAddr'. By default, the server name from '--vmAddr' is used",
		},
		&cli.BoolFlag{
			Name:  vmInsecureSkipVerify,
			Usage: "Whether to skip tls verification when connecting to '--vmAddr'",
			Value: false,
		},
	}
)
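These flags map onto the standard crypto/tls building blocks. A hedged sketch of what a helper consuming them would typically do with the standard library (the flag-to-field mapping is an assumption for illustration; VictoriaMetrics' own httputils helper may differ in details):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

// tlsConfigFromFlags shows how vm-cert-file, vm-key-file, vm-CA-file,
// vm-server-name and vm-insecure-skip-verify would typically be combined.
func tlsConfigFromFlags(certFile, keyFile, caFile, serverName string, insecureSkipVerify bool) (*tls.Config, error) {
	cfg := &tls.Config{
		ServerName:         serverName,
		InsecureSkipVerify: insecureSkipVerify,
	}
	if certFile != "" {
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return nil, fmt.Errorf("cannot load client cert: %w", err)
		}
		cfg.Certificates = []tls.Certificate{cert}
	}
	if caFile != "" {
		pem, err := os.ReadFile(caFile)
		if err != nil {
			return nil, fmt.Errorf("cannot read CA file: %w", err)
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(pem) {
			return nil, fmt.Errorf("cannot parse CA file %q", caFile)
		}
		cfg.RootCAs = pool
	}
	return cfg, nil
}

func main() {
	cfg, err := tlsConfigFromFlags("", "", "", "victoria.example.com", false)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.ServerName)
}
```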
const (
	otsdbAddr        = "otsdb-addr"
	otsdbConcurrency = "otsdb-concurrency"
	otsdbQueryLimit  = "otsdb-query-limit"
	otsdbOffsetDays  = "otsdb-offset-days"
	otsdbHardTSStart = "otsdb-hard-ts-start"
	otsdbRetentions  = "otsdb-retentions"
	otsdbFilters     = "otsdb-filters"
	otsdbNormalize   = "otsdb-normalize"
	otsdbMsecsTime   = "otsdb-msecstime"
	otsdbAddr               = "otsdb-addr"
	otsdbConcurrency        = "otsdb-concurrency"
	otsdbQueryLimit         = "otsdb-query-limit"
	otsdbOffsetDays         = "otsdb-offset-days"
	otsdbHardTSStart        = "otsdb-hard-ts-start"
	otsdbRetentions         = "otsdb-retentions"
	otsdbFilters            = "otsdb-filters"
	otsdbNormalize          = "otsdb-normalize"
	otsdbMsecsTime          = "otsdb-msecstime"
	otsdbCertFile           = "otsdb-cert-file"
	otsdbKeyFile            = "otsdb-key-file"
	otsdbCAFile             = "otsdb-CA-file"
	otsdbServerName         = "otsdb-server-name"
	otsdbInsecureSkipVerify = "otsdb-insecure-skip-verify"
)

var (
@@ -191,6 +222,27 @@ var (
			Value: false,
			Usage: "Whether to normalize all data received to lower case before forwarding to VictoriaMetrics",
		},
		&cli.StringFlag{
			Name:  otsdbCertFile,
			Usage: "Optional path to client-side TLS certificate file to use when connecting to -otsdb-addr",
		},
		&cli.StringFlag{
			Name:  otsdbKeyFile,
			Usage: "Optional path to client-side TLS key to use when connecting to -otsdb-addr",
		},
		&cli.StringFlag{
			Name:  otsdbCAFile,
			Usage: "Optional path to TLS CA file to use for verifying connections to -otsdb-addr. By default, system CA is used",
		},
		&cli.StringFlag{
			Name:  otsdbServerName,
			Usage: "Optional TLS server name to use for connections to -otsdb-addr. By default, the server name from -otsdb-addr is used",
		},
		&cli.BoolFlag{
			Name:  otsdbInsecureSkipVerify,
			Usage: "Whether to skip tls verification when connecting to -otsdb-addr",
			Value: false,
		},
	}
)

@@ -361,6 +413,10 @@ const (
	vmNativeSrcPassword    = "vm-native-src-password"
	vmNativeSrcHeaders     = "vm-native-src-headers"
	vmNativeSrcBearerToken = "vm-native-src-bearer-token"
	vmNativeSrcCertFile           = "vm-native-src-cert-file"
	vmNativeSrcKeyFile            = "vm-native-src-key-file"
	vmNativeSrcCAFile             = "vm-native-src-ca-file"
	vmNativeSrcServerName         = "vm-native-src-server-name"
	vmNativeSrcInsecureSkipVerify = "vm-native-src-insecure-skip-verify"

	vmNativeDstAddr = "vm-native-dst-addr"
@@ -368,6 +424,10 @@ const (
	vmNativeDstPassword    = "vm-native-dst-password"
	vmNativeDstHeaders     = "vm-native-dst-headers"
	vmNativeDstBearerToken = "vm-native-dst-bearer-token"
	vmNativeDstCertFile           = "vm-native-dst-cert-file"
	vmNativeDstKeyFile            = "vm-native-dst-key-file"
	vmNativeDstCAFile             = "vm-native-dst-ca-file"
	vmNativeDstServerName         = "vm-native-dst-server-name"
	vmNativeDstInsecureSkipVerify = "vm-native-dst-insecure-skip-verify"
)

@@ -432,6 +492,28 @@ var (
			Name:  vmNativeSrcBearerToken,
			Usage: "Optional bearer auth token to use for the corresponding `--vm-native-src-addr`",
		},
		&cli.StringFlag{
			Name:  vmNativeSrcCertFile,
			Usage: "Optional path to client-side TLS certificate file to use when connecting to `--vm-native-src-addr`",
		},
		&cli.StringFlag{
			Name:  vmNativeSrcKeyFile,
			Usage: "Optional path to client-side TLS key to use when connecting to `--vm-native-src-addr`",
		},
		&cli.StringFlag{
			Name:  vmNativeSrcCAFile,
			Usage: "Optional path to TLS CA file to use for verifying connections to `--vm-native-src-addr`. By default, system CA is used",
		},
		&cli.StringFlag{
			Name:  vmNativeSrcServerName,
			Usage: "Optional TLS server name to use for connections to `--vm-native-src-addr`. By default, the server name from `--vm-native-src-addr` is used",
		},
		&cli.BoolFlag{
			Name:  vmNativeSrcInsecureSkipVerify,
			Usage: "Whether to skip TLS certificate verification when connecting to `--vm-native-src-addr`",
			Value: false,
		},

		&cli.StringFlag{
			Name: vmNativeDstAddr,
			Usage: "VictoriaMetrics address to perform import to. \n" +
@@ -459,6 +541,28 @@ var (
			Name:  vmNativeDstBearerToken,
			Usage: "Optional bearer auth token to use for the corresponding `--vm-native-dst-addr`",
		},
		&cli.StringFlag{
			Name:  vmNativeDstCertFile,
			Usage: "Optional path to client-side TLS certificate file to use when connecting to `--vm-native-dst-addr`",
		},
		&cli.StringFlag{
			Name:  vmNativeDstKeyFile,
			Usage: "Optional path to client-side TLS key to use when connecting to `--vm-native-dst-addr`",
		},
		&cli.StringFlag{
			Name:  vmNativeDstCAFile,
			Usage: "Optional path to TLS CA file to use for verifying connections to `--vm-native-dst-addr`. By default, system CA is used",
		},
		&cli.StringFlag{
			Name:  vmNativeDstServerName,
			Usage: "Optional TLS server name to use for connections to `--vm-native-dst-addr`. By default, the server name from `--vm-native-dst-addr` is used",
		},
		&cli.BoolFlag{
			Name:  vmNativeDstInsecureSkipVerify,
			Usage: "Whether to skip TLS certificate verification when connecting to `--vm-native-dst-addr`",
			Value: false,
		},

		&cli.StringSliceFlag{
			Name:  vmExtraLabel,
			Value: nil,
@@ -494,16 +598,6 @@ var (
			"Non-binary export/import API is less efficient, but supports deduplication if it is configured on vm-native-src-addr side.",
			Value: false,
		},
		&cli.BoolFlag{
			Name:  vmNativeSrcInsecureSkipVerify,
			Usage: "Whether to skip TLS certificate verification when connecting to the source address",
			Value: false,
		},
		&cli.BoolFlag{
			Name:  vmNativeDstInsecureSkipVerify,
			Usage: "Whether to skip TLS certificate verification when connecting to the destination address",
			Value: false,
		},
	}
)

@@ -2,7 +2,6 @@ package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"log"
	"net/http"
@@ -50,8 +49,20 @@ func main() {
			Action: func(c *cli.Context) error {
				fmt.Println("OpenTSDB import mode")

				// create Transport with given TLS config
				certFile := c.String(otsdbCertFile)
				keyFile := c.String(otsdbKeyFile)
				caFile := c.String(otsdbCAFile)
				serverName := c.String(otsdbServerName)
				insecureSkipVerify := c.Bool(otsdbInsecureSkipVerify)
				addr := c.String(otsdbAddr)

				tr, err := httputils.Transport(addr, certFile, keyFile, caFile, serverName, insecureSkipVerify)
				if err != nil {
					return fmt.Errorf("failed to create Transport: %s", err)
				}
				oCfg := opentsdb.Config{
					Addr:   c.String(otsdbAddr),
					Addr:   addr,
					Limit:  c.Int(otsdbQueryLimit),
					Offset: c.Int64(otsdbOffsetDays),
					HardTS: c.Int64(otsdbHardTSStart),
@@ -59,13 +70,17 @@ func main() {
					Filters:   c.StringSlice(otsdbFilters),
					Normalize: c.Bool(otsdbNormalize),
					MsecsTime: c.Bool(otsdbMsecsTime),
					Transport: tr,
				}
				otsdbClient, err := opentsdb.NewClient(oCfg)
				if err != nil {
					return fmt.Errorf("failed to create opentsdb client: %s", err)
				}

				vmCfg := initConfigVM(c)
				vmCfg, err := initConfigVM(c)
				if err != nil {
					return fmt.Errorf("failed to init VM configuration: %s", err)
				}
				// disable progress bars since openTSDB implementation
				// does not use progress bar pool
				vmCfg.DisableProgressBar = true
@@ -92,7 +107,7 @@ func main() {
				serverName := c.String(influxServerName)
				insecureSkipVerify := c.Bool(influxInsecureSkipVerify)

				tc, err := httputils.TLSConfig(certFile, caFile, keyFile, serverName, insecureSkipVerify)
				tc, err := httputils.TLSConfig(certFile, keyFile, caFile, serverName, insecureSkipVerify)
				if err != nil {
					return fmt.Errorf("failed to create TLS Config: %s", err)
				}
@@ -117,7 +132,10 @@ func main() {
					return fmt.Errorf("failed to create influx client: %s", err)
				}

				vmCfg := initConfigVM(c)
				vmCfg, err := initConfigVM(c)
				if err != nil {
					return fmt.Errorf("failed to init VM configuration: %s", err)
				}
				importer, err = vm.NewImporter(ctx, vmCfg)
				if err != nil {
					return fmt.Errorf("failed to create VM importer: %s", err)
@@ -140,28 +158,42 @@ func main() {
			Usage: "Migrate time series via Prometheus remote-read protocol",
			Flags: mergeFlags(globalFlags, remoteReadFlags, vmFlags),
			Action: func(c *cli.Context) error {
				fmt.Println("Remote-read import mode")

				addr := c.String(remoteReadSrcAddr)

				// create TLS config
				certFile := c.String(remoteReadCertFile)
				keyFile := c.String(remoteReadKeyFile)
				caFile := c.String(remoteReadCAFile)
				serverName := c.String(remoteReadServerName)
				insecureSkipVerify := c.Bool(remoteReadInsecureSkipVerify)

				tr, err := httputils.Transport(addr, certFile, keyFile, caFile, serverName, insecureSkipVerify)
				if err != nil {
					return fmt.Errorf("failed to create transport: %s", err)
				}

				rr, err := remoteread.NewClient(remoteread.Config{
					Addr:               c.String(remoteReadSrcAddr),
					Username:           c.String(remoteReadUser),
					Password:           c.String(remoteReadPassword),
					Timeout:            c.Duration(remoteReadHTTPTimeout),
					UseStream:          c.Bool(remoteReadUseStream),
					Headers:            c.String(remoteReadHeaders),
					LabelName:          c.String(remoteReadFilterLabel),
					LabelValue:         c.String(remoteReadFilterLabelValue),
					CertFile:           c.String(remoteReadCertFile),
					KeyFile:            c.String(remoteReadKeyFile),
					CAFile:             c.String(remoteReadCAFile),
					ServerName:         c.String(remoteReadServerName),
					InsecureSkipVerify: c.Bool(remoteReadInsecureSkipVerify),
					DisablePathAppend:  c.Bool(remoteReadDisablePathAppend),
					Addr:              addr,
					Transport:         tr,
					Username:          c.String(remoteReadUser),
					Password:          c.String(remoteReadPassword),
					Timeout:           c.Duration(remoteReadHTTPTimeout),
					UseStream:         c.Bool(remoteReadUseStream),
					Headers:           c.String(remoteReadHeaders),
					LabelName:         c.String(remoteReadFilterLabel),
					LabelValue:        c.String(remoteReadFilterLabelValue),
					DisablePathAppend: c.Bool(remoteReadDisablePathAppend),
				})
				if err != nil {
					return fmt.Errorf("error create remote read client: %s", err)
				}

				vmCfg := initConfigVM(c)

				vmCfg, err := initConfigVM(c)
				if err != nil {
					return fmt.Errorf("failed to init VM configuration: %s", err)
				}
				importer, err := vm.NewImporter(ctx, vmCfg)
				if err != nil {
					return fmt.Errorf("failed to create VM importer: %s", err)
@@ -190,7 +222,10 @@ func main() {
			Action: func(c *cli.Context) error {
				fmt.Println("Prometheus import mode")

				vmCfg := initConfigVM(c)
				vmCfg, err := initConfigVM(c)
				if err != nil {
					return fmt.Errorf("failed to init VM configuration: %s", err)
				}
				importer, err = vm.NewImporter(ctx, vmCfg)
				if err != nil {
					return fmt.Errorf("failed to create VM importer: %s", err)
@@ -232,7 +267,6 @@ func main() {

			var srcExtraLabels []string
			srcAddr := strings.Trim(c.String(vmNativeSrcAddr), "/")
			srcInsecureSkipVerify := c.Bool(vmNativeSrcInsecureSkipVerify)
			srcAuthConfig, err := auth.Generate(
				auth.WithBasicAuth(c.String(vmNativeSrcUser), c.String(vmNativeSrcPassword)),
				auth.WithBearer(c.String(vmNativeSrcBearerToken)),
@@ -240,16 +274,26 @@ func main() {
			if err != nil {
				return fmt.Errorf("error initializing auth config for source: %s", srcAddr)
			}

			// create TLS config
			srcCertFile := c.String(vmNativeSrcCertFile)
			srcKeyFile := c.String(vmNativeSrcKeyFile)
			srcCAFile := c.String(vmNativeSrcCAFile)
			srcServerName := c.String(vmNativeSrcServerName)
			srcInsecureSkipVerify := c.Bool(vmNativeSrcInsecureSkipVerify)

			srcTC, err := httputils.TLSConfig(srcCertFile, srcKeyFile, srcCAFile, srcServerName, srcInsecureSkipVerify)
			if err != nil {
				return fmt.Errorf("failed to create TLS Config: %s", err)
			}

			srcHTTPClient := &http.Client{Transport: &http.Transport{
				DisableKeepAlives: disableKeepAlive,
				TLSClientConfig: &tls.Config{
					InsecureSkipVerify: srcInsecureSkipVerify,
				},
				TLSClientConfig:   srcTC,
			}}

			dstAddr := strings.Trim(c.String(vmNativeDstAddr), "/")
			dstExtraLabels := c.StringSlice(vmExtraLabel)
			dstInsecureSkipVerify := c.Bool(vmNativeDstInsecureSkipVerify)
			dstAuthConfig, err := auth.Generate(
				auth.WithBasicAuth(c.String(vmNativeDstUser), c.String(vmNativeDstPassword)),
				auth.WithBearer(c.String(vmNativeDstBearerToken)),
@@ -257,11 +301,22 @@ func main() {
			if err != nil {
				return fmt.Errorf("error initializing auth config for destination: %s", dstAddr)
			}

			// create TLS config
			dstCertFile := c.String(vmNativeDstCertFile)
			dstKeyFile := c.String(vmNativeDstKeyFile)
			dstCAFile := c.String(vmNativeDstCAFile)
			dstServerName := c.String(vmNativeDstServerName)
			dstInsecureSkipVerify := c.Bool(vmNativeDstInsecureSkipVerify)

			dstTC, err := httputils.TLSConfig(dstCertFile, dstKeyFile, dstCAFile, dstServerName, dstInsecureSkipVerify)
			if err != nil {
				return fmt.Errorf("failed to create TLS Config: %s", err)
			}

			dstHTTPClient := &http.Client{Transport: &http.Transport{
				DisableKeepAlives: disableKeepAlive,
				TLSClientConfig: &tls.Config{
					InsecureSkipVerify: dstInsecureSkipVerify,
				},
				TLSClientConfig:   dstTC,
			}}

			p := vmNativeProcessor{
@@ -317,14 +372,14 @@ func main() {
			if err != nil {
				return cli.Exit(fmt.Errorf("cannot open exported block at path=%q err=%w", blockPath, err), 1)
			}
			var blocksCount uint64
			var blocksCount atomic.Uint64
			if err := stream.Parse(f, isBlockGzipped, func(block *stream.Block) error {
				atomic.AddUint64(&blocksCount, 1)
				blocksCount.Add(1)
				return nil
			}); err != nil {
				return cli.Exit(fmt.Errorf("cannot parse block at path=%q, blocksCount=%d, err=%w", blockPath, blocksCount, err), 1)
				return cli.Exit(fmt.Errorf("cannot parse block at path=%q, blocksCount=%d, err=%w", blockPath, blocksCount.Load(), err), 1)
			}
			log.Printf("successfully verified block at path=%q, blockCount=%d", blockPath, blocksCount)
			log.Printf("successfully verified block at path=%q, blockCount=%d", blockPath, blocksCount.Load())
			return nil
		},
	},
@@ -349,9 +404,24 @@ func main() {
	log.Printf("Total time: %v", time.Since(start))
}

func initConfigVM(c *cli.Context) vm.Config {
func initConfigVM(c *cli.Context) (vm.Config, error) {
	addr := c.String(vmAddr)

	// create Transport with given TLS config
	certFile := c.String(vmCertFile)
	keyFile := c.String(vmKeyFile)
	caFile := c.String(vmCAFile)
	serverName := c.String(vmServerName)
	insecureSkipVerify := c.Bool(vmInsecureSkipVerify)

	tr, err := httputils.Transport(addr, certFile, keyFile, caFile, serverName, insecureSkipVerify)
	if err != nil {
		return vm.Config{}, fmt.Errorf("failed to create Transport: %s", err)
	}

	return vm.Config{
		Addr:        c.String(vmAddr),
		Addr:        addr,
		Transport:   tr,
		User:        c.String(vmUser),
		Password:    c.String(vmPassword),
		Concurrency: uint8(c.Int(vmConcurrency)),
@@ -363,5 +433,5 @@ func initConfigVM(c *cli.Context) vm.Config {
		ExtraLabels:        c.StringSlice(vmExtraLabel),
		RateLimit:          c.Int64(vmRateLimit),
		DisableProgressBar: c.Bool(vmDisableProgressBar),
	}
	}, nil
}
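Threading the transport through vm.Config instead of reaching for http.DefaultClient also makes the HTTP layer injectable in tests. A hedged sketch of that benefit using the standard httptest package (everything here is illustrative, not taken from the repository's test suite):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// A test server stands in for VictoriaMetrics; because the importer
	// takes its transport from config rather than using http.DefaultClient,
	// tests can point it at any endpoint.
	srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	}))
	defer srv.Close()

	// srv.Client() returns a client whose transport trusts the test server's
	// certificate - the same injection point vm.Config.Transport exposes.
	resp, err := srv.Client().Get(srv.URL)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode) // 204
}
```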

@@ -47,6 +47,8 @@ type Client struct {
	Normalize bool
	HardTS    int64
	MsecsTime bool

	c *http.Client
}

// Config contains fields required
@@ -60,6 +62,7 @@ type Config struct {
	Filters   []string
	Normalize bool
	MsecsTime bool
	Transport *http.Transport
}

// TimeRange contains data about time ranges to query
@@ -107,7 +110,8 @@ type Metric struct {
// FindMetrics discovers all metrics that OpenTSDB knows about (given a filter)
// e.g. /api/suggest?type=metrics&q=system&max=100000
func (c Client) FindMetrics(q string) ([]string, error) {
	resp, err := http.Get(q)

	resp, err := c.c.Get(q)
	if err != nil {
		return nil, fmt.Errorf("failed to send GET request to %q: %s", q, err)
	}
@@ -131,7 +135,7 @@ func (c Client) FindMetrics(q string) ([]string, error) {
// e.g. /api/search/lookup?m=system.load5&limit=1000000
func (c Client) FindSeries(metric string) ([]Meta, error) {
	q := fmt.Sprintf("%s/api/search/lookup?m=%s&limit=%d", c.Addr, metric, c.Limit)
	resp, err := http.Get(q)
	resp, err := c.c.Get(q)
	if err != nil {
		return nil, fmt.Errorf("failed to send GET request to %q: %s", q, err)
	}
@@ -184,7 +188,7 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64, m
		series.Metric, tagStr)

	q := fmt.Sprintf("%s/api/query?%s", c.Addr, queryStr)
	resp, err := http.Get(q)
	resp, err := c.c.Get(q)
	if err != nil {
		return Metric{}, fmt.Errorf("failed to send GET request to %q: %s", q, err)
	}
@@ -325,6 +329,7 @@ func NewClient(cfg Config) (*Client, error) {
		Normalize: cfg.Normalize,
		HardTS:    cfg.HardTS,
		MsecsTime: cfg.MsecsTime,
		c:         &http.Client{Transport: cfg.Transport},
	}
	return client, nil
}

@@ -2,6 +2,7 @@ package main

import (
	"context"
	"net/http"
	"testing"
	"time"

@@ -61,7 +62,8 @@ func TestRemoteRead(t *testing.T) {
		{
			name:             "step month on month time range",
			remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*"},
			vmCfg:            vm.Config{Addr: "", Concurrency: 1, DisableProgressBar: true},
			vmCfg: vm.Config{Addr: "", Concurrency: 1, DisableProgressBar: true,
				Transport: http.DefaultTransport.(*http.Transport)},
			start:        "2022-09-26T11:23:05+02:00",
			end:          "2022-11-26T11:24:05+02:00",
			numOfSamples: 2,

@@ -13,7 +13,6 @@ import (

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/prometheus/prometheus/prompb"
@@ -46,6 +45,8 @@ type Client struct {
type Config struct {
	// Addr of remote storage
	Addr string
	// Transport allows specifying custom http.Transport
	Transport *http.Transport
	// DisablePathAppend disable automatic appending of the remote read path
	DisablePathAppend bool
	// Timeout defines timeout for HTTP requests
@@ -64,15 +65,6 @@ type Config struct {
	// LabelName, LabelValue stands for label=~value pair used for read requests.
	// Is optional.
	LabelName, LabelValue string

	// Optional cert file, key file, CA file and server name for client side TLS configuration
	CertFile   string
	KeyFile    string
	CAFile     string
	ServerName string

	// TLSSkipVerify defines whether to skip TLS certificate verification when connecting to the remote read address.
	InsecureSkipVerify bool
}

// Filter defines a list of filters applied to requested data
@@ -110,16 +102,13 @@ func NewClient(cfg Config) (*Client, error) {
		}
	}

	tr, err := httputils.Transport(cfg.Addr, cfg.CertFile, cfg.KeyFile, cfg.CAFile, cfg.ServerName, cfg.InsecureSkipVerify)
	if err != nil {
		return nil, fmt.Errorf("failed to create transport: %s", err)
	client := &http.Client{Timeout: cfg.Timeout}
	if cfg.Transport != nil {
		client.Transport = cfg.Transport
	}

	c := &Client{
		c: &http.Client{
			Timeout:   cfg.Timeout,
			Transport: tr,
		},
		c:                 client,
		addr:              strings.TrimSuffix(cfg.Addr, "/"),
		disablePathAppend: cfg.DisablePathAppend,
		user:              cfg.Username,
@@ -182,7 +171,7 @@ func (c *Client) fetch(ctx context.Context, data []byte, streamCb StreamCallback
	if c.disablePathAppend {
		u = c.addr
	}
	req, err := http.NewRequest(http.MethodPost, u, r)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, r)
	if err != nil {
		return fmt.Errorf("failed to create new HTTP request: %w", err)
	}
@@ -195,7 +184,7 @@ func (c *Client) fetch(ctx context.Context, data []byte, streamCb StreamCallback
	}
	req.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0")

	resp, err := c.do(req.WithContext(ctx))
	resp, err := c.do(req)
	if err != nil {
		return fmt.Errorf("error while sending request to %s: %w; Data len %d(%d)",
			req.URL.Redacted(), err, len(data), r.Size())

@@ -14,11 +14,10 @@ const (
)

func parseTime(s string) (time.Time, error) {
	secs, err := promutils.ParseTime(s)
	msecs, err := promutils.ParseTimeMsec(s)
	if err != nil {
		return time.Time{}, fmt.Errorf("cannot parse %s: %w", s, err)
	}
	msecs := int64(secs * 1e3)
	if msecs < minTimeMsecs {
		msecs = 0
	}
|
||||
|
|
|
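The parseTime rewrite parses straight to integer milliseconds instead of going through float seconds and multiplying by 1e3, which avoids float64 rounding on large timestamps. A tiny sketch of the hazard in the old path (pure arithmetic, no VictoriaMetrics APIs):

package main

import "fmt"

func main() {
	// float64 has 52 mantissa bits, so a large seconds value with a
	// millisecond fraction may not round-trip exactly through *1e3.
	secs := 1696518896.123 // seconds with millisecond fraction
	msecs := int64(secs * 1e3)
	fmt.Println(msecs) // may print ...122 instead of ...123 due to truncation
}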
@@ -26,6 +26,8 @@ type Config struct {
// --httpListenAddr value for single node version
// --httpListenAddr value of vmselect component for cluster version
Addr string
// Transport allows specifying custom http.Transport
Transport *http.Transport
// Concurrency defines number of worker
// performing the import requests concurrently
Concurrency uint8
@@ -62,6 +64,7 @@ type Config struct {
// see https://docs.victoriametrics.com/#how-to-import-time-series-data
type Importer struct {
addr string
client *http.Client
importPath string
compress bool
user string
@@ -128,8 +131,14 @@ func NewImporter(ctx context.Context, cfg Config) (*Importer, error) {
return nil, err
}

client := &http.Client{}
if cfg.Transport != nil {
client.Transport = cfg.Transport
}

im := &Importer{
addr: addr,
client: client,
importPath: importPath,
compress: cfg.Compress,
user: cfg.User,
@@ -291,7 +300,7 @@ func (im *Importer) Ping() error {
if im.user != "" {
req.SetBasicAuth(im.user, im.password)
}
resp, err := http.DefaultClient.Do(req)
resp, err := im.client.Do(req)
if err != nil {
return err
}
@@ -321,7 +330,7 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {

errCh := make(chan error)
go func() {
errCh <- do(req)
errCh <- im.do(req)
close(errCh)
}()
@@ -375,8 +384,8 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
// ErrBadRequest represents bad request error.
var ErrBadRequest = errors.New("bad request")

func do(req *http.Request) error {
resp, err := http.DefaultClient.Do(req)
func (im *Importer) do(req *http.Request) error {
resp, err := im.client.Do(req)
if err != nil {
return fmt.Errorf("unexpected error when performing request: %s", err)
}
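The importer hunks above thread a caller-supplied *http.Transport through a single reusable http.Client instead of http.DefaultClient. A minimal sketch of the same injection pattern, matching the field layout in the diff (the tuning values below are illustrative):

package main

import (
	"net/http"
	"time"
)

// newHTTPClient mirrors the pattern from the diff: start from a default
// client and only override the transport when the caller supplied one.
func newHTTPClient(tr *http.Transport, timeout time.Duration) *http.Client {
	client := &http.Client{Timeout: timeout}
	if tr != nil {
		client.Transport = tr
	}
	return client
}

func main() {
	// Illustrative tuning only; any *http.Transport works here.
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.MaxIdleConnsPerHost = 100
	_ = newHTTPClient(tr, 30*time.Second)
}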
@@ -142,7 +142,7 @@ func (ctx *InsertCtx) ApplyRelabeling() {
// FlushBufs flushes buffered rows to the underlying storage.
func (ctx *InsertCtx) FlushBufs() error {
sas := sasGlobal.Load()
if sas != nil && !ctx.skipStreamAggr {
if (sas != nil || deduplicator != nil) && !ctx.skipStreamAggr {
matchIdxs := matchIdxsPool.Get()
matchIdxs.B = ctx.streamAggrCtx.push(ctx.mrs, matchIdxs.B)
if !*streamAggrKeepInput {
@@ -9,10 +9,10 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
"github.com/VictoriaMetrics/metrics"
@@ -28,8 +28,10 @@ var (
streamAggrDropInput = flag.Bool("streamAggr.dropInput", false, "Whether to drop all the input samples after the aggregation with -streamAggr.config. "+
"By default, only aggregated samples are dropped, while the remaining samples are stored in the database. "+
"See also -streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation.html")
streamAggrDedupInterval = flag.Duration("streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before being aggregated. "+
"Only the last sample per each time series per each interval is aggregated if the interval is greater than zero")
streamAggrDedupInterval = flag.Duration("streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation with -streamAggr.config . "+
"See also -streamAggr.dropInputLabels and -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation.html#deduplication")
streamAggrDropInputLabels = flagutil.NewArrayString("streamAggr.dropInputLabels", "An optional list of labels to drop from samples "+
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation.html#dropping-unneeded-labels")
)

var (
@@ -41,7 +43,8 @@ var (
saCfgSuccess = metrics.NewGauge(`vminsert_streamagg_config_last_reload_successful`, nil)
saCfgTimestamp = metrics.NewCounter(`vminsert_streamagg_config_last_reload_success_timestamp_seconds`)

sasGlobal atomic.Pointer[streamaggr.Aggregators]
sasGlobal atomic.Pointer[streamaggr.Aggregators]
deduplicator *streamaggr.Deduplicator
)

// CheckStreamAggrConfig checks config pointed by -stramaggr.config
@@ -50,7 +53,10 @@ func CheckStreamAggrConfig() error {
return nil
}
pushNoop := func(tss []prompbmarshal.TimeSeries) {}
sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushNoop, *streamAggrDedupInterval)
opts := &streamaggr.Options{
DedupInterval: *streamAggrDedupInterval,
}
sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushNoop, opts)
if err != nil {
return fmt.Errorf("error when loading -streamAggr.config=%q: %w", *streamAggrConfig, err)
}
@@ -65,15 +71,23 @@ func InitStreamAggr() {
saCfgReloaderStopCh = make(chan struct{})

if *streamAggrConfig == "" {
if *streamAggrDedupInterval > 0 {
deduplicator = streamaggr.NewDeduplicator(pushAggregateSeries, *streamAggrDedupInterval, *streamAggrDropInputLabels)
}
return
}

sighupCh := procutil.NewSighupChan()

sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, *streamAggrDedupInterval)
opts := &streamaggr.Options{
DedupInterval: *streamAggrDedupInterval,
DropInputLabels: *streamAggrDropInputLabels,
}
sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, opts)
if err != nil {
logger.Fatalf("cannot load -streamAggr.config=%q: %s", *streamAggrConfig, err)
}

sasGlobal.Store(sas)
saCfgSuccess.Set(1)
saCfgTimestamp.Set(fasttime.UnixTimestamp())
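Per the hunk above, InitStreamAggr now falls back to a standalone deduplicator when -streamAggr.config is empty but -streamAggr.dedupInterval is set. A schematic sketch of that selection logic with hypothetical stand-in types (not the actual streamaggr API):

package main

import (
	"fmt"
	"time"
)

// pipeline is a hypothetical stand-in for the aggregation/dedup sink.
type pipeline struct{ mode string }

// pickPipeline mirrors the control flow of InitStreamAggr: full aggregation
// when a config file is given, dedup-only when just the interval is set.
func pickPipeline(configPath string, dedupInterval time.Duration) *pipeline {
	if configPath == "" {
		if dedupInterval > 0 {
			return &pipeline{mode: "dedup-only"}
		}
		return nil // neither aggregation nor dedup is enabled
	}
	// dedupInterval (if non-zero) applies before aggregation.
	return &pipeline{mode: "aggregate"}
}

func main() {
	fmt.Println(pickPipeline("", 30*time.Second).mode) // dedup-only
	fmt.Println(pickPipeline("aggr.yaml", 0).mode)     // aggregate
}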
@@ -97,7 +111,10 @@ func reloadStreamAggrConfig() {
logger.Infof("reloading -streamAggr.config=%q", *streamAggrConfig)
saCfgReloads.Inc()

sasNew, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, *streamAggrDedupInterval)
opts := &streamaggr.Options{
DedupInterval: *streamAggrDedupInterval,
}
sasNew, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, opts)
if err != nil {
saCfgSuccess.Set(0)
saCfgReloadErr.Inc()
@@ -124,61 +141,101 @@ func MustStopStreamAggr() {

sas := sasGlobal.Swap(nil)
sas.MustStop()

if deduplicator != nil {
deduplicator.MustStop()
deduplicator = nil
}
}

type streamAggrCtx struct {
mn storage.MetricName
tss [1]prompbmarshal.TimeSeries
mn storage.MetricName
tss []prompbmarshal.TimeSeries
labels []prompbmarshal.Label
samples []prompbmarshal.Sample
buf []byte
}

func (ctx *streamAggrCtx) Reset() {
ctx.mn.Reset()
ts := &ctx.tss[0]
promrelabel.CleanLabels(ts.Labels)

clear(ctx.tss)
ctx.tss = ctx.tss[:0]

clear(ctx.labels)
ctx.labels = ctx.labels[:0]

ctx.samples = ctx.samples[:0]
ctx.buf = ctx.buf[:0]
}

func (ctx *streamAggrCtx) push(mrs []storage.MetricRow, matchIdxs []byte) []byte {
matchIdxs = bytesutil.ResizeNoCopyMayOverallocate(matchIdxs, len(mrs))
for i := 0; i < len(matchIdxs); i++ {
matchIdxs[i] = 0
}

mn := &ctx.mn
tss := ctx.tss[:]
ts := &tss[0]
labels := ts.Labels
samples := ts.Samples
sas := sasGlobal.Load()
var matchIdxsLocal []byte
for idx, mr := range mrs {
tss := ctx.tss
labels := ctx.labels
samples := ctx.samples
buf := ctx.buf

tssLen := len(tss)
for _, mr := range mrs {
if err := mn.UnmarshalRaw(mr.MetricNameRaw); err != nil {
logger.Panicf("BUG: cannot unmarshal recently marshaled MetricName: %s", err)
}

labels = append(labels[:0], prompbmarshal.Label{
labelsLen := len(labels)

bufLen := len(buf)
buf = append(buf, mn.MetricGroup...)
metricGroup := bytesutil.ToUnsafeString(buf[bufLen:])
labels = append(labels, prompbmarshal.Label{
Name: "__name__",
Value: bytesutil.ToUnsafeString(mn.MetricGroup),
Value: metricGroup,
})

for _, tag := range mn.Tags {
bufLen = len(buf)
buf = append(buf, tag.Key...)
name := bytesutil.ToUnsafeString(buf[bufLen:])

bufLen = len(buf)
buf = append(buf, tag.Value...)
value := bytesutil.ToUnsafeString(buf[bufLen:])
labels = append(labels, prompbmarshal.Label{
Name: bytesutil.ToUnsafeString(tag.Key),
Value: bytesutil.ToUnsafeString(tag.Value),
Name: name,
Value: value,
})
}

samples = append(samples[:0], prompbmarshal.Sample{
samplesLen := len(samples)
samples = append(samples, prompbmarshal.Sample{
Timestamp: mr.Timestamp,
Value: mr.Value,
})

ts.Labels = labels
ts.Samples = samples

matchIdxsLocal = sas.Push(tss, matchIdxsLocal)
if matchIdxsLocal[0] != 0 {
matchIdxs[idx] = 1
}
tss = append(tss, prompbmarshal.TimeSeries{
Labels: labels[labelsLen:],
Samples: samples[samplesLen:],
})
}
ctx.tss = tss
ctx.labels = labels
ctx.samples = samples
ctx.buf = buf

tss = tss[tssLen:]

sas := sasGlobal.Load()
if sas != nil {
matchIdxs = sas.Push(tss, matchIdxs)
} else if deduplicator != nil {
matchIdxs = bytesutil.ResizeNoCopyMayOverallocate(matchIdxs, len(tss))
for i := range matchIdxs {
matchIdxs[i] = 1
}
deduplicator.Push(tss)
}

ctx.Reset()

return matchIdxs
}
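The reworked push above batches every row of an insert request into one []prompbmarshal.TimeSeries slice and backs all label strings with a single reusable byte buffer, so the per-row re-appends of the old single-element-array version disappear; the zero-copy string views stay valid only until the next Reset. A standalone sketch of the append-then-view idiom using the standard library's unsafe.String and unsafe.SliceData (the bytesutil.ToUnsafeString helper used in the diff behaves similarly):

package main

import (
	"fmt"
	"unsafe"
)

// appendView appends s to buf and returns a zero-copy string view of the
// appended bytes. The view must not outlive the next reuse of buf.
func appendView(buf []byte, s string) ([]byte, string) {
	n := len(buf)
	buf = append(buf, s...)
	v := unsafe.String(unsafe.SliceData(buf[n:]), len(buf)-n)
	return buf, v
}

func main() {
	var buf []byte
	var names []string
	for _, s := range []string{"__name__", "job", "instance"} {
		var v string
		buf, v = appendView(buf, s)
		names = append(names, v)
	}
	// Three label names share (at most a few) backing allocations.
	fmt.Println(names, len(buf))
}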
@@ -6,7 +6,6 @@ import (
"fmt"
"net/http"
"strings"
"sync/atomic"
"time"

"github.com/VictoriaMetrics/metrics"
@@ -217,7 +216,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
addInfluxResponseHeaders(w)
influxutils.WriteDatabaseNames(w)
return true
case "/opentelemetry/api/v1/push":
case "/opentelemetry/api/v1/push", "/opentelemetry/v1/metrics":
opentelemetryPushRequests.Inc()
if err := opentelemetry.InsertHandler(r); err != nil {
opentelemetryPushErrors.Inc()
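With the hunk above, vminsert accepts the standard OTLP/HTTP path /opentelemetry/v1/metrics alongside the legacy /opentelemetry/api/v1/push. A hedged sketch of posting a protobuf-encoded payload to the new path (the body bytes are a placeholder; a real exporter would marshal an OTLP ExportMetricsServiceRequest):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder body: a real exporter would put serialized
	// OTLP protobuf (ExportMetricsServiceRequest) here.
	body := bytes.NewReader([]byte{ /* protobuf bytes */ })

	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:8428/opentelemetry/v1/metrics", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-protobuf")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}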
@@ -354,7 +353,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
w.WriteHeader(http.StatusOK)
return true
case "/ready":
if rdy := atomic.LoadInt32(&promscrape.PendingScrapeConfigs); rdy > 0 {
if rdy := promscrape.PendingScrapeConfigs.Load(); rdy > 0 {
errMsg := fmt.Sprintf("waiting for scrape config to init targets, configs left: %d", rdy)
http.Error(w, errMsg, http.StatusTooEarly)
} else {
@@ -412,8 +411,8 @@ var (
datadogIntakeRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/intake", protocol="datadog"}`)
datadogMetadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`)

opentelemetryPushRequests = metrics.NewCounter(`vm_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
opentelemetryPushErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
opentelemetryPushRequests = metrics.NewCounter(`vm_http_requests_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
opentelemetryPushErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)

newrelicWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
newrelicWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
@@ -435,12 +434,12 @@ var (
promscrapeConfigReloadRequests = metrics.NewCounter(`vm_http_requests_total{path="/-/reload"}`)

_ = metrics.NewGauge(`vm_metrics_with_dropped_labels_total`, func() float64 {
return float64(atomic.LoadUint64(&storage.MetricsWithDroppedLabels))
return float64(storage.MetricsWithDroppedLabels.Load())
})
_ = metrics.NewGauge(`vm_too_long_label_names_total`, func() float64 {
return float64(atomic.LoadUint64(&storage.TooLongLabelNames))
return float64(storage.TooLongLabelNames.Load())
})
_ = metrics.NewGauge(`vm_too_long_label_values_total`, func() float64 {
return float64(atomic.LoadUint64(&storage.TooLongLabelValues))
return float64(storage.TooLongLabelValues.Load())
})
)
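The hunks above (and several below) migrate bare uint32/uint64 counters guarded by atomic.Load*/atomic.Store* calls to the typed atomic.Bool/atomic.Uint64 wrappers introduced in Go 1.19, which make atomicity part of the type and rule out accidental plain access. A side-by-side sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Old style: a plain integer that every call site must remember
	// to touch only through the atomic.* functions.
	var oldFlag uint32
	atomic.StoreUint32(&oldFlag, 1)
	fmt.Println(atomic.LoadUint32(&oldFlag) != 0)

	// New style: the type itself guarantees atomic access and reads
	// as a boolean at the call site.
	var newFlag atomic.Bool
	newFlag.Store(true)
	fmt.Println(newFlag.Load())

	var counter atomic.Uint64
	if counter.Add(1) == 1 {
		fmt.Println("first call") // mirrors the peekStep idiom in the diff below
	}
}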
@@ -8,6 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
"github.com/VictoriaMetrics/metrics"
)
@@ -24,10 +25,15 @@ func InsertHandler(req *http.Request) error {
return err
}
isGzipped := req.Header.Get("Content-Encoding") == "gzip"
var processBody func([]byte) ([]byte, error)
if req.Header.Get("Content-Type") == "application/json" {
return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
if req.Header.Get("X-Amz-Firehose-Protocol-Version") != "" {
processBody = firehose.ProcessRequestBody
} else {
return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
}
}
return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
return stream.ParseStream(req.Body, isGzipped, processBody, func(tss []prompbmarshal.TimeSeries) error {
return insertRows(tss, extraLabels)
})
}
@@ -3842,18 +3842,18 @@ func nextSeriesConcurrentWrapper(nextSeries nextSeriesFunc, f func(s *series) (*
errCh <- err
close(errCh)
}()
var skipProcessing uint32
var skipProcessing atomic.Bool
for i := 0; i < goroutines; i++ {
go func() {
defer wg.Done()
for s := range seriesCh {
if atomic.LoadUint32(&skipProcessing) != 0 {
if skipProcessing.Load() {
continue
}
sNew, err := f(s)
if err != nil {
// Drain the rest of series and do not call f for them in order to conserve CPU time.
atomic.StoreUint32(&skipProcessing, 1)
skipProcessing.Store(true)
resultCh <- &result{
err: err,
}
@@ -5609,9 +5609,9 @@ func (nsf *nextSeriesFunc) peekStep(step int64) (int64, error) {
if s != nil {
step = s.step
}
calls := uint64(0)
var calls atomic.Uint64
*nsf = func() (*series, error) {
if atomic.AddUint64(&calls, 1) == 1 {
if calls.Add(1) == 1 {
return s, nil
}
return nextSeries()
@@ -5,7 +5,6 @@ import (
"errors"
"flag"
"fmt"
"reflect"
"sort"
"sync"
"sync/atomic"
@@ -24,11 +23,14 @@ import (
)

var (
maxTagKeysPerSearch = flag.Int("search.maxTagKeys", 100e3, "The maximum number of tag keys returned from /api/v1/labels")
maxTagValuesPerSearch = flag.Int("search.maxTagValues", 100e3, "The maximum number of tag values returned from /api/v1/label/<label_name>/values")
maxSamplesPerSeries = flag.Int("search.maxSamplesPerSeries", 30e6, "The maximum number of raw samples a single query can scan per each time series. This option allows limiting memory usage")
maxSamplesPerQuery = flag.Int("search.maxSamplesPerQuery", 1e9, "The maximum number of raw samples a single query can process across all time series. This protects from heavy queries, which select unexpectedly high number of raw samples. See also -search.maxSamplesPerSeries")
maxWorkersPerQuery = flag.Int("search.maxWorkersPerQuery", defaultMaxWorkersPerQuery, "The maximum number of CPU cores a single query can use. "+
maxTagKeysPerSearch = flag.Int("search.maxTagKeys", 100e3, "The maximum number of tag keys returned from /api/v1/labels . "+
"See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration")
maxTagValuesPerSearch = flag.Int("search.maxTagValues", 100e3, "The maximum number of tag values returned from /api/v1/label/<label_name>/values . "+
"See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration")
maxSamplesPerSeries = flag.Int("search.maxSamplesPerSeries", 30e6, "The maximum number of raw samples a single query can scan per each time series. This option allows limiting memory usage")
maxSamplesPerQuery = flag.Int("search.maxSamplesPerQuery", 1e9, "The maximum number of raw samples a single query can process across all time series. "+
"This protects from heavy queries, which select unexpectedly high number of raw samples. See also -search.maxSamplesPerSeries")
maxWorkersPerQuery = flag.Int("search.maxWorkersPerQuery", defaultMaxWorkersPerQuery, "The maximum number of CPU cores a single query can use. "+
"The default value should work good for most cases. "+
"The flag can be set to lower values for improving performance of big number of concurrently executed queries. "+
"The flag can be set to bigger values for improving performance of heavy queries, which scan big number of time series (>10K) and/or big number of samples (>100M). "+
@@ -81,7 +83,7 @@ func (rss *Results) mustClose() {
}

type timeseriesWork struct {
mustStop *uint32
mustStop *atomic.Bool
rss *Results
pts *packedTimeseries
f func(rs *Result, workerID uint) error
@@ -91,22 +93,22 @@ type timeseriesWork struct {
}

func (tsw *timeseriesWork) do(r *Result, workerID uint) error {
if atomic.LoadUint32(tsw.mustStop) != 0 {
if tsw.mustStop.Load() {
return nil
}
rss := tsw.rss
if rss.deadline.Exceeded() {
atomic.StoreUint32(tsw.mustStop, 1)
tsw.mustStop.Store(true)
return fmt.Errorf("timeout exceeded during query execution: %s", rss.deadline.String())
}
if err := tsw.pts.Unpack(r, rss.tbf, rss.tr); err != nil {
atomic.StoreUint32(tsw.mustStop, 1)
tsw.mustStop.Store(true)
return fmt.Errorf("error during time series unpacking: %w", err)
}
tsw.rowsProcessed = len(r.Timestamps)
if len(r.Timestamps) > 0 {
if err := tsw.f(r, workerID); err != nil {
atomic.StoreUint32(tsw.mustStop, 1)
tsw.mustStop.Store(true)
return err
}
}
@@ -238,7 +240,7 @@ func (rss *Results) runParallel(qt *querytracer.Tracer, f func(rs *Result, worke
return 0, nil
}

var mustStop uint32
var mustStop atomic.Bool
initTimeseriesWork := func(tsw *timeseriesWork, pts *packedTimeseries) {
tsw.rss = rss
tsw.pts = pts
@@ -1008,7 +1010,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
var (
errGlobal error
errGlobalLock sync.Mutex
mustStop uint32
mustStop atomic.Bool
)
var wg sync.WaitGroup
wg.Add(gomaxprocs)
@@ -1020,7 +1022,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
errGlobalLock.Lock()
if errGlobal == nil {
errGlobal = err
atomic.StoreUint32(&mustStop, 1)
mustStop.Store(true)
}
errGlobalLock.Unlock()
}
@@ -1038,7 +1040,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
if deadline.Exceeded() {
return fmt.Errorf("timeout exceeded while fetching data block #%d from storage: %s", blocksRead, deadline.String())
}
if atomic.LoadUint32(&mustStop) != 0 {
if mustStop.Load() {
break
}
xw := exportWorkPool.Get().(*exportWork)
@@ -1193,8 +1195,8 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadlin
if *maxSamplesPerQuery > 0 && samples > *maxSamplesPerQuery {
putTmpBlocksFile(tbf)
putStorageSearch(sr)
return nil, fmt.Errorf("cannot select more than -search.maxSamplesPerQuery=%d samples; possible solutions: to increase the -search.maxSamplesPerQuery; "+
"to reduce time range for the query; to use more specific label filters in order to select lower number of series", *maxSamplesPerQuery)
return nil, fmt.Errorf("cannot select more than -search.maxSamplesPerQuery=%d samples; possible solutions: increase the -search.maxSamplesPerQuery; "+
"reduce time range for the query; use more specific label filters in order to select fewer series", *maxSamplesPerQuery)
}

buf = br.Marshal(buf[:0])
@@ -1309,9 +1311,11 @@ func canAppendToBlockRefPool(pool, a []blockRef) bool {
// a doesn't belong to pool
return false
}
shPool := (*reflect.SliceHeader)(unsafe.Pointer(&pool))
shA := (*reflect.SliceHeader)(unsafe.Pointer(&a))
return shPool.Data+uintptr(shPool.Len)*unsafe.Sizeof(blockRef{}) == shA.Data+uintptr(shA.Len)*unsafe.Sizeof(blockRef{})
return getBlockRefsEnd(pool) == getBlockRefsEnd(a)
}

func getBlockRefsEnd(a []blockRef) uintptr {
return uintptr(unsafe.Pointer(unsafe.SliceData(a))) + uintptr(len(a))*unsafe.Sizeof(blockRef{})
}

func setupTfss(qt *querytracer.Tracer, tr storage.TimeRange, tagFilterss [][]storage.TagFilter, maxMetrics int, deadline searchutils.Deadline) ([]*storage.TagFilters, error) {
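canAppendToBlockRefPool drops the long-deprecated reflect.SliceHeader arithmetic in favor of unsafe.SliceData (Go 1.20), which returns the pointer to a slice's backing array directly. A sketch of the same "do two slices end at the same address" check for a plain element type:

package main

import (
	"fmt"
	"unsafe"
)

// sliceEnd returns the address one past the last element of a,
// using unsafe.SliceData instead of the deprecated reflect.SliceHeader.
func sliceEnd(a []int64) uintptr {
	return uintptr(unsafe.Pointer(unsafe.SliceData(a))) + uintptr(len(a))*unsafe.Sizeof(int64(0))
}

func main() {
	pool := make([]int64, 8, 16)
	a := pool[5:8] // a ends exactly where pool ends
	fmt.Println(sliceEnd(pool) == sliceEnd(a)) // true
}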
@@ -24,6 +24,7 @@
{% endfor %}
{% endfunc %}

{%code const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00" %}
{% func exportCSVField(mn *storage.MetricName, fieldName string, timestamp int64, value float64) %}
{% if fieldName == "__value__" %}
{%f= value %}
@@ -45,7 +46,7 @@
{% case "rfc3339" %}
{% code
bb := quicktemplate.AcquireByteBuffer()
bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], time.RFC3339)
bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], rfc3339Milli)
%}
{%z= bb.B %}
{% code
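The template now formats RFC 3339 timestamps with the custom rfc3339Milli layout so CSV exports keep millisecond precision; the .999 fractional digits are trimmed when they are zero, which matches the old time.RFC3339 output for whole seconds. A small sketch:

package main

import (
	"fmt"
	"time"
)

const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00" // same layout as in the diff

func main() {
	ts := int64(1696518896123) // unix milliseconds
	t := time.Unix(ts/1000, (ts%1000)*1e6).UTC()
	fmt.Println(t.Format(time.RFC3339)) // second precision only; ".123" is dropped
	fmt.Println(t.Format(rfc3339Milli)) // keeps the ".123" millisecond suffix
}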
@@ -87,586 +87,589 @@ func ExportCSVLine(xb *exportBlock, fieldNames []string) string {
[Regenerated app/vmselect/prometheus/export.qtpl.go: the machine-generated counterpart of the template change above. streamexportCSVField now defines rfc3339Milli and uses it in place of time.RFC3339, and every //line directive shifts by one, touching the generated StreamExportPrometheusLine, StreamExportJSONLine, StreamExportPromAPILine/Header/Footer, streamprometheusMetricName, streamconvertValueToSpecialJSON and streamescapePrometheusLabel functions and their Write*/string wrappers. The side-by-side rendering of this generated diff did not survive extraction, so it is summarized here; see the original commit for the full hunk.]
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
func writeescapePrometheusLabel(qq422016 qtio422016.Writer, b []byte) {
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
streamescapePrometheusLabel(qw422016, b)
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
func escapePrometheusLabel(b []byte) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
writeescapePrometheusLabel(qb422016, b)
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:196
|
||||
//line app/vmselect/prometheus/export.qtpl:197
|
||||
}
|
||||
|
|
|
@ -47,15 +47,22 @@ var (
	maxStepForPointsAdjustment = flag.Duration("search.maxStepForPointsAdjustment", time.Minute, "The maximum step when /api/v1/query_range handler adjusts "+
		"points with timestamps closer than -search.latencyOffset to the current time. The adjustment is needed because such points may contain incomplete data")

	maxUniqueTimeseries = flag.Int("search.maxUniqueTimeseries", 300e3, "The maximum number of unique time series, which can be selected during /api/v1/query and /api/v1/query_range queries. This option allows limiting memory usage")
	maxFederateSeries   = flag.Int("search.maxFederateSeries", 1e6, "The maximum number of time series, which can be returned from /federate. This option allows limiting memory usage")
	maxExportSeries     = flag.Int("search.maxExportSeries", 10e6, "The maximum number of time series, which can be returned from /api/v1/export* APIs. This option allows limiting memory usage")
	maxTSDBStatusSeries = flag.Int("search.maxTSDBStatusSeries", 10e6, "The maximum number of time series, which can be processed during the call to /api/v1/status/tsdb. This option allows limiting memory usage")
	maxSeriesLimit      = flag.Int("search.maxSeries", 30e3, "The maximum number of time series, which can be returned from /api/v1/series. This option allows limiting memory usage")
	maxLabelsAPISeries  = flag.Int("search.maxLabelsAPISeries", 1e6, "The maximum number of time series, which could be scanned when searching for the matching time series "+
		"at /api/v1/labels and /api/v1/label/.../values. This option allows limiting memory usage and CPU usage. See also -search.maxLabelsAPIDuration, "+
		"-search.maxTagKeys, -search.maxTagValues and -search.ignoreExtraFiltersAtLabelsAPI")
	maxPointsPerTimeseries = flag.Int("search.maxPointsPerTimeseries", 30e3, "The maximum points per a single timeseries returned from /api/v1/query_range. "+
		"This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points "+
		"returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph. "+
		"See also -search.maxResponseSeries")
	ignoreExtraFiltersAtLabelsAPI = flag.Bool("search.ignoreExtraFiltersAtLabelsAPI", false, "Whether to ignore match[], extra_filters[] and extra_label query args at "+
		"/api/v1/labels and /api/v1/label/.../values . This may be useful for decreasing load on VictoriaMetrics when extra filters "+
		"match too many time series. The downside is that superfluous labels or series could be returned, which do not match the extra filters. "+
		"See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration")
)

// Default step used if not set.

@ -317,21 +324,21 @@ func exportHandler(qt *querytracer.Tracer, w http.ResponseWriter, cp *commonPara
		}
	} else if format == "promapi" {
		WriteExportPromAPIHeader(bw)
		firstLineOnce := uint32(0)
		firstLineSent := uint32(0)
		var firstLineOnce atomic.Bool
		var firstLineSent atomic.Bool
		writeLineFunc = func(xb *exportBlock, workerID uint) error {
			bb := sw.getBuffer(workerID)
			// Use atomic.LoadUint32() in front of atomic.CompareAndSwapUint32() in order to avoid slow inter-CPU synchronization
			// Use Load() in front of CompareAndSwap() in order to avoid slow inter-CPU synchronization
			// in fast path after the first line has been already sent.
			if atomic.LoadUint32(&firstLineOnce) == 0 && atomic.CompareAndSwapUint32(&firstLineOnce, 0, 1) {
			if !firstLineOnce.Load() && firstLineOnce.CompareAndSwap(false, true) {
				// Send the first line to sw.bw
				WriteExportPromAPILine(bb, xb)
				_, err := sw.bw.Write(bb.B)
				bb.Reset()
				atomic.StoreUint32(&firstLineSent, 1)
				firstLineSent.Store(true)
				return err
			}
			for atomic.LoadUint32(&firstLineSent) == 0 {
			for !firstLineSent.Load() {
				// Busy wait until the first line is sent to sw.bw
				runtime.Gosched()
			}

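The hunk above migrates a one-time "first line" gate from raw `uint32` atomics to Go 1.19's typed `atomic.Bool`. A minimal, self-contained sketch of the same Load-before-CompareAndSwap idiom (illustrative names, not code from this commit):

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

func main() {
	var once, done atomic.Bool
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Cheap Load() first: once the winner has finished, losers skip
			// the more expensive CompareAndSwap and its cache-line contention.
			if !once.Load() && once.CompareAndSwap(false, true) {
				fmt.Println("worker", id, "writes the first line")
				done.Store(true)
				return
			}
			// Busy-wait until the first line has been written.
			for !done.Load() {
				runtime.Gosched()
			}
			fmt.Println("worker", id, "writes a subsequent line")
		}(i)
	}
	wg.Wait()
}
```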
@ -491,7 +498,7 @@ var deleteDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/
func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, labelName string, w http.ResponseWriter, r *http.Request) error {
	defer labelValuesDuration.UpdateDuration(startTime)

	cp, err := getCommonParamsWithDefaultDuration(r, startTime, false)
	cp, err := getCommonParamsForLabelsAPI(r, startTime, false)
	if err != nil {
		return err
	}

@ -499,10 +506,7 @@ func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, labelName s
	if err != nil {
		return err
	}
	// Do not limit the number of unique time series, which could be scanned
	// during the search for matching label values, since users expect this API
	// must always work.
	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, -1)
	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxLabelsAPISeries)
	labelValues, err := netstorage.LabelValues(qt, labelName, sq, limit, cp.deadline)
	if err != nil {
		return fmt.Errorf("cannot obtain values for label %q: %w", labelName, err)

@ -591,7 +595,7 @@ var tsdbStatusDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/
func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request) error {
	defer labelsDuration.UpdateDuration(startTime)

	cp, err := getCommonParamsWithDefaultDuration(r, startTime, false)
	cp, err := getCommonParamsForLabelsAPI(r, startTime, false)
	if err != nil {
		return err
	}

@ -599,10 +603,7 @@ func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseW
	if err != nil {
		return err
	}
	// Do not limit the number of unique time series, which could be scanned
	// during the search for matching label values, since users expect this API
	// must always work.
	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, -1)
	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxLabelsAPISeries)
	labels, err := netstorage.LabelNames(qt, sq, limit, cp.deadline)
	if err != nil {
		return fmt.Errorf("cannot obtain labels: %w", err)

@ -647,12 +648,12 @@ var seriesCountDuration = metrics.NewSummary(`vm_request_duration_seconds{path="
func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request) error {
	defer seriesDuration.UpdateDuration(startTime)

	// Do not set start to searchutils.minTimeMsecs by default as Prometheus does,
	// Do not set start to httputils.minTimeMsecs by default as Prometheus does,
	// since this leads to fetching and scanning all the data from the storage,
	// which can take a lot of time for big storages.
	// It is better setting start as end-defaultStep by default.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/91
	cp, err := getCommonParamsWithDefaultDuration(r, startTime, true)
	cp, err := getCommonParamsForLabelsAPI(r, startTime, true)
	if err != nil {
		return err
	}

@ -1129,14 +1130,15 @@ func getExportParams(r *http.Request, startTime time.Time) (*commonParams, error
	return cp, nil
}

func getCommonParamsWithDefaultDuration(r *http.Request, startTime time.Time, requireNonEmptyMatch bool) (*commonParams, error) {
	cp, err := getCommonParams(r, startTime, requireNonEmptyMatch)
func getCommonParamsForLabelsAPI(r *http.Request, startTime time.Time, requireNonEmptyMatch bool) (*commonParams, error) {
	cp, err := getCommonParamsInternal(r, startTime, requireNonEmptyMatch, true)
	if err != nil {
		return nil, err
	}
	if cp.start == 0 {
		cp.start = cp.end - defaultStep
	}
	cp.deadline = searchutils.GetDeadlineForExport(r, startTime)
	return cp, nil
}

@ -1149,6 +1151,10 @@ func getCommonParamsWithDefaultDuration(r *http.Request, startTime time.Time, re
// - extra_label
// - extra_filters[]
func getCommonParams(r *http.Request, startTime time.Time, requireNonEmptyMatch bool) (*commonParams, error) {
	return getCommonParamsInternal(r, startTime, requireNonEmptyMatch, false)
}

func getCommonParamsInternal(r *http.Request, startTime time.Time, requireNonEmptyMatch, isLabelsAPI bool) (*commonParams, error) {
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	start, err := httputils.GetTime(r, "start", 0)
	if err != nil {

@ -1175,15 +1181,23 @@ func getCommonParams(r *http.Request, startTime time.Time, requireNonEmptyMatch
	if requireNonEmptyMatch && len(matches) == 0 {
		return nil, fmt.Errorf("missing `match[]` arg")
	}
	tagFilterss, err := getTagFilterssFromMatches(matches)
	filterss, err := getTagFilterssFromMatches(matches)
	if err != nil {
		return nil, err
	}
	etfs, err := searchutils.GetExtraTagFilters(r)
	if err != nil {
		return nil, err
	if len(filterss) > 0 || !isLabelsAPI || !*ignoreExtraFiltersAtLabelsAPI {
		// If matches isn't empty, then there is no sense in ignoring extra filters
		// even if ignoreExtraFiltersAtLabelsAPI is set, since extra filters won't slow down
		// the query - they can only improve query performance by reducing the number
		// of matching series at the storage level.
		etfs, err := searchutils.GetExtraTagFilters(r)
		if err != nil {
			return nil, err
		}
		filterss = searchutils.JoinTagFilterss(filterss, etfs)
	}
	filterss := searchutils.JoinTagFilterss(tagFilterss, etfs)

	cp := &commonParams{
		deadline: deadline,
		start:    start,

@ -33,8 +33,8 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
// seriesFetched is string instead of int because of historical reasons.
// It cannot be converted to int without breaking backwards compatibility at vmalert :(
%}
"seriesFetched": "{%dl qs.SeriesFetched %}",
"executionTimeMsec": {%dl qs.ExecutionTimeMsec %}
"seriesFetched": "{%dl qs.SeriesFetched.Load() %}",
"executionTimeMsec": {%dl qs.ExecutionTimeMsec.Load() %}
}
{% code
qt.Printf("generate /api/v1/query_range response for series=%d, points=%d", seriesCount, pointsCount)

@ -68,11 +68,11 @@ func StreamQueryRangeResponse(qw422016 *qt422016.Writer, rs []netstorage.Result,
//line app/vmselect/prometheus/query_range_response.qtpl:35
	qw422016.N().S(`"seriesFetched": "`)
//line app/vmselect/prometheus/query_range_response.qtpl:36
	qw422016.N().DL(qs.SeriesFetched)
	qw422016.N().DL(qs.SeriesFetched.Load())
//line app/vmselect/prometheus/query_range_response.qtpl:36
	qw422016.N().S(`","executionTimeMsec":`)
//line app/vmselect/prometheus/query_range_response.qtpl:37
	qw422016.N().DL(qs.ExecutionTimeMsec)
	qw422016.N().DL(qs.ExecutionTimeMsec.Load())
//line app/vmselect/prometheus/query_range_response.qtpl:37
	qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_range_response.qtpl:40

@ -35,8 +35,8 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
// seriesFetched is string instead of int because of historical reasons.
// It cannot be converted to int without breaking backwards compatibility at vmalert :(
%}
"seriesFetched": "{%dl qs.SeriesFetched %}",
"executionTimeMsec": {%dl qs.ExecutionTimeMsec %}
"seriesFetched": "{%dl qs.SeriesFetched.Load() %}",
"executionTimeMsec": {%dl qs.ExecutionTimeMsec.Load() %}
}
{% code
qt.Printf("generate /api/v1/query response for series=%d", seriesCount)

@ -78,11 +78,11 @@ func StreamQueryResponse(qw422016 *qt422016.Writer, rs []netstorage.Result, qt *
//line app/vmselect/prometheus/query_response.qtpl:37
	qw422016.N().S(`"seriesFetched": "`)
//line app/vmselect/prometheus/query_response.qtpl:38
	qw422016.N().DL(qs.SeriesFetched)
	qw422016.N().DL(qs.SeriesFetched.Load())
//line app/vmselect/prometheus/query_response.qtpl:38
	qw422016.N().S(`","executionTimeMsec":`)
//line app/vmselect/prometheus/query_response.qtpl:39
	qw422016.N().DL(qs.ExecutionTimeMsec)
	qw422016.N().DL(qs.ExecutionTimeMsec.Load())
//line app/vmselect/prometheus/query_response.qtpl:39
	qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_response.qtpl:42

@ -60,7 +60,7 @@ func (aq *activeQueries) Add(ec *EvalConfig, q string) uint64 {
	aqe.start = ec.Start
	aqe.end = ec.End
	aqe.step = ec.Step
	aqe.qid = atomic.AddUint64(&nextActiveQueryID, 1)
	aqe.qid = nextActiveQueryID.Add(1)
	aqe.quotedRemoteAddr = ec.QuotedRemoteAddr
	aqe.q = q
	aqe.startTime = time.Now()

@ -87,4 +87,8 @@ func (aq *activeQueries) GetAll() []activeQueryEntry {
	return aqes
}

var nextActiveQueryID = uint64(time.Now().UnixNano())
var nextActiveQueryID = func() *atomic.Uint64 {
	var x atomic.Uint64
	x.Store(uint64(time.Now().UnixNano()))
	return &x
}()

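The diff replaces a plain `uint64` package variable with an `atomic.Uint64` seeded through an immediately invoked function, since typed atomics have no literal initializer. A minimal sketch of the same idiom (illustrative names):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// Seed the counter at package init time; atomic.Uint64 cannot be
// initialized with a value directly, so an immediately invoked func is used.
var nextID = func() *atomic.Uint64 {
	var x atomic.Uint64
	x.Store(uint64(time.Now().UnixNano()))
	return &x
}()

func main() {
	// Each Add(1) returns a process-unique, monotonically increasing ID.
	fmt.Println(nextID.Add(1))
	fmt.Println(nextID.Add(1))
}
```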
@ -171,16 +171,17 @@ func copyEvalConfig(src *EvalConfig) *EvalConfig {
// QueryStats contains various stats for the query.
type QueryStats struct {
	// SeriesFetched contains the number of series fetched from storage during the query evaluation.
	SeriesFetched int64
	SeriesFetched atomic.Int64

	// ExecutionTimeMsec contains the number of milliseconds the query took to execute.
	ExecutionTimeMsec int64
	ExecutionTimeMsec atomic.Int64
}

func (qs *QueryStats) addSeriesFetched(n int) {
	if qs == nil {
		return
	}
	atomic.AddInt64(&qs.SeriesFetched, int64(n))
	qs.SeriesFetched.Add(int64(n))
}

func (qs *QueryStats) addExecutionTimeMsec(startTime time.Time) {

@ -188,7 +189,7 @@ func (qs *QueryStats) addExecutionTimeMsec(startTime time.Time) {
		return
	}
	d := time.Since(startTime).Milliseconds()
	atomic.AddInt64(&qs.ExecutionTimeMsec, d)
	qs.ExecutionTimeMsec.Add(d)
}

func (ec *EvalConfig) validate() {

@ -313,7 +314,7 @@ func evalExprInternal(qt *querytracer.Tracer, ec *EvalConfig, e metricsql.Expr)
	}
	rf, err := nrf(args)
	if err != nil {
		return nil, err
		return nil, fmt.Errorf("cannot evaluate args for %q: %w", fe.AppendString(nil), err)
	}
	rv, err := evalRollupFunc(qt, ec, fe.Name, rf, e, re, nil)
	if err != nil {

@ -394,7 +395,7 @@ func evalAggrFunc(qt *querytracer.Tracer, ec *EvalConfig, ae *metricsql.AggrFunc
	}
	rf, err := nrf(args)
	if err != nil {
		return nil, err
		return nil, fmt.Errorf("cannot evaluate args for aggregate func %q: %w", ae.AppendString(nil), err)
	}
	iafc := newIncrementalAggrFuncContext(ae, callbacks)
	return evalRollupFunc(qt, ec, fe.Name, rf, ae, re, iafc)

@ -949,7 +950,7 @@ func evalRollupFuncWithSubquery(qt *querytracer.Tracer, ec *EvalConfig, funcName
		return nil, err
	}

	var samplesScannedTotal uint64
	var samplesScannedTotal atomic.Uint64
	keepMetricNames := getKeepMetricNames(expr)
	tsw := getTimeseriesByWorkerID()
	seriesByWorkerID := tsw.byWorkerID

@ -959,13 +960,13 @@ func evalRollupFuncWithSubquery(qt *querytracer.Tracer, ec *EvalConfig, funcName
		for _, rc := range rcs {
			if tsm := newTimeseriesMap(funcName, keepMetricNames, sharedTimestamps, &tsSQ.MetricName); tsm != nil {
				samplesScanned := rc.DoTimeseriesMap(tsm, values, timestamps)
				atomic.AddUint64(&samplesScannedTotal, samplesScanned)
				samplesScannedTotal.Add(samplesScanned)
				seriesByWorkerID[workerID].tss = tsm.AppendTimeseriesTo(seriesByWorkerID[workerID].tss)
				continue
			}
			var ts timeseries
			samplesScanned := doRollupForTimeseries(funcName, keepMetricNames, rc, &ts, &tsSQ.MetricName, values, timestamps, sharedTimestamps)
			atomic.AddUint64(&samplesScannedTotal, samplesScanned)
			samplesScannedTotal.Add(samplesScanned)
			seriesByWorkerID[workerID].tss = append(seriesByWorkerID[workerID].tss, &ts)
		}
		return values, timestamps

@ -976,8 +977,8 @@ func evalRollupFuncWithSubquery(qt *querytracer.Tracer, ec *EvalConfig, funcName
	}
	putTimeseriesByWorkerID(tsw)

	rowsScannedPerQuery.Update(float64(samplesScannedTotal))
	qt.Printf("rollup %s() over %d series returned by subquery: series=%d, samplesScanned=%d", funcName, len(tssSQ), len(tss), samplesScannedTotal)
	rowsScannedPerQuery.Update(float64(samplesScannedTotal.Load()))
	qt.Printf("rollup %s() over %d series returned by subquery: series=%d, samplesScanned=%d", funcName, len(tssSQ), len(tss), samplesScannedTotal.Load())
	return tss, nil
}

@ -1687,7 +1688,7 @@ func evalRollupFuncNoCache(qt *querytracer.Tracer, ec *EvalConfig, funcName stri
	tfss := searchutils.ToTagFilterss(me.LabelFilterss)
	tfss = searchutils.JoinTagFilterss(tfss, ec.EnforcedTagFilterss)
	minTimestamp := ec.Start
	if needSilenceIntervalForRollupFunc(funcName) {
	if needSilenceIntervalForRollupFunc[funcName] {
		minTimestamp -= maxSilenceInterval()
	}
	if window > ec.Step {

@ -1788,62 +1789,12 @@ func maxSilenceInterval() int64 {
	return d
}

func needSilenceIntervalForRollupFunc(funcName string) bool {
	// All the rollup functions, which do not rely on the previous sample
	// before the lookbehind window (aka prevValue and realPrevValue), do not need silence interval.
	switch strings.ToLower(funcName) {
	case "default_rollup":
		// The default_rollup implicitly relies on the previous samples in order to fill gaps.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5388
		return true
	case
		"absent_over_time",
		"avg_over_time",
		"count_eq_over_time",
		"count_gt_over_time",
		"count_le_over_time",
		"count_ne_over_time",
		"count_over_time",
		"first_over_time",
		"histogram_over_time",
		"hoeffding_bound_lower",
		"hoeffding_bound_upper",
		"last_over_time",
		"mad_over_time",
		"max_over_time",
		"median_over_time",
		"min_over_time",
		"predict_linear",
		"present_over_time",
		"quantile_over_time",
		"quantiles_over_time",
		"range_over_time",
		"share_gt_over_time",
		"share_le_over_time",
		"share_eq_over_time",
		"stale_samples_over_time",
		"stddev_over_time",
		"stdvar_over_time",
		"sum_over_time",
		"tfirst_over_time",
		"timestamp",
		"timestamp_with_name",
		"tlast_over_time",
		"tmax_over_time",
		"tmin_over_time",
		"zscore_over_time":
		return false
	default:
		return true
	}
}

func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool,
	iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
	preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
	qt = qt.NewChild("rollup %s() with incremental aggregation %s() over %d series; rollupConfigs=%s", funcName, iafc.ae.Name, rss.Len(), rcs)
	defer qt.Done()
	var samplesScannedTotal uint64
	var samplesScannedTotal atomic.Uint64
	err := rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
		rs.Values, rs.Timestamps = dropStaleNaNs(funcName, rs.Values, rs.Timestamps)
		preFunc(rs.Values, rs.Timestamps)

@ -1855,12 +1806,12 @@ func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string,
				for _, ts := range tsm.m {
					iafc.updateTimeseries(ts, workerID)
				}
				atomic.AddUint64(&samplesScannedTotal, samplesScanned)
				samplesScannedTotal.Add(samplesScanned)
				continue
			}
			ts.Reset()
			samplesScanned := doRollupForTimeseries(funcName, keepMetricNames, rc, ts, &rs.MetricName, rs.Values, rs.Timestamps, sharedTimestamps)
			atomic.AddUint64(&samplesScannedTotal, samplesScanned)
			samplesScannedTotal.Add(samplesScanned)
			iafc.updateTimeseries(ts, workerID)

			// ts.Timestamps points to sharedTimestamps. Zero it, so it can be re-used.

@ -1873,8 +1824,8 @@ func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string,
		return nil, err
	}
	tss := iafc.finalizeTimeseries()
	rowsScannedPerQuery.Update(float64(samplesScannedTotal))
	qt.Printf("series after aggregation with %s(): %d; samplesScanned=%d", iafc.ae.Name, len(tss), samplesScannedTotal)
	rowsScannedPerQuery.Update(float64(samplesScannedTotal.Load()))
	qt.Printf("series after aggregation with %s(): %d; samplesScanned=%d", iafc.ae.Name, len(tss), samplesScannedTotal.Load())
	return tss, nil
}

@ -1883,7 +1834,7 @@ func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, k
	qt = qt.NewChild("rollup %s() over %d series; rollupConfigs=%s", funcName, rss.Len(), rcs)
	defer qt.Done()

	var samplesScannedTotal uint64
	var samplesScannedTotal atomic.Uint64
	tsw := getTimeseriesByWorkerID()
	seriesByWorkerID := tsw.byWorkerID
	seriesLen := rss.Len()

@ -1893,13 +1844,13 @@ func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, k
		for _, rc := range rcs {
			if tsm := newTimeseriesMap(funcName, keepMetricNames, sharedTimestamps, &rs.MetricName); tsm != nil {
				samplesScanned := rc.DoTimeseriesMap(tsm, rs.Values, rs.Timestamps)
				atomic.AddUint64(&samplesScannedTotal, samplesScanned)
				samplesScannedTotal.Add(samplesScanned)
				seriesByWorkerID[workerID].tss = tsm.AppendTimeseriesTo(seriesByWorkerID[workerID].tss)
				continue
			}
			var ts timeseries
			samplesScanned := doRollupForTimeseries(funcName, keepMetricNames, rc, &ts, &rs.MetricName, rs.Values, rs.Timestamps, sharedTimestamps)
			atomic.AddUint64(&samplesScannedTotal, samplesScanned)
			samplesScannedTotal.Add(samplesScanned)
			seriesByWorkerID[workerID].tss = append(seriesByWorkerID[workerID].tss, &ts)
		}
		return nil

@ -1913,8 +1864,8 @@ func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, k
	}
	putTimeseriesByWorkerID(tsw)

	rowsScannedPerQuery.Update(float64(samplesScannedTotal))
	qt.Printf("samplesScanned=%d", samplesScannedTotal)
	rowsScannedPerQuery.Update(float64(samplesScannedTotal.Load()))
	qt.Printf("samplesScanned=%d", samplesScannedTotal.Load())
	return tss, nil
}

@ -86,14 +86,14 @@ func TestQueryStats_addSeriesFetched(t *testing.T) {
	}
	ec.QueryStats.addSeriesFetched(1)

	if qs.SeriesFetched != 1 {
		t.Fatalf("expected to get 1; got %d instead", qs.SeriesFetched)
	if n := qs.SeriesFetched.Load(); n != 1 {
		t.Fatalf("expected to get 1; got %d instead", n)
	}

	ecNew := copyEvalConfig(ec)
	ecNew.QueryStats.addSeriesFetched(3)
	if qs.SeriesFetched != 4 {
		t.Fatalf("expected to get 4; got %d instead", qs.SeriesFetched)
	if n := qs.SeriesFetched.Load(); n != 4 {
		t.Fatalf("expected to get 4; got %d instead", n)
	}
}

@ -352,22 +352,19 @@ type parseCacheValue struct {
}

type parseCache struct {
	// Move atomic counters to the top of struct for 8-byte alignment on 32-bit arch.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212

	requests uint64
	misses   uint64
	requests atomic.Uint64
	misses   atomic.Uint64

	m  map[string]*parseCacheValue
	mu sync.RWMutex
}

func (pc *parseCache) Requests() uint64 {
	return atomic.LoadUint64(&pc.requests)
	return pc.requests.Load()
}

func (pc *parseCache) Misses() uint64 {
	return atomic.LoadUint64(&pc.misses)
	return pc.misses.Load()
}

func (pc *parseCache) Len() uint64 {

@ -378,14 +375,14 @@ func (pc *parseCache) Len() uint64 {
}

func (pc *parseCache) Get(q string) *parseCacheValue {
	atomic.AddUint64(&pc.requests, 1)
	pc.requests.Add(1)

	pc.mu.RLock()
	pcv := pc.m[q]
	pc.mu.RUnlock()

	if pcv == nil {
		atomic.AddUint64(&pc.misses, 1)
		pc.misses.Add(1)
	}
	return pcv
}

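The parse cache above pairs an RWMutex-guarded map with atomic hit/miss counters, so stats updates never touch the lock. A condensed, self-contained sketch of that layout (illustrative names; eviction logic omitted):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type cache struct {
	requests atomic.Uint64
	misses   atomic.Uint64

	mu sync.RWMutex
	m  map[string]string
}

// Get records stats with lock-free atomics and takes only a read lock
// on the map, so concurrent readers never serialize on stats updates.
func (c *cache) Get(k string) (string, bool) {
	c.requests.Add(1)
	c.mu.RLock()
	v, ok := c.m[k]
	c.mu.RUnlock()
	if !ok {
		c.misses.Add(1)
	}
	return v, ok
}

func main() {
	c := &cache{m: map[string]string{"q": "parsed"}}
	c.Get("q")
	c.Get("missing")
	fmt.Println(c.requests.Load(), c.misses.Load()) // 2 1
}
```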
@ -2307,6 +2307,21 @@ func TestExecSuccess(t *testing.T) {
		resultExpected := []netstorage.Result{r}
		f(q, resultExpected)
	})
	t.Run(`label_join dst_label is equal to src_label`, func(t *testing.T) {
		t.Parallel()
		q := `label_join(label_join(time(), "bar", "sep1", "a", "b"), "bar", "sep2", "a", "bar")`
		r := netstorage.Result{
			MetricName: metricNameExpected,
			Values:     []float64{1000, 1200, 1400, 1600, 1800, 2000},
			Timestamps: timestampsExpected,
		}
		r.MetricName.Tags = []storage.Tag{{
			Key:   []byte("bar"),
			Value: []byte("sep2sep1"),
		}}
		resultExpected := []netstorage.Result{r}
		f(q, resultExpected)
	})
	t.Run(`label_value()`, func(t *testing.T) {
		t.Parallel()
		q := `with (

@ -5822,6 +5837,60 @@ func TestExecSuccess(t *testing.T) {
		resultExpected := []netstorage.Result{r1, r2}
		f(q, resultExpected)
	})
	t.Run(`count_values_over_time`, func(t *testing.T) {
		t.Parallel()
		q := `sort_by_label(
			count_values_over_time("foo", round(label_set(rand(0), "x", "y"), 0.4)[200s:5s]),
			"foo",
		)`
		r1 := netstorage.Result{
			MetricName: metricNameExpected,
			Values:     []float64{4, 8, 7, 6, 10, 9},
			Timestamps: timestampsExpected,
		}
		r1.MetricName.Tags = []storage.Tag{
			{
				Key:   []byte("foo"),
				Value: []byte("0"),
			},
			{
				Key:   []byte("x"),
				Value: []byte("y"),
			},
		}
		r2 := netstorage.Result{
			MetricName: metricNameExpected,
			Values:     []float64{20, 13, 19, 18, 14, 13},
			Timestamps: timestampsExpected,
		}
		r2.MetricName.Tags = []storage.Tag{
			{
				Key:   []byte("foo"),
				Value: []byte("0.4"),
			},
			{
				Key:   []byte("x"),
				Value: []byte("y"),
			},
		}
		r3 := netstorage.Result{
			MetricName: metricNameExpected,
			Values:     []float64{16, 19, 14, 16, 16, 18},
			Timestamps: timestampsExpected,
		}
		r3.MetricName.Tags = []storage.Tag{
			{
				Key:   []byte("foo"),
				Value: []byte("0.8"),
			},
			{
				Key:   []byte("x"),
				Value: []byte("y"),
			},
		}
		resultExpected := []netstorage.Result{r1, r2, r3}
		f(q, resultExpected)
	})
	t.Run(`histogram_over_time`, func(t *testing.T) {
		t.Parallel()
		q := `sort_by_label(histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s]), "vmrange")`

@ -4,12 +4,14 @@ import (
	"flag"
	"fmt"
	"math"
	"strconv"
	"strings"
	"sync"

	"github.com/VictoriaMetrics/metrics"
	"github.com/VictoriaMetrics/metricsql"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"

@ -31,6 +33,7 @@ var rollupFuncs = map[string]newRollupFunc{
	"count_le_over_time":     newRollupCountLE,
	"count_ne_over_time":     newRollupCountNE,
	"count_over_time":        newRollupFuncOneArg(rollupCount),
	"count_values_over_time": newRollupCountValues,
	"decreases_over_time":    newRollupFuncOneArg(rollupDecreases),
	"default_rollup":         newRollupFuncOneArg(rollupDefault), // default rollup func
	"delta":                  newRollupFuncOneArg(rollupDelta),

@ -103,6 +106,42 @@ var rollupFuncs = map[string]newRollupFunc{
	"zscore_over_time": newRollupFuncOneArg(rollupZScoreOverTime),
}

// Functions, which need the previous sample before the lookbehind window for proper calculations.
//
// All the rollup functions, which do not rely on the previous sample
// before the lookbehind window (aka prevValue and realPrevValue), do not need silence interval.
var needSilenceIntervalForRollupFunc = map[string]bool{
	"ascent_over_time":    true,
	"changes":             true,
	"decreases_over_time": true,
	// The default_rollup implicitly relies on the previous samples in order to fill gaps.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5388
	"default_rollup":         true,
	"delta":                  true,
	"deriv_fast":             true,
	"descent_over_time":      true,
	"idelta":                 true,
	"ideriv":                 true,
	"increase":               true,
	"increase_pure":          true,
	"increases_over_time":    true,
	"integrate":              true,
	"irate":                  true,
	"lag":                    true,
	"lifetime":               true,
	"rate":                   true,
	"resets":                 true,
	"rollup":                 true,
	"rollup_candlestick":     true,
	"rollup_delta":           true,
	"rollup_deriv":           true,
	"rollup_increase":        true,
	"rollup_rate":            true,
	"rollup_scrape_interval": true,
	"scrape_interval":        true,
	"tlast_change_over_time": true,
}

// rollupAggrFuncs are functions that can be passed to `aggr_over_time()`
var rollupAggrFuncs = map[string]rollupFunc{
	"absent_over_time": rollupAbsent,

@ -573,7 +612,7 @@ type timeseriesMap struct {
func newTimeseriesMap(funcName string, keepMetricNames bool, sharedTimestamps []int64, mnSrc *storage.MetricName) *timeseriesMap {
	funcName = strings.ToLower(funcName)
	switch funcName {
	case "histogram_over_time", "quantiles_over_time":
	case "histogram_over_time", "quantiles_over_time", "count_values_over_time":
	default:
		return nil
	}

@ -607,10 +646,16 @@ func (tsm *timeseriesMap) GetOrCreateTimeseries(labelName, labelValue string) *t
	if ts != nil {
		return ts
	}

	// Make a clone of labelValue in order to use it as map key, since it may point to unsafe string,
	// which refers some other byte slice, which can change in the future.
	labelValue = strings.Clone(labelValue)

	ts = &timeseries{}
	ts.CopyFromShallowTimestamps(tsm.origin)
	ts.MetricName.RemoveTag(labelName)
	ts.MetricName.AddTag(labelName, labelValue)

	tsm.m[labelValue] = ts
	return ts
}

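The clone above guards against storing an unsafe string (one aliasing a reusable byte buffer) as a map key. A small sketch of why the clone matters (hypothetical buffer reuse; not code from this repo):

```go
package main

import (
	"fmt"
	"strings"
	"unsafe"
)

func main() {
	buf := []byte("v1")
	// An unsafe string view over buf: no copy, shares the backing array.
	s := unsafe.String(unsafe.SliceData(buf), len(buf))

	m := map[string]int{}
	m[strings.Clone(s)] = 1 // safe: the key owns its own bytes

	// If the buffer is reused later, the cloned key is unaffected,
	// while a key built from s directly would have silently mutated.
	copy(buf, "v2")
	fmt.Println(m["v1"]) // 1
}
```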
@ -957,7 +1002,7 @@ func newRollupHoltWinters(args []interface{}) (rollupFunc, error) {
		// before calling rollup funcs.
		values := rfa.values
		if len(values) == 0 {
			return rfa.prevValue
			return nan
		}
		sf := sfs[rfa.idx]
		if sf < 0 || sf > 1 {

@ -1400,6 +1445,42 @@ func mad(values []float64) float64 {
	return v
}

func newRollupCountValues(args []interface{}) (rollupFunc, error) {
	if err := expectRollupArgsNum(args, 2); err != nil {
		return nil, err
	}
	tssLabelNum, ok := args[0].([]*timeseries)
	if !ok {
		return nil, fmt.Errorf(`unexpected type for labelName arg; got %T; want %T`, args[0], tssLabelNum)
	}
	labelName, err := getString(tssLabelNum, 0)
	if err != nil {
		return nil, fmt.Errorf("cannot get labelName: %w", err)
	}
	f := func(rfa *rollupFuncArg) float64 {
		tsm := rfa.tsm
		idx := rfa.idx
		bb := bbPool.Get()
		// Note: the code below may create a very big number of time series
		// if the number of unique values in rfa.values is big.
		for _, v := range rfa.values {
			bb.B = strconv.AppendFloat(bb.B[:0], v, 'g', -1, 64)
			labelValue := bytesutil.ToUnsafeString(bb.B)
			ts := tsm.GetOrCreateTimeseries(labelName, labelValue)
			count := ts.Values[idx]
			if math.IsNaN(count) {
				count = 1
			} else {
				count++
			}
			ts.Values[idx] = count
		}
		bbPool.Put(bb)
		return nan
	}
	return f, nil
}

func rollupHistogram(rfa *rollupFuncArg) float64 {
	values := rfa.values
	tsm := rfa.tsm

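The new rollup counts occurrences per formatted value, using NaN as the "empty" marker so the first hit initializes the counter to 1. A tiny standalone sketch of that counting idiom (illustrative; detached from the timeseriesMap machinery):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	samples := []float64{0, 0.4, 0.4, 0.8}
	counts := map[string]float64{}
	for _, v := range samples {
		key := strconv.FormatFloat(v, 'g', -1, 64)
		c, ok := counts[key]
		if !ok || math.IsNaN(c) {
			c = 1 // NaN (or absent) means "no samples counted yet"
		} else {
			c++
		}
		counts[key] = c
	}
	fmt.Println(counts) // map[0:1 0.4:2 0.8:1]
}
```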
@ -1586,11 +1667,7 @@ func rollupRateOverSum(rfa *rollupFuncArg) float64 {
	// before calling rollup funcs.
	timestamps := rfa.timestamps
	if len(timestamps) == 0 {
		if math.IsNaN(rfa.prevValue) {
			return nan
		}
		// Assume that the value didn't change since rfa.prevValue.
		return 0
		return nan
	}
	sum := float64(0)
	for _, v := range rfa.values {

@ -1610,7 +1687,7 @@ func rollupSum2(rfa *rollupFuncArg) float64 {
	// before calling rollup funcs.
	values := rfa.values
	if len(values) == 0 {
		return rfa.prevValue * rfa.prevValue
		return nan
	}
	var sum2 float64
	for _, v := range values {

@ -1624,7 +1701,7 @@ func rollupGeomean(rfa *rollupFuncArg) float64 {
	// before calling rollup funcs.
	values := rfa.values
	if len(values) == 0 {
		return rfa.prevValue
		return nan
	}
	p := 1.0
	for _, v := range values {

@ -2268,10 +2345,7 @@ func rollupDistinct(rfa *rollupFuncArg) float64 {
	// before calling rollup funcs.
	values := rfa.values
	if len(values) == 0 {
		if math.IsNaN(rfa.prevValue) {
			return nan
		}
		return 0
		return nan
	}
	m := make(map[float64]struct{})
	for _, v := range values {

@ -45,7 +45,7 @@ func ResetRollupResultCacheIfNeeded(mrs []storage.MetricRow) {
		rollupResultResetMetricRowSample.Store(&storage.MetricRow{})
		go checkRollupResultCacheReset()
	})
	if atomic.LoadUint32(&needRollupResultCacheReset) != 0 {
	if needRollupResultCacheReset.Load() {
		// The cache has been already instructed to reset.
		return
	}

@ -63,14 +63,14 @@ func ResetRollupResultCacheIfNeeded(mrs []storage.MetricRow) {
	}
	if needCacheReset {
		// Do not call ResetRollupResultCache() here, since it may be heavy when frequently called.
		atomic.StoreUint32(&needRollupResultCacheReset, 1)
		needRollupResultCacheReset.Store(true)
	}
}

func checkRollupResultCacheReset() {
	for {
		time.Sleep(checkRollupResultCacheResetInterval)
		if atomic.SwapUint32(&needRollupResultCacheReset, 0) > 0 {
		if needRollupResultCacheReset.Swap(false) {
			mr := rollupResultResetMetricRowSample.Load()
			d := int64(fasttime.UnixTimestamp()*1000) - mr.Timestamp - cacheTimestampOffset.Milliseconds()
			logger.Warnf("resetting rollup result cache because the metric %s has a timestamp older than -search.cacheTimestampOffset=%s by %.3fs",

@ -82,7 +82,7 @@ func checkRollupResultCacheReset() {

const checkRollupResultCacheResetInterval = 5 * time.Second

var needRollupResultCacheReset uint32
var needRollupResultCacheReset atomic.Bool
var checkRollupResultCacheResetOnce sync.Once
var rollupResultResetMetricRowSample atomic.Pointer[storage.MetricRow]

@ -129,7 +129,7 @@ func InitRollupResultCache(cachePath string) {
		mustLoadRollupResultCacheKeyPrefix(rollupResultCachePath)
	} else {
		c = workingsetcache.New(cacheSize)
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
		rollupResultCacheKeyPrefix.Store(newRollupResultCacheKeyPrefix())
	}
	if *disableCache {
		c.Reset()

@ -211,7 +211,7 @@ var rollupResultCacheResets = metrics.NewCounter(`vm_cache_resets_total{type="pr
// ResetRollupResultCache resets rollup result cache.
func ResetRollupResultCache() {
	rollupResultCacheResets.Inc()
	atomic.AddUint64(&rollupResultCacheKeyPrefix, 1)
	rollupResultCacheKeyPrefix.Add(1)
	logger.Infof("rollupResult cache has been cleared")
}

@ -438,8 +438,8 @@ func (rrc *rollupResultCache) PutSeries(qt *querytracer.Tracer, ec *EvalConfig,
	}

	var key rollupResultCacheKey
	key.prefix = rollupResultCacheKeyPrefix
	key.suffix = atomic.AddUint64(&rollupResultCacheKeySuffix, 1)
	key.prefix = rollupResultCacheKeyPrefix.Load()
	key.suffix = rollupResultCacheKeySuffix.Add(1)

	bb := bbPool.Get()
	bb.B = key.Marshal(bb.B[:0])

@ -455,8 +455,12 @@ func (rrc *rollupResultCache) PutSeries(qt *querytracer.Tracer, ec *EvalConfig,
}

var (
	rollupResultCacheKeyPrefix uint64
	rollupResultCacheKeySuffix = uint64(time.Now().UnixNano())
	rollupResultCacheKeyPrefix atomic.Uint64
	rollupResultCacheKeySuffix = func() *atomic.Uint64 {
		var x atomic.Uint64
		x.Store(uint64(time.Now().UnixNano()))
		return &x
	}()
)

func (rrc *rollupResultCache) getSeriesFromCache(qt *querytracer.Tracer, key []byte) ([]*timeseries, bool) {

@ -517,26 +521,26 @@ func newRollupResultCacheKeyPrefix() uint64 {
func mustLoadRollupResultCacheKeyPrefix(path string) {
	path = path + ".key.prefix"
	if !fs.IsPathExist(path) {
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
		rollupResultCacheKeyPrefix.Store(newRollupResultCacheKeyPrefix())
		return
	}
	data, err := os.ReadFile(path)
	if err != nil {
		logger.Errorf("cannot load %s: %s; reset rollupResult cache", path, err)
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
		rollupResultCacheKeyPrefix.Store(newRollupResultCacheKeyPrefix())
		return
	}
	if len(data) != 8 {
		logger.Errorf("unexpected size of %s; want 8 bytes; got %d bytes; reset rollupResult cache", path, len(data))
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
		rollupResultCacheKeyPrefix.Store(newRollupResultCacheKeyPrefix())
		return
	}
	rollupResultCacheKeyPrefix = encoding.UnmarshalUint64(data)
	rollupResultCacheKeyPrefix.Store(encoding.UnmarshalUint64(data))
}

func mustSaveRollupResultCacheKeyPrefix(path string) {
	path = path + ".key.prefix"
	data := encoding.MarshalUint64(nil, rollupResultCacheKeyPrefix)
	data := encoding.MarshalUint64(nil, rollupResultCacheKeyPrefix.Load())
	fs.MustWriteAtomic(path, data, true)
}

@ -552,7 +556,7 @@ const (

func marshalRollupResultCacheKeyForSeries(dst []byte, expr metricsql.Expr, window, step int64, etfs [][]storage.TagFilter) []byte {
	dst = append(dst, rollupResultCacheVersion)
	dst = encoding.MarshalUint64(dst, rollupResultCacheKeyPrefix)
	dst = encoding.MarshalUint64(dst, rollupResultCacheKeyPrefix.Load())
	dst = append(dst, rollupResultCacheTypeSeries)
	dst = encoding.MarshalInt64(dst, window)
	dst = encoding.MarshalInt64(dst, step)

@ -563,7 +567,7 @@ func marshalRollupResultCacheKeyForInstantValues(dst []byte, expr metricsql.Expr
func marshalRollupResultCacheKeyForInstantValues(dst []byte, expr metricsql.Expr, window, step int64, etfs [][]storage.TagFilter) []byte {
	dst = append(dst, rollupResultCacheVersion)
	dst = encoding.MarshalUint64(dst, rollupResultCacheKeyPrefix)
	dst = encoding.MarshalUint64(dst, rollupResultCacheKeyPrefix.Load())
	dst = append(dst, rollupResultCacheTypeInstantValues)
	dst = encoding.MarshalInt64(dst, window)
	dst = encoding.MarshalInt64(dst, step)

@ -2,7 +2,6 @@ package promql

import (
	"fmt"
	"reflect"
	"sort"
	"strconv"
	"sync"

@ -308,60 +307,32 @@ func unmarshalBytesFast(src []byte) ([]byte, []byte, error) {
	return src[n:], src[:n], nil
}

func float64ToByteSlice(a []float64) (b []byte) {
	if len(a) == 0 {
		return nil
	}
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	sh.Data = uintptr(unsafe.Pointer(&a[0]))
	sh.Len = len(a) * int(unsafe.Sizeof(a[0]))
	sh.Cap = sh.Len
	return
func float64ToByteSlice(a []float64) []byte {
	return unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(a))), len(a)*8)
}

func int64ToByteSlice(a []int64) (b []byte) {
	if len(a) == 0 {
		return nil
	}
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	sh.Data = uintptr(unsafe.Pointer(&a[0]))
	sh.Len = len(a) * int(unsafe.Sizeof(a[0]))
	sh.Cap = sh.Len
	return
func int64ToByteSlice(a []int64) []byte {
	return unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(a))), len(a)*8)
}

func byteSliceToInt64(b []byte) (a []int64) {
	if len(b) == 0 {
		return nil
	}
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&a))
	sh.Data = uintptr(unsafe.Pointer(&b[0]))
	sh.Len = len(b) / int(unsafe.Sizeof(a[0]))
	sh.Cap = sh.Len
func byteSliceToInt64(b []byte) []int64 {
	// Make sure that the returned slice is properly aligned to 8 bytes.
	// This prevents from SIGBUS error on arm architectures, which deny unaligned access.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3927
	if sh.Data%8 != 0 {
	if uintptr(unsafe.Pointer(unsafe.SliceData(b)))%8 != 0 {
		logger.Panicf("BUG: the input byte slice b must be aligned to 8 bytes")
	}
	return
	return unsafe.Slice((*int64)(unsafe.Pointer(unsafe.SliceData(b))), len(b)/8)
}

func byteSliceToFloat64(b []byte) (a []float64) {
	if len(b) == 0 {
		return nil
	}
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&a))
	sh.Data = uintptr(unsafe.Pointer(&b[0]))
	sh.Len = len(b) / int(unsafe.Sizeof(a[0]))
	sh.Cap = sh.Len
func byteSliceToFloat64(b []byte) []float64 {
	// Make sure that the returned slice is properly aligned to 8 bytes.
	// This prevents from SIGBUS error on arm architectures, which deny unaligned access.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3927
	if uintptr(unsafe.Pointer(unsafe.SliceData(b)))%8 != 0 {
		logger.Panicf("BUG: the input byte slice b must be aligned to 8 bytes")
	}
	return unsafe.Slice((*float64)(unsafe.Pointer(unsafe.SliceData(b))), len(b)/8)
}

func stringMetricName(mn *storage.MetricName) string {

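The rewrite above swaps deprecated `reflect.SliceHeader` surgery for Go 1.20's `unsafe.Slice` and `unsafe.SliceData`. A minimal round-trip sketch of the same zero-copy conversion (illustrative, standalone):

```go
package main

import (
	"fmt"
	"unsafe"
)

// float64s reinterprets b as []float64 without copying.
// The caller must guarantee len(b) is a multiple of 8 and b is 8-byte aligned
// (misaligned loads can SIGBUS on some arm systems).
func float64s(b []byte) []float64 {
	if uintptr(unsafe.Pointer(unsafe.SliceData(b)))%8 != 0 {
		panic("input must be 8-byte aligned")
	}
	return unsafe.Slice((*float64)(unsafe.Pointer(unsafe.SliceData(b))), len(b)/8)
}

func main() {
	a := []float64{1.5, 2.5}
	// View the float64 slice as raw bytes, then back again.
	b := unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(a))), len(a)*8)
	fmt.Println(float64s(b)) // [1.5 2.5]
}
```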
@ -52,7 +52,9 @@ func TestMarshalTimeseriesFast(t *testing.T) {
		MetricName: storage.MetricName{
			MetricGroup: []byte{},
		},
		denyReuse: true,
		Values:     []float64{},
		Timestamps: []int64{},
		denyReuse:  true,
	}})
	f([]*timeseries{{
		MetricName: storage.MetricName{

@ -1940,8 +1940,7 @@ func transformLabelJoin(tfa *transformFuncArg) ([]*timeseries, error) {
	for _, ts := range rvs {
		mn := &ts.MetricName
		dstValue := getDstValue(mn, dstLabel)
		b := *dstValue
		b = b[:0]
		var b []byte
		for j, srcLabel := range srcLabels {
			srcValue := mn.GetTagValue(srcLabel)
			b = append(b, srcValue...)

@ -17,6 +17,8 @@ var (
	maxExportDuration        = flag.Duration("search.maxExportDuration", time.Hour*24*30, "The maximum duration for /api/v1/export call")
	maxQueryDuration         = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution")
	maxStatusRequestDuration = flag.Duration("search.maxStatusRequestDuration", time.Minute*5, "The maximum duration for /api/v1/status/* requests")
	maxLabelsAPIDuration     = flag.Duration("search.maxLabelsAPIDuration", time.Second*5, "The maximum duration for /api/v1/labels, /api/v1/label/.../values and /api/v1/series requests. "+
		"See also -search.maxLabelsAPISeries and -search.ignoreExtraFiltersAtLabelsAPI")
)

// GetMaxQueryDuration returns the maximum duration for query from r.

@ -50,6 +52,12 @@ func GetDeadlineForExport(r *http.Request, startTime time.Time) Deadline {
	return getDeadlineWithMaxDuration(r, startTime, dMax, "-search.maxExportDuration")
}

// GetDeadlineForLabelsAPI returns deadline for the given request to /api/v1/labels, /api/v1/label/.../values or /api/v1/series
func GetDeadlineForLabelsAPI(r *http.Request, startTime time.Time) Deadline {
	dMax := maxLabelsAPIDuration.Milliseconds()
	return getDeadlineWithMaxDuration(r, startTime, dMax, "-search.maxLabelsAPIDuration")
}

func getDeadlineWithMaxDuration(r *http.Request, startTime time.Time, dMax int64, flagHint string) Deadline {
	d, err := httputils.GetDuration(r, "timeout", 0)
	if err != nil {

@ -1,13 +1,13 @@
{
	"files": {
		"main.css": "./static/css/main.be4fee7a.css",
		"main.js": "./static/js/main.fd9d9e16.js",
		"static/js/522.da77e7b3.chunk.js": "./static/js/522.da77e7b3.chunk.js",
		"static/media/MetricsQL.md": "./static/media/MetricsQL.8a01ddf56e4e6bc1ccf1.md",
		"main.css": "./static/css/main.dee51b1d.css",
		"main.js": "./static/js/main.81b9e607.js",
		"static/js/685.bebe1265.chunk.js": "./static/js/685.bebe1265.chunk.js",
		"static/media/MetricsQL.md": "./static/media/MetricsQL.61a686c0661a23e4f2eb.md",
		"index.html": "./index.html"
	},
	"entrypoints": [
		"static/css/main.be4fee7a.css",
		"static/js/main.fd9d9e16.js"
		"static/css/main.dee51b1d.css",
		"static/js/main.81b9e607.js"
	]
}

@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.fd9d9e16.js"></script><link href="./static/css/main.be4fee7a.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.81b9e607.js"></script><link href="./static/css/main.dee51b1d.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
File diff suppressed because one or more lines are too long

1 app/vmselect/vmui/static/css/main.dee51b1d.css (new file)
File diff suppressed because one or more lines are too long

1 app/vmselect/vmui/static/js/685.bebe1265.chunk.js (new file)
File diff suppressed because one or more lines are too long

2 app/vmselect/vmui/static/js/main.81b9e607.js (new file)
@@ -4,10 +4,8 @@
http://jedwatson.github.io/classnames
*/

/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */

/**
 * @remix-run/router v1.10.0
 * @remix-run/router v1.15.1
 *
 * Copyright (c) Remix Software Inc.
 *
@@ -18,7 +16,7 @@
 */

/**
 * React Router DOM v6.17.0
 * React Router DOM v6.22.1
 *
 * Copyright (c) Remix Software Inc.
 *
@@ -29,7 +27,7 @@
 */

/**
 * React Router v6.17.0
 * React Router v6.22.1
 *
 * Copyright (c) Remix Software Inc.
 *
File diff suppressed because one or more lines are too long
@@ -26,12 +26,18 @@ and introduction into [basic querying via MetricsQL](https://docs.victoriametric

The following functionality is implemented differently in MetricsQL compared to PromQL. This improves user experience:

* MetricsQL takes into account the previous point before the window in square brackets for range functions such as [rate](#rate) and [increase](#increase).
This allows returning the exact results users expect for `increase(metric[$__interval])` queries instead of incomplete results Prometheus returns for such queries.
* MetricsQL doesn't extrapolate range function results. This addresses [this issue from Prometheus](https://github.com/prometheus/prometheus/issues/3746).
* MetricsQL takes into account the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples) before the lookbehind window
in square brackets for [increase](#increase) and [rate](#rate) functions. This allows returning the exact results users expect for `increase(metric[$__interval])` queries
instead of incomplete results Prometheus returns for such queries. Prometheus misses the increase between the last sample before the lookbehind window
and the first sample inside the lookbehind window.
* MetricsQL doesn't extrapolate [rate](#rate) and [increase](#increase) function results, so it always returns the expected results. For example, it returns
integer results from `increase()` over a slow-changing integer counter. Prometheus in this case returns unexpected fractional results,
which may significantly differ from the expected results. This addresses [this issue from Prometheus](https://github.com/prometheus/prometheus/issues/3746).
See technical details about VictoriaMetrics and Prometheus calculations for [rate](#rate)
and [increase](#increase) [in this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1215#issuecomment-850305711).
* MetricsQL returns the expected non-empty responses for [rate](#rate) with `step` values smaller than scrape interval.
* MetricsQL returns the expected non-empty responses for [rate](#rate) function when Grafana or [vmui](https://docs.victoriametrics.com/#vmui)
passes `step` values smaller than the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
This addresses [this issue from Grafana](https://github.com/grafana/grafana/issues/11451).
See also [this blog post](https://www.percona.com/blog/2020/02/28/better-prometheus-rate-function-with-victoriametrics/).
* MetricsQL treats `scalar` type the same as `instant vector` without labels, since subtle differences between these types usually confuse users.
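To make the `increase` behavior described in the hunk above concrete, here is a hedged illustration (the metric name `http_requests_total` is an example, not something this commit introduces):

```metricsql
# Exact counter growth over the last 5 minutes: the last sample before
# the window is taken into account and the result is not extrapolated,
# so a slow-changing integer counter yields integer results.
increase(http_requests_total[5m])
```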
@@ -61,16 +67,17 @@ The list of MetricsQL features on top of PromQL:

* Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax.
See [these docs](https://docs.victoriametrics.com/#selecting-graphite-metrics).
VictoriaMetrics also can be used as Graphite datasource in Grafana.
See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
VictoriaMetrics can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
* Lookbehind window in square brackets may be omitted. VictoriaMetrics automatically selects the lookbehind window
depending on the current step used for building the graph (e.g. `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)).
* Lookbehind window in square brackets for [rollup functions](#rollup-functions) may be omitted. VictoriaMetrics automatically selects the lookbehind window
depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)
and the real interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) (aka `scrape_interval`).
For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`.
It is equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
It is roughly equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
The difference is documented in [rate() docs](#rate).
* Numeric values can contain `_` delimiters for better readability. For example, `1_234_567_890` can be used in queries instead of `1234567890`.
* [Series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
selects series with either `{env="prod",job="a"}` or `{env="dev",job="b"}` labels.
selects series with `{env="prod",job="a"}` or `{env="dev",job="b"}` labels.
See [these docs](https://docs.victoriametrics.com/keyConcepts.html#filtering-by-multiple-or-filters) for details.
* Support for `group_left(*)` and `group_right(*)` for copying all the labels from time series on the `one` side
of [many-to-one operations](https://prometheus.io/docs/prometheus/latest/querying/operators/#many-to-one-and-one-to-many-vector-matches).
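A hedged sketch of the omitted lookbehind window and the `or` filters from the hunk above (metric and label names are illustrative):

```metricsql
# Lookbehind window omitted: VictoriaMetrics derives it from `step`
# and the real interval between raw samples.
rate(node_network_receive_bytes_total)

# A single selector covering two disjoint label sets via `or` filters:
rate({env="prod",job="a" or env="dev",job="b"})
```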
@@ -117,7 +124,8 @@ The list of MetricsQL features on top of PromQL:
Go to [WITH templates playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/expand-with-exprs) and try it.
* String literals may be concatenated. This is useful with `WITH` templates:
`WITH (commonPrefix="long_metric_prefix_") {__name__=commonPrefix+"suffix1"} / {__name__=commonPrefix+"suffix2"}`.
* `keep_metric_names` modifier can be applied to all the [rollup functions](#rollup-functions), [transform functions](#transform-functions) and [binary operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operators).
* `keep_metric_names` modifier can be applied to all the [rollup functions](#rollup-functions), [transform functions](#transform-functions)
and [binary operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operators).
This modifier prevents dropping metric names in function results. See [these docs](#keep_metric_names).

## keep_metric_names
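An illustrative query using this modifier (the metric name is an example):

```metricsql
# Without `keep_metric_names` the resulting series would have no name;
# with it, `node_network_receive_bytes_total` is kept in the result.
rate(node_network_receive_bytes_total[5m]) keep_metric_names
```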
@@ -155,14 +163,15 @@ Additional details:
The interval between points is set as `step` query arg passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
* If the given [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) returns multiple time series,
then rollups are calculated individually per each returned series.
* If lookbehind window in square brackets is missing, then MetricsQL automatically sets the lookbehind window
to the interval between points on the graph (aka `step` query arg at [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query),
`$__interval` value from Grafana or `1i` duration in MetricsQL).
For example, `rate(http_requests_total)` is equivalent to `rate(http_requests_total[$__interval])` in Grafana.
It is also equivalent to `rate(http_requests_total[1i])`.
* If lookbehind window in square brackets is missing, then it is automatically set to the following value:
- To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
for all the [rollup functions](#rollup-functions) except for [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
- To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
* Every [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) in MetricsQL must be wrapped into a rollup function.
Otherwise, it is automatically wrapped into [default_rollup](#default_rollup). For example, `foo{bar="baz"}`
is automatically converted to `default_rollup(foo{bar="baz"}[1i])` before performing the calculations.
is automatically converted to `default_rollup(foo{bar="baz"})` before performing the calculations.
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) is passed to rollup function,
then the inner arg is automatically converted to a [subquery](#subqueries).
* All the rollup functions accept optional `keep_metric_names` modifier. If it is set, then the function keeps metric names in results.
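A sketch of the implicit wrapping described in the list above (`foo` is a placeholder metric):

```metricsql
# What you write:
foo{bar="baz"}

# What is actually evaluated after the implicit conversion:
default_rollup(foo{bar="baz"})
```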
@@ -177,7 +186,9 @@ The list of supported rollup functions:
`absent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1
if the given lookbehind window `d` doesn't contain raw samples. Otherwise, it returns an empty result.

This function is supported by PromQL. See also [present_over_time](#present_over_time).
This function is supported by PromQL.

See also [present_over_time](#present_over_time).

#### aggr_over_time

@@ -207,7 +218,9 @@ See also [descent_over_time](#descent_over_time).
over raw samples on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL. See also [median_over_time](#median_over_time).
This function is supported by PromQL.

See also [median_over_time](#median_over_time).

#### changes

@@ -220,7 +233,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [changes_prometheus](#changes_prometheus).
This function is supported by PromQL.

See also [changes_prometheus](#changes_prometheus).

#### changes_prometheus

@@ -233,7 +248,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [changes](#changes).
This function is supported by PromQL.

See also [changes](#changes).

#### count_eq_over_time

@@ -243,7 +260,7 @@ from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.ht

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_over_time](#count_over_time).
See also [count_over_time](#count_over_time), [share_eq_over_time](#share_eq_over_time) and [count_values_over_time](#count_values_over_time).

#### count_gt_over_time

@@ -253,7 +270,7 @@ from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.ht

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_over_time](#count_over_time).
See also [count_over_time](#count_over_time) and [share_gt_over_time](#share_gt_over_time).

#### count_le_over_time

@@ -263,7 +280,7 @@ from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.ht

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_over_time](#count_over_time).
See also [count_over_time](#count_over_time) and [share_le_over_time](#share_le_over_time).

#### count_ne_over_time

@@ -282,8 +299,19 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_gt_over_time),
[count_eq_over_time](#count_eq_over_time) and [count_ne_over_time](#count_ne_over_time).
This function is supported by PromQL.

See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_gt_over_time), [count_eq_over_time](#count_eq_over_time) and [count_ne_over_time](#count_ne_over_time).

#### count_values_over_time

`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of raw samples
with the same value over the given lookbehind window and stores the counts in a time series with an additional `label`, which contains each initial value.
The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_eq_over_time](#count_eq_over_time), [count_values](#count_values), [distinct_over_time](#distinct_over_time) and [label_match](#label_match).

#### decreases_over_time

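An illustration of `count_values_over_time` from the hunk above (the metric and the `value` label are examples):

```metricsql
# Counts raw samples per distinct value over the last hour; each count
# is returned as a series carrying the original value in the `value` label.
count_values_over_time("value", http_response_status_code[1h])
```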
@@ -299,6 +327,11 @@ See also [increases_over_time](#increases_over_time).
`default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
while `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the selected time series.
This allows avoiding unexpected gaps on the graph when `step` is smaller than the `scrape_interval`.

#### delta

`delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between
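A sketch of the `default_rollup` window selection documented in the hunk above (`temperature` is a placeholder metric):

```metricsql
# Explicit window: last raw sample value within 5 minutes per series.
default_rollup(temperature[5m])

# Window omitted: automatically set to max(step, scrape_interval),
# so graphs have no gaps at small `step` values.
default_rollup(temperature)
```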
@@ -310,7 +343,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [increase](#increase) and [delta_prometheus](#delta_prometheus).
This function is supported by PromQL.

See also [increase](#increase) and [delta_prometheus](#delta_prometheus).

#### delta_prometheus

@@ -333,7 +368,9 @@ The derivative is calculated using linear regression.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).
This function is supported by PromQL.

See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).

#### deriv_fast

@@ -364,6 +401,8 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_values_over_time](#count_values_over_time).

#### duration_over_time

`duration_over_time(series_selector[d], max_interval)` is a [rollup function](#rollup-functions), which returns the duration in seconds

@@ -423,7 +462,9 @@ over the given lookbehind window `d` using the given smoothing factor `sf` and t
Both `sf` and `tf` must be in the range `[0...1]`. It is expected that the [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
returns time series of [gauge type](https://docs.victoriametrics.com/keyConcepts.html#gauge).

This function is supported by PromQL. See also [range_linear_regression](#range_linear_regression).
This function is supported by PromQL.

See also [range_linear_regression](#range_linear_regression).

#### idelta

@@ -432,7 +473,9 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [delta](#delta).
This function is supported by PromQL.

See also [delta](#delta).

#### ideriv

@@ -455,7 +498,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [increase_pure](#increase_pure), [increase_prometheus](#increase_prometheus) and [delta](#delta).
This function is supported by PromQL.

See also [increase_pure](#increase_pure), [increase_prometheus](#increase_prometheus) and [delta](#delta).

#### increase_prometheus

@@ -499,7 +544,9 @@ It is expected that the `series_selector` returns time series of [counter type](

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [rate](#rate) and [rollup_rate](#rollup_rate).
This function is supported by PromQL.

See also [rate](#rate) and [rollup_rate](#rollup_rate).

#### lag

@@ -516,7 +563,9 @@ See also [lifetime](#lifetime) and [duration_over_time](#duration_over_time).
`last_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL. See also [first_over_time](#first_over_time) and [tlast_over_time](#tlast_over_time).
This function is supported by PromQL.

See also [first_over_time](#first_over_time) and [tlast_over_time](#tlast_over_time).

#### lifetime

@@ -539,7 +588,9 @@ See also [mad](#mad), [range_mad](#range_mad) and [outlier_iqr_over_time](#outli
`max_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the maximum value over raw samples
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL. See also [tmax_over_time](#tmax_over_time).
This function is supported by PromQL.

See also [tmax_over_time](#tmax_over_time).

#### median_over_time

@@ -554,7 +605,9 @@ See also [avg_over_time](#avg_over_time).
`min_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the minimum value over raw samples
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL. See also [tmin_over_time](#tmin_over_time).
This function is supported by PromQL.

See also [tmin_over_time](#tmin_over_time).

#### mode_over_time

@@ -580,7 +633,9 @@ See also [outliers_iqr](#outliers_iqr).
linear interpolation over raw samples on the given lookbehind window `d`. The predicted value is calculated individually per each time series
returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL. See also [range_linear_regression](#range_linear_regression).
This function is supported by PromQL.

See also [range_linear_regression](#range_linear_regression).

#### present_over_time

@@ -597,7 +652,9 @@ This function is supported by PromQL.
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
The `phi` value must be in the range `[0...1]`.

This function is supported by PromQL. See also [quantiles_over_time](#quantiles_over_time).
This function is supported by PromQL.

See also [quantiles_over_time](#quantiles_over_time).

#### quantiles_over_time

@@ -622,9 +679,16 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).

If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
while `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the selected time series.
This allows avoiding unexpected gaps on the graph when `step` is smaller than the `scrape_interval`.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [irate](#irate) and [rollup_rate](#rollup_rate).
This function is supported by PromQL.

See also [irate](#irate) and [rollup_rate](#rollup_rate).

#### rate_over_sum

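A hedged illustration of the `rate()` lookbehind behavior from the hunk above (the metric name is an example):

```metricsql
# Per-second increase rate over an explicit 5-minute window.
rate(http_requests_total[5m])

# Window omitted: computed over max(step, scrape_interval), avoiding
# gaps when the graph `step` is smaller than the scrape interval.
rate(http_requests_total)
```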
@@ -652,6 +716,7 @@ on the given lookbehind window `d` and returns them in time series with `rollup=
These values are calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
See also [label_match](#label_match).

#### rollup_candlestick

@@ -660,7 +725,8 @@ over raw samples on the given lookbehind window `d` and returns them in time ser
The calculations are performed individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering). This function is useful for financial applications.

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
Optional 2nd argument `"open"`, `"high"`, `"low"` or `"close"` can be passed to keep only one calculation result, without adding a label.
See also [label_match](#label_match).

#### rollup_delta

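Illustrative queries for the two rollup variants above (metric names are examples):

```metricsql
# min, max and avg of raw samples per 10-minute window, returned as
# three series labeled rollup="min", rollup="max" and rollup="avg".
rollup(requests_per_second[10m])

# OHLC candlestick over the same window, keeping only the close value.
rollup_candlestick(stock_price[10m], "close")
```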
@@ -670,6 +736,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -683,6 +750,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -694,6 +762,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names. See also [rollup_delta](#rollup_delta).

@@ -707,10 +776,10 @@ See [this article](https://valyala.medium.com/why-irate-from-prometheus-doesnt-c
when to use `rollup_rate()`.

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
See also [label_match](#label_match).

The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### rollup_scrape_interval

@@ -721,6 +790,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names. See also [scrape_interval](#scrape_interval).

@@ -743,7 +813,7 @@ This function is useful for calculating SLI and SLO. Example: `share_gt_over_tim

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [share_le_over_time](#share_le_over_time).
See also [share_le_over_time](#share_le_over_time) and [count_gt_over_time](#count_gt_over_time).

#### share_le_over_time

@@ -756,7 +826,7 @@ the share of time series values for the last 24 hours when memory usage was belo

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [share_gt_over_time](#share_gt_over_time).
See also [share_gt_over_time](#share_gt_over_time) and [count_le_over_time](#count_le_over_time).

#### share_eq_over_time

@@ -766,6 +836,8 @@ from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.ht

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_eq_over_time](#count_eq_over_time).

#### stale_samples_over_time

`stale_samples_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number

@@ -781,7 +853,9 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [stdvar_over_time](#stdvar_over_time).
This function is supported by PromQL.

See also [stdvar_over_time](#stdvar_over_time).

#### stdvar_over_time

@@ -790,7 +864,36 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [stddev_over_time](#stddev_over_time).
This function is supported by PromQL.

See also [stddev_over_time](#stddev_over_time).

#### sum_eq_over_time

`sum_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values equal to `eq`
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [sum_over_time](#sum_over_time) and [count_eq_over_time](#count_eq_over_time).

#### sum_gt_over_time

`sum_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values bigger than `gt`
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [sum_over_time](#sum_over_time) and [count_gt_over_time](#count_gt_over_time).

#### sum_le_over_time

`sum_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values smaller or equal to `le`
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [sum_over_time](#sum_over_time) and [count_le_over_time](#count_le_over_time).

#### sum_over_time

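A sketch of the `sum_*_over_time` family added above (the threshold and the metric name are illustrative):

```metricsql
# Sums only the raw sample values bigger than 0.5 over the last hour,
# per series returned by the selector.
sum_gt_over_time(request_duration_seconds[1h], 0.5)
```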
@@ -815,7 +918,9 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [timestamp_with_name](#timestamp_with_name).
This function is supported by PromQL.

See also [time](#time) and [now](#now).

#### timestamp_with_name

@@ -824,7 +929,7 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are preserved in the resulting rollups.

See also [timestamp](#timestamp).
See also [timestamp](#timestamp) and [keep_metric_names](#keep_metric_names) modifier.

#### tfirst_over_time

@@ -891,7 +996,7 @@ Additional details:

* If a transform function is applied directly to a [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
then the [default_rollup()](#default_rollup) function is automatically applied before calculating the transformations.
For example, `abs(temperature)` is implicitly transformed to `abs(default_rollup(temperature[1i]))`.
For example, `abs(temperature)` is implicitly transformed to `abs(default_rollup(temperature))`.
* All the transform functions accept optional `keep_metric_names` modifier. If it is set,
then the function doesn't drop metric names from the resulting time series. See [these docs](#keep_metric_names).

@@ -909,7 +1014,9 @@ This function is supported by PromQL.

`absent(q)` is a [transform function](#transform-functions), which returns 1 if `q` has no points. Otherwise, returns an empty result.

This function is supported by PromQL. See also [absent_over_time](#absent_over_time).
This function is supported by PromQL.

See also [absent_over_time](#absent_over_time).

#### acos

@@ -918,7 +1025,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [asin](#asin) and [cos](#cos).
This function is supported by PromQL.

See also [asin](#asin) and [cos](#cos).

#### acosh

@@ -927,7 +1036,9 @@ This function is supported by PromQL. See also [asin](#asin) and [cos](#cos).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [cosh](#cosh).
This function is supported by PromQL.

See also [cosh](#cosh).

#### asin

@@ -936,7 +1047,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [acos](#acos) and [sin](#sin).
This function is supported by PromQL.

See also [acos](#acos) and [sin](#sin).

#### asinh

@@ -945,7 +1058,9 @@ This function is supported by PromQL. See also [acos](#acos) and [sin](#sin).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [sinh](#sinh).
This function is supported by PromQL.

See also [sinh](#sinh).

#### atan

@@ -954,7 +1069,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [tan](#tan).
This function is supported by PromQL.

See also [tan](#tan).

#### atanh

@@ -963,7 +1080,9 @@ This function is supported by PromQL. See also [tan](#tan).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [tanh](#tanh).
This function is supported by PromQL.

See also [tanh](#tanh).

#### bitmap_and

@@ -994,25 +1113,33 @@ See also [prometheus_buckets](#prometheus_buckets) and [histogram_quantile](#his

`ceil(q)` is a [transform function](#transform-functions), which rounds every point for every time series returned by `q` to the upper nearest integer.

This function is supported by PromQL. See also [floor](#floor) and [round](#round).
This function is supported by PromQL.

See also [floor](#floor) and [round](#round).

#### clamp

`clamp(q, min, max)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `min` and `max` values.

This function is supported by PromQL. See also [clamp_min](#clamp_min) and [clamp_max](#clamp_max).
This function is supported by PromQL.

See also [clamp_min](#clamp_min) and [clamp_max](#clamp_max).

#### clamp_max

`clamp_max(q, max)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `max` value.

This function is supported by PromQL. See also [clamp](#clamp) and [clamp_min](#clamp_min).
This function is supported by PromQL.

See also [clamp](#clamp) and [clamp_min](#clamp_min).

#### clamp_min

`clamp_min(q, min)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `min` value.

This function is supported by PromQL. See also [clamp](#clamp) and [clamp_max](#clamp_max).
This function is supported by PromQL.

See also [clamp](#clamp) and [clamp_max](#clamp_max).

#### cos

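A quick illustration of the clamping functions above (`cpu_usage_percent` is a placeholder metric):

```metricsql
# Clamp every point into the [0, 100] range.
clamp(cpu_usage_percent, 0, 100)

# Equivalent effect built from the single-sided variants.
clamp_min(clamp_max(cpu_usage_percent, 100), 0)
```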
@@ -1020,7 +1147,9 @@ This function is supported by PromQL. See also [clamp](#clamp) and [clamp_max](#

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [sin](#sin).
This function is supported by PromQL.

See also [sin](#sin).

#### cosh

@@ -1029,7 +1158,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [acosh](#acosh).
This function is supported by PromQL.

See also [acosh](#acosh).

#### day_of_month

@@ -1040,6 +1171,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke

This function is supported by PromQL.

See also [day_of_week](#day_of_week) and [day_of_year](#day_of_year).

#### day_of_week

`day_of_week(q)` is a [transform function](#transform-functions), which returns the day of week for every point of every time series returned by `q`.

@@ -1049,6 +1182,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke

This function is supported by PromQL.

See also [day_of_month](#day_of_month) and [day_of_year](#day_of_year).

#### day_of_year

`day_of_year(q)` is a [transform function](#transform-functions), which returns the day of year for every point of every time series returned by `q`.

@@ -1058,6 +1193,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke

This function is supported by PromQL.

See also [day_of_week](#day_of_week) and [day_of_month](#day_of_month).

#### days_in_month

`days_in_month(q)` is a [transform function](#transform-functions), which returns the number of days in the month identified

@@ -1075,7 +1212,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [rad](#rad).
This function is supported by PromQL.

See also [rad](#rad).

#### drop_empty_series

@@ -1101,13 +1240,17 @@ See also [start](#start), [time](#time) and [now](#now).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [ln](#ln).
This function is supported by PromQL.

See also [ln](#ln).

#### floor

`floor(q)` is a [transform function](#transform-functions), which rounds every point for every time series returned by `q` to the lower nearest integer.

This function is supported by PromQL. See also [ceil](#ceil) and [round](#round).
This function is supported by PromQL.

See also [ceil](#ceil) and [round](#round).

#### histogram_avg

@@ -1130,8 +1273,9 @@ When the [percentile](https://en.wikipedia.org/wiki/Percentile) is calculated ov
then all the input histograms **must** have buckets with identical boundaries, e.g. they must have the same set of `le` or `vmrange` labels.
Otherwise, the returned result may be invalid. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3231) for details.

This function is supported by PromQL (except for the `boundLabel` arg). See also [histogram_quantiles](#histogram_quantiles), [histogram_share](#histogram_share)
and [quantile](#quantile).
This function is supported by PromQL (except for the `boundLabel` arg).

See also [histogram_quantiles](#histogram_quantiles), [histogram_share](#histogram_share) and [quantile](#quantile).

#### histogram_quantiles

@@ -1203,7 +1347,9 @@ This allows implementing simple paging for `q` time series. See also [limitk](#l

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [exp](#exp) and [log2](#log2).
This function is supported by PromQL.

See also [exp](#exp) and [log2](#log2).

#### log2

@@ -1211,7 +1357,9 @@ This function is supported by PromQL. See also [exp](#exp) and [log2](#log2).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [log10](#log10) and [ln](#ln).
This function is supported by PromQL.

See also [log10](#log10) and [ln](#ln).

#### log10

@@ -1219,7 +1367,9 @@ This function is supported by PromQL. See also [log10](#log10) and [ln](#ln).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [log2](#log2) and [ln](#ln).
This function is supported by PromQL.

See also [log2](#log2) and [ln](#ln).

#### minute

@@ -1258,7 +1408,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL. See also [deg](#deg).
This function is supported by PromQL.

See also [deg](#deg).

#### prometheus_buckets

@@ -1386,7 +1538,9 @@ for points returned by `q`, e.g. it is equivalent to the following query: `(q -

`round(q, nearest)` is a [transform function](#transform-functions), which rounds every point of every time series returned by `q` to the `nearest` multiple.
If `nearest` is missing then the rounding is performed to the nearest integer.

This function is supported by PromQL. See also [floor](#floor) and [ceil](#ceil).
This function is supported by PromQL.

See also [floor](#floor) and [ceil](#ceil).

#### ru

@@ -1430,7 +1584,9 @@ This function is supported by PromQL.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL. See also [cos](#cos).
This function is supported by MetricsQL.

See also [cos](#cos).

#### sinh

@@ -1439,7 +1595,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL. See also [cosh](#cosh).
This function is supported by MetricsQL.

See also [cosh](#cosh).

#### tan

@@ -1447,7 +1605,9 @@ This function is supported by MetricsQL. See also [cosh](#cosh).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL. See also [atan](#atan).
This function is supported by MetricsQL.

See also [atan](#atan).

#### tanh

@@ -1456,7 +1616,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL. See also [atanh](#atanh).
This function is supported by MetricsQL.

See also [atanh](#atanh).

#### smooth_exponential

@@ -1467,13 +1629,17 @@ by `q` using [exponential moving average](https://en.wikipedia.org/wiki/Moving_a

`sort(q)` is a [transform function](#transform-functions), which sorts series in ascending order by the last point in every time series returned by `q`.

This function is supported by PromQL. See also [sort_desc](#sort_desc) and [sort_by_label](#sort_by_label).
This function is supported by PromQL.

See also [sort_desc](#sort_desc) and [sort_by_label](#sort_by_label).

#### sort_desc

`sort_desc(q)` is a [transform function](#transform-functions), which sorts series in descending order by the last point in every time series returned by `q`.

This function is supported by PromQL. See also [sort](#sort) and [sort_by_label](#sort_by_label_desc).
This function is supported by PromQL.

See also [sort](#sort) and [sort_by_label](#sort_by_label_desc).

#### sqrt

@@ -1502,7 +1668,9 @@ See also [start](#start) and [end](#end).

`time()` is a [transform function](#transform-functions), which returns unix timestamp for every returned point.

This function is supported by PromQL. See also [now](#now), [start](#start) and [end](#end).
This function is supported by PromQL.

See also [timestamp](#timestamp), [now](#now), [start](#start) and [end](#end).

#### timezone_offset

@@ -1551,7 +1719,7 @@ Additional details:

* If a label manipulation function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
then the [default_rollup()](#default_rollup) function is automatically applied before performing the label transformation.
For example, `alias(temperature, "foo")` is implicitly transformed to `alias(default_rollup(temperature[1i]), "foo")`.
For example, `alias(temperature, "foo")` is implicitly transformed to `alias(default_rollup(temperature), "foo")`.

See also [implicit query conversions](#implicit-query-conversions).

@@ -1728,7 +1896,7 @@ Additional details:
Multiple labels can be put in `by` and `without` modifiers.
* If the aggregate function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
then the [default_rollup()](#default_rollup) function is automatically applied before calculating the aggregate.
For example, `count(up)` is implicitly transformed to `count(default_rollup(up[1i]))`.
For example, `count(up)` is implicitly transformed to `count(default_rollup(up))`.
* Aggregate functions accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point
across time series returned by `q1`, `q2` and `q3`.
* Aggregate functions support optional `limit N` suffix, which can be used for limiting the number of output groups.

@@ -1756,7 +1924,9 @@ This function is supported by PromQL.
`bottomk(k, q)` is [aggregate function](#aggregate-functions), which returns up to `k` points with the smallest values across all the time series returned by `q`.
The aggregate is calculated individually per each group of points with the same timestamp.

This function is supported by PromQL. See also [topk](#topk).
This function is supported by PromQL.

See also [topk](#topk), [bottomk_min](#bottomk_min) and [bottomk_last](#bottomk_last).

#### bottomk_avg

@@ -1818,10 +1988,14 @@ The aggregate is calculated individually per each group of points with the same

This function is supported by PromQL.

See also [count_values_over_time](#count_values_over_time) and [label_match](#label_match).

#### distinct

`distinct(q)` is [aggregate function](#aggregate-functions), which calculates the number of unique values per each group of points with the same timestamp.

See also [distinct_over_time](#distinct_over_time).

#### geomean

`geomean(q)` is [aggregate function](#aggregate-functions), which calculates geometric mean per each group of points with the same timestamp.

@@ -1913,7 +2087,9 @@ See also [outliers_iqr](#outliers_iqr) and [outliers_mad](#outliers_mad).
for all the time series returned by `q`. `phi` must be in the range `[0...1]`.
The aggregate is calculated individually per each group of points with the same timestamp.

This function is supported by PromQL. See also [quantiles](#quantiles) and [histogram_quantile](#histogram_quantile).
This function is supported by PromQL.

See also [quantiles](#quantiles) and [histogram_quantile](#histogram_quantile).

#### quantiles

@@ -1972,7 +2148,9 @@ for all the time series returned by `q`. The aggregate is calculated individuall
`topk(k, q)` is [aggregate function](#aggregate-functions), which returns up to `k` points with the biggest values across all the time series returned by `q`.
The aggregate is calculated individually per each group of points with the same timestamp.

This function is supported by PromQL. See also [bottomk](#bottomk).
This function is supported by PromQL.

See also [bottomk](#bottomk), [topk_max](#topk_max) and [topk_last](#topk_last).

#### topk_avg

@@ -2032,7 +2210,7 @@ See also [zscore_over_time](#zscore_over_time), [range_trim_zscore](#range_trim_

MetricsQL supports and extends PromQL subqueries. See [this article](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3) for details.
Any [rollup function](#rollup-functions) applied to something other than a [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) forms a subquery.
Nested rollup functions can be implicit thanks to the [implicit query conversions](#implicit-query-conversions).
For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m[1i]))[1i:1i])`, so it becomes a subquery,
For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m))[1i:1i])`, so it becomes a subquery,
since it contains [default_rollup](#default_rollup) nested into [delta](#delta).

VictoriaMetrics performs subqueries in the following way:

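A hedged sketch of an explicit subquery matching the description above (the metric name is an example):

```metricsql
# The inner rate() is computed with a 5m window at 1m resolution over
# the outer 1h window, then averaged.
avg_over_time(rate(http_requests_total[5m])[1h:1m])
```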
|
@ -2047,21 +2225,23 @@ VictoriaMetrics performs subqueries in the following way:
|
|||
|
||||
VictoriaMetrics performs the following implicit conversions for incoming queries before starting the calculations:
|
||||
|
||||
* If lookbehind window in square brackets is missing inside [rollup function](#rollup-functions),
|
||||
then `[1i]` is automatically added there. The `[1i]` means one `step` value, which is passed
|
||||
to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
It is also known as `$__interval` in Grafana. For example, `rate(http_requests_count)` is automatically transformed to `rate(http_requests_count[1i])`.
|
||||
* If lookbehind window in square brackets is missing inside [rollup function](#rollup-functions), then it is automatically set to the following value:
|
||||
- To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
|
||||
for all the [rollup functions](#rollup-functions) except of [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
|
||||
For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
|
||||
- To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
|
||||
* All the [series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering),
|
||||
which aren't wrapped into [rollup functions](#rollup-functions), are automatically wrapped into [default_rollup](#default_rollup) function.
|
||||
Examples:
|
||||
* `foo` is transformed to `default_rollup(foo[1i])`
|
||||
* `foo + bar` is transformed to `default_rollup(foo[1i]) + default_rollup(bar[1i])`
|
||||
* `count(up)` is transformed to `count(default_rollup(up[1i]))`, because [count](#count) isn't a [rollup function](#rollup-functions) -
|
||||
* `foo` is transformed to `default_rollup(foo)`
|
||||
* `foo + bar` is transformed to `default_rollup(foo) + default_rollup(bar)`
|
||||
* `count(up)` is transformed to `count(default_rollup(up))`, because [count](#count) isn't a [rollup function](#rollup-functions) -
|
||||
it is [aggregate function](#aggregate-functions)
|
||||
* `abs(temperature)` is transformed to `abs(default_rollup(temperature[1i]))`, because [abs](#abs) isn't a [rollup function](#rollup-functions) -
|
||||
* `abs(temperature)` is transformed to `abs(default_rollup(temperature))`, because [abs](#abs) isn't a [rollup function](#rollup-functions) -
|
||||
it is [transform function](#transform-functions)
|
||||
* If `step` in square brackets is missing inside [subquery](#subqueries), then `1i` step is automatically added there.
|
||||
For example, `avg_over_time(rate(http_requests_total[5m])[1h])` is automatically converted to `avg_over_time(rate(http_requests_total[5m])[1h:1i])`.
|
||||
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
|
||||
is passed to [rollup function](#rollup-functions), then a [subquery](#subqueries) with `1i` lookbehind window and `1i` step is automatically formed.
|
||||
For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up[1i])))[1i:1i])`.
|
||||
For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up)))[1i:1i])`.
|
|
@@ -26,12 +26,12 @@ import (
 )
 
 var (
-	retentionPeriod       = flagutil.NewDuration("retentionPeriod", "1", "Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. See also -retentionFilter")
-	snapshotAuthKey       = flagutil.NewPassword("snapshotAuthKey", "authKey, which must be passed in query string to /snapshot* pages")
-	forceMergeAuthKey     = flagutil.NewPassword("forceMergeAuthKey", "authKey, which must be passed in query string to /internal/force_merge pages")
-	forceFlushAuthKey     = flagutil.NewPassword("forceFlushAuthKey", "authKey, which must be passed in query string to /internal/force_flush pages")
-	snapshotsMaxAge       = flagutil.NewDuration("snapshotsMaxAge", "0", "Automatically delete snapshots older than -snapshotsMaxAge if it is set to non-zero duration. Make sure that backup process has enough time to finish the backup before the corresponding snapshot is automatically deleted")
-	snapshotCreateTimeout = flag.Duration("snapshotCreateTimeout", 0, "The timeout for creating new snapshot. If set, make sure that timeout is lower than backup period")
+	retentionPeriod   = flagutil.NewDuration("retentionPeriod", "1", "Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. See also -retentionFilter")
+	snapshotAuthKey   = flagutil.NewPassword("snapshotAuthKey", "authKey, which must be passed in query string to /snapshot* pages")
+	forceMergeAuthKey = flagutil.NewPassword("forceMergeAuthKey", "authKey, which must be passed in query string to /internal/force_merge pages")
+	forceFlushAuthKey = flagutil.NewPassword("forceFlushAuthKey", "authKey, which must be passed in query string to /internal/force_flush pages")
+	snapshotsMaxAge   = flagutil.NewDuration("snapshotsMaxAge", "0", "Automatically delete snapshots older than -snapshotsMaxAge if it is set to non-zero duration. Make sure that backup process has enough time to finish the backup before the corresponding snapshot is automatically deleted")
+	_                 = flag.Duration("snapshotCreateTimeout", 0, "Deprecated: this flag does nothing")
 
 	precisionBits = flag.Int("precisionBits", 64, "The number of precision bits to store per each value. Lower precision bits improves data compression at the cost of precision loss")
 
@@ -299,11 +299,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	case "/create":
 		snapshotsCreateTotal.Inc()
 		w.Header().Set("Content-Type", "application/json")
-		deadline := uint64(0)
-		if *snapshotCreateTimeout > 0 {
-			deadline = fasttime.UnixTimestamp() + uint64(snapshotCreateTimeout.Seconds())
-		}
-		snapshotPath, err := Storage.CreateSnapshot(deadline)
+		snapshotPath, err := Storage.CreateSnapshot()
 		if err != nil {
 			err = fmt.Errorf("cannot create snapshot: %w", err)
 			jsonResponseError(w, err)
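With the server-side deadline gone, any bound on snapshot creation time now belongs on the caller's side. A minimal client sketch (not part of this diff; the `/snapshot/create` endpoint and its `{"status":"ok","snapshot":"<name>"}` JSON response follow the documented snapshot API, and the address is a placeholder):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	// A client-side timeout replaces the removed -snapshotCreateTimeout deadline.
	client := &http.Client{Timeout: 5 * time.Minute}
	resp, err := client.Get("http://victoriametrics:8428/snapshot/create")
	if err != nil {
		log.Fatalf("cannot create snapshot: %s", err)
	}
	defer resp.Body.Close()

	var r struct {
		Status   string `json:"status"`
		Snapshot string `json:"snapshot"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
		log.Fatalf("cannot parse response: %s", err)
	}
	fmt.Printf("status=%s snapshot=%s\n", r.Status, r.Snapshot)
}
```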
@@ -524,6 +520,7 @@ func writeStorageMetrics(w io.Writer, strg *storage.Storage) {
 
 	metrics.WriteCounterUint64(w, `vm_rows_added_to_storage_total`, m.RowsAddedTotal)
 	metrics.WriteCounterUint64(w, `vm_deduplicated_samples_total{type="merge"}`, m.DedupsDuringMerge)
+	metrics.WriteGaugeUint64(w, `vm_snapshots`, m.SnapshotsCount)
 
 	metrics.WriteCounterUint64(w, `vm_rows_ignored_total{reason="big_timestamp"}`, m.TooBigTimestampRows)
 	metrics.WriteCounterUint64(w, `vm_rows_ignored_total{reason="small_timestamp"}`, m.TooSmallTimestampRows)
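For context, `WriteCounterUint64` and `WriteGaugeUint64` come from `github.com/VictoriaMetrics/metrics` and each emit one Prometheus text-format line to the given `io.Writer`. A self-contained sketch using the same calls as the hunk above (the metric values are made up; in vmstorage they come from `storage.Metrics`):

```go
package main

import (
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		// http.ResponseWriter satisfies io.Writer, so the helpers can write
		// `name value` lines in Prometheus exposition format directly.
		metrics.WriteCounterUint64(w, `vm_rows_added_to_storage_total`, 12345)
		metrics.WriteGaugeUint64(w, `vm_snapshots`, 2)
	})
	http.ListenAndServe(":8428", nil)
}
```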
@@ -1,5 +1,8 @@
 FROM node:18-alpine3.17
 
+# Sets a custom location for the npm cache, preventing access errors in system directories
+ENV NPM_CONFIG_CACHE=/build/.npm
+
 RUN apk update && apk upgrade
 RUN apk add --no-cache bash bash-doc bash-completion libtool autoconf automake nasm pkgconfig libpng gcc make g++ zlib-dev gawk
 
@@ -1,4 +1,4 @@
-FROM golang:1.22.0 as build-web-stage
+FROM golang:1.22.1 as build-web-stage
 COPY build /build
 
 WORKDIR /build
app/vmui/packages/vmui/package-lock.json (generated, 2535 lines changed)
File diff suppressed because it is too large
@@ -26,7 +26,7 @@
     "sass": "^1.56.0",
     "source-map-explorer": "^2.5.3",
     "typescript": "~4.6.2",
-    "uplot": "^1.6.19",
+    "uplot": "^1.6.30",
     "web-vitals": "^3.3.2"
   },
   "scripts": {
@@ -26,12 +26,18 @@ and introduction into [basic querying via MetricsQL](https://docs.victoriametric
 
 The following functionality is implemented differently in MetricsQL compared to PromQL. This improves user experience:
 
-* MetricsQL takes into account the previous point before the window in square brackets for range functions such as [rate](#rate) and [increase](#increase).
-  This allows returning the exact results users expect for `increase(metric[$__interval])` queries instead of incomplete results Prometheus returns for such queries.
-* MetricsQL doesn't extrapolate range function results. This addresses [this issue from Prometheus](https://github.com/prometheus/prometheus/issues/3746).
+* MetricsQL takes into account the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples) before the lookbehind window
+  in square brackets for [increase](#increase) and [rate](#rate) functions. This allows returning the exact results users expect for `increase(metric[$__interval])` queries
+  instead of incomplete results Prometheus returns for such queries. Prometheus misses the increase between the last sample before the lookbehind window
+  and the first sample inside the lookbehind window.
+* MetricsQL doesn't extrapolate [rate](#rate) and [increase](#increase) function results, so it always returns the expected results. For example, it returns
+  integer results from `increase()` over a slow-changing integer counter. Prometheus in this case returns unexpected fractional results,
+  which may significantly differ from the expected results. This addresses [this issue from Prometheus](https://github.com/prometheus/prometheus/issues/3746).
   See technical details about VictoriaMetrics and Prometheus calculations for [rate](#rate)
   and [increase](#increase) [in this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1215#issuecomment-850305711).
-* MetricsQL returns the expected non-empty responses for [rate](#rate) with `step` values smaller than scrape interval.
+* MetricsQL returns the expected non-empty responses for the [rate](#rate) function when Grafana or [vmui](https://docs.victoriametrics.com/#vmui)
+  passes `step` values smaller than the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
+  to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
   This addresses [this issue from Grafana](https://github.com/grafana/grafana/issues/11451).
   See also [this blog post](https://www.percona.com/blog/2020/02/28/better-prometheus-rate-function-with-victoriametrics/).
 * MetricsQL treats `scalar` type the same as `instant vector` without labels, since subtle differences between these types usually confuse users.
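A toy model of the boundary behavior described above (our own simplified code, not VictoriaMetrics internals; real Prometheus additionally extrapolates, which is ignored here):

```go
package main

import "fmt"

type sample struct {
	ts    int64   // unix seconds
	value float64 // counter value
}

// windowOnlyIncrease mimics the incomplete result: it only sees samples
// strictly inside the lookbehind window (start, end].
func windowOnlyIncrease(samples []sample, start, end int64) float64 {
	var in []sample
	for _, s := range samples {
		if s.ts > start && s.ts <= end {
			in = append(in, s)
		}
	}
	if len(in) < 2 {
		return 0
	}
	return in[len(in)-1].value - in[0].value
}

// lastSampleAwareIncrease also takes into account the last sample before the
// window, so the increase on the window boundary isn't lost.
func lastSampleAwareIncrease(samples []sample, start, end int64) float64 {
	prev := -1
	var in []sample
	for i, s := range samples {
		if s.ts <= start {
			prev = i
		} else if s.ts <= end {
			in = append(in, s)
		}
	}
	if len(in) == 0 {
		return 0
	}
	first := in[0].value
	if prev >= 0 {
		first = samples[prev].value
	}
	return in[len(in)-1].value - first
}

func main() {
	// Counter scraped every 30s, growing by 10 per scrape.
	samples := []sample{{0, 100}, {30, 110}, {60, 120}, {90, 130}}
	// Lookbehind window (45, 90], i.e. increase(metric[45s]) evaluated at t=90.
	fmt.Println(windowOnlyIncrease(samples, 45, 90))      // 10 - the 110->120 boundary step is lost
	fmt.Println(lastSampleAwareIncrease(samples, 45, 90)) // 20 - the expected result
}
```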
@@ -61,13 +67,14 @@ The list of MetricsQL features on top of PromQL:
 
 * Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax.
   See [these docs](https://docs.victoriametrics.com/#selecting-graphite-metrics).
-  VictoriaMetrics also can be used as Graphite datasource in Grafana.
-  See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
+  VictoriaMetrics can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
   See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
-* Lookbehind window in square brackets may be omitted. VictoriaMetrics automatically selects the lookbehind window
-  depending on the current step used for building the graph (e.g. `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)).
+* Lookbehind window in square brackets for [rollup functions](#rollup-functions) may be omitted. VictoriaMetrics automatically selects the lookbehind window
+  depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)
+  and the real interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) (aka `scrape_interval`).
   For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`.
-  It is equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
+  It is roughly equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
+  The difference is documented in [rate() docs](#rate).
 * Numeric values can contain `_` delimiters for better readability. For example, `1_234_567_890` can be used in queries instead of `1234567890`.
 * [Series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
   selects series with `{env="prod",job="a"}` or `{env="dev",job="b"}` labels.
@@ -117,7 +124,8 @@ The list of MetricsQL features on top of PromQL:
   Go to [WITH templates playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/expand-with-exprs) and try it.
 * String literals may be concatenated. This is useful with `WITH` templates:
   `WITH (commonPrefix="long_metric_prefix_") {__name__=commonPrefix+"suffix1"} / {__name__=commonPrefix+"suffix2"}`.
-* `keep_metric_names` modifier can be applied to all the [rollup functions](#rollup-functions), [transform functions](#transform-functions) and [binary operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operators).
+* `keep_metric_names` modifier can be applied to all the [rollup functions](#rollup-functions), [transform functions](#transform-functions)
+  and [binary operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operators).
   This modifier prevents dropping metric names in function results. See [these docs](#keep_metric_names).
 
 ## keep_metric_names
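The expansion can be checked locally with the `github.com/VictoriaMetrics/metricsql` parser, which expands `WITH` templates and concatenates string literals during parsing. A minimal sketch (output formatting may differ slightly between library versions):

```go
package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/metricsql"
)

func main() {
	// The WITH template from the docs above.
	q := `WITH (commonPrefix="long_metric_prefix_") {__name__=commonPrefix+"suffix1"} / {__name__=commonPrefix+"suffix2"}`
	expr, err := metricsql.Parse(q)
	if err != nil {
		log.Fatalf("cannot parse query: %s", err)
	}
	// AppendString serializes the expression with WITH templates already
	// expanded and the string literals concatenated.
	fmt.Printf("%s\n", expr.AppendString(nil))
}
```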
@@ -155,14 +163,15 @@ Additional details:
   The interval between points is set as `step` query arg passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
 * If the given [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) returns multiple time series,
   then rollups are calculated individually per each returned series.
-* If lookbehind window in square brackets is missing, then MetricsQL automatically sets the lookbehind window
-  to the interval between points on the graph (aka `step` query arg at [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query),
-  `$__interval` value from Grafana or `1i` duration in MetricsQL).
-  For example, `rate(http_requests_total)` is equivalent to `rate(http_requests_total[$__interval])` in Grafana.
-  It is also equivalent to `rate(http_requests_total[1i])`.
+* If lookbehind window in square brackets is missing, then it is automatically set to the following value:
+  - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
+    for all the [rollup functions](#rollup-functions) except for [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
+    For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
+  - To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
+    for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
 * Every [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) in MetricsQL must be wrapped into a rollup function.
   Otherwise, it is automatically wrapped into [default_rollup](#default_rollup). For example, `foo{bar="baz"}`
-  is automatically converted to `default_rollup(foo{bar="baz"}[1i])` before performing the calculations.
+  is automatically converted to `default_rollup(foo{bar="baz"})` before performing the calculations.
 * If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) is passed to rollup function,
   then the inner arg is automatically converted to a [subquery](#subqueries).
 * All the rollup functions accept optional `keep_metric_names` modifier. If it is set, then the function keeps metric names in results.
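A tiny illustration of the lookbehind rule above for [default_rollup](#default_rollup) and [rate](#rate); the function name is ours, not a VictoriaMetrics API:

```go
package main

import (
	"fmt"
	"time"
)

// effectiveLookbehind models the `max(step, scrape_interval)` rule described above.
func effectiveLookbehind(step, scrapeInterval time.Duration) time.Duration {
	if step > scrapeInterval {
		return step
	}
	return scrapeInterval
}

func main() {
	// Grafana zoomed in: step=10s, but samples arrive only every 30s.
	fmt.Println(effectiveLookbehind(10*time.Second, 30*time.Second)) // 30s -> no gaps on the graph
	// Zoomed out: step=5m dominates.
	fmt.Println(effectiveLookbehind(5*time.Minute, 30*time.Second)) // 5m0s
}
```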
@@ -177,7 +186,9 @@ The list of supported rollup functions:
 `absent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1
 if the given lookbehind window `d` doesn't contain raw samples. Otherwise, it returns an empty result.
 
-This function is supported by PromQL. See also [present_over_time](#present_over_time).
+This function is supported by PromQL.
+
+See also [present_over_time](#present_over_time).
 
 #### aggr_over_time
 
@@ -207,7 +218,9 @@ See also [descent_over_time](#descent_over_time).
 over raw samples on the given lookbehind window `d` per each time series returned
 from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 
-This function is supported by PromQL. See also [median_over_time](#median_over_time).
+This function is supported by PromQL.
+
+See also [median_over_time](#median_over_time).
 
 #### changes
 
@@ -220,7 +233,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [changes_prometheus](#changes_prometheus).
+This function is supported by PromQL.
+
+See also [changes_prometheus](#changes_prometheus).
 
 #### changes_prometheus
 
@@ -233,7 +248,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [changes](#changes).
+This function is supported by PromQL.
+
+See also [changes](#changes).
 
 #### count_eq_over_time
 
@@ -243,7 +260,7 @@ from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.ht
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-See also [count_over_time](#count_over_time) and [share_eq_over_time](#share_eq_over_time).
+See also [count_over_time](#count_over_time), [share_eq_over_time](#share_eq_over_time) and [count_values_over_time](#count_values_over_time).
 
 #### count_gt_over_time
 
@@ -282,8 +299,19 @@ on the given lookbehind window `d` per each time series returned from the given
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_gt_over_time),
-[count_eq_over_time](#count_eq_over_time) and [count_ne_over_time](#count_ne_over_time).
+This function is supported by PromQL.
+
+See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_gt_over_time), [count_eq_over_time](#count_eq_over_time) and [count_ne_over_time](#count_ne_over_time).
+
+#### count_values_over_time
+
+`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of raw samples
+with the same value over the given lookbehind window and stores the counts in a time series with an additional `label`, which contains each initial value.
+The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+
+Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
+
+See also [count_eq_over_time](#count_eq_over_time), [count_values](#count_values), [distinct_over_time](#distinct_over_time) and [label_match](#label_match).
 
 #### decreases_over_time
 
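For instance, given a hypothetical `temperature` gauge, `count_values_over_time("value", temperature[1h])` returns one series per distinct raw value observed during the last hour, with that value stored in the `value` label.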
@@ -299,6 +327,11 @@ See also [increases_over_time](#increases_over_time).
 `default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
 per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+
+If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
+passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
+while `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the selected time series.
+This allows avoiding unexpected gaps on the graph when `step` is smaller than the `scrape_interval`.
 
 #### delta
 
 `delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between
@@ -310,7 +343,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [increase](#increase) and [delta_prometheus](#delta_prometheus).
+This function is supported by PromQL.
+
+See also [increase](#increase) and [delta_prometheus](#delta_prometheus).
 
 #### delta_prometheus
 
@@ -333,7 +368,9 @@ The derivative is calculated using linear regression.
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).
+This function is supported by PromQL.
+
+See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).
 
 #### deriv_fast
 
@@ -364,6 +401,8 @@ on the given lookbehind window `d` per each time series returned from the given
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
+See also [count_values_over_time](#count_values_over_time).
+
 #### duration_over_time
 
 `duration_over_time(series_selector[d], max_interval)` is a [rollup function](#rollup-functions), which returns the duration in seconds
@@ -423,7 +462,9 @@ over the given lookbehind window `d` using the given smoothing factor `sf` and t
 Both `sf` and `tf` must be in the range `[0...1]`. It is expected that the [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
 returns time series of [gauge type](https://docs.victoriametrics.com/keyConcepts.html#gauge).
 
-This function is supported by PromQL. See also [range_linear_regression](#range_linear_regression).
+This function is supported by PromQL.
+
+See also [range_linear_regression](#range_linear_regression).
 
 #### idelta
 
@@ -432,7 +473,9 @@ on the given lookbehind window `d` per each time series returned from the given
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [delta](#delta).
+This function is supported by PromQL.
+
+See also [delta](#delta).
 
 #### ideriv
 
@@ -455,7 +498,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [increase_pure](#increase_pure), [increase_prometheus](#increase_prometheus) and [delta](#delta).
+This function is supported by PromQL.
+
+See also [increase_pure](#increase_pure), [increase_prometheus](#increase_prometheus) and [delta](#delta).
 
 #### increase_prometheus
 
@@ -499,7 +544,9 @@ It is expected that the `series_selector` returns time series of [counter type](
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [rate](#rate) and [rollup_rate](#rollup_rate).
+This function is supported by PromQL.
+
+See also [rate](#rate) and [rollup_rate](#rollup_rate).
 
 #### lag
 
@@ -516,7 +563,9 @@ See also [lifetime](#lifetime) and [duration_over_time](#duration_over_time).
 `last_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
 per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 
-This function is supported by PromQL. See also [first_over_time](#first_over_time) and [tlast_over_time](#tlast_over_time).
+This function is supported by PromQL.
+
+See also [first_over_time](#first_over_time) and [tlast_over_time](#tlast_over_time).
 
 #### lifetime
 
@@ -539,7 +588,9 @@ See also [mad](#mad), [range_mad](#range_mad) and [outlier_iqr_over_time](#outli
 `max_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the maximum value over raw samples
 on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 
-This function is supported by PromQL. See also [tmax_over_time](#tmax_over_time).
+This function is supported by PromQL.
+
+See also [tmax_over_time](#tmax_over_time).
 
 #### median_over_time
 
@@ -554,7 +605,9 @@ See also [avg_over_time](#avg_over_time).
 `min_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the minimum value over raw samples
 on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 
-This function is supported by PromQL. See also [tmin_over_time](#tmin_over_time).
+This function is supported by PromQL.
+
+See also [tmin_over_time](#tmin_over_time).
 
 #### mode_over_time
 
@@ -580,7 +633,9 @@ See also [outliers_iqr](#outliers_iqr).
 linear interpolation over raw samples on the given lookbehind window `d`. The predicted value is calculated individually per each time series
 returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 
-This function is supported by PromQL. See also [range_linear_regression](#range_linear_regression).
+This function is supported by PromQL.
+
+See also [range_linear_regression](#range_linear_regression).
 
 #### present_over_time
 
@@ -597,7 +652,9 @@ This function is supported by PromQL.
 on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 The `phi` value must be in the range `[0...1]`.
 
-This function is supported by PromQL. See also [quantiles_over_time](#quantiles_over_time).
+This function is supported by PromQL.
+
+See also [quantiles_over_time](#quantiles_over_time).
 
 #### quantiles_over_time
 
@@ -622,9 +679,16 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
 over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
 
+If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
+passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
+while `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the selected time series.
+This allows avoiding unexpected gaps on the graph when `step` is smaller than the `scrape_interval`.
+
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [irate](#irate) and [rollup_rate](#rollup_rate).
+This function is supported by PromQL.
+
+See also [irate](#irate) and [rollup_rate](#rollup_rate).
 
 #### rate_over_sum
 
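For instance, with `step=10s` and a `30s` interval between raw samples, `rate(http_requests_total)` uses an effective `30s` window, so every point on the graph still covers at least two samples.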
@@ -652,6 +716,7 @@ on the given lookbehind window `d` and returns them in time series with `rollup=
 These values are calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 
+Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
 See also [label_match](#label_match).
 
 #### rollup_candlestick
 
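For instance, `rollup(process_resident_memory_bytes[5m], "max")` (with a hypothetical gauge metric) returns only the per-window maximum, without the `rollup` label.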
@@ -660,7 +725,8 @@ over raw samples on the given lookbehind window `d` and returns them in time ser
 The calculations are performed individually per each time series returned
 from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering). This function is useful for financial applications.
 
-Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
+Optional 2nd argument `"open"`, `"high"`, `"low"` or `"close"` can be passed to keep only one calculation result, without adding a label.
+See also [label_match](#label_match).
 
 #### rollup_delta
 
@@ -670,6 +736,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
 The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 
+Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
 See also [label_match](#label_match).
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
@@ -683,6 +750,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
 The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 
+Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
 See also [label_match](#label_match).
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
@@ -694,6 +762,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
 The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 
+Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
 See also [label_match](#label_match).
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names. See also [rollup_delta](#rollup_delta).
 
@@ -707,10 +776,10 @@ See [this article](https://valyala.medium.com/why-irate-from-prometheus-doesnt-c
 when to use `rollup_rate()`.
 
 Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
 See also [label_match](#label_match).
 
 The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
-
+
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
 #### rollup_scrape_interval
@@ -721,6 +790,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
 The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
 
+Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result, without adding a label.
 See also [label_match](#label_match).
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names. See also [scrape_interval](#scrape_interval).
 
@@ -783,7 +853,9 @@ on the given lookbehind window `d` per each time series returned from the given
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [stdvar_over_time](#stdvar_over_time).
+This function is supported by PromQL.
+
+See also [stdvar_over_time](#stdvar_over_time).
 
 #### stdvar_over_time
 
@@ -792,7 +864,9 @@ on the given lookbehind window `d` per each time series returned from the given
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [stddev_over_time](#stddev_over_time).
+This function is supported by PromQL.
+
+See also [stddev_over_time](#stddev_over_time).
 
 #### sum_eq_over_time
 
@@ -844,7 +918,9 @@ on the given lookbehind window `d` per each time series returned from the given
 
 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [timestamp_with_name](#timestamp_with_name).
+This function is supported by PromQL.
+
+See also [time](#time) and [now](#now).
 
 #### timestamp_with_name
 
@@ -853,7 +929,7 @@ on the given lookbehind window `d` per each time series returned from the given
 
 Metric names are preserved in the resulting rollups.
 
-See also [timestamp](#timestamp).
+See also [timestamp](#timestamp) and [keep_metric_names](#keep_metric_names) modifier.
 
 #### tfirst_over_time
 
@@ -920,7 +996,7 @@ Additional details:
 
 * If transform function is applied directly to a [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
   then the [default_rollup()](#default_rollup) function is automatically applied before calculating the transformations.
-  For example, `abs(temperature)` is implicitly transformed to `abs(default_rollup(temperature[1i]))`.
+  For example, `abs(temperature)` is implicitly transformed to `abs(default_rollup(temperature))`.
 * All the transform functions accept optional `keep_metric_names` modifier. If it is set,
   then the function doesn't drop metric names from the resulting time series. See [these docs](#keep_metric_names).
 
@@ -938,7 +1014,9 @@ This function is supported by PromQL.
 
 `absent(q)` is a [transform function](#transform-functions), which returns 1 if `q` has no points. Otherwise, returns an empty result.
 
-This function is supported by PromQL. See also [absent_over_time](#absent_over_time).
+This function is supported by PromQL.
+
+See also [absent_over_time](#absent_over_time).
 
 #### acos
 
@@ -947,7 +1025,9 @@ for every point of every time series returned by `q`.
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [asin](#asin) and [cos](#cos).
+This function is supported by PromQL.
+
+See also [asin](#asin) and [cos](#cos).
 
 #### acosh
 
@@ -956,7 +1036,9 @@ This function is supported by PromQL. See also [asin](#asin) and [cos](#cos).
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [sinh](#cosh).
+This function is supported by PromQL.
+
+See also [cosh](#cosh).
 
 #### asin
 
@@ -965,7 +1047,9 @@ for every point of every time series returned by `q`.
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [acos](#acos) and [sin](#sin).
+This function is supported by PromQL.
+
+See also [acos](#acos) and [sin](#sin).
 
 #### asinh
 
@@ -974,7 +1058,9 @@ This function is supported by PromQL. See also [acos](#acos) and [sin](#sin).
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [sinh](#sinh).
+This function is supported by PromQL.
+
+See also [sinh](#sinh).
 
 #### atan
 
@@ -983,7 +1069,9 @@ for every point of every time series returned by `q`.
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [tan](#tan).
+This function is supported by PromQL.
+
+See also [tan](#tan).
 
 #### atanh
 
@@ -992,7 +1080,9 @@ This function is supported by PromQL. See also [tan](#tan).
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [tanh](#tanh).
+This function is supported by PromQL.
+
+See also [tanh](#tanh).
 
 #### bitmap_and
 
@@ -1023,25 +1113,33 @@ See also [prometheus_buckets](#prometheus_buckets) and [histogram_quantile](#his
 
 `ceil(q)` is a [transform function](#transform-functions), which rounds every point for every time series returned by `q` to the upper nearest integer.
 
-This function is supported by PromQL. See also [floor](#floor) and [round](#round).
+This function is supported by PromQL.
+
+See also [floor](#floor) and [round](#round).
 
 #### clamp
 
 `clamp(q, min, max)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `min` and `max` values.
 
-This function is supported by PromQL. See also [clamp_min](#clamp_min) and [clamp_max](#clamp_max).
+This function is supported by PromQL.
+
+See also [clamp_min](#clamp_min) and [clamp_max](#clamp_max).
 
 #### clamp_max
 
 `clamp_max(q, max)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `max` value.
 
-This function is supported by PromQL. See also [clamp](#clamp) and [clamp_min](#clamp_min).
+This function is supported by PromQL.
+
+See also [clamp](#clamp) and [clamp_min](#clamp_min).
 
 #### clamp_min
 
 `clamp_min(q, min)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `min` value.
 
-This function is supported by PromQL. See also [clamp](#clamp) and [clamp_max](#clamp_max).
+This function is supported by PromQL.
+
+See also [clamp](#clamp) and [clamp_max](#clamp_max).
 
 #### cos
 
@@ -1049,7 +1147,9 @@ This function is supported by PromQL. See also [clamp](#clamp) and [clamp_max](#
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [sin](#sin).
+This function is supported by PromQL.
+
+See also [sin](#sin).
 
 #### cosh
 
@@ -1058,7 +1158,9 @@ for every point of every time series returned by `q`.
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [acosh](#acosh).
+This function is supported by PromQL.
+
+See also [acosh](#acosh).
 
 #### day_of_month
 
@@ -1069,6 +1171,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke
 
 This function is supported by PromQL.
 
+See also [day_of_week](#day_of_week) and [day_of_year](#day_of_year).
+
 #### day_of_week
 
 `day_of_week(q)` is a [transform function](#transform-functions), which returns the day of week for every point of every time series returned by `q`.
@@ -1078,6 +1182,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke
 
 This function is supported by PromQL.
 
+See also [day_of_month](#day_of_month) and [day_of_year](#day_of_year).
+
 #### day_of_year
 
 `day_of_year(q)` is a [transform function](#transform-functions), which returns the day of year for every point of every time series returned by `q`.
@@ -1087,6 +1193,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke
 
 This function is supported by PromQL.
 
+See also [day_of_week](#day_of_week) and [day_of_month](#day_of_month).
+
 #### days_in_month
 
 `days_in_month(q)` is a [transform function](#transform-functions), which returns the number of days in the month identified
@@ -1104,7 +1212,9 @@ for every point of every time series returned by `q`.
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [rad](#rad).
+This function is supported by PromQL.
+
+See also [rad](#rad).
 
 #### drop_empty_series
 
@@ -1130,13 +1240,17 @@ See also [start](#start), [time](#time) and [now](#now).
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [ln](#ln).
+This function is supported by PromQL.
+
+See also [ln](#ln).
 
 #### floor
 
 `floor(q)` is a [transform function](#transform-functions), which rounds every point for every time series returned by `q` to the lower nearest integer.
 
-This function is supported by PromQL. See also [ceil](#ceil) and [round](#round).
+This function is supported by PromQL.
+
+See also [ceil](#ceil) and [round](#round).
 
 #### histogram_avg
 
@@ -1159,8 +1273,9 @@ When the [percentile](https://en.wikipedia.org/wiki/Percentile) is calculated ov
 then all the input histograms **must** have buckets with identical boundaries, e.g. they must have the same set of `le` or `vmrange` labels.
 Otherwise, the returned result may be invalid. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3231) for details.
 
-This function is supported by PromQL (except of the `boundLabel` arg). See also [histogram_quantiles](#histogram_quantiles), [histogram_share](#histogram_share)
-and [quantile](#quantile).
+This function is supported by PromQL (except for the `boundLabel` arg).
+
+See also [histogram_quantiles](#histogram_quantiles), [histogram_share](#histogram_share) and [quantile](#quantile).
 
 #### histogram_quantiles
 
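For instance, `histogram_quantile(0.9, sum(rate(request_duration_seconds_bucket[5m])) by (le))` is valid only if all the aggregated histograms share the same set of `le` boundaries; mixing differently bucketed histograms inside the `sum` may produce an invalid percentile.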
@@ -1232,7 +1347,9 @@ This allows implementing simple paging for `q` time series. See also [limitk](#l
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [exp](#exp) and [log2](#log2).
+This function is supported by PromQL.
+
+See also [exp](#exp) and [log2](#log2).
 
 #### log2
 
@@ -1240,7 +1357,9 @@ This function is supported by PromQL. See also [exp](#exp) and [log2](#log2).
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [log10](#log10) and [ln](#ln).
+This function is supported by PromQL.
+
+See also [log10](#log10) and [ln](#ln).
 
 #### log10
 
@@ -1248,7 +1367,9 @@ This function is supported by PromQL. See also [log10](#log10) and [ln](#ln).
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [log2](#log2) and [ln](#ln).
+This function is supported by PromQL.
+
+See also [log2](#log2) and [ln](#ln).
 
 #### minute
 
@@ -1287,7 +1408,9 @@ for every point of every time series returned by `q`.
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by PromQL. See also [deg](#deg).
+This function is supported by PromQL.
+
+See also [deg](#deg).
 
 #### prometheus_buckets
 
@@ -1415,7 +1538,9 @@ for points returned by `q`, e.g. it is equivalent to the following query: `(q -
 `round(q, nearest)` is a [transform function](#transform-functions), which rounds every point of every time series returned by `q` to the `nearest` multiple.
 If `nearest` is missing then the rounding is performed to the nearest integer.
 
-This function is supported by PromQL. See also [floor](#floor) and [ceil](#ceil).
+This function is supported by PromQL.
+
+See also [floor](#floor) and [ceil](#ceil).
 
 #### ru
 
@@ -1459,7 +1584,9 @@ This function is supported by PromQL.
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by MetricsQL. See also [cos](#cos).
+This function is supported by MetricsQL.
+
+See also [cos](#cos).
 
 #### sinh
 
@@ -1468,7 +1595,9 @@ for every point of every time series returned by `q`.
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by MetricsQL. See also [cosh](#cosh).
+This function is supported by MetricsQL.
+
+See also [cosh](#cosh).
 
 #### tan
 
@@ -1476,7 +1605,9 @@ This function is supported by MetricsQL. See also [cosh](#cosh).
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by MetricsQL. See also [atan](#atan).
+This function is supported by MetricsQL.
+
+See also [atan](#atan).
 
 #### tanh
 
@@ -1485,7 +1616,9 @@ for every point of every time series returned by `q`.
 
 Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
 
-This function is supported by MetricsQL. See also [atanh](#atanh).
+This function is supported by MetricsQL.
+
+See also [atanh](#atanh).
 
 #### smooth_exponential
 
@@ -1496,13 +1629,17 @@ by `q` using [exponential moving average](https://en.wikipedia.org/wiki/Moving_a
 
 `sort(q)` is a [transform function](#transform-functions), which sorts series in ascending order by the last point in every time series returned by `q`.
 
-This function is supported by PromQL. See also [sort_desc](#sort_desc) and [sort_by_label](#sort_by_label).
+This function is supported by PromQL.
+
+See also [sort_desc](#sort_desc) and [sort_by_label](#sort_by_label).
 
 #### sort_desc
 
 `sort_desc(q)` is a [transform function](#transform-functions), which sorts series in descending order by the last point in every time series returned by `q`.
 
-This function is supported by PromQL. See also [sort](#sort) and [sort_by_label](#sort_by_label_desc).
+This function is supported by PromQL.
+
+See also [sort](#sort) and [sort_by_label_desc](#sort_by_label_desc).
 
 #### sqrt
 
@@ -1531,7 +1668,9 @@ See also [start](#start) and [end](#end).
 
 `time()` is a [transform function](#transform-functions), which returns unix timestamp for every returned point.
 
-This function is supported by PromQL. See also [now](#now), [start](#start) and [end](#end).
+This function is supported by PromQL.
+
+See also [timestamp](#timestamp), [now](#now), [start](#start) and [end](#end).
 
 #### timezone_offset
 
@@ -1580,7 +1719,7 @@ Additional details:
 
 * If label manipulation function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
   then the [default_rollup()](#default_rollup) function is automatically applied before performing the label transformation.
-  For example, `alias(temperature, "foo")` is implicitly transformed to `alias(default_rollup(temperature[1i]), "foo")`.
+  For example, `alias(temperature, "foo")` is implicitly transformed to `alias(default_rollup(temperature), "foo")`.
 
 See also [implicit query conversions](#implicit-query-conversions).
 
@@ -1757,7 +1896,7 @@ Additional details:
   Multiple labels can be put in `by` and `without` modifiers.
 * If the aggregate function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
   then the [default_rollup()](#default_rollup) function is automatically applied before calculating the aggregate.
-  For example, `count(up)` is implicitly transformed to `count(default_rollup(up[1i]))`.
+  For example, `count(up)` is implicitly transformed to `count(default_rollup(up))`.
 * Aggregate functions accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point
   across time series returned by `q1`, `q2` and `q3`.
 * Aggregate functions support optional `limit N` suffix, which can be used for limiting the number of output groups.
@@ -1785,7 +1924,9 @@ This function is supported by PromQL.
 `bottomk(k, q)` is [aggregate function](#aggregate-functions), which returns up to `k` points with the smallest values across all the time series returned by `q`.
 The aggregate is calculated individually per each group of points with the same timestamp.
 
-This function is supported by PromQL. See also [topk](#topk).
+This function is supported by PromQL.
+
+See also [topk](#topk), [bottomk_min](#bottomk_min) and [bottomk_last](#bottomk_last).
 
 #### bottomk_avg
 
@@ -1847,10 +1988,14 @@ The aggregate is calculated individually per each group of points with the same
 
 This function is supported by PromQL.
 
+See also [count_values_over_time](#count_values_over_time) and [label_match](#label_match).
+
 #### distinct
 
 `distinct(q)` is [aggregate function](#aggregate-functions), which calculates the number of unique values per each group of points with the same timestamp.
 
+See also [distinct_over_time](#distinct_over_time).
+
 #### geomean
 
 `geomean(q)` is [aggregate function](#aggregate-functions), which calculates geometric mean per each group of points with the same timestamp.
@@ -1942,7 +2087,9 @@ See also [outliers_iqr](#outliers_iqr) and [outliers_mad](#outliers_mad).
 for all the time series returned by `q`. `phi` must be in the range `[0...1]`.
 The aggregate is calculated individually per each group of points with the same timestamp.
 
-This function is supported by PromQL. See also [quantiles](#quantiles) and [histogram_quantile](#histogram_quantile).
+This function is supported by PromQL.
+
+See also [quantiles](#quantiles) and [histogram_quantile](#histogram_quantile).
 
 #### quantiles
 
@@ -2001,7 +2148,9 @@ for all the time series returned by `q`. The aggregate is calculated individuall
 `topk(k, q)` is [aggregate function](#aggregate-functions), which returns up to `k` points with the biggest values across all the time series returned by `q`.
 The aggregate is calculated individually per each group of points with the same timestamp.
 
-This function is supported by PromQL. See also [bottomk](#bottomk).
+This function is supported by PromQL.
+
+See also [bottomk](#bottomk), [topk_max](#topk_max) and [topk_last](#topk_last).
 
 #### topk_avg
 
@ -2061,7 +2210,7 @@ See also [zscore_over_time](#zscore_over_time), [range_trim_zscore](#range_trim_
MetricsQL supports and extends PromQL subqueries. See [this article](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3) for details.
Any [rollup function](#rollup-functions) applied to something other than a [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) forms a subquery.
Nested rollup functions can be implicit thanks to the [implicit query conversions](#implicit-query-conversions).
For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m[1i]))[1i:1i])`, so it becomes a subquery,
For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m))[1i:1i])`, so it becomes a subquery,
since it contains [default_rollup](#default_rollup) nested into [delta](#delta).
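Subqueries can also be written explicitly. A sketch (`http_requests_total` is an assumed metric name): the inner `rate(...[5m])` is evaluated at 1-minute steps over the last hour, and `max_over_time` picks the maximum of those rates:

```metricsql
max_over_time(rate(http_requests_total[5m])[1h:1m])
```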
VictoriaMetrics performs subqueries in the following way:
@ -2076,21 +2225,23 @@ VictoriaMetrics performs subqueries in the following way:
VictoriaMetrics performs the following implicit conversions for incoming queries before starting the calculations
(a worked example follows the list):

* If lookbehind window in square brackets is missing inside [rollup function](#rollup-functions),
  then `[1i]` is automatically added there. The `[1i]` means one `step` value, which is passed
  to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
  It is also known as `$__interval` in Grafana. For example, `rate(http_requests_count)` is automatically transformed to `rate(http_requests_count[1i])`.
* If lookbehind window in square brackets is missing inside [rollup function](#rollup-functions), then it is automatically set to the following value:
  - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
    for all the [rollup functions](#rollup-functions) except for [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
    For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
  - To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples),
    for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
* All the [series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering),
  which aren't wrapped into [rollup functions](#rollup-functions), are automatically wrapped into [default_rollup](#default_rollup) function.
  Examples:
  * `foo` is transformed to `default_rollup(foo[1i])`
  * `foo + bar` is transformed to `default_rollup(foo[1i]) + default_rollup(bar[1i])`
  * `count(up)` is transformed to `count(default_rollup(up[1i]))`, because [count](#count) isn't a [rollup function](#rollup-functions) -
  * `foo` is transformed to `default_rollup(foo)`
  * `foo + bar` is transformed to `default_rollup(foo) + default_rollup(bar)`
  * `count(up)` is transformed to `count(default_rollup(up))`, because [count](#count) isn't a [rollup function](#rollup-functions) -
    it is [aggregate function](#aggregate-functions)
  * `abs(temperature)` is transformed to `abs(default_rollup(temperature[1i]))`, because [abs](#abs) isn't a [rollup function](#rollup-functions) -
  * `abs(temperature)` is transformed to `abs(default_rollup(temperature))`, because [abs](#abs) isn't a [rollup function](#rollup-functions) -
    it is [transform function](#transform-functions)
* If `step` in square brackets is missing inside [subquery](#subqueries), then `1i` step is automatically added there.
  For example, `avg_over_time(rate(http_requests_total[5m])[1h])` is automatically converted to `avg_over_time(rate(http_requests_total[5m])[1h:1i])`.
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
  is passed to [rollup function](#rollup-functions), then a [subquery](#subqueries) with `1i` lookbehind window and `1i` step is automatically formed.
  For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up[1i])))[1i:1i])`.
  For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up)))[1i:1i])`.
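As a worked example of these conversions, assume a [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query) runs with `step=5m` over an assumed `http_requests_total` metric:

```metricsql
# submitted query
max_over_time(http_requests_total)

# after implicit conversions: the missing lookbehind window becomes [1i] = step = 5m
max_over_time(http_requests_total[5m])
```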
@ -41,6 +41,7 @@ export interface LineChartProps {
  layoutSize: ElementSize;
  height?: number;
  anomalyView?: boolean;
  spanGaps?: boolean;
}

const LineChart: FC<LineChartProps> = ({
@ -53,7 +54,8 @@ const LineChart: FC<LineChartProps> = ({
  setPeriod,
  layoutSize,
  height,
  anomalyView
  anomalyView,
  spanGaps = false
}) => {
  const { isDarkTheme } = useAppState();
@ -106,10 +108,10 @@ const LineChart: FC<LineChartProps> = ({
  useEffect(() => {
    if (!uPlotInst) return;
    delSeries(uPlotInst);
    addSeries(uPlotInst, series);
    addSeries(uPlotInst, series, spanGaps);
    setBand(uPlotInst, series);
    uPlotInst.redraw();
  }, [series]);
  }, [series, spanGaps]);

  useEffect(() => {
    if (!uPlotInst) return;
@ -7,16 +7,21 @@ import Popper from "../../Main/Popper/Popper";
import "./style.scss";
import Tooltip from "../../Main/Tooltip/Tooltip";
import useBoolean from "../../../hooks/useBoolean";
import LinesConfigurator from "./LinesConfigurator/LinesConfigurator";

const title = "Axes settings";
const title = "Graph settings";

interface GraphSettingsProps {
  yaxis: YaxisState,
  setYaxisLimits: (limits: AxisRange) => void,
  toggleEnableLimits: () => void
  toggleEnableLimits: () => void,
  spanGaps: {
    value: boolean,
    onChange: (value: boolean) => void,
  },
}

const GraphSettings: FC<GraphSettingsProps> = ({ yaxis, setYaxisLimits, toggleEnableLimits }) => {
const GraphSettings: FC<GraphSettingsProps> = ({ yaxis, setYaxisLimits, toggleEnableLimits, spanGaps }) => {
  const popperRef = useRef<HTMLDivElement>(null);
  const buttonRef = useRef<HTMLDivElement>(null);
@ -55,6 +60,10 @@ const GraphSettings: FC<GraphSettingsProps> = ({ yaxis, setYaxisLimits, toggleEn
          setYaxisLimits={setYaxisLimits}
          toggleEnableLimits={toggleEnableLimits}
        />
        <LinesConfigurator
          spanGaps={spanGaps.value}
          onChange={spanGaps.onChange}
        />
      </div>
    </div>
  </Popper>
@ -0,0 +1,23 @@
import React, { FC } from "preact/compat";
import Switch from "../../../Main/Switch/Switch";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";

interface Props {
  spanGaps: boolean,
  onChange: (value: boolean) => void,
}

const LinesConfigurator: FC<Props> = ({ spanGaps, onChange }) => {
  const { isMobile } = useDeviceDetect();

  return <div>
    <Switch
      value={spanGaps}
      onChange={onChange}
      label="Connect null values"
      fullWidth={isMobile}
    />
  </div>;
};

export default LinesConfigurator;
@ -8,7 +8,7 @@
  &__body {
    display: grid;
    gap: $padding-small;
    gap: $padding-large;
    padding: 0 $padding-global;
  }
}
@ -7,7 +7,6 @@ import useDeviceDetect from "../../../hooks/useDeviceDetect";
import Button from "../Button/Button";
import { CloseIcon } from "../Icons";
import { useLocation, useNavigate } from "react-router-dom";
import useBoolean from "../../../hooks/useBoolean";
import useEventListener from "../../../hooks/useEventListener";
import { useCallback } from "preact/compat";
@ -43,12 +42,7 @@ const Popper: FC<PopperProps> = ({
  const navigate = useNavigate();
  const location = useLocation();
  const [popperSize, setPopperSize] = useState({ width: 0, height: 0 });

  const {
    value: isOpen,
    setValue: setIsOpen,
    setFalse: handleClose,
  } = useBoolean(false);
  const [isOpen, setIsOpen] = useState(false);

  const popperRef = useRef<HTMLDivElement>(null);
@ -113,6 +107,7 @@ const Popper: FC<PopperProps> = ({
    if (fullWidth) position.width = `${buttonPos.width}px`;
    if (position.top < 0) position.top = 20;
    if (position.left < 0) position.left = 20;

    return position;
  },[buttonRef, placement, isOpen, children, fullWidth]);
@ -122,7 +117,15 @@ const Popper: FC<PopperProps> = ({
    onClose();
  };

  if (clickOutside) useClickOutside(popperRef, () => setIsOpen(false), buttonRef);
  const handleClose = () => {
    setIsOpen(false);
    onClose();
  };

  const handleClickOutside = () => {
    if (!clickOutside) return;
    handleClose();
  };

  useEffect(() => {
    if (!popperRef.current || !isOpen || (isMobile && !disabledFullScreen)) return;
@ -142,6 +145,7 @@ const Popper: FC<PopperProps> = ({
  useEventListener("scroll", handleClose);
  useEventListener("popstate", handlePopstate);
  useClickOutside(popperRef, handleClickOutside, buttonRef);

  return (
    <>
@ -3,11 +3,14 @@
$color-base-nested-nav: $color-tropical-blue;
$color-base-nested-nav-dark: $color-background-body;
$width-line: 2px;
$left-position: calc(-1 * $padding-small);
$left-block-offset: $padding-large;
$left-line-offset: calc($width-line * 2);
$left-position: calc(-1 * ($left-block-offset - $left-line-offset));
$gap-section: calc($padding-large * 2);

.vm-nested-nav {
  position: relative;
  margin-left: $padding-small;
  margin-left: $left-block-offset;
  border-radius: $border-radius-small;

  &_dark &-header {
@ -51,7 +54,7 @@ $left-position: calc(-1 * $padding-small);
    position: absolute;
    top: calc(50% - 1px);
    height: $width-line;
    width: $padding-small;
    width: calc($left-block-offset - $left-line-offset);
    background-color: $color-base-nested-nav;
    left: $left-position;
  }
@ -125,7 +128,7 @@ $left-position: calc(-1 * $padding-small);
    position: absolute;
    top: 0;
    left: $left-position;
    height: 100%;
    height: calc(100% + $gap-section);
    width: $width-line;
    background-color: $color-base-nested-nav;
  }
@ -136,4 +139,8 @@ $left-position: calc(-1 * $padding-small);
      background-color: $color-base-nested-nav-dark;
    }
  }

  &__childrens > .vm-nested-nav:last-child {
    margin-bottom: $gap-section;
  }
}
@ -41,6 +41,7 @@ export interface GraphViewProps {
  height?: number;
  isHistogram?: boolean;
  anomalyView?: boolean;
  spanGaps?: boolean;
}

const GraphView: FC<GraphViewProps> = ({
@ -58,6 +59,7 @@ const GraphView: FC<GraphViewProps> = ({
  height,
  isHistogram,
  anomalyView,
  spanGaps
}) => {
  const { isMobile } = useDeviceDetect();
  const { timezone } = useTimeState();
@ -196,6 +198,7 @@ const GraphView: FC<GraphViewProps> = ({
          layoutSize={containerSize}
          height={height}
          anomalyView={anomalyView}
          spanGaps={spanGaps}
        />
      )}
      {isHistogram && (
@ -2,7 +2,7 @@ import React, { FC } from "preact/compat";
import classNames from "classnames";
import GlobalSettings from "../../components/Configurators/GlobalSettings/GlobalSettings";
import { ControlsProps } from "../Header/HeaderControls/HeaderControls";
import { TimeSelector } from "../../components/Configurators/TimeRangeSettings/TimeSelector/TimeSelector";

const ControlsLogsLayout: FC<ControlsProps> = ({ isMobile }) => {
@ -13,6 +13,7 @@ const ControlsLogsLayout: FC<ControlsProps> = ({ isMobile }) => {
        "vm-header-controls_mobile": isMobile,
      })}
    >
      <TimeSelector/>
      <GlobalSettings/>
    </div>
  );
@ -20,7 +20,7 @@ type Props = {
const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef, anomalyView }) => {
  const { isMobile } = useDeviceDetect();

  const { customStep, yaxis } = useGraphState();
  const { customStep, yaxis, spanGaps } = useGraphState();
  const { period } = useTimeState();
  const { query } = useQueryState();
@ -35,6 +35,10 @@ const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef, anomalyView
    graphDispatch({ type: "TOGGLE_ENABLE_YAXIS_LIMITS" });
  };

  const setSpanGaps = (value: boolean) => {
    graphDispatch({ type: "SET_SPAN_GAPS", payload: value });
  };

  const setPeriod = ({ from, to }: {from: Date, to: Date}) => {
    timeDispatch({ type: "SET_PERIOD", payload: { from, to } });
  };
@ -46,6 +50,7 @@ const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef, anomalyView
        yaxis={yaxis}
        setYaxisLimits={setYaxisLimits}
        toggleEnableLimits={toggleEnableLimits}
        spanGaps={{ value: spanGaps, onChange: setSpanGaps }}
      />
    </div>
  );
@ -64,6 +69,7 @@ const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef, anomalyView
        height={isMobile ? window.innerHeight * 0.5 : 500}
        isHistogram={isHistogram}
        anomalyView={anomalyView}
        spanGaps={spanGaps}
      />
    </>
  );
Some files were not shown because too many files have changed in this diff.