Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
v1.101.0

Merge tag 'v1.101.0' into pmm-6401-read-prometheus-data-files

Signed-off-by: hagen1778 <roman@victoriametrics.com>

# gpg: Signature made Thu 25 Apr 16:52:11 2024 CEST
# gpg: using RSA key 9212FA37DBE64938E0D154953BF75F3741CA9640
# gpg: Good signature from "hagen1778 (VM GPG key) <roman@victoriametrics.com>" [ultimate]
# Conflicts:
#   go.mod
Commit 381d4494e9

545 changed files with 15249 additions and 21448 deletions

.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 4 changes
@@ -8,7 +8,7 @@ body:
Before filling a bug report it would be great to [upgrade](https://docs.victoriametrics.com/#how-to-upgrade)
to [the latest available release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
and verify whether the bug is reproducible there.
- It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html) first.
+ It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/troubleshooting/) first.
- type: textarea
id: describe-the-bug
attributes:

@@ -65,7 +65,7 @@ body:
See how to setup monitoring here:
* [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/#monitoring)
- * [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring)
+ * [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/#monitoring)
validations:
required: false
- type: textarea
.github/ISSUE_TEMPLATE/question.yml (vendored): 8 changes

@@ -24,9 +24,9 @@ body:
label: Troubleshooting docs
description: I am familiar with the following troubleshooting docs
options:
- - label: General - https://docs.victoriametrics.com/Troubleshooting.html
+ - label: General - https://docs.victoriametrics.com/troubleshooting/
required: false
- - label: vmagent - https://docs.victoriametrics.com/vmagent.html#troubleshooting
+ - label: vmagent - https://docs.victoriametrics.com/vmagent/#troubleshooting
required: false
+ - label: vmalert - https://docs.victoriametrics.com/vmalert/#troubleshooting
required: false
- - label: vmalert - https://docs.victoriametrics.com/vmalert.html#troubleshooting
required: false
@@ -1,35 +0,0 @@
### Describe Your Changes

Please provide a brief description of the changes you made. Be as specific as possible to help others understand the purpose and impact of your modifications.

### Checklist

The following checks are mandatory:

- [ ] I have read the [Contributing Guidelines](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/CONTRIBUTING.md)
- [ ] All commits are signed and include `Signed-off-by` line. Use `git commit -s` to include `Signed-off-by` in your commits. See this [doc](https://git-scm.com/book/en/v2/Git-Tools-Signing-Your-Work) about how to sign your commits.
- [ ] Tests are passing locally. Use `make test` to run all tests locally.
- [ ] Linting is passing locally. Use `make check-all` to run all linters locally.

Further checks are optional for External Contributions:

- [ ] Include a link to the GitHub issue in the commit message, if the issue exists.
- [ ] Mention the change in the [Changelog](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md). Explain what has changed and why. If there is a related issue or documentation change - link them as well.

Tips for writing a good changelog message:

* Write a human-readable changelog message that describes the problem and solution.
* Include a link to the issue or pull request in your changelog message.
* Use specific language identifying the fix, such as an error message, metric name, or flag name.
* Provide a link to the relevant documentation for any new features you add or modify.

- [ ] After your pull request is merged, please add a message to the issue with instructions for how to test the fix or try the feature you added. Here is an [example](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4048#issuecomment-1546453726)
- [ ] Do not close the original issue before the change is released. Please note, in some cases GitHub can automatically close the issue once the PR is merged. Re-open the issue in such a case.
- [ ] If the change somehow affects public interfaces (a new flag was added or updated, or some behavior has changed) - add the corresponding change to the documentation.

Examples of good changelog messages:

1. FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for [VictoriaMetrics remote write protocol](https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol) when [sending / receiving data to / from Kafka](https://docs.victoriametrics.com/vmagent.html#kafka-integration). This protocol allows saving egress network bandwidth costs when sending data from `vmagent` to `Kafka` located in another datacenter or availability zone. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1225).

2. BUGFIX: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html): suppress `series after dedup` error message in logs when `-remoteWrite.streamAggr.dedupInterval` command-line flag is set at [vmagent](https://docs.victoriametrics.com/vmagent.html) or when `-streamAggr.dedupInterval` command-line flag is set at [single-node VictoriaMetrics](https://docs.victoriametrics.com/).
.github/pull_request_template.md (vendored, new file): 9 changes

@@ -0,0 +1,9 @@
### Describe Your Changes

Please provide a brief description of the changes you made. Be as specific as possible to help others understand the purpose and impact of your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres to [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/contributing/).
.github/workflows/sync-docs.yml (vendored): 15 changes

@@ -6,9 +6,6 @@ on:
paths:
- 'docs/**'
workflow_dispatch: {}
env:
PAGEFIND_VERSION: "1.0.4"
HUGO_VERSION: "latest"
permissions:
contents: read # This is required for actions/checkout and to commit back image update
deployments: write

@@ -27,16 +24,6 @@ jobs:
repository: VictoriaMetrics/vmdocs
token: ${{ secrets.VM_BOT_GH_TOKEN }}
path: docs
- uses: peaceiris/actions-hugo@v2
  with:
    hugo-version: ${{env.HUGO_VERSION}}
    extended: true
- name: Install PageFind #install the static search engine for index build
  uses: supplypike/setup-bin@v3
  with:
    uri: "https://github.com/CloudCannon/pagefind/releases/download/v${{env.PAGEFIND_VERSION}}/pagefind-v${{env.PAGEFIND_VERSION}}-x86_64-unknown-linux-musl.tar.gz"
    name: "pagefind"
    version: ${{env.PAGEFIND_VERSION}}
- name: Import GPG key
  uses: crazy-max/ghaction-import-gpg@v5
  with:

@@ -51,13 +38,11 @@ jobs:
calculatedSha=$(git rev-parse --short ${{ github.sha }})
echo "short_sha=$calculatedSha" >> $GITHUB_OUTPUT
working-directory: main

- name: update code and commit
  run: |
    rm -rf content
    cp -r ../main/docs content
    make clean-after-copy
    make build-search-index
    git config --global user.name "${{ steps.import-gpg.outputs.email }}"
    git config --global user.email "${{ steps.import-gpg.outputs.email }}"
    git add .
@@ -1,21 +1 @@
- If you like VictoriaMetrics and want to contribute, then we need the following:
-
- - Filing issues and feature requests [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
- - Spreading the word about VictoriaMetrics: conference talks, articles, comments, experience sharing with colleagues.
- - Updating documentation.
-
- We are open to third-party pull requests provided they follow [KISS design principle](https://en.wikipedia.org/wiki/KISS_principle):
-
- - Prefer simple code and architecture.
- - Avoid complex abstractions.
- - Avoid magic code and fancy algorithms.
- - Avoid [big external dependencies](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d).
- - Minimize the number of moving parts in the distributed system.
- - Avoid automated decisions, which may hurt cluster availability, consistency or performance.
-
- Adhering to the `KISS` principle simplifies the resulting code and architecture, so it can be reviewed, understood and verified by many people.
-
- Before sending a pull request please check the following:
- - [ ] All commits are signed and include `Signed-off-by` line. Use `git commit -s` to include `Signed-off-by` in your commits. See this [doc](https://git-scm.com/book/en/v2/Git-Tools-Signing-Your-Work) about how to sign your commits.
- - [ ] Tests are passing locally. Use `make test` to run all tests locally.
- - [ ] Linting is passing locally. Use `make check-all` to run all linters locally.
+ The document has been moved [here](https://docs.victoriametrics.com/contributing/).
@@ -6,7 +6,7 @@ The following versions of VictoriaMetrics receive regular security fixes:

| Version | Supported |
|---------|--------------------|
- | [latest release](https://docs.victoriametrics.com/CHANGELOG.html) | :white_check_mark: |
+ | [latest release](https://docs.victoriametrics.com/changelog/) | :white_check_mark: |
| v1.97.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
| v1.93.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
| other releases | :x: |
@@ -1,7 +1,7 @@
ARG base_image
FROM $base_image

- EXPOSE 8428
+ EXPOSE 9428

ENTRYPOINT ["/victoria-logs-prod"]
ARG src_binary

@@ -6,7 +6,7 @@ RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates

FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
- EXPOSE 8428
+ EXPOSE 9428
ENTRYPOINT ["/victoria-logs-prod"]
ARG TARGETARCH
COPY victoria-logs-linux-${TARGETARCH}-prod ./victoria-logs-prod
@@ -1,13 +1,13 @@
{
  "files": {
    "main.css": "./static/css/main.bc07cc78.css",
-   "main.js": "./static/js/main.034044a7.js",
+   "main.js": "./static/js/main.8e7757ef.js",
    "static/js/685.bebe1265.chunk.js": "./static/js/685.bebe1265.chunk.js",
-   "static/media/MetricsQL.md": "./static/media/MetricsQL.10add6e7bdf0f1d98cf7.md",
+   "static/media/MetricsQL.md": "./static/media/MetricsQL.da86c2db4f0b05e286b0.md",
    "index.html": "./index.html"
  },
  "entrypoints": [
    "static/css/main.bc07cc78.css",
-   "static/js/main.034044a7.js"
+   "static/js/main.8e7757ef.js"
  ]
}
@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.034044a7.js"></script><link href="./static/css/main.bc07cc78.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.8e7757ef.js"></script><link href="./static/css/main.bc07cc78.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
File diff suppressed because one or more lines are too long.

app/vlselect/vmui/static/js/main.8e7757ef.js (new file, 2 changes): file diff suppressed because one or more lines are too long.

File diff suppressed because it is too large.
@@ -22,7 +22,7 @@ However, there are some [intentional differences](https://medium.com/@romanhavro
[Standalone MetricsQL package](https://godoc.org/github.com/VictoriaMetrics/metricsql) can be used for parsing MetricsQL in external apps.

If you are unfamiliar with PromQL, then it is suggested reading [this tutorial for beginners](https://medium.com/@valyala/promql-tutorial-for-beginners-9ab455142085)
- and introduction into [basic querying via MetricsQL](https://docs.victoriametrics.com/keyConcepts.html#metricsql).
+ and introduction into [basic querying via MetricsQL](https://docs.victoriametrics.com/keyconcepts/#metricsql).

The following functionality is implemented differently in MetricsQL compared to PromQL. This improves user experience:

@@ -70,15 +70,15 @@ The list of MetricsQL features on top of PromQL:
VictoriaMetrics can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
* Lookbehind window in square brackets for [rollup functions](#rollup-functions) may be omitted. VictoriaMetrics automatically selects the lookbehind window
- depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)
+ depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query)
and the real interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) (aka `scrape_interval`).
For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`.
It is roughly equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
The difference is documented in [rate() docs](#rate).
* Numeric values can contain `_` delimiters for better readability. For example, `1_234_567_890` can be used in queries instead of `1234567890`.
- * [Series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
+ * [Series selectors](https://docs.victoriametrics.com/keyconcepts/#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
selects series with `{env="prod",job="a"}` or `{env="dev",job="b"}` labels.
- See [these docs](https://docs.victoriametrics.com/keyConcepts.html#filtering-by-multiple-or-filters) for details.
+ See [these docs](https://docs.victoriametrics.com/keyconcepts/#filtering-by-multiple-or-filters) for details.
* Support for `group_left(*)` and `group_right(*)` for copying all the labels from time series on the `one` side
of [many-to-one operations](https://prometheus.io/docs/prometheus/latest/querying/operators/#many-to-one-and-one-to-many-vector-matches).
The copied label names may clash with the existing label names, so MetricsQL provides an ability to add prefix to the copied metric names
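To make these features concrete, here is a short MetricsQL sketch (the metric and label names are illustrative examples, not part of this diff):

```metricsql
# Lookbehind window omitted: VictoriaMetrics picks max(step, scrape_interval) automatically.
rate(node_network_receive_bytes_total)

# Multiple `or` filters inside a single series selector.
sum(rate(http_requests_total{env="prod",job="a" or env="dev",job="b"}))
```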
@@ -153,26 +153,26 @@ MetricsQL provides the following functions:
### Rollup functions

**Rollup functions** (aka range functions or window functions) calculate rollups over **raw samples**
- on the given lookbehind window for the [selected time series](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ on the given lookbehind window for the [selected time series](https://docs.victoriametrics.com/keyconcepts/#filtering).
For example, `avg_over_time(temperature[24h])` calculates the average temperature over raw samples for the last 24 hours.

Additional details:

* If rollup functions are used for building graphs in Grafana, then the rollup is calculated independently per each point on the graph.
For example, every point for `avg_over_time(temperature[24h])` graph shows the average temperature for the last 24 hours ending at this point.
- The interval between points is set as `step` query arg passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
- * If the given [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) returns multiple time series,
+ The interval between points is set as `step` query arg passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
+ * If the given [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) returns multiple time series,
then rollups are calculated individually per each returned series.
* If lookbehind window in square brackets is missing, then it is automatically set to the following value:
- - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
+ - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
for all the [rollup functions](#rollup-functions) except of [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
- To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
- * Every [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) in MetricsQL must be wrapped into a rollup function.
+ * Every [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) in MetricsQL must be wrapped into a rollup function.
Otherwise, it is automatically wrapped into [default_rollup](#default_rollup). For example, `foo{bar="baz"}`
is automatically converted to `default_rollup(foo{bar="baz"})` before performing the calculations.
- * If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) is passed to rollup function,
+ * If something other than [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) is passed to rollup function,
then the inner arg is automatically converted to a [subquery](#subqueries).
* All the rollup functions accept optional `keep_metric_names` modifier. If it is set, then the function keeps metric names in results.
See [these docs](#keep_metric_names).
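A brief sketch of the defaults described above (metric names are illustrative):

```metricsql
# A bare selector is implicitly wrapped into default_rollup():
foo{bar="baz"}             # evaluated as default_rollup(foo{bar="baz"})

# A missing lookbehind window defaults to `step` (aka 1i) for most rollup functions:
avg_over_time(temperature) # evaluated as avg_over_time(temperature[1i])
```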
@@ -195,7 +195,7 @@ See also [present_over_time](#present_over_time).
`aggr_over_time(("rollup_func1", "rollup_func2", ...), series_selector[d])` is a [rollup function](#rollup-functions),
which calculates all the listed `rollup_func*` for raw samples on the given lookbehind window `d`.
The calculations are performed individually per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

`rollup_func*` can contain any rollup function. For instance, `aggr_over_time(("min_over_time", "max_over_time", "rate"), m[d])`
would calculate [min_over_time](#min_over_time), [max_over_time](#max_over_time) and [rate](#rate) for `m[d]`.
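The `m[d]` example above, spelled out with a concrete (illustrative) metric and window:

```metricsql
# min, max and per-second rate over the last 5 minutes, computed in one pass:
aggr_over_time(("min_over_time", "max_over_time", "rate"), node_cpu_seconds_total[5m])
```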
@@ -204,7 +204,7 @@ would calculate [min_over_time](#min_over_time), [max_over_time](#max_over_time)

`ascent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates
ascent of raw sample values on the given lookbehind window `d`. The calculations are performed individually
- per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is useful for tracking height gains in GPS tracking. Metric names are stripped from the resulting rollups.

@@ -216,7 +216,7 @@ See also [descent_over_time](#descent_over_time).

`avg_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average value
over raw samples on the given lookbehind window `d` per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is supported by PromQL.

@@ -226,7 +226,7 @@ See also [median_over_time](#median_over_time).

`changes(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of times
the raw samples changed on the given lookbehind window `d` per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Unlike `changes()` in Prometheus it takes into account the change from the last sample before the given lookbehind window `d`.
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.

@@ -241,7 +241,7 @@ See also [changes_prometheus](#changes_prometheus).

`changes_prometheus(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of times
the raw samples changed on the given lookbehind window `d` per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

It doesn't take into account the change from the last sample before the given lookbehind window `d` in the same way as Prometheus does.
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.

@@ -256,7 +256,7 @@ See also [changes](#changes).

`count_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
on the given lookbehind window `d`, which are equal to `eq`. It is calculated independently per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
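A short sketch of `count_eq_over_time`, using the standard `up` scrape-health metric as an illustration and the `keep_metric_names` modifier referenced above:

```metricsql
# Number of failed scrapes per target during the last hour
# (samples of `up` equal to 0), keeping the metric name in the result:
count_eq_over_time(up[1h], 0) keep_metric_names
```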
@@ -266,7 +266,7 @@ See also [count_over_time](#count_over_time), [share_eq_over_time](#share_eq_ove

`count_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
on the given lookbehind window `d`, which are bigger than `gt`. It is calculated independently per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -276,7 +276,7 @@ See also [count_over_time](#count_over_time) and [share_gt_over_time](#share_gt_

`count_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
on the given lookbehind window `d`, which don't exceed `le`. It is calculated independently per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -286,7 +286,7 @@ See also [count_over_time](#count_over_time) and [share_le_over_time](#share_le_

`count_ne_over_time(series_selector[d], ne)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
on the given lookbehind window `d`, which aren't equal to `ne`. It is calculated independently per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -295,7 +295,7 @@ See also [count_over_time](#count_over_time).
#### count_over_time

`count_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw samples
- on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -307,7 +307,7 @@ See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_

`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of raw samples
with the same value over the given lookbehind window and stores the counts in a time series with an additional `label`, which contains each initial value.
- The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -316,7 +316,7 @@ See also [count_eq_over_time](#count_eq_over_time), [count_values](#count_values
#### decreases_over_time

`decreases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw sample value decreases
- over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -325,7 +325,7 @@ See also [increases_over_time](#increases_over_time).
#### default_rollup

`default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
- per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
@@ -336,7 +336,7 @@ This allows avoiding unexpected gaps on the graph when `step` is smaller than th

`delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between
the last sample before the given lookbehind window `d` and the last sample at the given lookbehind window `d`
- per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

The behaviour of `delta()` function in MetricsQL is slightly different to the behaviour of `delta()` function in Prometheus.
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.

@@ -351,7 +351,7 @@ See also [increase](#increase) and [delta_prometheus](#delta_prometheus).

`delta_prometheus(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between
the first and the last samples at the given lookbehind window `d` per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

The behaviour of `delta_prometheus()` is close to the behaviour of `delta()` function in Prometheus.
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.

@@ -363,7 +363,7 @@ See also [delta](#delta).
#### deriv

`deriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivative over the given lookbehind window `d`
- per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
The derivative is calculated using linear regression.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -376,7 +376,7 @@ See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).

`deriv_fast(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivative
using the first and the last raw samples on the given lookbehind window `d` per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -386,7 +386,7 @@ See also [deriv](#deriv) and [ideriv](#ideriv).

`descent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates descent of raw sample values
on the given lookbehind window `d`. The calculations are performed individually per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is useful for tracking height loss in GPS tracking.

@@ -397,7 +397,7 @@ See also [ascent_over_time](#ascent_over_time).
#### distinct_over_time

`distinct_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the number of distinct raw sample values
- on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -406,7 +406,7 @@ See also [count_values_over_time](#count_values_over_time).
#### duration_over_time

`duration_over_time(series_selector[d], max_interval)` is a [rollup function](#rollup-functions), which returns the duration in seconds
- when time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) were present
+ when time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering) were present
over the given lookbehind window `d`. It is expected that intervals between adjacent samples per each series don't exceed the `max_interval`.
Otherwise, such intervals are considered as gaps and aren't counted.
@@ -417,7 +417,7 @@ See also [lifetime](#lifetime) and [lag](#lag).
#### first_over_time

`first_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the first raw sample value
- on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

See also [last_over_time](#last_over_time) and [tfirst_over_time](#tfirst_over_time).

@@ -425,7 +425,7 @@ See also [last_over_time](#last_over_time) and [tfirst_over_time](#tfirst_over_t

`geomean_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [geometric mean](https://en.wikipedia.org/wiki/Geometric_mean)
over raw samples on the given lookbehind window `d` per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -433,9 +433,9 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k

`histogram_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates
[VictoriaMetrics histogram](https://godoc.org/github.com/VictoriaMetrics/metrics#Histogram) over raw samples on the given lookbehind window `d`.
- It is calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ It is calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
The resulting histograms are useful to pass to [histogram_quantile](#histogram_quantile) for calculating quantiles
- over multiple [gauges](https://docs.victoriametrics.com/keyConcepts.html#gauge).
+ over multiple [gauges](https://docs.victoriametrics.com/keyconcepts/#gauge).
For example, the following query calculates median temperature by country over the last 24 hours:

`histogram_quantile(0.5, sum(histogram_over_time(temperature[24h])) by (vmrange,country))`.

@@ -459,8 +459,8 @@ See also [hoeffding_bound_lower](#hoeffding_bound_lower).
`holt_winters(series_selector[d], sf, tf)` is a [rollup function](#rollup-functions), which calculates Holt-Winters value
(aka [double exponential smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing#Double_exponential_smoothing)) for raw samples
over the given lookbehind window `d` using the given smoothing factor `sf` and the given trend factor `tf`.
- Both `sf` and `tf` must be in the range `[0...1]`. It is expected that the [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
- returns time series of [gauge type](https://docs.victoriametrics.com/keyConcepts.html#gauge).
+ Both `sf` and `tf` must be in the range `[0...1]`. It is expected that the [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering)
+ returns time series of [gauge type](https://docs.victoriametrics.com/keyconcepts/#gauge).

This function is supported by PromQL.
@@ -469,7 +469,7 @@ See also [range_linear_regression](#range_linear_regression).
#### idelta

`idelta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between the last two raw samples
- on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -481,7 +481,7 @@ See also [delta](#delta).

`ideriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the per-second derivative based on the last two raw samples
over the given lookbehind window `d`. The derivative is calculated independently per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -490,8 +490,8 @@ See also [deriv](#deriv).
#### increase

`increase(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the increase over the given lookbehind window `d`
- per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
- It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
+ per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
+ It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).

Unlike Prometheus, it takes into account the last sample before the given lookbehind window `d` when calculating the result.
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
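A sketch of `increase` on an illustrative counter metric:

```metricsql
# Total growth of a counter during the last day, per series:
increase(http_requests_total[1d])
```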
@@ -505,8 +505,8 @@ See also [increase_pure](#increase_pure), [increase_prometheus](#increase_promet
#### increase_prometheus

`increase_prometheus(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the increase
- over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
- It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
+ over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
+ It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).
It doesn't take into account the last sample before the given lookbehind window `d` when calculating the result in the same way as Prometheus does.
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.

@@ -517,13 +517,13 @@ See also [increase_pure](#increase_pure) and [increase](#increase).
#### increase_pure

`increase_pure(series_selector[d])` is a [rollup function](#rollup-functions), which works the same as [increase](#increase) except
- of the following corner case - it assumes that [counters](https://docs.victoriametrics.com/keyConcepts.html#counter) always start from 0,
+ of the following corner case - it assumes that [counters](https://docs.victoriametrics.com/keyconcepts/#counter) always start from 0,
while [increase](#increase) ignores the first value in a series if it is too big.

#### increases_over_time

`increases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw sample value increases
- over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -532,15 +532,15 @@ See also [decreases_over_time](#decreases_over_time).
#### integrate

`integrate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the integral over raw samples on the given lookbehind window `d`
- per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### irate

`irate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the "instant" per-second increase rate over the last two raw samples
- on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
- It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
+ on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
+ It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
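A sketch contrasting `irate` with `rate` on an illustrative counter metric:

```metricsql
# "Instant" per-second rate from the last two raw samples in a 5-minute window:
irate(node_network_receive_bytes_total[5m])

# Average per-second rate over the same window, usually smoother:
rate(node_network_receive_bytes_total[5m])
```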
@@ -552,7 +552,7 @@ See also [rate](#rate) and [rollup_rate](#rollup_rate).

`lag(series_selector[d])` is a [rollup function](#rollup-functions), which returns the duration in seconds between the last sample
on the given lookbehind window `d` and the timestamp of the current point. It is calculated independently per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -561,7 +561,7 @@ See also [lifetime](#lifetime) and [duration_over_time](#duration_over_time).
#### last_over_time

`last_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
- per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is supported by PromQL.

@@ -570,7 +570,7 @@ See also [first_over_time](#first_over_time) and [tlast_over_time](#tlast_over_t
#### lifetime

`lifetime(series_selector[d])` is a [rollup function](#rollup-functions), which returns the duration in seconds between the last and the first sample
- on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -579,14 +579,14 @@ See also [duration_over_time](#duration_over_time) and [lag](#lag).
#### mad_over_time

`mad_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation)
- over raw samples on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ over raw samples on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

See also [mad](#mad), [range_mad](#range_mad) and [outlier_iqr_over_time](#outlier_iqr_over_time).

#### max_over_time

`max_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the maximum value over raw samples
- on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is supported by PromQL.

@@ -596,14 +596,14 @@ See also [tmax_over_time](#tmax_over_time).

`median_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates median value over raw samples
on the given lookbehind window `d` per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

See also [avg_over_time](#avg_over_time).

#### min_over_time

`min_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the minimum value over raw samples
- on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is supported by PromQL.

@@ -613,7 +613,7 @@ See also [tmin_over_time](#tmin_over_time).

`mode_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [mode](https://en.wikipedia.org/wiki/Mode_(statistics))
for raw samples on the given lookbehind window `d`. It is calculated individually per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering). It is expected that raw sample values are discrete.
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering). It is expected that raw sample values are discrete.

#### outlier_iqr_over_time

@@ -631,7 +631,7 @@ See also [outliers_iqr](#outliers_iqr).

`predict_linear(series_selector[d], t)` is a [rollup function](#rollup-functions), which calculates the value `t` seconds in the future using
linear interpolation over raw samples on the given lookbehind window `d`. The predicted value is calculated individually per each time series
- returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is supported by PromQL.
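A sketch of `predict_linear` with illustrative values:

```metricsql
# Estimated free disk space 24 hours from now, extrapolated from the last 6 hours:
predict_linear(node_filesystem_avail_bytes[6h], 24*3600)
```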
@@ -649,7 +649,7 @@ This function is supported by PromQL.
#### quantile_over_time

`quantile_over_time(phi, series_selector[d])` is a [rollup function](#rollup-functions), which calculates `phi`-quantile over raw samples
- on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
The `phi` value must be in the range `[0...1]`.

This function is supported by PromQL.

@@ -660,7 +660,7 @@ See also [quantiles_over_time](#quantiles_over_time).

`quantiles_over_time("phiLabel", phi1, ..., phiN, series_selector[d])` is a [rollup function](#rollup-functions), which calculates `phi*`-quantiles
over raw samples on the given lookbehind window `d` per each time series returned
- from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
The function returns individual series per each `phi*` with `{phiLabel="phi*"}` label. `phi*` values must be in the range `[0...1]`.

See also [quantile_over_time](#quantile_over_time).
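A sketch of `quantiles_over_time` with illustrative values:

```metricsql
# 50th, 90th and 99th percentiles of request duration over the last 10 minutes,
# returned as separate series with {quantile="0.5"}, {quantile="0.9"} and {quantile="0.99"} labels:
quantiles_over_time("quantile", 0.5, 0.9, 0.99, request_duration_seconds[10m])
```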
@@ -668,7 +668,7 @@ See also [quantile_over_time](#quantile_over_time).
#### range_over_time

`range_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates value range over raw samples
- on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+ on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
E.g. it calculates `max_over_time(series_selector[d]) - min_over_time(series_selector[d])`.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -676,8 +676,8 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
#### rate

`rate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average per-second increase rate
- over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
- It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
+ over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
+ It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).

If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
@ -694,16 +694,16 @@ See also [irate](#irate) and [rollup_rate](#rollup_rate).
|
|||
|
||||
`rate_over_sum(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second rate over the sum of raw samples
|
||||
on the given lookbehind window `d`. The calculations are performed individually per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
#### resets
|
||||
|
||||
`resets(series_selector[d])` is a [rollup function](#rollup-functions), which returns the number
|
||||
of [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) resets over the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
|
||||
of [counter](https://docs.victoriametrics.com/keyconcepts/#counter) resets over the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -713,7 +713,7 @@ This function is supported by PromQL.
|
|||
|
||||
`rollup(series_selector[d])` is a [rollup function](#rollup-functions), which calculates `min`, `max` and `avg` values for raw samples
|
||||
on the given lookbehind window `d` and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
These values are calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
These values are calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
@ -723,7 +723,7 @@ See also [label_match](#label_match).
|
|||
`rollup_candlestick(series_selector[d])` is a [rollup function](#rollup-functions), which calculates `open`, `high`, `low` and `close` values (aka OHLC)
|
||||
over raw samples on the given lookbehind window `d` and returns them in time series with `rollup="open"`, `rollup="high"`, `rollup="low"` and `rollup="close"` additional labels.
|
||||
The calculations are performed individually per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering). This function is useful for financial applications.
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering). This function is useful for financial applications.
|
||||
|
||||
Optional 2nd argument `"open"`, `"high"`, `"low"` or `"close"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
@ -733,7 +733,7 @@ See also [label_match](#label_match).
|
|||
`rollup_delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates differences between adjacent raw samples
|
||||
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated differences
|
||||
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
@ -747,7 +747,7 @@ See also [rollup_increase](#rollup_increase).
|
|||
`rollup_deriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivatives
|
||||
for adjacent raw samples on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated per-second derivatives
|
||||
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
@ -759,7 +759,7 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
|
|||
`rollup_increase(series_selector[d])` is a [rollup function](#rollup-functions), which calculates increases for adjacent raw samples
|
||||
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated increases
|
||||
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
@ -778,7 +778,7 @@ when to use `rollup_rate()`.
|
|||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -787,7 +787,7 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
|
|||
`rollup_scrape_interval(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the interval in seconds between
|
||||
adjacent raw samples on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated interval
|
||||
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
@ -797,7 +797,7 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
|
|||
#### scrape_interval
|
||||
|
||||
`scrape_interval(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average interval in seconds between raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -807,7 +807,7 @@ See also [rollup_scrape_interval](#rollup_scrape_interval).
|
|||
|
||||
`share_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`) of raw samples
|
||||
on the given lookbehind window `d`, which are bigger than `gt`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is useful for calculating SLI and SLO. Example: `share_gt_over_time(up[24h], 0)` - returns service availability for the last 24 hours.
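For reference, a minimal Go sketch that evaluates the same SLO expression via the `/api/v1/query` endpoint (the VictoriaMetrics address below is an assumption; adjust it for your setup):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Assumed single-node VictoriaMetrics address.
	endpoint := "http://localhost:8428/api/v1/query"

	params := url.Values{}
	params.Set("query", `share_gt_over_time(up[24h], 0)`)

	resp, err := http.Get(endpoint + "?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response is the standard /api/v1/query JSON document;
	// values close to 1 mean the target was up for most of the last 24 hours.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```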
@ -819,7 +819,7 @@ See also [share_le_over_time](#share_le_over_time) and [count_gt_over_time](#cou
|
|||
|
||||
`share_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`) of raw samples
|
||||
on the given lookbehind window `d`, which are smaller or equal to `le`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is useful for calculating SLI and SLO. Example: `share_le_over_time(memory_usage_bytes[24h], 100*1024*1024)` returns
|
||||
the share of time series values for the last 24 hours when memory usage was below or equal to 100MB.
|
||||
|
@ -832,7 +832,7 @@ See also [share_gt_over_time](#share_gt_over_time) and [count_le_over_time](#cou
|
|||
|
||||
`share_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`) of raw samples
|
||||
on the given lookbehind window `d`, which are equal to `eq`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -841,15 +841,15 @@ See also [count_eq_over_time](#count_eq_over_time).
|
|||
#### stale_samples_over_time
|
||||
|
||||
`stale_samples_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number
|
||||
of [staleness markers](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) on the given lookbehind window `d`
|
||||
per each time series matching the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
of [staleness markers](https://docs.victoriametrics.com/vmagent/#prometheus-staleness-markers) on the given lookbehind window `d`
|
||||
per each time series matching the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
#### stddev_over_time
|
||||
|
||||
`stddev_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard deviation over raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -860,7 +860,7 @@ See also [stdvar_over_time](#stdvar_over_time).
|
|||
#### stdvar_over_time
|
||||
|
||||
`stdvar_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard variance over raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -871,7 +871,7 @@ See also [stddev_over_time](#stddev_over_time).
|
|||
#### sum_eq_over_time
|
||||
|
||||
`sum_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values equal to `eq`
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -880,7 +880,7 @@ See also [sum_over_time](#sum_over_time) and [count_eq_over_time](#count_eq_over
|
|||
#### sum_gt_over_time
|
||||
|
||||
`sum_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values bigger than `gt`
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -889,7 +889,7 @@ See also [sum_over_time](#sum_over_time) and [count_gt_over_time](#count_gt_over
|
|||
#### sum_le_over_time
|
||||
|
||||
`sum_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values smaller or equal to `le`
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -898,7 +898,7 @@ See also [sum_over_time](#sum_over_time) and [count_le_over_time](#count_le_over
|
|||
#### sum_over_time
|
||||
|
||||
`sum_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -907,14 +907,14 @@ This function is supported by PromQL.
|
|||
#### sum2_over_time
|
||||
|
||||
`sum2_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of squares for raw sample values
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
#### timestamp
|
||||
|
||||
`timestamp(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last raw sample
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -925,7 +925,7 @@ See also [time](#time) and [now](#now).
|
|||
#### timestamp_with_name
|
||||
|
||||
`timestamp_with_name(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last raw sample
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are preserved in the resulting rollups.
|
||||
|
||||
|
@ -934,7 +934,7 @@ See also [timestamp](#timestamp) and [keep_metric_names](#keep_metric_names) mod
|
|||
#### tfirst_over_time
|
||||
|
||||
`tfirst_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the first raw sample
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -943,7 +943,7 @@ See also [first_over_time](#first_over_time).
|
|||
#### tlast_change_over_time
|
||||
|
||||
`tlast_change_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last change
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) on the given lookbehind window `d`.
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering) on the given lookbehind window `d`.
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -959,7 +959,7 @@ See also [tlast_change_over_time](#tlast_change_over_time).
|
|||
|
||||
`tmax_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the raw sample
|
||||
with the maximum value on the given lookbehind window `d`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -969,7 +969,7 @@ See also [max_over_time](#max_over_time).
|
|||
|
||||
`tmin_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the raw sample
|
||||
with the minimum value on the given lookbehind window `d`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -979,7 +979,7 @@ See also [min_over_time](#min_over_time).
|
|||
|
||||
`zscore_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns [z-score](https://en.wikipedia.org/wiki/Standard_score)
|
||||
for raw samples on the given lookbehind window `d`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -994,7 +994,7 @@ returned from the rollup `delta(temperature[24h])`.
|
|||
|
||||
Additional details:
|
||||
|
||||
* If transform function is applied directly to a [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
|
||||
* If transform function is applied directly to a [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering),
|
||||
then the [default_rollup()](#default_rollup) function is automatically applied before calculating the transformations.
|
||||
For example, `abs(temperature)` is implicitly transformed to `abs(default_rollup(temperature))`.
|
||||
* All the transform functions accept optional `keep_metric_names` modifier. If it is set,
|
||||
|
@ -1230,7 +1230,7 @@ by replacing all the values bigger or equal to 30 with 40.
|
|||
#### end
|
||||
|
||||
`end()` is a [transform function](#transform-functions), which returns the unix timestamp in seconds for the last point.
|
||||
It is known as `end` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
It is known as `end` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
|
||||
|
||||
See also [start](#start), [time](#time) and [now](#now).
|
||||
|
||||
|
@ -1653,14 +1653,14 @@ This function is supported by PromQL.
|
|||
|
||||
`start()` is a [transform function](#transform-functions), which returns unix timestamp in seconds for the first point.
|
||||
|
||||
It is known as `start` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
It is known as `start` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
|
||||
|
||||
See also [end](#end), [time](#time) and [now](#now).
|
||||
|
||||
#### step
|
||||
|
||||
`step()` is a [transform function](#transform-functions), which returns the step in seconds (aka interval) between the returned points.
|
||||
It is known as `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
It is known as `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
|
||||
|
||||
See also [start](#start) and [end](#end).
|
||||
|
||||
|
@ -1717,7 +1717,7 @@ This function is supported by PromQL.
|
|||
|
||||
Additional details:
|
||||
|
||||
* If label manipulation function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
|
||||
* If label manipulation function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering),
|
||||
then the [default_rollup()](#default_rollup) function is automatically applied before performing the label transformation.
|
||||
For example, `alias(temperature, "foo")` is implicitly transformed to `alias(default_rollup(temperature), "foo")`.
|
||||
|
||||
|
@ -1894,7 +1894,7 @@ Additional details:
|
|||
and calculate the [count](#count) aggregate function independently per each group, while `count(up) without (instance)`
|
||||
would group [rollup results](#rollup-functions) by all the labels except `instance` before calculating [count](#count) aggregate function independently per each group.
|
||||
Multiple labels can be put in `by` and `without` modifiers.
|
||||
* If the aggregate function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
|
||||
* If the aggregate function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering),
|
||||
then the [default_rollup()](#default_rollup) function is automatically applied before calculating the aggregate.
|
||||
For example, `count(up)` is implicitly transformed to `count(default_rollup(up))`.
|
||||
* Aggregate functions accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point
|
||||
|
@ -2104,7 +2104,7 @@ See also [quantile](#quantile).
|
|||
`share(q) by (group_labels)` is an [aggregate function](#aggregate-functions), which returns shares in the range `[0..1]`
|
||||
for every non-negative point returned by `q` per each timestamp, so the sum of shares per each `group_labels` equals 1.
|
||||
|
||||
This function is useful for normalizing [histogram bucket](https://docs.victoriametrics.com/keyConcepts.html#histogram) shares
|
||||
This function is useful for normalizing [histogram bucket](https://docs.victoriametrics.com/keyconcepts/#histogram) shares
|
||||
into `[0..1]` range:
|
||||
|
||||
```metricsql
|
||||
|
@ -2208,7 +2208,7 @@ See also [zscore_over_time](#zscore_over_time), [range_trim_zscore](#range_trim_
|
|||
## Subqueries
|
||||
|
||||
MetricsQL supports and extends PromQL subqueries. See [this article](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3) for details.
|
||||
Any [rollup function](#rollup-functions) for something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) form a subquery.
|
||||
Any [rollup function](#rollup-functions) over something other than a [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) forms a subquery.
|
||||
Nested rollup functions can be implicit thanks to the [implicit query conversions](#implicit-query-conversions).
|
||||
For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m))[1i:1i])`, so it becomes a subquery,
|
||||
since it contains [default_rollup](#default_rollup) nested into [delta](#delta).
|
||||
|
@ -2219,19 +2219,19 @@ VictoriaMetrics performs subqueries in the following way:
|
|||
For example, for expression `max_over_time(rate(http_requests_total[5m])[1h:30s])` the inner function `rate(http_requests_total[5m])`
|
||||
is calculated with `step=30s`. The resulting data points are aligned by the `step`.
|
||||
* It calculates the outer rollup function over the results of the inner rollup function using the `step` value
|
||||
passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
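To make the alignment step concrete, here is a small illustrative Go sketch (not VictoriaMetrics code) that computes the inner evaluation timestamps for a `[1h:30s]` subquery, i.e. the multiples of the inner step that fall into the outer lookbehind window:

```go
package main

import "fmt"

// alignedTimestamps returns the unix timestamps in (end-window, end] that are
// multiples of step. All arguments are in seconds.
func alignedTimestamps(end, window, step int64) []int64 {
	var ts []int64
	start := end - window
	first := (start/step)*step + step // the first multiple of step after start
	for t := first; t <= end; t += step {
		ts = append(ts, t)
	}
	return ts
}

func main() {
	// A 1h outer window evaluated with a 30s inner step, as in the example above.
	fmt.Println(alignedTimestamps(1700000000, 3600, 30))
}
```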
## Implicit query conversions
|
||||
|
||||
VictoriaMetrics performs the following implicit conversions for incoming queries before starting the calculations:
|
||||
|
||||
* If lookbehind window in square brackets is missing inside [rollup function](#rollup-functions), then it is automatically set to the following value:
|
||||
- To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
|
||||
- To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
|
||||
for all the [rollup functions](#rollup-functions) except of [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
|
||||
For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
|
||||
- To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
|
||||
* All the [series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering),
|
||||
* All the [series selectors](https://docs.victoriametrics.com/keyconcepts/#filtering),
|
||||
which aren't wrapped into [rollup functions](#rollup-functions), are automatically wrapped into [default_rollup](#default_rollup) function.
|
||||
Examples:
|
||||
* `foo` is transformed to `default_rollup(foo)`
|
||||
|
@ -2242,6 +2242,6 @@ VictoriaMetrics performs the following implicit conversions for incoming queries
|
|||
it is [transform function](#transform-functions)
|
||||
* If `step` in square brackets is missing inside [subquery](#subqueries), then `1i` step is automatically added there.
|
||||
For example, `avg_over_time(rate(http_requests_total[5m])[1h])` is automatically converted to `avg_over_time(rate(http_requests_total[5m])[1h:1i])`.
|
||||
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
|
||||
* If something other than [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering)
|
||||
is passed to [rollup function](#rollup-functions), then a [subquery](#subqueries) with `1i` lookbehind window and `1i` step is automatically formed.
|
||||
For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up)))[1i:1i])`.
|
|
@ -1,3 +1,3 @@
|
|||
See vmagent docs [here](https://docs.victoriametrics.com/vmagent.html).
|
||||
See vmagent docs [here](https://docs.victoriametrics.com/vmagent/).
|
||||
|
||||
vmagent docs can be edited at [docs/vmagent.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmagent.md).
|
||||
|
||||
|
|
|
@ -3,13 +3,15 @@ package common
|
|||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
||||
)
|
||||
|
||||
// PushCtx is a context used for populating WriteRequest.
|
||||
type PushCtx struct {
|
||||
// WriteRequest contains the WriteRequest, which must be pushed later to remote storage.
|
||||
//
|
||||
// The actual labels and samples for the time series are stored in Labels and Samples fields.
|
||||
WriteRequest prompbmarshal.WriteRequest
|
||||
|
||||
// Labels contains flat list of all the labels used in WriteRequest.
|
||||
|
@ -21,13 +23,7 @@ type PushCtx struct {
|
|||
|
||||
// Reset resets ctx.
|
||||
func (ctx *PushCtx) Reset() {
|
||||
tss := ctx.WriteRequest.Timeseries
|
||||
for i := range tss {
|
||||
ts := &tss[i]
|
||||
ts.Labels = nil
|
||||
ts.Samples = nil
|
||||
}
|
||||
ctx.WriteRequest.Timeseries = ctx.WriteRequest.Timeseries[:0]
|
||||
ctx.WriteRequest.Reset()
|
||||
|
||||
promrelabel.CleanLabels(ctx.Labels)
|
||||
ctx.Labels = ctx.Labels[:0]
|
||||
|
@ -39,15 +35,10 @@ func (ctx *PushCtx) Reset() {
|
|||
//
|
||||
// Call PutPushCtx when the ctx is no longer needed.
|
||||
func GetPushCtx() *PushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*PushCtx)
|
||||
}
|
||||
return &PushCtx{}
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*PushCtx)
|
||||
}
|
||||
return &PushCtx{}
|
||||
}
|
||||
|
||||
// PutPushCtx returns ctx to the pool.
|
||||
|
@ -55,12 +46,7 @@ func GetPushCtx() *PushCtx {
|
|||
// ctx mustn't be used after returning to the pool.
|
||||
func PutPushCtx(ctx *PushCtx) {
|
||||
ctx.Reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *PushCtx, cgroup.AvailableCPUs())
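The change above removes the CPU-sized channel (`pushCtxPoolCh`) that used to sit in front of `sync.Pool` and keeps only the pool. A minimal standalone sketch of the resulting pattern (the `pushCtx` type below is a simplified stand-in, not the real structure):

```go
package example

import "sync"

// pushCtx is a simplified stand-in for the pooled context type.
type pushCtx struct {
	buf []byte
}

func (ctx *pushCtx) reset() {
	ctx.buf = ctx.buf[:0]
}

var pushCtxPool sync.Pool

// getPushCtx returns a reusable context, allocating a new one when the pool is empty.
func getPushCtx() *pushCtx {
	if v := pushCtxPool.Get(); v != nil {
		return v.(*pushCtx)
	}
	return &pushCtx{}
}

// putPushCtx resets ctx and returns it to the pool; ctx must not be used afterwards.
func putPushCtx(ctx *pushCtx) {
	ctx.reset()
	pushCtxPool.Put(ctx)
}
```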
@ -10,7 +10,6 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
||||
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
|
@ -160,25 +159,15 @@ func (ctx *pushCtx) reset() {
|
|||
}
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs())
|
||||
|
|
|
@ -225,7 +225,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
}
|
||||
w.Header().Add("Content-Type", "text/html; charset=utf-8")
|
||||
fmt.Fprintf(w, "<h2>vmagent</h2>")
|
||||
fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/vmagent.html'>https://docs.victoriametrics.com/vmagent.html</a></br>")
|
||||
fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/vmagent/'>https://docs.victoriametrics.com/vmagent/</a></br>")
|
||||
fmt.Fprintf(w, "Useful endpoints:</br>")
|
||||
httpserver.WriteAPIHelp(w, [][2]string{
|
||||
{"targets", "status for discovered active targets"},
|
||||
|
@ -718,7 +718,7 @@ func usage() {
|
|||
const s = `
|
||||
vmagent collects metrics data via popular data ingestion protocols and routes it to VictoriaMetrics.
|
||||
|
||||
See the docs at https://docs.victoriametrics.com/vmagent.html .
|
||||
See the docs at https://docs.victoriametrics.com/vmagent/ .
|
||||
`
|
||||
flagutil.Usage(s)
|
||||
}
|
||||
|
|
|
@ -25,9 +25,9 @@ import (
|
|||
|
||||
var (
|
||||
forcePromProto = flagutil.NewArrayBool("remoteWrite.forcePromProto", "Whether to force Prometheus remote write protocol for sending data "+
|
||||
"to the corresponding -remoteWrite.url . See https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol")
|
||||
"to the corresponding -remoteWrite.url . See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol")
|
||||
forceVMProto = flagutil.NewArrayBool("remoteWrite.forceVMProto", "Whether to force VictoriaMetrics remote write protocol for sending data "+
|
||||
"to the corresponding -remoteWrite.url . See https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol")
|
||||
"to the corresponding -remoteWrite.url . See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol")
|
||||
|
||||
rateLimit = flagutil.NewArrayInt("remoteWrite.rateLimit", 0, "Optional rate limit in bytes per second for data sent to the corresponding -remoteWrite.url. "+
|
||||
"By default, the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data "+
|
||||
|
@ -164,7 +164,7 @@ func newHTTPClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persiste
|
|||
useVMProto = common.HandleVMProtoClientHandshake(c.remoteWriteURL, doRequest)
|
||||
if !useVMProto {
|
||||
logger.Infof("the remote storage at %q doesn't support VictoriaMetrics remote write protocol. Switching to Prometheus remote write protocol. "+
|
||||
"See https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol", sanitizedURL)
|
||||
"See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol", sanitizedURL)
|
||||
}
|
||||
}
|
||||
c.useVMProto = useVMProto
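The negotiated `useVMProto` flag later decides how request bodies are encoded: the VictoriaMetrics remote write protocol sends zstd-compressed blocks, while the Prometheus protocol uses snappy. A hedged sketch of that choice (an illustrative helper, not vmagent's actual code):

```go
package example

import (
	"github.com/golang/snappy"
	"github.com/klauspost/compress/zstd"
)

// compressBlock compresses a marshaled WriteRequest according to the protocol
// negotiated for the remote endpoint: zstd for the VictoriaMetrics remote write
// protocol, snappy for the Prometheus remote write protocol.
func compressBlock(data []byte, useVMProto bool) ([]byte, error) {
	if useVMProto {
		enc, err := zstd.NewWriter(nil)
		if err != nil {
			return nil, err
		}
		defer enc.Close()
		return enc.EncodeAll(data, nil), nil
	}
	return snappy.Encode(nil, data), nil
}
```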
@ -28,7 +28,7 @@ var (
|
|||
maxRowsPerBlock = flag.Int("remoteWrite.maxRowsPerBlock", 10000, "The maximum number of samples to send in each block to remote storage. Higher number may improve performance at the cost of the increased memory usage. See also -remoteWrite.maxBlockSize")
|
||||
vmProtoCompressLevel = flag.Int("remoteWrite.vmProtoCompressLevel", 0, "The compression level for VictoriaMetrics remote write protocol. "+
|
||||
"Higher values reduce network traffic at the cost of higher CPU usage. Negative values reduce CPU usage at the cost of increased network traffic. "+
|
||||
"See https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol")
|
||||
"See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol")
|
||||
)
|
||||
|
||||
type pendingSeries struct {
|
||||
|
@ -122,11 +122,7 @@ func (wr *writeRequest) reset() {
|
|||
|
||||
wr.wr.Timeseries = nil
|
||||
|
||||
for i := range wr.tss {
|
||||
ts := &wr.tss[i]
|
||||
ts.Labels = nil
|
||||
ts.Samples = nil
|
||||
}
|
||||
clear(wr.tss)
|
||||
wr.tss = wr.tss[:0]
|
||||
|
||||
promrelabel.CleanLabels(wr.labels)
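The `clear` builtin used above (Go 1.21+) zeroes every element of the slice in one call, which is what the removed loop did by hand; zeroing matters because the slice is retained for reuse and must not pin the old labels and samples for the garbage collector. A tiny sketch with stand-in types:

```go
package example

type label struct {
	Name, Value string
}

type timeSeries struct {
	Labels  []label
	Samples []float64
}

// resetForReuse zeroes the elements of tss so the retained backing array does
// not keep references to label and sample data, then truncates it to length 0.
func resetForReuse(tss []timeSeries) []timeSeries {
	clear(tss) // equivalent to setting every element to timeSeries{}
	return tss[:0]
}
```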
@ -19,10 +19,10 @@ var (
|
|||
relabelConfigPathGlobal = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabeling configs, which are applied "+
|
||||
"to all the metrics before sending them to -remoteWrite.url. See also -remoteWrite.urlRelabelConfig. "+
|
||||
"The path can point either to local file or to http url. "+
|
||||
"See https://docs.victoriametrics.com/vmagent.html#relabeling")
|
||||
"See https://docs.victoriametrics.com/vmagent/#relabeling")
|
||||
relabelConfigPaths = flagutil.NewArrayString("remoteWrite.urlRelabelConfig", "Optional path to relabel configs for the corresponding -remoteWrite.url. "+
|
||||
"See also -remoteWrite.relabelConfig. The path can point either to local file or to http url. "+
|
||||
"See https://docs.victoriametrics.com/vmagent.html#relabeling")
|
||||
"See https://docs.victoriametrics.com/vmagent/#relabeling")
|
||||
|
||||
usePromCompatibleNaming = flag.Bool("usePromCompatibleNaming", false, "Whether to replace characters unsupported by Prometheus with underscores "+
|
||||
"in the ingested metric names and label names. For example, foo.bar{a.b='c'} is transformed into foo_bar{a_b='c'} during data ingestion if this flag is set. "+
|
||||
|
|
|
@ -40,21 +40,26 @@ var (
|
|||
"Pass multiple -remoteWrite.url options in order to replicate the collected data to multiple remote storage systems. "+
|
||||
"The data can be sharded among the configured remote storage systems if -remoteWrite.shardByURL flag is set")
|
||||
remoteWriteMultitenantURLs = flagutil.NewArrayString("remoteWrite.multitenantURL", "Base path for multitenant remote storage URL to write data to. "+
|
||||
"See https://docs.victoriametrics.com/vmagent.html#multitenancy for details. Example url: http://<vminsert>:8480 . "+
|
||||
"See https://docs.victoriametrics.com/vmagent/#multitenancy for details. Example url: http://<vminsert>:8480 . "+
|
||||
"Pass multiple -remoteWrite.multitenantURL flags in order to replicate data to multiple remote storage systems. "+
|
||||
"This flag is deprecated in favor of -enableMultitenantHandlers . See https://docs.victoriametrics.com/vmagent.html#multitenancy")
|
||||
"This flag is deprecated in favor of -enableMultitenantHandlers . See https://docs.victoriametrics.com/vmagent/#multitenancy")
|
||||
enableMultitenantHandlers = flag.Bool("enableMultitenantHandlers", false, "Whether to process incoming data via multitenant insert handlers according to "+
|
||||
"https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format . By default incoming data is processed via single-node insert handlers "+
|
||||
"https://docs.victoriametrics.com/cluster-victoriametrics/#url-format . By default incoming data is processed via single-node insert handlers "+
|
||||
"according to https://docs.victoriametrics.com/#how-to-import-time-series-data ."+
|
||||
"See https://docs.victoriametrics.com/vmagent.html#multitenancy for details")
|
||||
"See https://docs.victoriametrics.com/vmagent/#multitenancy for details")
|
||||
|
||||
shardByURL = flag.Bool("remoteWrite.shardByURL", false, "Whether to shard outgoing series across all the remote storage systems enumerated via -remoteWrite.url . "+
|
||||
"By default the data is replicated across all the -remoteWrite.url . See https://docs.victoriametrics.com/vmagent.html#sharding-among-remote-storages")
|
||||
"By default the data is replicated across all the -remoteWrite.url . See https://docs.victoriametrics.com/vmagent/#sharding-among-remote-storages . "+
|
||||
"See also -remoteWrite.shardByURLReplicas")
|
||||
shardByURLReplicas = flag.Int("remoteWrite.shardByURLReplicas", 1, "How many copies of data to make among remote storage systems enumerated via -remoteWrite.url "+
|
||||
"when -remoteWrite.shardByURL is set. See https://docs.victoriametrics.com/vmagent/#sharding-among-remote-storages")
|
||||
shardByURLLabels = flagutil.NewArrayString("remoteWrite.shardByURL.labels", "Optional list of labels, which must be used for sharding outgoing samples "+
|
||||
"among remote storage systems if -remoteWrite.shardByURL command-line flag is set. By default all the labels are used for sharding in order to gain "+
|
||||
"even distribution of series over the specified -remoteWrite.url systems. See also -remoteWrite.shardByURL.ignoreLabels")
|
||||
shardByURLIgnoreLabels = flagutil.NewArrayString("remoteWrite.shardByURL.ignoreLabels", "Optional list of labels, which must be ignored when sharding outgoing samples "+
|
||||
"among remote storage systems if -remoteWrite.shardByURL command-line flag is set. By default all the labels are used for sharding in order to gain "+
|
||||
"even distribution of series over the specified -remoteWrite.url systems. See also -remoteWrite.shardByURL.labels")
|
||||
|
||||
tmpDataPath = flag.String("remoteWrite.tmpDataPath", "vmagent-remotewrite-data", "Path to directory for storing pending data, which isn't sent to the configured -remoteWrite.url . "+
|
||||
"See also -remoteWrite.maxDiskUsagePerURL and -remoteWrite.disableOnDiskQueue")
|
||||
keepDanglingQueues = flag.Bool("remoteWrite.keepDanglingQueues", false, "Keep persistent queues contents at -remoteWrite.tmpDataPath in case there are no matching -remoteWrite.url. "+
|
||||
|
@ -81,33 +86,35 @@ var (
|
|||
`For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}`+
|
||||
`Enabled sorting for labels can slow down ingestion performance a bit`)
|
||||
maxHourlySeries = flag.Int("remoteWrite.maxHourlySeries", 0, "The maximum number of unique series vmagent can send to remote storage systems during the last hour. "+
|
||||
"Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter")
|
||||
"Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/vmagent/#cardinality-limiter")
|
||||
maxDailySeries = flag.Int("remoteWrite.maxDailySeries", 0, "The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. "+
|
||||
"Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter")
|
||||
"Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent/#cardinality-limiter")
|
||||
maxIngestionRate = flag.Int("maxIngestionRate", 0, "The maximum number of samples vmagent can receive per second. Data ingestion is paused when the limit is exceeded. "+
|
||||
"By default there are no limits on samples ingestion rate. See also -remoteWrite.rateLimit")
|
||||
|
||||
streamAggrConfig = flagutil.NewArrayString("remoteWrite.streamAggr.config", "Optional path to file with stream aggregation config. "+
|
||||
"See https://docs.victoriametrics.com/stream-aggregation.html . "+
|
||||
"See https://docs.victoriametrics.com/stream-aggregation/ . "+
|
||||
"See also -remoteWrite.streamAggr.keepInput, -remoteWrite.streamAggr.dropInput and -remoteWrite.streamAggr.dedupInterval")
|
||||
streamAggrKeepInput = flagutil.NewArrayBool("remoteWrite.streamAggr.keepInput", "Whether to keep all the input samples after the aggregation "+
|
||||
"with -remoteWrite.streamAggr.config. By default, only aggregates samples are dropped, while the remaining samples "+
|
||||
"are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation.html")
|
||||
"are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation/")
|
||||
streamAggrDropInput = flagutil.NewArrayBool("remoteWrite.streamAggr.dropInput", "Whether to drop all the input samples after the aggregation "+
|
||||
"with -remoteWrite.streamAggr.config. By default, only aggregates samples are dropped, while the remaining samples "+
|
||||
"are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation.html")
|
||||
"are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation/")
|
||||
streamAggrDedupInterval = flagutil.NewArrayDuration("remoteWrite.streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation "+
|
||||
"with -remoteWrite.streamAggr.config . See also -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation.html#deduplication")
|
||||
"with -remoteWrite.streamAggr.config . See also -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation/#deduplication")
|
||||
streamAggrIgnoreOldSamples = flagutil.NewArrayBool("remoteWrite.streamAggr.ignoreOldSamples", "Whether to ignore input samples with old timestamps outside the current aggregation interval "+
|
||||
"for the corresponding -remoteWrite.streamAggr.config . See https://docs.victoriametrics.com/stream-aggregation.html#ignoring-old-samples")
|
||||
"for the corresponding -remoteWrite.streamAggr.config . See https://docs.victoriametrics.com/stream-aggregation/#ignoring-old-samples")
|
||||
streamAggrIgnoreFirstIntervals = flag.Int("remoteWrite.streamAggr.ignoreFirstIntervals", 0, "Number of aggregation intervals to skip after the start. Increase this value if you observe incorrect aggregation results after vmagent restarts. It could be caused by receiving unordered delayed data from clients pushing data into the vmagent. "+
|
||||
"See https://docs.victoriametrics.com/stream-aggregation/#ignore-aggregation-intervals-on-start")
|
||||
streamAggrDropInputLabels = flagutil.NewArrayString("streamAggr.dropInputLabels", "An optional list of labels to drop from samples "+
|
||||
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation.html#dropping-unneeded-labels")
|
||||
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation/#dropping-unneeded-labels")
|
||||
|
||||
disableOnDiskQueue = flag.Bool("remoteWrite.disableOnDiskQueue", false, "Whether to disable storing pending data to -remoteWrite.tmpDataPath "+
|
||||
"when the configured remote storage systems cannot keep up with the data ingestion rate. See https://docs.victoriametrics.com/vmagent.html#disabling-on-disk-persistence ."+
|
||||
"when the configured remote storage systems cannot keep up with the data ingestion rate. See https://docs.victoriametrics.com/vmagent/#disabling-on-disk-persistence ."+
|
||||
"See also -remoteWrite.dropSamplesOnOverload")
|
||||
dropSamplesOnOverload = flag.Bool("remoteWrite.dropSamplesOnOverload", false, "Whether to drop samples when -remoteWrite.disableOnDiskQueue is set and if the samples "+
|
||||
"cannot be pushed into the configured remote storage systems in a timely manner. See https://docs.victoriametrics.com/vmagent.html#disabling-on-disk-persistence")
|
||||
"cannot be pushed into the configured remote storage systems in a timely manner. See https://docs.victoriametrics.com/vmagent/#disabling-on-disk-persistence")
|
||||
)
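The `-remoteWrite.maxHourlySeries` and `-remoteWrite.maxDailySeries` flags above cap how many unique series may be sent per hour and per day. A much-simplified sketch of such a limiter (illustrative only, not vmagent's implementation):

```go
package example

import (
	"sync"
	"time"
)

// hourlyLimiter tracks the unique series hashes seen during the current hour
// and rejects new series once the limit is reached.
type hourlyLimiter struct {
	mu    sync.Mutex
	limit int
	hour  int64
	seen  map[uint64]struct{}
}

func newHourlyLimiter(limit int) *hourlyLimiter {
	return &hourlyLimiter{
		limit: limit,
		seen:  make(map[uint64]struct{}),
	}
}

// allow reports whether a sample for the series with the given hash may be sent.
func (l *hourlyLimiter) allow(seriesHash uint64) bool {
	l.mu.Lock()
	defer l.mu.Unlock()

	h := time.Now().Unix() / 3600
	if h != l.hour {
		// A new hour has started - count unique series from scratch.
		l.hour = h
		l.seen = make(map[uint64]struct{})
	}
	if _, ok := l.seen[seriesHash]; ok {
		return true
	}
	if len(l.seen) >= l.limit {
		return false
	}
	l.seen[seriesHash] = struct{}{}
	return true
}
```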
var (
|
||||
|
@ -125,7 +132,7 @@ var (
|
|||
ErrQueueFullHTTPRetry = &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf("remote storage systems cannot keep up with the data ingestion rate; retry the request later " +
|
||||
"or remove -remoteWrite.disableOnDiskQueue from vmagent command-line flags, so it could save pending data to -remoteWrite.tmpDataPath; " +
|
||||
"see https://docs.victoriametrics.com/vmagent.html#disabling-on-disk-persistence"),
|
||||
"see https://docs.victoriametrics.com/vmagent/#disabling-on-disk-persistence"),
|
||||
StatusCode: http.StatusTooManyRequests,
|
||||
}
|
||||
)
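When the on-disk queue is disabled and the in-memory buffers are full, returning HTTP 429 lets well-behaved clients back off and retry. An illustrative handler sketch (the `Retry-After` hint and the stubbed push function are assumptions, not part of vmagent):

```go
package example

import (
	"errors"
	"net/http"
)

// errQueueFull stands in for the typed "queue is full" error above.
var errQueueFull = errors.New("remote storage cannot keep up with the data ingestion rate")

// tryPush is a stub standing in for the real ingestion path.
func tryPush(r *http.Request) error {
	return errQueueFull
}

// insertHandler responds with 429 when the push path reports a full queue,
// so clients back off and retry instead of losing data.
func insertHandler(w http.ResponseWriter, r *http.Request) {
	if err := tryPush(r); err != nil {
		if errors.Is(err, errQueueFull) {
			// The Retry-After hint is an assumption for this sketch.
			w.Header().Set("Retry-After", "10")
			http.Error(w, err.Error(), http.StatusTooManyRequests)
			return
		}
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
```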
@ -252,6 +259,9 @@ func dropDanglingQueues() {
|
|||
// This is required for the case when the number of queues has been changed or URL have been changed.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4014
|
||||
//
|
||||
// If there are many persistent queues with identical *remoteWriteURLs,
// the queue with the last index will be dropped.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6140
|
||||
existingQueues := make(map[string]struct{}, len(rwctxsDefault))
|
||||
for _, rwctx := range rwctxsDefault {
|
||||
existingQueues[rwctx.fq.Dirname()] = struct{}{}
|
||||
|
@ -338,7 +348,7 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
|
|||
}
|
||||
sanitizedURL := fmt.Sprintf("%d:secret-url", i+1)
|
||||
if at != nil {
|
||||
// Construct full remote_write url for the given tenant according to https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format
|
||||
// Construct full remote_write url for the given tenant according to https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
|
||||
remoteWriteURL.Path = fmt.Sprintf("%s/insert/%d:%d/prometheus/api/v1/write", remoteWriteURL.Path, at.AccountID, at.ProjectID)
|
||||
sanitizedURL = fmt.Sprintf("%s:%d:%d", sanitizedURL, at.AccountID, at.ProjectID)
|
||||
}
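The per-tenant path construction above can be isolated into a small helper; a sketch mirroring that line (the helper name is illustrative):

```go
package example

import (
	"fmt"
	"net/url"
)

// tenantRemoteWriteURL appends the cluster insert path for the given tenant to
// a base vminsert URL, e.g. "http://vminsert:8480" with tenant (42, 0) becomes
// "http://vminsert:8480/insert/42:0/prometheus/api/v1/write".
func tenantRemoteWriteURL(base string, accountID, projectID uint32) (string, error) {
	u, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	u.Path = fmt.Sprintf("%s/insert/%d:%d/prometheus/api/v1/write", u.Path, accountID, projectID)
	return u.String(), nil
}
```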
@ -562,58 +572,17 @@ func tryPushBlockToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmar
|
|||
|
||||
// We need to push tssBlock to multiple remote storages.
|
||||
// This is either sharding or replication depending on -remoteWrite.shardByURL command-line flag value.
|
||||
if *shardByURL {
|
||||
// Shard the data among rwctxs
|
||||
tssByURL := make([][]prompbmarshal.TimeSeries, len(rwctxs))
|
||||
tmpLabels := promutils.GetLabels()
|
||||
for _, ts := range tssBlock {
|
||||
hashLabels := ts.Labels
|
||||
if len(shardByURLLabelsMap) > 0 {
|
||||
hashLabels = tmpLabels.Labels[:0]
|
||||
for _, label := range ts.Labels {
|
||||
if _, ok := shardByURLLabelsMap[label.Name]; ok {
|
||||
hashLabels = append(hashLabels, label)
|
||||
}
|
||||
}
|
||||
tmpLabels.Labels = hashLabels
|
||||
} else if len(shardByURLIgnoreLabelsMap) > 0 {
|
||||
hashLabels = tmpLabels.Labels[:0]
|
||||
for _, label := range ts.Labels {
|
||||
if _, ok := shardByURLIgnoreLabelsMap[label.Name]; !ok {
|
||||
hashLabels = append(hashLabels, label)
|
||||
}
|
||||
}
|
||||
tmpLabels.Labels = hashLabels
|
||||
}
|
||||
h := getLabelsHash(hashLabels)
|
||||
idx := h % uint64(len(tssByURL))
|
||||
tssByURL[idx] = append(tssByURL[idx], ts)
|
||||
if *shardByURL && *shardByURLReplicas < len(rwctxs) {
|
||||
// Shard tssBlock samples among rwctxs.
|
||||
replicas := *shardByURLReplicas
|
||||
if replicas <= 0 {
|
||||
replicas = 1
|
||||
}
|
||||
promutils.PutLabels(tmpLabels)
|
||||
|
||||
// Push sharded data to remote storages in parallel in order to reduce
|
||||
// the time needed for sending the data to multiple remote storage systems.
|
||||
var wg sync.WaitGroup
|
||||
var anyPushFailed atomic.Bool
|
||||
for i, rwctx := range rwctxs {
|
||||
tssShard := tssByURL[i]
|
||||
if len(tssShard) == 0 {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(rwctx *remoteWriteCtx, tss []prompbmarshal.TimeSeries) {
|
||||
defer wg.Done()
|
||||
if !rwctx.TryPush(tss) {
|
||||
anyPushFailed.Store(true)
|
||||
}
|
||||
}(rwctx, tssShard)
|
||||
}
|
||||
wg.Wait()
|
||||
return !anyPushFailed.Load()
|
||||
return tryShardingBlockAmongRemoteStorages(rwctxs, tssBlock, replicas)
|
||||
}
|
||||
|
||||
// Replicate data among rwctxs.
|
||||
// Push block to remote storages in parallel in order to reduce
|
||||
// Replicate tssBlock samples among rwctxs.
|
||||
// Push tssBlock to remote storage systems in parallel in order to reduce
|
||||
// the time needed for sending the data to multiple remote storage systems.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(rwctxs))
|
||||
|
@ -630,6 +599,97 @@ func tryPushBlockToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmar
|
|||
return !anyPushFailed.Load()
|
||||
}
|
||||
|
||||
func tryShardingBlockAmongRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmarshal.TimeSeries, replicas int) bool {
|
||||
x := getTSSShards(len(rwctxs))
|
||||
defer putTSSShards(x)
|
||||
|
||||
shards := x.shards
|
||||
tmpLabels := promutils.GetLabels()
|
||||
for _, ts := range tssBlock {
|
||||
hashLabels := ts.Labels
|
||||
if len(shardByURLLabelsMap) > 0 {
|
||||
hashLabels = tmpLabels.Labels[:0]
|
||||
for _, label := range ts.Labels {
|
||||
if _, ok := shardByURLLabelsMap[label.Name]; ok {
|
||||
hashLabels = append(hashLabels, label)
|
||||
}
|
||||
}
|
||||
tmpLabels.Labels = hashLabels
|
||||
} else if len(shardByURLIgnoreLabelsMap) > 0 {
|
||||
hashLabels = tmpLabels.Labels[:0]
|
||||
for _, label := range ts.Labels {
|
||||
if _, ok := shardByURLIgnoreLabelsMap[label.Name]; !ok {
|
||||
hashLabels = append(hashLabels, label)
|
||||
}
|
||||
}
|
||||
tmpLabels.Labels = hashLabels
|
||||
}
|
||||
h := getLabelsHash(hashLabels)
|
||||
idx := h % uint64(len(shards))
|
||||
i := 0
|
||||
for {
|
||||
shards[idx] = append(shards[idx], ts)
|
||||
i++
|
||||
if i >= replicas {
|
||||
break
|
||||
}
|
||||
idx++
|
||||
if idx >= uint64(len(shards)) {
|
||||
idx = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
promutils.PutLabels(tmpLabels)
|
||||
|
||||
// Push sharded samples to remote storage systems in parallel in order to reduce
|
||||
// the time needed for sending the data to multiple remote storage systems.
|
||||
var wg sync.WaitGroup
|
||||
var anyPushFailed atomic.Bool
|
||||
for i, rwctx := range rwctxs {
|
||||
shard := shards[i]
|
||||
if len(shard) == 0 {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(rwctx *remoteWriteCtx, tss []prompbmarshal.TimeSeries) {
|
||||
defer wg.Done()
|
||||
if !rwctx.TryPush(tss) {
|
||||
anyPushFailed.Store(true)
|
||||
}
|
||||
}(rwctx, shard)
|
||||
}
|
||||
wg.Wait()
|
||||
return !anyPushFailed.Load()
|
||||
}
|
||||
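A hedged sketch of the replica-aware sharding performed by tryShardingBlockAmongRemoteStorages above: each key is hashed, placed into shard hash%numShards, and then copied into the next replicas-1 shards with wrap-around. The FNV hash and plain string keys are simplifications for illustration; the real code hashes the selected labels of each time series.

    package main

    import (
        "fmt"
        "hash/fnv"
    )

    // shardWithReplicas assigns each key to `replicas` consecutive shards,
    // starting from hash(key) % numShards and wrapping around.
    func shardWithReplicas(keys []string, numShards, replicas int) [][]string {
        if replicas <= 0 {
            replicas = 1
        }
        shards := make([][]string, numShards)
        for _, k := range keys {
            h := fnv.New64a()
            h.Write([]byte(k))
            idx := h.Sum64() % uint64(numShards)
            for i := 0; i < replicas; i++ {
                shards[idx] = append(shards[idx], k)
                idx++
                if idx >= uint64(numShards) {
                    idx = 0
                }
            }
        }
        return shards
    }

    func main() {
        shards := shardWithReplicas([]string{"up", "node_cpu_seconds_total", "http_requests_total"}, 3, 2)
        for i, s := range shards {
            fmt.Printf("shard %d: %v\n", i, s)
        }
    }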
|
||||
type tssShards struct {
|
||||
shards [][]prompbmarshal.TimeSeries
|
||||
}
|
||||
|
||||
func getTSSShards(n int) *tssShards {
|
||||
v := tssShardsPool.Get()
|
||||
if v == nil {
|
||||
v = &tssShards{}
|
||||
}
|
||||
x := v.(*tssShards)
|
||||
if cap(x.shards) < n {
|
||||
x.shards = make([][]prompbmarshal.TimeSeries, n)
|
||||
}
|
||||
x.shards = x.shards[:n]
|
||||
return x
|
||||
}
|
||||
|
||||
func putTSSShards(x *tssShards) {
|
||||
shards := x.shards
|
||||
for i := range shards {
|
||||
clear(shards[i])
|
||||
shards[i] = shards[i][:0]
|
||||
}
|
||||
tssShardsPool.Put(x)
|
||||
}
|
||||
|
||||
var tssShardsPool sync.Pool
|
||||
|
||||
// sortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set.
|
||||
func sortLabelsIfNeeded(tss []prompbmarshal.TimeSeries) {
|
||||
if !*sortLabels {
|
||||
|
@ -802,9 +862,10 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
|
|||
ignoreOldSamples := streamAggrIgnoreOldSamples.GetOptionalArg(argIdx)
|
||||
if sasFile != "" {
|
||||
opts := &streamaggr.Options{
|
||||
DedupInterval: dedupInterval,
|
||||
DropInputLabels: *streamAggrDropInputLabels,
|
||||
IgnoreOldSamples: ignoreOldSamples,
|
||||
DedupInterval: dedupInterval,
|
||||
DropInputLabels: *streamAggrDropInputLabels,
|
||||
IgnoreOldSamples: ignoreOldSamples,
|
||||
IgnoreFirstIntervals: *streamAggrIgnoreFirstIntervals,
|
||||
}
|
||||
sas, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, opts)
|
||||
if err != nil {
|
||||
|
|
48
app/vmagent/remotewrite/remotewrite_test.go
Normal file
|
@ -0,0 +1,48 @@
|
|||
package remotewrite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
||||
func TestGetLabelsHash_Distribution(t *testing.T) {
|
||||
f := func(bucketsCount int) {
|
||||
t.Helper()
|
||||
|
||||
// Distribute itemsCount hashes returned by getLabelsHash() across bucketsCount buckets.
|
||||
itemsCount := 1_000 * bucketsCount
|
||||
m := make([]int, bucketsCount)
|
||||
var labels []prompbmarshal.Label
|
||||
for i := 0; i < itemsCount; i++ {
|
||||
labels = append(labels[:0], prompbmarshal.Label{
|
||||
Name: "__name__",
|
||||
Value: fmt.Sprintf("some_name_%d", i),
|
||||
})
|
||||
for j := 0; j < 10; j++ {
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: fmt.Sprintf("label_%d", j),
|
||||
Value: fmt.Sprintf("value_%d_%d", i, j),
|
||||
})
|
||||
}
|
||||
h := getLabelsHash(labels)
|
||||
m[h%uint64(bucketsCount)]++
|
||||
}
|
||||
|
||||
// Verify that the distribution is even
|
||||
expectedItemsPerBucket := itemsCount / bucketsCount
|
||||
for _, n := range m {
|
||||
if math.Abs(1-float64(n)/float64(expectedItemsPerBucket)) > 0.04 {
|
||||
t.Fatalf("unexpected items in the bucket for %d buckets; got %d; want around %d", bucketsCount, n, expectedItemsPerBucket)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
f(2)
|
||||
f(3)
|
||||
f(4)
|
||||
f(5)
|
||||
f(10)
|
||||
}
|
|
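The test above treats the distribution as even when every bucket stays within 4% of the expected per-bucket count. A hedged helper expressing the same check on its own:

    package main

    import (
        "fmt"
        "math"
    )

    // withinTolerance reports whether got deviates from want by at most eps (relative deviation).
    func withinTolerance(got, want int, eps float64) bool {
        return math.Abs(1-float64(got)/float64(want)) <= eps
    }

    func main() {
        fmt.Println(withinTolerance(1040, 1000, 0.04)) // true: 4% above the mean
        fmt.Println(withinTolerance(930, 1000, 0.04))  // false: 7% below the mean
    }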
@ -3,34 +3,15 @@ package remotewrite
|
|||
import (
|
||||
"context"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
func getStdDialer() *net.Dialer {
|
||||
stdDialerOnce.Do(func() {
|
||||
stdDialer = &net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: netutil.TCP6Enabled(),
|
||||
}
|
||||
})
|
||||
return stdDialer
|
||||
}
|
||||
|
||||
var (
|
||||
stdDialer *net.Dialer
|
||||
stdDialerOnce sync.Once
|
||||
)
|
||||
|
||||
func statDial(ctx context.Context, _, addr string) (conn net.Conn, err error) {
|
||||
network := netutil.GetTCPNetwork()
|
||||
d := getStdDialer()
|
||||
conn, err = d.DialContext(ctx, network, addr)
|
||||
conn, err = netutil.DialMaybeSRV(ctx, network, addr)
|
||||
dialsTotal.Inc()
|
||||
if err != nil {
|
||||
dialErrors.Inc()
|
||||
|
|
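For illustration, here is a hedged sketch of what an SRV-aware dial function can look like: if the host part carries the srv+ prefix, the SRV record is resolved first and the returned target:port is dialed; otherwise the address is dialed as-is. This is an assumption-level sketch, not the actual netutil.DialMaybeSRV implementation.

    package srvdial

    import (
        "context"
        "fmt"
        "net"
        "strings"
        "time"
    )

    // DialMaybeSRV dials addr directly, unless its host part has the "srv+" prefix,
    // in which case the SRV record is resolved and the first returned target:port is dialed.
    // Illustration only; the real netutil.DialMaybeSRV may behave differently.
    func DialMaybeSRV(ctx context.Context, network, addr string) (net.Conn, error) {
        d := &net.Dialer{Timeout: 30 * time.Second}
        host, _, err := net.SplitHostPort(addr)
        if err != nil {
            host = addr
        }
        if !strings.HasPrefix(host, "srv+") {
            return d.DialContext(ctx, network, addr)
        }
        _, srvs, err := net.DefaultResolver.LookupSRV(ctx, "", "", strings.TrimPrefix(host, "srv+"))
        if err != nil {
            return nil, fmt.Errorf("cannot resolve SRV record for %q: %w", host, err)
        }
        if len(srvs) == 0 {
            return nil, fmt.Errorf("no SRV records found for %q", host)
        }
        target := fmt.Sprintf("%s:%d", srvs[0].Target, srvs[0].Port)
        return d.DialContext(ctx, network, target)
    }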
|
@ -1,3 +1,3 @@
|
|||
See vmalert docs [here](https://docs.victoriametrics.com/vmalert.html).
|
||||
See vmalert docs [here](https://docs.victoriametrics.com/vmalert/).
|
||||
|
||||
vmalert docs can be edited at [docs/vmalert.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmalert.md).
|
||||
|
|
|
@ -44,7 +44,7 @@ Enterprise version of vmalert supports S3 and GCS paths to rules.
|
|||
For example: gs://bucket/path/to/rules, s3://bucket/path/to/rules
|
||||
S3 and GCS paths support only matching by prefix, e.g. s3://bucket/dir/rule_ matches
|
||||
all files with prefix rule_ in folder dir.
|
||||
See https://docs.victoriametrics.com/vmalert.html#reading-rules-from-object-storage
|
||||
See https://docs.victoriametrics.com/vmalert/#reading-rules-from-object-storage
|
||||
`)
|
||||
|
||||
ruleTemplatesPath = flagutil.NewArrayString("rule.templates", `Path or glob pattern to location with go template definitions `+
|
||||
|
@ -71,7 +71,7 @@ absolute path to all .tpl files in root.
|
|||
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier. By default, hostname is used as address.")
|
||||
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager `+
|
||||
`for cases where you want to build a custom link to Grafana, Prometheus or any other service. `+
|
||||
`Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . `+
|
||||
`Supports templating - see https://docs.victoriametrics.com/vmalert/#templating . `+
|
||||
`For example, link to Grafana: -external.alert.source='explore?orgId=1&left={"datasource":"VictoriaMetrics","queries":[{"expr":{{$expr|jsonEscape|queryEscape}},"refId":"A"}],"range":{"from":"now-1h","to":"now"}}'. `+
|
||||
`Link to VMUI: -external.alert.source='vmui/#/?g0.expr={{.Expr|queryEscape}}'. `+
|
||||
`If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.`)
|
||||
|
@ -319,7 +319,7 @@ func usage() {
|
|||
const s = `
|
||||
vmalert processes alerts and recording rules.
|
||||
|
||||
See the docs at https://docs.victoriametrics.com/vmalert.html .
|
||||
See the docs at https://docs.victoriametrics.com/vmalert/ .
|
||||
`
|
||||
flagutil.Usage(s)
|
||||
}
|
||||
|
|
|
@ -105,7 +105,7 @@ func (am *AlertManager) send(ctx context.Context, alerts []Alert, headers map[st
|
|||
if *showNotifierURL {
|
||||
amURL = am.addr.String()
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if resp.StatusCode/100 != 2 {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read response from %q: %w", amURL, err)
|
||||
|
|
|
@ -314,23 +314,20 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
|
|||
return nil, fmt.Errorf("`query` template isn't supported in replay mode")
|
||||
}
|
||||
for _, s := range res.Data {
|
||||
ls, err := ar.toLabels(s, qFn)
|
||||
ls, as, err := ar.expandTemplates(s, qFn, time.Time{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to expand labels: %s", err)
|
||||
}
|
||||
h := hash(ls.processed)
|
||||
a, err := ar.newAlert(s, nil, time.Time{}, qFn) // initial alert
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create alert: %w", err)
|
||||
return nil, fmt.Errorf("failed to expand templates: %s", err)
|
||||
}
|
||||
alertID := hash(ls.processed)
|
||||
a := ar.newAlert(s, time.Time{}, ls.processed, as) // initial alert
|
||||
|
||||
prevT := time.Time{}
|
||||
for i := range s.Values {
|
||||
at := time.Unix(s.Timestamps[i], 0)
|
||||
// try to restore alert's state on the first iteration
|
||||
if at.Equal(start) {
|
||||
if _, ok := ar.alerts[h]; ok {
|
||||
a = ar.alerts[h]
|
||||
if _, ok := ar.alerts[alertID]; ok {
|
||||
a = ar.alerts[alertID]
|
||||
prevT = at
|
||||
}
|
||||
}
|
||||
|
@ -352,7 +349,7 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
|
|||
|
||||
// save alert's state on last iteration, so it can be used on the next execRange call
|
||||
if at.Equal(end) {
|
||||
holdAlertState[h] = a
|
||||
holdAlertState[alertID] = a
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -386,15 +383,34 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
|||
}
|
||||
}()
|
||||
|
||||
ar.alertsMu.Lock()
|
||||
defer ar.alertsMu.Unlock()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
|
||||
}
|
||||
|
||||
ar.logDebugf(ts, nil, "query returned %d samples (elapsed: %s)", curState.Samples, curState.Duration)
|
||||
|
||||
qFn := func(query string) ([]datasource.Metric, error) {
|
||||
res, _, err := ar.q.Query(ctx, query, ts)
|
||||
return res.Data, err
|
||||
}
|
||||
|
||||
// template labels and annotations before updating ar.alerts,
|
||||
// since they could use `query` function which takes a while to execute,
|
||||
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6079.
|
||||
expandedLabels := make([]*labelSet, len(res.Data))
|
||||
expandedAnnotations := make([]map[string]string, len(res.Data))
|
||||
for i, m := range res.Data {
|
||||
ls, as, err := ar.expandTemplates(m, qFn, ts)
|
||||
if err != nil {
|
||||
curState.Err = fmt.Errorf("failed to expand templates: %w", err)
|
||||
return nil, curState.Err
|
||||
}
|
||||
expandedLabels[i] = ls
|
||||
expandedAnnotations[i] = as
|
||||
}
|
||||
|
||||
ar.alertsMu.Lock()
|
||||
defer ar.alertsMu.Unlock()
|
||||
|
||||
for h, a := range ar.alerts {
|
||||
// cleanup inactive alerts from previous Exec
|
||||
if a.State == notifier.StateInactive && ts.Sub(a.ResolvedAt) > resolvedRetention {
|
||||
|
@ -403,26 +419,18 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
|||
}
|
||||
}
|
||||
|
||||
qFn := func(query string) ([]datasource.Metric, error) {
|
||||
res, _, err := ar.q.Query(ctx, query, ts)
|
||||
return res.Data, err
|
||||
}
|
||||
updated := make(map[uint64]struct{})
|
||||
// update list of active alerts
|
||||
for _, m := range res.Data {
|
||||
ls, err := ar.toLabels(m, qFn)
|
||||
if err != nil {
|
||||
curState.Err = fmt.Errorf("failed to expand labels: %w", err)
|
||||
return nil, curState.Err
|
||||
}
|
||||
h := hash(ls.processed)
|
||||
if _, ok := updated[h]; ok {
|
||||
for i, m := range res.Data {
|
||||
labels, annotations := expandedLabels[i], expandedAnnotations[i]
|
||||
alertID := hash(labels.processed)
|
||||
if _, ok := updated[alertID]; ok {
|
||||
// a duplicate may be caused by the removal of the `__name__` label
|
||||
curState.Err = fmt.Errorf("labels %v: %w", ls.processed, errDuplicate)
|
||||
curState.Err = fmt.Errorf("labels %v: %w", labels.processed, errDuplicate)
|
||||
return nil, curState.Err
|
||||
}
|
||||
updated[h] = struct{}{}
|
||||
if a, ok := ar.alerts[h]; ok {
|
||||
updated[alertID] = struct{}{}
|
||||
if a, ok := ar.alerts[alertID]; ok {
|
||||
if a.State == notifier.StateInactive {
|
||||
// alert could be in inactive state for resolvedRetention
|
||||
// so when we again receive metrics for it - we switch it
|
||||
|
@ -432,22 +440,17 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
|||
ar.logDebugf(ts, a, "INACTIVE => PENDING")
|
||||
}
|
||||
a.Value = m.Values[0]
|
||||
// re-exec template since Value or query can be used in annotations
|
||||
a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
|
||||
a.Annotations = annotations
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.KeepFiringSince = time.Time{}
|
||||
continue
|
||||
}
|
||||
a, err := ar.newAlert(m, ls, ts, qFn)
|
||||
if err != nil {
|
||||
curState.Err = fmt.Errorf("failed to create alert: %w", err)
|
||||
return nil, curState.Err
|
||||
}
|
||||
a.ID = h
|
||||
a := ar.newAlert(m, ts, labels.processed, annotations)
|
||||
a.ID = alertID
|
||||
a.State = notifier.StatePending
|
||||
ar.alerts[h] = a
|
||||
ar.alerts[alertID] = a
|
||||
ar.logDebugf(ts, a, "created in state PENDING")
|
||||
}
|
||||
var numActivePending int
|
||||
|
@ -497,6 +500,28 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
|||
return ar.toTimeSeries(ts.Unix()), nil
|
||||
}
|
||||
|
||||
func (ar *AlertingRule) expandTemplates(m datasource.Metric, qFn templates.QueryFn, ts time.Time) (*labelSet, map[string]string, error) {
|
||||
ls, err := ar.toLabels(m, qFn)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to expand labels: %w", err)
|
||||
}
|
||||
|
||||
tplData := notifier.AlertTplData{
|
||||
Value: m.Values[0],
|
||||
Labels: ls.origin,
|
||||
Expr: ar.Expr,
|
||||
AlertID: hash(ls.processed),
|
||||
GroupID: ar.GroupID,
|
||||
ActiveAt: ts,
|
||||
For: ar.For,
|
||||
}
|
||||
as, err := notifier.ExecTemplate(qFn, ar.Annotations, tplData)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to template annotations: %w", err)
|
||||
}
|
||||
return ls, as, nil
|
||||
}
|
||||
|
||||
func (ar *AlertingRule) toTimeSeries(timestamp int64) []prompbmarshal.TimeSeries {
|
||||
var tss []prompbmarshal.TimeSeries
|
||||
for _, a := range ar.alerts {
|
||||
|
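The refactoring above (see the comment referencing issue 6079) moves template expansion, which may run slow `query` calls, in front of the ar.alertsMu lock, so other goroutines are not blocked while templates render. A hedged, generic sketch of the same "compute before locking" pattern:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type cache struct {
        mu sync.Mutex
        m  map[string]string
    }

    // set computes the expensive value first and only then takes the lock,
    // mirroring how expanded labels and annotations are prepared before ar.alertsMu is held.
    func (c *cache) set(key string, expensive func() string) {
        v := expensive() // slow work happens outside the critical section
        c.mu.Lock()
        c.m[key] = v
        c.mu.Unlock()
    }

    func main() {
        c := &cache{m: map[string]string{}}
        c.set("alert", func() string { time.Sleep(10 * time.Millisecond); return "expanded" })
        fmt.Println(c.m["alert"])
    }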
@ -530,25 +555,25 @@ func hash(labels map[string]string) uint64 {
|
|||
return hash.Sum64()
|
||||
}
|
||||
|
||||
func (ar *AlertingRule) newAlert(m datasource.Metric, ls *labelSet, start time.Time, qFn templates.QueryFn) (*notifier.Alert, error) {
|
||||
var err error
|
||||
if ls == nil {
|
||||
ls, err = ar.toLabels(m, qFn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to expand labels: %w", err)
|
||||
}
|
||||
func (ar *AlertingRule) newAlert(m datasource.Metric, start time.Time, labels, annotations map[string]string) *notifier.Alert {
|
||||
as := make(map[string]string)
|
||||
if annotations != nil {
|
||||
as = annotations
|
||||
}
|
||||
a := ¬ifier.Alert{
|
||||
GroupID: ar.GroupID,
|
||||
Name: ar.Name,
|
||||
Labels: ls.processed,
|
||||
Value: m.Values[0],
|
||||
ActiveAt: start,
|
||||
Expr: ar.Expr,
|
||||
For: ar.For,
|
||||
ls := make(map[string]string)
|
||||
if labels != nil {
|
||||
ls = labels
|
||||
}
|
||||
return ¬ifier.Alert{
|
||||
GroupID: ar.GroupID,
|
||||
Name: ar.Name,
|
||||
Expr: ar.Expr,
|
||||
For: ar.For,
|
||||
ActiveAt: start,
|
||||
Value: m.Values[0],
|
||||
Labels: ls,
|
||||
Annotations: as,
|
||||
}
|
||||
a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
|
||||
return a, err
|
||||
}
|
||||
|
||||
const (
|
||||
|
@ -604,9 +629,6 @@ func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts ti
|
|||
return nil
|
||||
}
|
||||
|
||||
ar.alertsMu.Lock()
|
||||
defer ar.alertsMu.Unlock()
|
||||
|
||||
if len(ar.alerts) < 1 {
|
||||
return nil
|
||||
}
|
||||
|
@ -631,6 +653,10 @@ func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts ti
|
|||
ar.logDebugf(ts, nil, "no response was received from restore query")
|
||||
return nil
|
||||
}
|
||||
|
||||
ar.alertsMu.Lock()
|
||||
defer ar.alertsMu.Unlock()
|
||||
|
||||
for _, series := range res.Data {
|
||||
series.DelLabel("__name__")
|
||||
labelSet := make(map[string]string, len(series.Labels))
|
||||
|
|
|
@ -724,7 +724,7 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
|
|||
return errGr.Err()
|
||||
}
|
||||
|
||||
// getStaledSeries checks whether there are stale series from previously sent ones.
|
||||
// getStaleSeries checks whether there are stale series from previously sent ones.
|
||||
func (e *executor) getStaleSeries(r Rule, tss []prompbmarshal.TimeSeries, timestamp time.Time) []prompbmarshal.TimeSeries {
|
||||
ruleLabels := make(map[string][]prompbmarshal.Label, len(tss))
|
||||
for _, ts := range tss {
|
||||
|
|
|
@ -217,20 +217,21 @@ func TestGroupStart(t *testing.T) {
|
|||
|
||||
const evalInterval = time.Millisecond
|
||||
g := NewGroup(groups[0], fs, evalInterval, map[string]string{"cluster": "east-1"})
|
||||
g.Concurrency = 2
|
||||
|
||||
const inst1, inst2, job = "foo", "bar", "baz"
|
||||
m1 := metricWithLabels(t, "instance", inst1, "job", job)
|
||||
m2 := metricWithLabels(t, "instance", inst2, "job", job)
|
||||
|
||||
r := g.Rules[0].(*AlertingRule)
|
||||
alert1, err := r.newAlert(m1, nil, time.Now(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("faield to create alert: %s", err)
|
||||
}
|
||||
alert1 := r.newAlert(m1, time.Now(), nil, nil)
|
||||
alert1.State = notifier.StateFiring
|
||||
// add annotations
|
||||
alert1.Annotations["summary"] = "1"
|
||||
// add external label
|
||||
alert1.Labels["cluster"] = "east-1"
|
||||
// add labels from response
|
||||
alert1.Labels["job"] = job
|
||||
alert1.Labels["instance"] = inst1
|
||||
// add rule labels
|
||||
alert1.Labels["label"] = "bar"
|
||||
alert1.Labels["host"] = inst1
|
||||
|
@ -239,13 +240,15 @@ func TestGroupStart(t *testing.T) {
|
|||
alert1.Labels[alertGroupNameLabel] = g.Name
|
||||
alert1.ID = hash(alert1.Labels)
|
||||
|
||||
alert2, err := r.newAlert(m2, nil, time.Now(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("faield to create alert: %s", err)
|
||||
}
|
||||
alert2 := r.newAlert(m2, time.Now(), nil, nil)
|
||||
alert2.State = notifier.StateFiring
|
||||
// add annotations
|
||||
alert2.Annotations["summary"] = "1"
|
||||
// add external label
|
||||
alert2.Labels["cluster"] = "east-1"
|
||||
// add labels from response
|
||||
alert2.Labels["job"] = job
|
||||
alert2.Labels["instance"] = inst2
|
||||
// add rule labels
|
||||
alert2.Labels["label"] = "bar"
|
||||
alert2.Labels["host"] = inst2
|
||||
|
@ -262,8 +265,25 @@ func TestGroupStart(t *testing.T) {
|
|||
close(finished)
|
||||
}()
|
||||
|
||||
// wait for multiple evals
|
||||
time.Sleep(20 * evalInterval)
|
||||
waitForIterations := func(n int, interval time.Duration) {
|
||||
t.Helper()
|
||||
|
||||
var cur uint64
|
||||
prev := g.metrics.iterationTotal.Get()
|
||||
for i := 0; ; i++ {
|
||||
if i > 40 {
|
||||
t.Fatalf("group wasn't able to perform %d evaluations during %d eval intervals", n, i)
|
||||
}
|
||||
cur = g.metrics.iterationTotal.Get()
|
||||
if int(cur-prev) >= n {
|
||||
return
|
||||
}
|
||||
time.Sleep(interval)
|
||||
}
|
||||
}
|
||||
|
||||
// wait for multiple evaluation iterations
|
||||
waitForIterations(4, evalInterval)
|
||||
|
||||
gotAlerts := fn.GetAlerts()
|
||||
expectedAlerts := []notifier.Alert{*alert1, *alert2}
|
||||
|
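Instead of sleeping for a fixed multiple of the evaluation interval, the test above polls the iterationTotal counter. A hedged, generic form of that polling helper:

    package main

    import (
        "fmt"
        "time"
    )

    // waitFor polls cond every interval until it returns true or attempts are exhausted.
    // It returns false on timeout instead of failing, so callers decide how to react.
    func waitFor(cond func() bool, interval time.Duration, attempts int) bool {
        for i := 0; i < attempts; i++ {
            if cond() {
                return true
            }
            time.Sleep(interval)
        }
        return false
    }

    func main() {
        start := time.Now()
        ok := waitFor(func() bool { return time.Since(start) > 5*time.Millisecond }, time.Millisecond, 40)
        fmt.Println("condition reached:", ok)
    }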
@ -280,8 +300,8 @@ func TestGroupStart(t *testing.T) {
|
|||
// and set only one datapoint for response
|
||||
fs.Add(m1)
|
||||
|
||||
// wait for multiple evals
|
||||
time.Sleep(20 * evalInterval)
|
||||
// wait for multiple evaluation iterations
|
||||
waitForIterations(4, evalInterval)
|
||||
|
||||
gotAlerts = fn.GetAlerts()
|
||||
alert2.State = notifier.StateInactive
|
||||
|
|
|
@ -32,7 +32,7 @@ type Rule interface {
|
|||
close()
|
||||
}
|
||||
|
||||
var errDuplicate = errors.New("result contains metrics with the same labelset after applying rule labels. See https://docs.victoriametrics.com/vmalert.html#series-with-the-same-labelset for details")
|
||||
var errDuplicate = errors.New("result contains metrics with the same labelset after applying rule labels. See https://docs.victoriametrics.com/vmalert/#series-with-the-same-labelset for details")
|
||||
|
||||
type ruleState struct {
|
||||
sync.RWMutex
|
||||
|
|
|
@ -13,6 +13,20 @@ function collapseAll() {
|
|||
$('.collapse').removeClass('show');
|
||||
}
|
||||
|
||||
function showByID(id) {
|
||||
if (!id) {
|
||||
return
|
||||
}
|
||||
let parent = $("#" + id).parent();
|
||||
if (!parent) {
|
||||
return
|
||||
}
|
||||
let target = $("#" + parent.attr("data-bs-target"));
|
||||
if (target.length > 0) {
|
||||
target.addClass('show');
|
||||
}
|
||||
}
|
||||
|
||||
function toggleByID(id) {
|
||||
if (id) {
|
||||
let el = $("#" + id);
|
||||
|
@ -61,7 +75,7 @@ function search() {
|
|||
function setParamURL(key, value) {
|
||||
let url = new URL(location.href)
|
||||
url.searchParams.set(key, value);
|
||||
window.history.replaceState(null, null, `?${url.searchParams.toString()}`);
|
||||
window.history.replaceState(null, null, `?${url.searchParams.toString()}${url.hash}`);
|
||||
}
|
||||
|
||||
function getParamURL(key) {
|
||||
|
@ -141,7 +155,7 @@ $(document).ready(function () {
|
|||
search()
|
||||
|
||||
let hash = window.location.hash.substr(1);
|
||||
toggleByID(hash);
|
||||
showByID(hash);
|
||||
});
|
||||
|
||||
$(document).ready(function () {
|
||||
|
|
|
@ -39,7 +39,7 @@ var (
|
|||
{Name: "Groups", Url: "groups"},
|
||||
{Name: "Alerts", Url: "alerts"},
|
||||
{Name: "Notifiers", Url: "notifiers"},
|
||||
{Name: "Docs", Url: "https://docs.victoriametrics.com/vmalert.html"},
|
||||
{Name: "Docs", Url: "https://docs.victoriametrics.com/vmalert/"},
|
||||
}
|
||||
)
|
||||
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
See vmauth docs [here](https://docs.victoriametrics.com/vmauth.html).
|
||||
See vmauth docs [here](https://docs.victoriametrics.com/vmauth/).
|
||||
|
||||
vmauth docs can be edited at [docs/vmauth.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmauth.md).
|
||||
|
|
|
@ -7,7 +7,6 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
@ -27,20 +26,21 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fscore"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
|
||||
)
|
||||
|
||||
var (
|
||||
authConfigPath = flag.String("auth.config", "", "Path to auth config. It can point either to local file or to http url. "+
|
||||
"See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config")
|
||||
"See https://docs.victoriametrics.com/vmauth/ for details on the format of this auth config")
|
||||
configCheckInterval = flag.Duration("configCheckInterval", 0, "interval for config file re-read. "+
|
||||
"Zero value disables config re-reading. By default, refreshing is disabled, send SIGHUP for config refresh.")
|
||||
defaultRetryStatusCodes = flagutil.NewArrayInt("retryStatusCodes", 0, "Comma-separated list of default HTTP response status codes when vmauth re-tries the request on other backends. "+
|
||||
"See https://docs.victoriametrics.com/vmauth.html#load-balancing for details")
|
||||
"See https://docs.victoriametrics.com/vmauth/#load-balancing for details")
|
||||
defaultLoadBalancingPolicy = flag.String("loadBalancingPolicy", "least_loaded", "The default load balancing policy to use for backend urls specified inside url_prefix section. "+
|
||||
"Supported policies: least_loaded, first_available. See https://docs.victoriametrics.com/vmauth.html#load-balancing")
|
||||
"Supported policies: least_loaded, first_available. See https://docs.victoriametrics.com/vmauth/#load-balancing")
|
||||
discoverBackendIPsGlobal = flag.Bool("discoverBackendIPs", false, "Whether to discover backend IPs via periodic DNS queries to hostnames specified in url_prefix. "+
|
||||
"This may be useful when url_prefix points to a hostname with dynamically scaled instances behind it. See https://docs.victoriametrics.com/vmauth.html#discovering-backend-ips")
|
||||
"This may be useful when url_prefix points to a hostname with dynamically scaled instances behind it. See https://docs.victoriametrics.com/vmauth/#discovering-backend-ips")
|
||||
discoverBackendIPsInterval = flag.Duration("discoverBackendIPsInterval", 10*time.Second, "The interval for re-discovering backend IPs if -discoverBackendIPs command-line flag is set. "+
|
||||
"Too low value may lead to DNS errors")
|
||||
httpAuthHeader = flagutil.NewArrayString("httpAuthHeader", "HTTP request header to use for obtaining authorization tokens. By default auth tokens are read from Authorization request header")
|
||||
|
@ -73,15 +73,18 @@ type UserInfo struct {
|
|||
RetryStatusCodes []int `yaml:"retry_status_codes,omitempty"`
|
||||
LoadBalancingPolicy string `yaml:"load_balancing_policy,omitempty"`
|
||||
DropSrcPathPrefixParts *int `yaml:"drop_src_path_prefix_parts,omitempty"`
|
||||
TLSInsecureSkipVerify *bool `yaml:"tls_insecure_skip_verify,omitempty"`
|
||||
TLSCAFile string `yaml:"tls_ca_file,omitempty"`
|
||||
TLSCertFile string `yaml:"tls_cert_file,omitempty"`
|
||||
TLSKeyFile string `yaml:"tls_key_file,omitempty"`
|
||||
TLSServerName string `yaml:"tls_server_name,omitempty"`
|
||||
TLSInsecureSkipVerify *bool `yaml:"tls_insecure_skip_verify,omitempty"`
|
||||
|
||||
MetricLabels map[string]string `yaml:"metric_labels,omitempty"`
|
||||
|
||||
concurrencyLimitCh chan struct{}
|
||||
concurrencyLimitReached *metrics.Counter
|
||||
|
||||
httpTransport *http.Transport
|
||||
rt http.RoundTripper
|
||||
|
||||
requests *metrics.Counter
|
||||
backendErrors *metrics.Counter
|
||||
|
@ -90,8 +93,8 @@ type UserInfo struct {
|
|||
|
||||
// HeadersConf represents config for request and response headers.
|
||||
type HeadersConf struct {
|
||||
RequestHeaders []Header `yaml:"headers,omitempty"`
|
||||
ResponseHeaders []Header `yaml:"response_headers,omitempty"`
|
||||
RequestHeaders []*Header `yaml:"headers,omitempty"`
|
||||
ResponseHeaders []*Header `yaml:"response_headers,omitempty"`
|
||||
}
|
||||
|
||||
func (ui *UserInfo) beginConcurrencyLimit() error {
|
||||
|
@ -155,10 +158,10 @@ type URLMap struct {
|
|||
SrcHosts []*Regex `yaml:"src_hosts,omitempty"`
|
||||
|
||||
// SrcQueryArgs is an optional list of query args, which must match request URL query args.
|
||||
SrcQueryArgs []QueryArg `yaml:"src_query_args,omitempty"`
|
||||
SrcQueryArgs []*QueryArg `yaml:"src_query_args,omitempty"`
|
||||
|
||||
// SrcHeaders is an optional list of headers, which must match request headers.
|
||||
SrcHeaders []Header `yaml:"src_headers,omitempty"`
|
||||
SrcHeaders []*Header `yaml:"src_headers,omitempty"`
|
||||
|
||||
// UrlPrefix contains backend url prefixes for the proxied request url.
|
||||
URLPrefix *URLPrefix `yaml:"url_prefix,omitempty"`
|
||||
|
@ -179,22 +182,15 @@ type URLMap struct {
|
|||
DropSrcPathPrefixParts *int `yaml:"drop_src_path_prefix_parts,omitempty"`
|
||||
}
|
||||
|
||||
// Regex represents a regex
|
||||
type Regex struct {
|
||||
re *regexp.Regexp
|
||||
|
||||
sOriginal string
|
||||
}
|
||||
|
||||
// QueryArg represents HTTP query arg
|
||||
type QueryArg struct {
|
||||
Name string
|
||||
Value string
|
||||
Value *Regex
|
||||
|
||||
sOriginal string
|
||||
}
|
||||
|
||||
// UnmarshalYAML unmarshals up from yaml.
|
||||
// UnmarshalYAML unmarshals qa from yaml.
|
||||
func (qa *QueryArg) UnmarshalYAML(f func(interface{}) error) error {
|
||||
var s string
|
||||
if err := f(&s); err != nil {
|
||||
|
@ -203,14 +199,27 @@ func (qa *QueryArg) UnmarshalYAML(f func(interface{}) error) error {
|
|||
qa.sOriginal = s
|
||||
|
||||
n := strings.IndexByte(s, '=')
|
||||
if n >= 0 {
|
||||
qa.Name = s[:n]
|
||||
qa.Value = s[n+1:]
|
||||
if n < 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
qa.Name = s[:n]
|
||||
expr := s[n+1:]
|
||||
if !strings.HasPrefix(expr, "~") {
|
||||
expr = regexp.QuoteMeta(expr)
|
||||
} else {
|
||||
expr = expr[1:]
|
||||
}
|
||||
|
||||
var re Regex
|
||||
if err := yaml.Unmarshal([]byte(expr), &re); err != nil {
|
||||
return fmt.Errorf("cannot unmarshal regex for %q query arg: %w", qa.Name, err)
|
||||
}
|
||||
qa.Value = &re
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalYAML marshals up to yaml.
|
||||
// MarshalYAML marshals qa to yaml.
|
||||
func (qa *QueryArg) MarshalYAML() (interface{}, error) {
|
||||
return qa.sOriginal, nil
|
||||
}
|
||||
|
@ -293,7 +302,7 @@ func (up *URLPrefix) getBackendsCount() int {
|
|||
//
|
||||
// backendURL.put() must be called on the returned backendURL after the request is complete.
|
||||
func (up *URLPrefix) getBackendURL() *backendURL {
|
||||
up.discoverBackendIPsIfNeeded()
|
||||
up.discoverBackendAddrsIfNeeded()
|
||||
|
||||
pbus := up.bus.Load()
|
||||
bus := *pbus
|
||||
|
@ -303,7 +312,7 @@ func (up *URLPrefix) getBackendURL() *backendURL {
|
|||
return getLeastLoadedBackendURL(bus, &up.n)
|
||||
}
|
||||
|
||||
func (up *URLPrefix) discoverBackendIPsIfNeeded() {
|
||||
func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
|
||||
if !up.discoverBackendIPs {
|
||||
// The discovery is disabled.
|
||||
return
|
||||
|
@ -328,27 +337,42 @@ func (up *URLPrefix) discoverBackendIPsIfNeeded() {
|
|||
|
||||
// Discover ips for all the backendURLs
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(intervalSec))
|
||||
hostToIPs := make(map[string][]string)
|
||||
hostToAddrs := make(map[string][]string)
|
||||
for _, bu := range up.busOriginal {
|
||||
host := bu.Hostname()
|
||||
if hostToIPs[host] != nil {
|
||||
if hostToAddrs[host] != nil {
|
||||
// ips for the given host have been already discovered
|
||||
continue
|
||||
}
|
||||
addrs, err := resolver.LookupIPAddr(ctx, host)
|
||||
var ips []string
|
||||
if err != nil {
|
||||
logger.Warnf("cannot discover backend IPs for %s: %s; use it literally", bu, err)
|
||||
ips = []string{host}
|
||||
} else {
|
||||
ips = make([]string, len(addrs))
|
||||
for i, addr := range addrs {
|
||||
ips[i] = addr.String()
|
||||
var resolvedAddrs []string
|
||||
if strings.HasPrefix(host, "srv+") {
|
||||
// The host has the format 'srv+realhost'. Strip 'srv+' prefix before performing the lookup.
|
||||
host = strings.TrimPrefix(host, "srv+")
|
||||
_, addrs, err := netutil.Resolver.LookupSRV(ctx, "", "", host)
|
||||
if err != nil {
|
||||
logger.Warnf("cannot discover backend SRV records for %s: %s; use it literally", bu, err)
|
||||
resolvedAddrs = []string{host}
|
||||
} else {
|
||||
resolvedAddrs = make([]string, len(addrs))
|
||||
for i, addr := range addrs {
|
||||
resolvedAddrs[i] = fmt.Sprintf("%s:%d", addr.Target, addr.Port)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
addrs, err := netutil.Resolver.LookupIPAddr(ctx, host)
|
||||
if err != nil {
|
||||
logger.Warnf("cannot discover backend IPs for %s: %s; use it literally", bu, err)
|
||||
resolvedAddrs = []string{host}
|
||||
} else {
|
||||
resolvedAddrs = make([]string, len(addrs))
|
||||
for i, addr := range addrs {
|
||||
resolvedAddrs[i] = addr.String()
|
||||
}
|
||||
}
|
||||
// sort ips, so they could be compared below in areEqualBackendURLs()
|
||||
sort.Strings(ips)
|
||||
}
|
||||
hostToIPs[host] = ips
|
||||
// sort resolvedAddrs, so they could be compared below in areEqualBackendURLs()
|
||||
sort.Strings(resolvedAddrs)
|
||||
hostToAddrs[host] = resolvedAddrs
|
||||
}
|
||||
cancel()
|
||||
|
||||
|
@ -357,10 +381,14 @@ func (up *URLPrefix) discoverBackendIPsIfNeeded() {
|
|||
for _, bu := range up.busOriginal {
|
||||
host := bu.Hostname()
|
||||
port := bu.Port()
|
||||
for _, ip := range hostToIPs[host] {
|
||||
for _, addr := range hostToAddrs[host] {
|
||||
buCopy := *bu
|
||||
buCopy.Host = ip
|
||||
buCopy.Host = addr
|
||||
if port != "" {
|
||||
if n := strings.IndexByte(buCopy.Host, ':'); n >= 0 {
|
||||
// Drop the discovered port and substitute it with the port specified in bu.
|
||||
buCopy.Host = buCopy.Host[:n]
|
||||
}
|
||||
buCopy.Host += ":" + port
|
||||
}
|
||||
busNew = append(busNew, &backendURL{
|
||||
|
@ -391,11 +419,6 @@ func areEqualBackendURLs(a, b []*backendURL) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
var resolver = &net.Resolver{
|
||||
PreferGo: true,
|
||||
StrictErrors: true,
|
||||
}
|
||||
|
||||
// getFirstAvailableBackendURL returns the first available backendURL, which isn't broken.
|
||||
//
|
||||
// backendURL.put() must be called on the returned backendURL after the request is complete.
|
||||
|
@ -508,6 +531,13 @@ func (up *URLPrefix) MarshalYAML() (interface{}, error) {
|
|||
return up.vOriginal, nil
|
||||
}
|
||||
|
||||
// Regex represents a regex
|
||||
type Regex struct {
|
||||
re *regexp.Regexp
|
||||
|
||||
sOriginal string
|
||||
}
|
||||
|
||||
func (r *Regex) match(s string) bool {
|
||||
prefix, ok := r.re.LiteralPrefix()
|
||||
if ok {
|
||||
|
@ -716,11 +746,11 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
|
|||
return float64(len(ui.concurrencyLimitCh))
|
||||
})
|
||||
|
||||
tr, err := getTransport(ui.TLSInsecureSkipVerify, ui.TLSCAFile)
|
||||
rt, err := newRoundTripper(ui.TLSCAFile, ui.TLSCertFile, ui.TLSKeyFile, ui.TLSServerName, ui.TLSInsecureSkipVerify)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot initialize HTTP transport: %w", err)
|
||||
return nil, fmt.Errorf("cannot initialize HTTP RoundTripper: %w", err)
|
||||
}
|
||||
ui.httpTransport = tr
|
||||
ui.rt = rt
|
||||
}
|
||||
return ac, nil
|
||||
}
|
||||
|
@ -764,11 +794,11 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
|
|||
return float64(len(ui.concurrencyLimitCh))
|
||||
})
|
||||
|
||||
tr, err := getTransport(ui.TLSInsecureSkipVerify, ui.TLSCAFile)
|
||||
rt, err := newRoundTripper(ui.TLSCAFile, ui.TLSCertFile, ui.TLSKeyFile, ui.TLSServerName, ui.TLSInsecureSkipVerify)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot initialize HTTP transport: %w", err)
|
||||
return nil, fmt.Errorf("cannot initialize HTTP RoundTripper: %w", err)
|
||||
}
|
||||
ui.httpTransport = tr
|
||||
ui.rt = rt
|
||||
|
||||
for _, at := range ats {
|
||||
byAuthToken[at] = ui
|
||||
|
|
|
@ -4,10 +4,11 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
)
|
||||
|
||||
func TestParseAuthConfigFailure(t *testing.T) {
|
||||
|
@ -315,43 +316,54 @@ users:
|
|||
f(`
|
||||
users:
|
||||
- auth_token: foo
|
||||
url_prefix: http://aaa:343/bbb
|
||||
url_prefix: https://aaa:343/bbb
|
||||
max_concurrent_requests: 5
|
||||
tls_insecure_skip_verify: true
|
||||
tls_server_name: "foo.bar"
|
||||
tls_ca_file: "foo/bar"
|
||||
tls_cert_file: "foo/baz"
|
||||
tls_key_file: "foo/foo"
|
||||
`, map[string]*UserInfo{
|
||||
getHTTPAuthToken("foo"): {
|
||||
AuthToken: "foo",
|
||||
URLPrefix: mustParseURL("http://aaa:343/bbb"),
|
||||
URLPrefix: mustParseURL("https://aaa:343/bbb"),
|
||||
MaxConcurrentRequests: 5,
|
||||
TLSInsecureSkipVerify: &insecureSkipVerifyTrue,
|
||||
TLSServerName: "foo.bar",
|
||||
TLSCAFile: "foo/bar",
|
||||
TLSCertFile: "foo/baz",
|
||||
TLSKeyFile: "foo/foo",
|
||||
},
|
||||
})
|
||||
|
||||
// Multiple url_prefix entries
|
||||
insecureSkipVerifyFalse := false
|
||||
discoverBackendIPsTrue := true
|
||||
f(`
|
||||
users:
|
||||
- username: foo
|
||||
password: bar
|
||||
url_prefix:
|
||||
- http://node1:343/bbb
|
||||
- http://node2:343/bbb
|
||||
- http://srv+node2:343/bbb
|
||||
tls_insecure_skip_verify: false
|
||||
retry_status_codes: [500, 501]
|
||||
load_balancing_policy: first_available
|
||||
drop_src_path_prefix_parts: 1
|
||||
discover_backend_ips: true
|
||||
`, map[string]*UserInfo{
|
||||
getHTTPAuthBasicToken("foo", "bar"): {
|
||||
Username: "foo",
|
||||
Password: "bar",
|
||||
URLPrefix: mustParseURLs([]string{
|
||||
"http://node1:343/bbb",
|
||||
"http://node2:343/bbb",
|
||||
"http://srv+node2:343/bbb",
|
||||
}),
|
||||
TLSInsecureSkipVerify: &insecureSkipVerifyFalse,
|
||||
RetryStatusCodes: []int{500, 501},
|
||||
LoadBalancingPolicy: "first_available",
|
||||
DropSrcPathPrefixParts: intp(1),
|
||||
DiscoverBackendIPs: &discoverBackendIPsTrue,
|
||||
},
|
||||
})
|
||||
|
||||
|
@ -384,32 +396,21 @@ users:
|
|||
{
|
||||
SrcHosts: getRegexs([]string{"foo\\.bar", "baz:1234"}),
|
||||
SrcPaths: getRegexs([]string{"/api/v1/write"}),
|
||||
SrcQueryArgs: []QueryArg{
|
||||
{
|
||||
Name: "foo",
|
||||
Value: "bar",
|
||||
},
|
||||
SrcQueryArgs: []*QueryArg{
|
||||
mustNewQueryArg("foo=b.+ar"),
|
||||
mustNewQueryArg("baz=~.*x=y.+"),
|
||||
},
|
||||
SrcHeaders: []Header{
|
||||
{
|
||||
Name: "TenantID",
|
||||
Value: "345",
|
||||
},
|
||||
SrcHeaders: []*Header{
|
||||
mustNewHeader("'TenantID: 345'"),
|
||||
},
|
||||
URLPrefix: mustParseURLs([]string{
|
||||
"http://vminsert1/insert/0/prometheus",
|
||||
"http://vminsert2/insert/0/prometheus",
|
||||
}),
|
||||
HeadersConf: HeadersConf{
|
||||
RequestHeaders: []Header{
|
||||
{
|
||||
Name: "foo",
|
||||
Value: "bar",
|
||||
},
|
||||
{
|
||||
Name: "xxx",
|
||||
Value: "y",
|
||||
},
|
||||
RequestHeaders: []*Header{
|
||||
mustNewHeader("'foo: bar'"),
|
||||
mustNewHeader("'xxx: y'"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -423,7 +424,7 @@ users:
|
|||
url_prefix: http://vmselect/select/0/prometheus
|
||||
- src_paths: ["/api/v1/write"]
|
||||
src_hosts: ["foo\\.bar", "baz:1234"]
|
||||
src_query_args: ['foo=bar']
|
||||
src_query_args: ['foo=b.+ar', 'baz=~.*x=y.+']
|
||||
src_headers: ['TenantID: 345']
|
||||
url_prefix: ["http://vminsert1/insert/0/prometheus","http://vminsert2/insert/0/prometheus"]
|
||||
headers:
|
||||
|
@ -486,15 +487,9 @@ users:
|
|||
"http://vminsert2/insert/0/prometheus",
|
||||
}),
|
||||
HeadersConf: HeadersConf{
|
||||
RequestHeaders: []Header{
|
||||
{
|
||||
Name: "foo",
|
||||
Value: "bar",
|
||||
},
|
||||
{
|
||||
Name: "xxx",
|
||||
Value: "y",
|
||||
},
|
||||
RequestHeaders: []*Header{
|
||||
mustNewHeader("'foo: bar'"),
|
||||
mustNewHeader("'xxx: y'"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -518,15 +513,9 @@ users:
|
|||
"http://vminsert2/insert/0/prometheus",
|
||||
}),
|
||||
HeadersConf: HeadersConf{
|
||||
RequestHeaders: []Header{
|
||||
{
|
||||
Name: "foo",
|
||||
Value: "bar",
|
||||
},
|
||||
{
|
||||
Name: "xxx",
|
||||
Value: "y",
|
||||
},
|
||||
RequestHeaders: []*Header{
|
||||
mustNewHeader("'foo: bar'"),
|
||||
mustNewHeader("'xxx: y'"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -600,11 +589,11 @@ unauthorized_user:
|
|||
}
|
||||
|
||||
ui := m[getHTTPAuthBasicToken("foo", "bar")]
|
||||
if !isSetBool(ui.TLSInsecureSkipVerify, true) || !ui.httpTransport.TLSClientConfig.InsecureSkipVerify {
|
||||
if !isSetBool(ui.TLSInsecureSkipVerify, true) {
|
||||
t.Fatalf("unexpected TLSInsecureSkipVerify value for user foo")
|
||||
}
|
||||
|
||||
if !isSetBool(ac.UnauthorizedUser.TLSInsecureSkipVerify, false) || ac.UnauthorizedUser.httpTransport.TLSClientConfig.InsecureSkipVerify {
|
||||
if !isSetBool(ac.UnauthorizedUser.TLSInsecureSkipVerify, false) {
|
||||
t.Fatalf("unexpected TLSInsecureSkipVerify value for unauthorized_user")
|
||||
}
|
||||
}
|
||||
|
@ -699,10 +688,7 @@ func isSetBool(boolP *bool, expectedValue bool) bool {
|
|||
func getRegexs(paths []string) []*Regex {
|
||||
var sps []*Regex
|
||||
for _, path := range paths {
|
||||
sps = append(sps, &Regex{
|
||||
sOriginal: path,
|
||||
re: regexp.MustCompile("^(?:" + path + ")$"),
|
||||
})
|
||||
sps = append(sps, mustNewRegex(path))
|
||||
}
|
||||
return sps
|
||||
}
|
||||
|
@ -759,3 +745,27 @@ func mustParseURLs(us []string) *URLPrefix {
|
|||
func intp(n int) *int {
|
||||
return &n
|
||||
}
|
||||
|
||||
func mustNewRegex(s string) *Regex {
|
||||
var re Regex
|
||||
if err := yaml.Unmarshal([]byte(s), &re); err != nil {
|
||||
logger.Panicf("cannot unmarshal regex %q: %s", s, err)
|
||||
}
|
||||
return &re
|
||||
}
|
||||
|
||||
func mustNewQueryArg(s string) *QueryArg {
|
||||
var qa QueryArg
|
||||
if err := yaml.Unmarshal([]byte(s), &qa); err != nil {
|
||||
logger.Panicf("cannot unmarshal query arg filter %q: %s", s, err)
|
||||
}
|
||||
return &qa
|
||||
}
|
||||
|
||||
func mustNewHeader(s string) *Header {
|
||||
var h Header
|
||||
if err := yaml.Unmarshal([]byte(s), &h); err != nil {
|
||||
logger.Panicf("cannot unmarshal header filter %q: %s", s, err)
|
||||
}
|
||||
return &h
|
||||
}
|
||||
|
|
|
@ -2,8 +2,6 @@ package main
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
|
@ -22,14 +20,13 @@ import (
|
|||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fscore"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
|
||||
)
|
||||
|
||||
|
@ -53,9 +50,15 @@ var (
|
|||
maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size, which can be cached and re-tried at other backends. "+
|
||||
"Bigger values may require more memory")
|
||||
backendTLSInsecureSkipVerify = flag.Bool("backend.tlsInsecureSkipVerify", false, "Whether to skip TLS verification when connecting to backends over HTTPS. "+
|
||||
"See https://docs.victoriametrics.com/vmauth.html#backend-tls-setup")
|
||||
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
|
||||
backendTLSCAFile = flag.String("backend.TLSCAFile", "", "Optional path to TLS root CA file, which is used for TLS verification when connecting to backends over HTTPS. "+
|
||||
"See https://docs.victoriametrics.com/vmauth.html#backend-tls-setup")
|
||||
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
|
||||
backendTLSCertFile = flag.String("backend.TLSCertFile", "", "Optional path to TLS client certificate file, which must be sent to HTTPS backend. "+
|
||||
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
|
||||
backendTLSKeyFile = flag.String("backend.TLSKeyFile", "", "Optional path to TLS client key file, which must be sent to HTTPS backend. "+
|
||||
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
|
||||
backendTLSServerName = flag.String("backend.TLSServerName", "", "Optional TLS ServerName, which must be sent to HTTPS backend. "+
|
||||
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@ -161,20 +164,12 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
|
|||
if err := ui.beginConcurrencyLimit(); err != nil {
|
||||
handleConcurrencyLimitError(w, r, err)
|
||||
<-concurrencyLimitCh
|
||||
|
||||
// Requests failed because of concurrency limit must be counted as errors,
|
||||
// since this usually means the backend cannot keep up with the current load.
|
||||
ui.backendErrors.Inc()
|
||||
return
|
||||
}
|
||||
default:
|
||||
concurrentRequestsLimitReached.Inc()
|
||||
err := fmt.Errorf("cannot serve more than -maxConcurrentRequests=%d concurrent requests", cap(concurrencyLimitCh))
|
||||
handleConcurrencyLimitError(w, r, err)
|
||||
|
||||
// Requests failed because of concurrency limit must be counted as errors,
|
||||
// since this usually means the backend cannot keep up with the current load.
|
||||
ui.backendErrors.Inc()
|
||||
return
|
||||
}
|
||||
processRequest(w, r, ui)
|
||||
|
@ -248,7 +243,7 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
|
|||
req.Host = targetURL.Host
|
||||
}
|
||||
updateHeadersByConfig(req.Header, hc.RequestHeaders)
|
||||
res, err := ui.httpTransport.RoundTrip(req)
|
||||
res, err := ui.rt.RoundTrip(req)
|
||||
rtb, rtbOK := req.Body.(*readTrackingBody)
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
|
||||
|
@ -331,7 +326,7 @@ func copyHeader(dst, src http.Header) {
|
|||
}
|
||||
}
|
||||
|
||||
func updateHeadersByConfig(headers http.Header, config []Header) {
|
||||
func updateHeadersByConfig(headers http.Header, config []*Header) {
|
||||
for _, h := range config {
|
||||
if h.Value == "" {
|
||||
headers.Del(h.Name)
|
||||
|
@ -400,50 +395,41 @@ var (
|
|||
missingRouteRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="missing_route"}`)
|
||||
)
|
||||
|
||||
func getTransport(insecureSkipVerifyP *bool, caFile string) (*http.Transport, error) {
|
||||
if insecureSkipVerifyP == nil {
|
||||
insecureSkipVerifyP = backendTLSInsecureSkipVerify
|
||||
func newRoundTripper(caFileOpt, certFileOpt, keyFileOpt, serverNameOpt string, insecureSkipVerifyP *bool) (http.RoundTripper, error) {
|
||||
caFile := *backendTLSCAFile
|
||||
if caFileOpt != "" {
|
||||
caFile = caFileOpt
|
||||
}
|
||||
insecureSkipVerify := *insecureSkipVerifyP
|
||||
if caFile == "" {
|
||||
caFile = *backendTLSCAFile
|
||||
certFile := *backendTLSCertFile
|
||||
if certFileOpt != "" {
|
||||
certFile = certFileOpt
|
||||
}
|
||||
keyFile := *backendTLSKeyFile
|
||||
if keyFileOpt != "" {
|
||||
keyFile = keyFileOpt
|
||||
}
|
||||
serverName := *backendTLSServerName
|
||||
if serverNameOpt != "" {
|
||||
serverName = serverNameOpt
|
||||
}
|
||||
insecureSkipVerify := *backendTLSInsecureSkipVerify
|
||||
if p := insecureSkipVerifyP; p != nil {
|
||||
insecureSkipVerify = *p
|
||||
}
|
||||
opts := &promauth.Options{
|
||||
TLSConfig: &promauth.TLSConfig{
|
||||
CAFile: caFile,
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
ServerName: serverName,
|
||||
InsecureSkipVerify: insecureSkipVerify,
|
||||
},
|
||||
}
|
||||
cfg, err := opts.NewConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot initialize promauth.Config: %w", err)
|
||||
}
|
||||
|
||||
bb := bbPool.Get()
|
||||
defer bbPool.Put(bb)
|
||||
|
||||
bb.B = appendTransportKey(bb.B[:0], insecureSkipVerify, caFile)
|
||||
|
||||
transportMapLock.Lock()
|
||||
defer transportMapLock.Unlock()
|
||||
|
||||
tr := transportMap[string(bb.B)]
|
||||
if tr == nil {
|
||||
trLocal, err := newTransport(insecureSkipVerify, caFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transportMap[string(bb.B)] = trLocal
|
||||
tr = trLocal
|
||||
}
|
||||
|
||||
return tr, nil
|
||||
}
|
||||
|
||||
var (
|
||||
transportMap = make(map[string]*http.Transport)
|
||||
transportMapLock sync.Mutex
|
||||
)
|
||||
|
||||
func appendTransportKey(dst []byte, insecureSkipVerify bool, caFile string) []byte {
|
||||
dst = encoding.MarshalBool(dst, insecureSkipVerify)
|
||||
dst = encoding.MarshalBytes(dst, bytesutil.ToUnsafeBytes(caFile))
|
||||
return dst
|
||||
}
|
||||
|
||||
var bbPool bytesutil.ByteBufferPool
|
||||
|
||||
func newTransport(insecureSkipVerify bool, caFile string) (*http.Transport, error) {
|
||||
tr := http.DefaultTransport.(*http.Transport).Clone()
|
||||
tr.ResponseHeaderTimeout = *responseTimeout
|
||||
// Automatic compression must be disabled in order to fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/535
|
||||
|
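newRoundTripper above resolves every TLS option the same way: a per-user value from the auth config wins, otherwise the corresponding -backend.TLS* flag is used. A hedged one-function sketch of that fallback pattern (flag values are assumed examples):

    package main

    import "fmt"

    // firstNonEmpty returns override when it is set and falls back to the global value otherwise,
    // mirroring how per-user tls_* options override the -backend.TLS* flags.
    func firstNonEmpty(override, global string) string {
        if override != "" {
            return override
        }
        return global
    }

    func main() {
        globalCAFile := "/etc/vmauth/ca.pem" // assumed global -backend.TLSCAFile value
        fmt.Println(firstNonEmpty("", globalCAFile))                  // /etc/vmauth/ca.pem
        fmt.Println(firstNonEmpty("/home/user/ca.pem", globalCAFile)) // /home/user/ca.pem
    }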
@ -452,27 +438,10 @@ func newTransport(insecureSkipVerify bool, caFile string) (*http.Transport, erro
|
|||
if tr.MaxIdleConns != 0 && tr.MaxIdleConns < tr.MaxIdleConnsPerHost {
|
||||
tr.MaxIdleConns = tr.MaxIdleConnsPerHost
|
||||
}
|
||||
tlsCfg := tr.TLSClientConfig
|
||||
if tlsCfg == nil {
|
||||
tlsCfg = &tls.Config{}
|
||||
tr.TLSClientConfig = tlsCfg
|
||||
}
|
||||
if insecureSkipVerify || caFile != "" {
|
||||
tlsCfg.ClientSessionCache = tls.NewLRUClientSessionCache(0)
|
||||
tlsCfg.InsecureSkipVerify = insecureSkipVerify
|
||||
if caFile != "" {
|
||||
data, err := fscore.ReadFileOrHTTP(caFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read tls_ca_file: %w", err)
|
||||
}
|
||||
rootCA := x509.NewCertPool()
|
||||
if !rootCA.AppendCertsFromPEM(data) {
|
||||
return nil, fmt.Errorf("cannot parse data read from tls_ca_file %q", caFile)
|
||||
}
|
||||
tlsCfg.RootCAs = rootCA
|
||||
}
|
||||
}
|
||||
return tr, nil
|
||||
tr.DialContext = netutil.DialMaybeSRV
|
||||
|
||||
rt := cfg.NewRoundTripper(tr)
|
||||
return rt, nil
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -496,7 +465,7 @@ func usage() {
|
|||
const s = `
|
||||
vmauth authenticates and authorizes incoming requests and proxies them to VictoriaMetrics.
|
||||
|
||||
See the docs at https://docs.victoriametrics.com/vmauth.html .
|
||||
See the docs at https://docs.victoriametrics.com/vmauth/ .
|
||||
`
|
||||
flagutil.Usage(s)
|
||||
}
|
||||
|
|
|
@ -86,19 +86,25 @@ func matchAnyRegex(rs []*Regex, s string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func matchAnyQueryArg(qas []QueryArg, args url.Values) bool {
|
||||
func matchAnyQueryArg(qas []*QueryArg, args url.Values) bool {
|
||||
if len(qas) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, qa := range qas {
|
||||
if slices.Contains(args[qa.Name], qa.Value) {
|
||||
return true
|
||||
vs, ok := args[qa.Name]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
if qa.Value.match(v) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func matchAnyHeader(headers []Header, h http.Header) bool {
|
||||
func matchAnyHeader(headers []*Header, h http.Header) bool {
|
||||
if len(headers) == 0 {
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -97,8 +97,10 @@ func TestCreateTargetURLSuccess(t *testing.T) {
|
|||
bu := up.getBackendURL()
|
||||
target := mergeURLs(bu.url, u, up.dropSrcPathPrefixParts)
|
||||
bu.put()
|
||||
if target.String() != expectedTarget {
|
||||
t.Fatalf("unexpected target; got %q; want %q", target, expectedTarget)
|
||||
|
||||
gotTarget := target.String()
|
||||
if gotTarget != expectedTarget {
|
||||
t.Fatalf("unexpected target; \ngot:\n%q;\nwant:\n%q", gotTarget, expectedTarget)
|
||||
}
|
||||
if s := headersToString(hc.RequestHeaders); s != expectedRequestHeaders {
|
||||
t.Fatalf("unexpected request headers; got %q; want %q", s, expectedRequestHeaders)
|
||||
|
@ -123,17 +125,11 @@ func TestCreateTargetURLSuccess(t *testing.T) {
|
|||
f(&UserInfo{
|
||||
URLPrefix: mustParseURL("http://foo.bar"),
|
||||
HeadersConf: HeadersConf{
|
||||
RequestHeaders: []Header{
|
||||
{
|
||||
Name: "bb",
|
||||
Value: "aaa",
|
||||
},
|
||||
RequestHeaders: []*Header{
|
||||
mustNewHeader("'bb: aaa'"),
|
||||
},
|
||||
ResponseHeaders: []Header{
|
||||
{
|
||||
Name: "x",
|
||||
Value: "y",
|
||||
},
|
||||
ResponseHeaders: []*Header{
|
||||
mustNewHeader("'x: y'"),
|
||||
},
|
||||
},
|
||||
RetryStatusCodes: []int{503, 501},
|
||||
|
@ -162,29 +158,17 @@ func TestCreateTargetURLSuccess(t *testing.T) {
|
|||
{
|
||||
SrcHosts: getRegexs([]string{"host42"}),
|
||||
SrcPaths: getRegexs([]string{"/vmsingle/api/v1/query"}),
|
||||
SrcQueryArgs: []QueryArg{
|
||||
{
|
||||
Name: "db",
|
||||
Value: "foo",
|
||||
},
|
||||
SrcQueryArgs: []*QueryArg{
|
||||
mustNewQueryArg("db=foo"),
|
||||
},
|
||||
URLPrefix: mustParseURL("http://vmselect/0/prometheus"),
|
||||
HeadersConf: HeadersConf{
|
||||
RequestHeaders: []Header{
|
||||
{
|
||||
Name: "xx",
|
||||
Value: "aa",
|
||||
},
|
||||
{
|
||||
Name: "yy",
|
||||
Value: "asdf",
|
||||
},
|
||||
RequestHeaders: []*Header{
|
||||
mustNewHeader("'xx: aa'"),
|
||||
mustNewHeader("'yy: asdf'"),
|
||||
},
|
||||
ResponseHeaders: []Header{
|
||||
{
|
||||
Name: "qwe",
|
||||
Value: "rty",
|
||||
},
|
||||
ResponseHeaders: []*Header{
|
||||
mustNewHeader("'qwe: rty'"),
|
||||
},
|
||||
},
|
||||
RetryStatusCodes: []int{503, 500, 501},
|
||||
|
@ -200,14 +184,12 @@ func TestCreateTargetURLSuccess(t *testing.T) {
|
|||
},
|
||||
URLPrefix: mustParseURL("http://default-server"),
|
||||
HeadersConf: HeadersConf{
|
||||
RequestHeaders: []Header{{
|
||||
Name: "bb",
|
||||
Value: "aaa",
|
||||
}},
|
||||
ResponseHeaders: []Header{{
|
||||
Name: "x",
|
||||
Value: "y",
|
||||
}},
|
||||
RequestHeaders: []*Header{
|
||||
mustNewHeader("'bb: aaa'"),
|
||||
},
|
||||
ResponseHeaders: []*Header{
|
||||
mustNewHeader("'x: y'"),
|
||||
},
|
||||
},
|
||||
RetryStatusCodes: []int{502},
|
||||
DropSrcPathPrefixParts: intp(2),
|
||||
|
@ -250,6 +232,30 @@ func TestCreateTargetURLSuccess(t *testing.T) {
|
|||
f(&UserInfo{
|
||||
URLPrefix: mustParseURL("http://foo.bar?extra_label=team=mobile"),
|
||||
}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "", "", nil, "least_loaded", 0)
|
||||
|
||||
// Complex routing regexp query args in `url_map`
|
||||
ui = &UserInfo{
|
||||
URLMaps: []URLMap{
|
||||
{
|
||||
SrcPaths: getRegexs([]string{"/api/v1/query"}),
|
||||
SrcQueryArgs: []*QueryArg{
|
||||
mustNewQueryArg(`query=~.*{.*env="dev".*}*.`),
|
||||
},
|
||||
URLPrefix: mustParseURL("http://vmselect/0/prometheus"),
|
||||
},
|
||||
{
|
||||
SrcPaths: getRegexs([]string{"/api/v1/query"}),
|
||||
SrcQueryArgs: []*QueryArg{
|
||||
mustNewQueryArg(`query=~.*{.*env="prod".*}.*`),
|
||||
},
|
||||
URLPrefix: mustParseURL("http://vmselect/1/prometheus"),
|
||||
},
|
||||
},
|
||||
URLPrefix: mustParseURL("http://default-server"),
|
||||
}
|
||||
f(ui, `/api/v1/query?query=up{env="prod"}`, `http://vmselect/1/prometheus/api/v1/query?query=up%7Benv%3D%22prod%22%7D`, "", "", nil, "least_loaded", 0)
|
||||
f(ui, `/api/v1/query?query=up{foo="bar",env="dev",pod!=""}`, `http://vmselect/0/prometheus/api/v1/query?query=up%7Bfoo%3D%22bar%22%2Cenv%3D%22dev%22%2Cpod%21%3D%22%22%7D`, "", "", nil, "least_loaded", 0)
|
||||
f(ui, `/api/v1/query?query=up{foo="bar"}`, `http://default-server/api/v1/query?query=up%7Bfoo%3D%22bar%22%7D`, "", "", nil, "least_loaded", 0)
|
||||
}
|
||||
|
||||
func TestCreateTargetURLFailure(t *testing.T) {
|
||||
|
@ -265,10 +271,10 @@ func TestCreateTargetURLFailure(t *testing.T) {
|
|||
t.Fatalf("unexpected non-empty up=%#v", up)
|
||||
}
|
||||
if hc.RequestHeaders != nil {
|
||||
t.Fatalf("unexpected non-empty request headers=%q", hc.RequestHeaders)
|
||||
t.Fatalf("unexpected non-empty request headers: %s", headersToString(hc.RequestHeaders))
|
||||
}
|
||||
if hc.ResponseHeaders != nil {
|
||||
t.Fatalf("unexpected non-empty response headers=%q", hc.ResponseHeaders)
|
||||
t.Fatalf("unexpected non-empty response headers: %s", headersToString(hc.ResponseHeaders))
|
||||
}
|
||||
}
|
||||
f(&UserInfo{}, "/foo/bar")
|
||||
|
@ -282,7 +288,7 @@ func TestCreateTargetURLFailure(t *testing.T) {
|
|||
}, "/api/v1/write")
|
||||
}
|
||||
|
||||
func headersToString(hs []Header) string {
|
||||
func headersToString(hs []*Header) string {
|
||||
a := make([]string, len(hs))
|
||||
for i, h := range hs {
|
||||
a[i] = fmt.Sprintf("%s: %s", h.Name, h.Value)
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
See vmbackup docs [here](https://docs.victoriametrics.com/vmbackup.html).
See vmbackup docs [here](https://docs.victoriametrics.com/vmbackup/).

vmbackup docs can be edited at [docs/vmbackup.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmbackup.md).
|
|
@ -26,7 +26,7 @@ import (
|
|||
var (
|
||||
httpListenAddr = flag.String("httpListenAddr", ":8420", "TCP address for exporting metrics at /metrics page")
|
||||
storageDataPath = flag.String("storageDataPath", "victoria-metrics-data", "Path to VictoriaMetrics data. Must match -storageDataPath from VictoriaMetrics or vmstorage")
|
||||
snapshotName = flag.String("snapshotName", "", "Name for the snapshot to backup. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots. There is no need in setting -snapshotName if -snapshot.createURL is set")
|
||||
snapshotName = flag.String("snapshotName", "", "Name for the snapshot to backup. See https://docs.victoriametrics.com/single-server-victoriametrics/#how-to-work-with-snapshots. There is no need in setting -snapshotName if -snapshot.createURL is set")
|
||||
snapshotCreateURL = flag.String("snapshot.createURL", "", "VictoriaMetrics create snapshot url. When this is given a snapshot will automatically be created during backup. "+
|
||||
"Example: http://victoriametrics:8428/snapshot/create . There is no need in setting -snapshotName if -snapshot.createURL is set")
|
||||
snapshotDeleteURL = flag.String("snapshot.deleteURL", "", "VictoriaMetrics delete snapshot url. Optional. Will be generated from -snapshot.createURL if not provided. "+
|
||||
|
@ -164,7 +164,7 @@ func usage() {
|
|||
vmbackup performs backups for VictoriaMetrics data from instant snapshots to gcs, s3, azblob
|
||||
or local filesystem. Backed up data can be restored with vmrestore.
|
||||
|
||||
See the docs at https://docs.victoriametrics.com/vmbackup.html .
|
||||
See the docs at https://docs.victoriametrics.com/vmbackup/ .
|
||||
`
|
||||
flagutil.Usage(s)
|
||||
}
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
See vmbackupmanager docs [here](https://docs.victoriametrics.com/vmbackupmanager.html).
See vmbackupmanager docs [here](https://docs.victoriametrics.com/vmbackupmanager/).

vmbackupmanager docs can be edited at [docs/vmbackupmanager.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmbackupmanager.md).
|
|
@ -1,3 +1,3 @@
|
|||
See vmctl docs [here](https://docs.victoriametrics.com/vmctl.html).
See vmctl docs [here](https://docs.victoriametrics.com/vmctl/).

vmctl docs can be edited at [docs/vmctl.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmctl.md).
|
|
@ -442,12 +442,12 @@ var (
|
|||
},
|
||||
&cli.StringFlag{
|
||||
Name: vmNativeFilterTimeStart,
|
||||
Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#timestamp-formats",
|
||||
Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/single-server-victoriametrics/#timestamp-formats",
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: vmNativeFilterTimeEnd,
|
||||
Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#timestamp-formats",
|
||||
Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/single-server-victoriametrics/#timestamp-formats",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: vmNativeStepInterval,
|
||||
|
@ -469,7 +469,7 @@ var (
|
|||
Name: vmNativeSrcAddr,
|
||||
Usage: "VictoriaMetrics address to perform export from. \n" +
|
||||
" Should be the same as --httpListenAddr value for single-node version or vmselect component." +
|
||||
" If exporting from cluster version see https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format",
|
||||
" If exporting from cluster version see https://docs.victoriametrics.com/cluster-victoriametrics/#url-format",
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
|
@ -518,7 +518,7 @@ var (
|
|||
Name: vmNativeDstAddr,
|
||||
Usage: "VictoriaMetrics address to perform import to. \n" +
|
||||
" Should be the same as --httpListenAddr value for single-node version or vminsert component." +
|
||||
" If importing into cluster version see https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format",
|
||||
" If importing into cluster version see https://docs.victoriametrics.com/cluster-victoriametrics/#url-format",
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
|
|
|
@ -372,6 +372,7 @@ func main() {
|
|||
if err != nil {
|
||||
return cli.Exit(fmt.Errorf("cannot open exported block at path=%q err=%w", blockPath, err), 1)
|
||||
}
|
||||
defer f.Close()
|
||||
var blocksCount atomic.Uint64
|
||||
if err := stream.Parse(f, isBlockGzipped, func(_ *stream.Block) error {
|
||||
blocksCount.Add(1)
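The counter above is switched to `atomic.Uint64`, presumably because `stream.Parse` may invoke the per-block callback from multiple goroutines. A minimal, self-contained sketch of the same counting pattern (the concurrent caller below is illustrative, not vmctl's actual parser):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var blocksCount atomic.Uint64
	var wg sync.WaitGroup

	// Emulate a parser that fires the per-block callback from many goroutines.
	onBlock := func() {
		defer wg.Done()
		blocksCount.Add(1) // safe for concurrent use, unlike a plain uint64
	}
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go onBlock()
	}
	wg.Wait()

	fmt.Printf("parsed %d blocks\n", blocksCount.Load()) // parsed 100 blocks
}
```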
|
||||
|
|
|
@ -123,7 +123,7 @@ func NewImporter(ctx context.Context, cfg Config) (*Importer, error) {
|
|||
importPath := addr + "/api/v1/import"
|
||||
if cfg.AccountID != "" {
|
||||
// if cluster version
|
||||
// see https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format
|
||||
// see https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
|
||||
importPath = fmt.Sprintf("%s/insert/%s/prometheus/api/v1/import", addr, cfg.AccountID)
|
||||
}
|
||||
importPath, err := AddExtraLabelsToImportPath(importPath, cfg.ExtraLabels)
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
See vmgateway docs [here](https://docs.victoriametrics.com/vmgateway.html).
See vmgateway docs [here](https://docs.victoriametrics.com/vmgateway/).

vmgateway docs can be edited at [docs/vmgateway.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmgateway.md).
|
|
@ -2,23 +2,16 @@ package common
|
|||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
)
|
||||
|
||||
// GetInsertCtx returns InsertCtx from the pool.
|
||||
//
|
||||
// Call PutInsertCtx for returning it to the pool.
|
||||
func GetInsertCtx() *InsertCtx {
|
||||
select {
|
||||
case ctx := <-insertCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := insertCtxPool.Get(); v != nil {
|
||||
return v.(*InsertCtx)
|
||||
}
|
||||
return &InsertCtx{}
|
||||
if v := insertCtxPool.Get(); v != nil {
|
||||
return v.(*InsertCtx)
|
||||
}
|
||||
return &InsertCtx{}
|
||||
}
|
||||
|
||||
// PutInsertCtx returns ctx to the pool.
|
||||
|
@ -26,12 +19,7 @@ func GetInsertCtx() *InsertCtx {
|
|||
// ctx cannot be used after the call.
|
||||
func PutInsertCtx(ctx *InsertCtx) {
|
||||
ctx.Reset(0)
|
||||
select {
|
||||
case insertCtxPoolCh <- ctx:
|
||||
default:
|
||||
insertCtxPool.Put(ctx)
|
||||
}
|
||||
insertCtxPool.Put(ctx)
|
||||
}
|
||||
|
||||
var insertCtxPool sync.Pool
|
||||
var insertCtxPoolCh = make(chan *InsertCtx, cgroup.AvailableCPUs())
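Because the diff markers were lost above, the before/after of this hunk is hard to follow. The change drops the `insertCtxPoolCh` channel fast path and uses `sync.Pool` directly; a sketch of the resulting pattern (the `InsertCtx` stub here is illustrative):

```go
package common

import "sync"

// InsertCtx stands in for the real vminsert insert context.
type InsertCtx struct{ rows []byte }

// Reset prepares ctx for reuse.
func (ctx *InsertCtx) Reset(rowsLen int) { ctx.rows = ctx.rows[:0] }

var insertCtxPool sync.Pool

// GetInsertCtx returns an InsertCtx from the pool.
func GetInsertCtx() *InsertCtx {
	if v := insertCtxPool.Get(); v != nil {
		return v.(*InsertCtx)
	}
	return &InsertCtx{}
}

// PutInsertCtx returns ctx to the pool; ctx must not be used after the call.
func PutInsertCtx(ctx *InsertCtx) {
	ctx.Reset(0)
	insertCtxPool.Put(ctx)
}
```

The same simplification is applied to the `pushCtx` pools in the protocol handlers further down in this commit.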
|
||||
|
|
|
@ -20,20 +20,22 @@ import (
|
|||
|
||||
var (
|
||||
streamAggrConfig = flag.String("streamAggr.config", "", "Optional path to file with stream aggregation config. "+
|
||||
"See https://docs.victoriametrics.com/stream-aggregation.html . "+
|
||||
"See https://docs.victoriametrics.com/stream-aggregation/ . "+
|
||||
"See also -streamAggr.keepInput, -streamAggr.dropInput and -streamAggr.dedupInterval")
|
||||
streamAggrKeepInput = flag.Bool("streamAggr.keepInput", false, "Whether to keep all the input samples after the aggregation with -streamAggr.config. "+
|
||||
"By default, only aggregated samples are dropped, while the remaining samples are stored in the database. "+
|
||||
"See also -streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation.html")
|
||||
"See also -streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation/")
|
||||
streamAggrDropInput = flag.Bool("streamAggr.dropInput", false, "Whether to drop all the input samples after the aggregation with -streamAggr.config. "+
|
||||
"By default, only aggregated samples are dropped, while the remaining samples are stored in the database. "+
|
||||
"See also -streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation.html")
|
||||
"See also -streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation/")
|
||||
streamAggrDedupInterval = flag.Duration("streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation with -streamAggr.config . "+
|
||||
"See also -streamAggr.dropInputLabels and -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation.html#deduplication")
|
||||
"See also -streamAggr.dropInputLabels and -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation/#deduplication")
|
||||
streamAggrDropInputLabels = flagutil.NewArrayString("streamAggr.dropInputLabels", "An optional list of labels to drop from samples "+
|
||||
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation.html#dropping-unneeded-labels")
|
||||
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation/#dropping-unneeded-labels")
|
||||
streamAggrIgnoreFirstIntervals = flag.Int("streamAggr.ignoreFirstIntervals", 0, "Number of aggregation intervals to skip after the start. Increase this value if you observe incorrect aggregation results after restarts. It could be caused by receiving unordered delayed data from clients pushing data into the database. "+
|
||||
"See https://docs.victoriametrics.com/stream-aggregation/#ignore-aggregation-intervals-on-start")
|
||||
streamAggrIgnoreOldSamples = flag.Bool("streamAggr.ignoreOldSamples", false, "Whether to ignore input samples with old timestamps outside the current aggregation interval. "+
|
||||
"See https://docs.victoriametrics.com/stream-aggregation.html#ignoring-old-samples")
|
||||
"See https://docs.victoriametrics.com/stream-aggregation/#ignoring-old-samples")
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -56,9 +58,10 @@ func CheckStreamAggrConfig() error {
|
|||
}
|
||||
pushNoop := func(_ []prompbmarshal.TimeSeries) {}
|
||||
opts := &streamaggr.Options{
|
||||
DedupInterval: *streamAggrDedupInterval,
|
||||
DropInputLabels: *streamAggrDropInputLabels,
|
||||
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
|
||||
DedupInterval: *streamAggrDedupInterval,
|
||||
DropInputLabels: *streamAggrDropInputLabels,
|
||||
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
|
||||
IgnoreFirstIntervals: *streamAggrIgnoreFirstIntervals,
|
||||
}
|
||||
sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushNoop, opts)
|
||||
if err != nil {
|
||||
|
@ -84,9 +87,10 @@ func InitStreamAggr() {
|
|||
sighupCh := procutil.NewSighupChan()
|
||||
|
||||
opts := &streamaggr.Options{
|
||||
DedupInterval: *streamAggrDedupInterval,
|
||||
DropInputLabels: *streamAggrDropInputLabels,
|
||||
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
|
||||
DedupInterval: *streamAggrDedupInterval,
|
||||
DropInputLabels: *streamAggrDropInputLabels,
|
||||
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
|
||||
IgnoreFirstIntervals: *streamAggrIgnoreFirstIntervals,
|
||||
}
|
||||
sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, opts)
|
||||
if err != nil {
|
||||
|
@ -117,9 +121,10 @@ func reloadStreamAggrConfig() {
|
|||
saCfgReloads.Inc()
|
||||
|
||||
opts := &streamaggr.Options{
|
||||
DedupInterval: *streamAggrDedupInterval,
|
||||
DropInputLabels: *streamAggrDropInputLabels,
|
||||
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
|
||||
DedupInterval: *streamAggrDedupInterval,
|
||||
DropInputLabels: *streamAggrDropInputLabels,
|
||||
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
|
||||
IgnoreFirstIntervals: *streamAggrIgnoreFirstIntervals,
|
||||
}
|
||||
sasNew, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, opts)
|
||||
if err != nil {
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
|
@ -166,25 +165,15 @@ func (ctx *pushCtx) reset() {
|
|||
}
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs())
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
|
@ -90,25 +89,15 @@ func (ctx *pushCtx) reset() {
|
|||
}
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs())
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
|
@ -96,25 +95,15 @@ func (ctx *pushCtx) reset() {
|
|||
}
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs())
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
See vmrestore docs [here](https://docs.victoriametrics.com/vmrestore.html).
See vmrestore docs [here](https://docs.victoriametrics.com/vmrestore/).

vmrestore docs can be edited at [docs/vmrestore.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmrestore.md).
|
|
@ -74,7 +74,7 @@ func usage() {
|
|||
const s = `
|
||||
vmrestore restores VictoriaMetrics data from backups made by vmbackup.
|
||||
|
||||
See the docs at https://docs.victoriametrics.com/vmrestore.html .
|
||||
See the docs at https://docs.victoriametrics.com/vmrestore/ .
|
||||
`
|
||||
flagutil.Usage(s)
|
||||
}
|
||||
|
|
|
@ -99,7 +99,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
|
||||
// Strip /prometheus and /graphite prefixes in order to provide path compatibility with cluster version
|
||||
//
|
||||
// See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format
|
||||
// See https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
|
||||
switch {
|
||||
case strings.HasPrefix(path, "/prometheus/"):
|
||||
path = path[len("/prometheus"):]
|
||||
|
|
|
@ -21,13 +21,13 @@ textarea { margin: 1em }
|
|||
<form method="get">
|
||||
<div>
|
||||
<p>
|
||||
<a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a> query with optional WITH expressions:
|
||||
<a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a> query with optional WITH expressions:
|
||||
</p>
|
||||
<textarea name="query" style="height: 15em; width: 90%">{%s q %}</textarea><br/>
|
||||
<input type="submit" value="Expand" />
|
||||
|
||||
<p>
|
||||
<a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a> query after expanding WITH expressions and applying other optimizations:
|
||||
<a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a> query after expanding WITH expressions and applying other optimizations:
|
||||
</p>
|
||||
<textarea style="height: 5em; width: 90%" readonly="readonly">{%= expandWithExprs(q) %}</textarea>
|
||||
</div>
|
||||
|
@ -79,7 +79,7 @@ textarea { margin: 1em }
|
|||
{% endstripspace %}
|
||||
|
||||
{% func withExprsTutorial() %}
|
||||
<h3>Tutorial for WITH expressions in <a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a></h3>
|
||||
<h3>Tutorial for WITH expressions in <a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a></h3>
|
||||
|
||||
<p>
|
||||
Let's look at the following real query from <a href="https://grafana.com/grafana/dashboards/1860">Node Exporter Full</a> dashboard:
|
||||
|
|
|
@ -28,11 +28,11 @@ var (
|
|||
//line app/vmselect/prometheus/expand-with-exprs.qtpl:9
|
||||
func StreamExpandWithExprsResponse(qw422016 *qt422016.Writer, q string) {
|
||||
//line app/vmselect/prometheus/expand-with-exprs.qtpl:9
|
||||
qw422016.N().S(`<html><head><title>Expand WITH expressions</title><style>p { font-weight: bold }textarea { margin: 1em }</style></head><body><div><form method="get"><div><p><a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a> query with optional WITH expressions:</p><textarea name="query" style="height: 15em; width: 90%">`)
|
||||
qw422016.N().S(`<html><head><title>Expand WITH expressions</title><style>p { font-weight: bold }textarea { margin: 1em }</style></head><body><div><form method="get"><div><p><a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a> query with optional WITH expressions:</p><textarea name="query" style="height: 15em; width: 90%">`)
|
||||
//line app/vmselect/prometheus/expand-with-exprs.qtpl:26
|
||||
qw422016.E().S(q)
|
||||
//line app/vmselect/prometheus/expand-with-exprs.qtpl:26
|
||||
qw422016.N().S(`</textarea><br/><input type="submit" value="Expand" /><p><a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a> query after expanding WITH expressions and applying other optimizations:</p><textarea style="height: 5em; width: 90%" readonly="readonly">`)
|
||||
qw422016.N().S(`</textarea><br/><input type="submit" value="Expand" /><p><a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a> query after expanding WITH expressions and applying other optimizations:</p><textarea style="height: 5em; width: 90%" readonly="readonly">`)
|
||||
//line app/vmselect/prometheus/expand-with-exprs.qtpl:32
|
||||
streamexpandWithExprs(qw422016, q)
|
||||
//line app/vmselect/prometheus/expand-with-exprs.qtpl:32
|
||||
|
@ -192,7 +192,7 @@ func ExpandWithExprsJSONResponse(q string) string {
|
|||
func streamwithExprsTutorial(qw422016 *qt422016.Writer) {
|
||||
//line app/vmselect/prometheus/expand-with-exprs.qtpl:81
|
||||
qw422016.N().S(`
|
||||
<h3>Tutorial for WITH expressions in <a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a></h3>
|
||||
<h3>Tutorial for WITH expressions in <a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a></h3>
|
||||
|
||||
<p>
|
||||
Let's look at the following real query from <a href="https://grafana.com/grafana/dashboards/1860">Node Exporter Full</a> dashboard:
|
||||
|
|
|
@ -1113,7 +1113,7 @@ func (cp *commonParams) IsDefaultTimeRange() bool {
|
|||
return cp.start == 0 && cp.currentTimestamp-cp.end < 1000
|
||||
}
|
||||
|
||||
// getCommonParams obtains common params from r, which are used in /api/v1/export* handlers
|
||||
// getExportParams obtains common params from r, which are used in /api/v1/export* handlers
|
||||
//
|
||||
// - timeout
|
||||
// - start
|
||||
|
|
|
@ -23,8 +23,8 @@ var binaryOpFuncs = map[string]binaryOpFunc{
|
|||
"atan2": newBinaryOpArithFunc(binaryop.Atan2),
|
||||
|
||||
// cmp ops
|
||||
"==": newBinaryOpCmpFunc(binaryop.Eq),
|
||||
"!=": newBinaryOpCmpFunc(binaryop.Neq),
|
||||
"==": binaryOpEqFunc,
|
||||
"!=": binaryOpNeqFunc,
|
||||
">": newBinaryOpCmpFunc(binaryop.Gt),
|
||||
"<": newBinaryOpCmpFunc(binaryop.Lt),
|
||||
">=": newBinaryOpCmpFunc(binaryop.Gte),
|
||||
|
@ -54,6 +54,84 @@ type binaryOpFuncArg struct {
|
|||
|
||||
type binaryOpFunc func(bfa *binaryOpFuncArg) ([]*timeseries, error)
|
||||
|
||||
func binaryOpEqFunc(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
if !isUnionFunc(bfa.be.Left) && !isUnionFunc(bfa.be.Right) {
|
||||
return binaryOpEqStdFunc(bfa)
|
||||
}
|
||||
|
||||
// Special case for `q == (1,2,3)`
|
||||
left := bfa.left
|
||||
right := bfa.right
|
||||
if isUnionFunc(bfa.be.Left) {
|
||||
left, right = right, left
|
||||
}
|
||||
if len(left) == 0 || len(right) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
for _, tsLeft := range left {
|
||||
values := tsLeft.Values
|
||||
for j, v := range values {
|
||||
if !containsValueAt(right, v, j) {
|
||||
values[j] = nan
|
||||
}
|
||||
}
|
||||
}
|
||||
// Do not remove time series containing only NaNs, since then the `(foo op bar) default N`
|
||||
// won't work as expected if `(foo op bar)` results to NaN series.
|
||||
return left, nil
|
||||
}
|
||||
|
||||
func binaryOpNeqFunc(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
if !isUnionFunc(bfa.be.Left) && !isUnionFunc(bfa.be.Right) {
|
||||
return binaryOpNeqStdFunc(bfa)
|
||||
}
|
||||
|
||||
// Special case for `q != (1,2,3)`
|
||||
left := bfa.left
|
||||
right := bfa.right
|
||||
if isUnionFunc(bfa.be.Left) {
|
||||
left, right = right, left
|
||||
}
|
||||
if len(left) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(right) == 0 {
|
||||
return left, nil
|
||||
}
|
||||
for _, tsLeft := range left {
|
||||
values := tsLeft.Values
|
||||
for j, v := range values {
|
||||
if containsValueAt(right, v, j) {
|
||||
values[j] = nan
|
||||
}
|
||||
}
|
||||
}
|
||||
// Do not remove time series containing only NaNs, since then the `(foo op bar) default N`
|
||||
// won't work as expected if `(foo op bar)` results to NaN series.
|
||||
return left, nil
|
||||
}
|
||||
|
||||
func isUnionFunc(e metricsql.Expr) bool {
|
||||
if fe, ok := e.(*metricsql.FuncExpr); ok && (fe.Name == "" || strings.ToLower(fe.Name) == "union") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func containsValueAt(tss []*timeseries, v float64, idx int) bool {
|
||||
for _, ts := range tss {
|
||||
if ts.Values[idx] == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
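Together, `isUnionFunc` and `containsValueAt` implement the per-timestamp membership test behind `q == (v1, ..., vN)` and `q != (v1, ..., vN)`: a point on the left-hand series is kept only if some right-hand series has the same value at the same index. A standalone sketch of that semantics on plain slices (the real code works on the internal `timeseries` type):

```go
package main

import (
	"fmt"
	"math"
)

// containsValueAt reports whether any series has value v at index idx,
// mirroring the helper above but on plain float64 slices.
func containsValueAt(series [][]float64, v float64, idx int) bool {
	for _, s := range series {
		if s[idx] == v {
			return true
		}
	}
	return false
}

func main() {
	left := []float64{1000, 1200, 1400, 1600} // e.g. time()
	right := [][]float64{                     // e.g. (100, 1000, 1400, 600)
		{100, 100, 100, 100},
		{1000, 1000, 1000, 1000},
		{1400, 1400, 1400, 1400},
		{600, 600, 600, 600},
	}

	// Emulate `left == (100, 1000, 1400, 600)`: keep matches, NaN out the rest.
	for j, v := range left {
		if !containsValueAt(right, v, j) {
			left[j] = math.NaN()
		}
	}
	fmt.Println(left) // [1000 NaN 1400 NaN]
}
```

This is the behavior exercised by the `equal-list` and `not-equal-list` tests added later in this commit.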
|
||||
|
||||
var (
|
||||
binaryOpEqStdFunc = newBinaryOpCmpFunc(binaryop.Eq)
|
||||
binaryOpNeqStdFunc = newBinaryOpCmpFunc(binaryop.Neq)
|
||||
)
|
||||
|
||||
func newBinaryOpCmpFunc(cf func(left, right float64) bool) binaryOpFunc {
|
||||
cfe := func(left, right float64, isBool bool) float64 {
|
||||
if !isBool {
|
||||
|
@ -327,7 +405,7 @@ func resetMetricGroupIfRequired(be *metricsql.BinaryOpExpr, ts *timeseries) {
|
|||
}
|
||||
if be.KeepMetricNames {
|
||||
// Do not reset MetricGroup if it is explicitly requested via `a op b keep_metric_names`
|
||||
// See https://docs.victoriametrics.com/MetricsQL.html#keep_metric_names
|
||||
// See https://docs.victoriametrics.com/metricsql/#keep_metric_names
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/querystats"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
@ -26,6 +27,12 @@ var (
|
|||
`For example, foo{bar=~"a.b.c"} will be automatically converted to foo{bar=~"a\\.b\\.c"}, i.e. all the dots in regexp filters will be automatically escaped `+
|
||||
`in order to match only dot char instead of matching any char. Dots in ".+", ".*" and ".{n}" regexps aren't escaped. `+
|
||||
`This option is DEPRECATED in favor of {__graphite__="a.*.c"} syntax for selecting metrics matching the given Graphite metrics filter`)
|
||||
disableImplicitConversion = flag.Bool("search.disableImplicitConversion", false, "Whether to return an error for queries that rely on implicit subquery conversions, "+
|
||||
"see https://docs.victoriametrics.com/metricsql/#subqueries for details. "+
|
||||
"See also -search.logImplicitConversion.")
|
||||
logImplicitConversion = flag.Bool("search.logImplicitConversion", false, "Whether to log queries with implicit subquery conversions, "+
|
||||
"see https://docs.victoriametrics.com/metricsql/#subqueries for details. "+
|
||||
"Such conversion can be disabled using -search.disableImplicitConversion.")
|
||||
)
|
||||
|
||||
// UserReadableError is a type of error which supposed to be returned to the user without additional context.
|
||||
|
@ -63,6 +70,16 @@ func Exec(qt *querytracer.Tracer, ec *EvalConfig, q string, isFirstPointOnly boo
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if *disableImplicitConversion || *logImplicitConversion {
|
||||
complete := isSubQueryComplete(e, false)
|
||||
if !complete && *disableImplicitConversion {
|
||||
return nil, fmt.Errorf("query contains subquery that requires implicit conversion and is rejected according to `-search.disableImplicitConversion=true` setting. See https://docs.victoriametrics.com/metricsql/#subqueries for details")
|
||||
}
|
||||
if !complete && *logImplicitConversion {
|
||||
logger.Warnf("query=%q contains subquery that requires implicit conversion, see https://docs.victoriametrics.com/metricsql/#subqueries for details", e.AppendString(nil))
|
||||
}
|
||||
}
|
||||
|
||||
qid := activeQueriesV.Add(ec, q)
|
||||
rv, err := evalExpr(qt, ec, e)
|
||||
activeQueriesV.Remove(qid)
|
||||
|
@ -404,3 +421,55 @@ func (pc *parseCache) Put(q string, pcv *parseCacheValue) {
|
|||
pc.m[q] = pcv
|
||||
pc.mu.Unlock()
|
||||
}
|
||||
|
||||
// isSubQueryComplete returns false if expr contains an incomplete subquery that relies on implicit conversion
|
||||
func isSubQueryComplete(e metricsql.Expr, isSubExpr bool) bool {
|
||||
switch exp := e.(type) {
|
||||
case *metricsql.FuncExpr:
|
||||
if isSubExpr {
|
||||
return false
|
||||
}
|
||||
fe := e.(*metricsql.FuncExpr)
|
||||
for _, arg := range exp.Args {
|
||||
if getRollupFunc(fe.Name) != nil {
|
||||
isSubExpr = true
|
||||
}
|
||||
if !isSubQueryComplete(arg, isSubExpr) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
case *metricsql.RollupExpr:
|
||||
if _, ok := exp.Expr.(*metricsql.MetricExpr); ok {
|
||||
return true
|
||||
}
|
||||
// exp.Step is optional in subqueries
|
||||
if exp.Window == nil {
|
||||
return false
|
||||
}
|
||||
return isSubQueryComplete(exp.Expr, false)
|
||||
case *metricsql.AggrFuncExpr:
|
||||
if isSubExpr {
|
||||
return false
|
||||
}
|
||||
for _, arg := range exp.Args {
|
||||
if !isSubQueryComplete(arg, false) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
case *metricsql.BinaryOpExpr:
|
||||
if isSubExpr {
|
||||
return false
|
||||
}
|
||||
if !isSubQueryComplete(exp.Left, false) {
|
||||
return false
|
||||
}
|
||||
if !isSubQueryComplete(exp.Right, false) {
|
||||
return false
|
||||
}
|
||||
case *metricsql.MetricExpr:
|
||||
return true
|
||||
default:
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
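For reference, a sketch of how the check above classifies queries, using the same `metricsql.Parse` entry point as the tests later in this diff. This helper is illustrative and would live in the same package as `isSubQueryComplete`, where `fmt` and `metricsql` are assumed to be imported:

```go
// isSubQueryCompleteDemo prints whether a query needs implicit subquery conversion.
// Expected classifications are taken from TestIsSubQueryComplete{True,False} below.
func isSubQueryCompleteDemo() {
	for _, q := range []string{
		"rate(http_total[1m])",         // complete: explicit lookbehind window
		"rate(sum(http_total))",        // incomplete: relies on implicit [window:step] conversion
		"rate(sum(http_total)[5m:1m])", // complete: explicit subquery window and step
	} {
		e, err := metricsql.Parse(q)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> complete=%v\n", q, isSubQueryComplete(e, false))
	}
}
```

Queries classified as incomplete are rejected when `-search.disableImplicitConversion` is set, or logged when `-search.logImplicitConversion` is set, as the `Exec` hunk above shows.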
|
||||
|
|
|
@ -5203,9 +5203,24 @@ func TestExecSuccess(t *testing.T) {
|
|||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`sum(union-args)`, func(t *testing.T) {
|
||||
t.Run(`sum(union-scalars)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sum((1, 2, 3))`
|
||||
r := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{6, 6, 6, 6, 6, 6},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`sum(union-vectors)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sum((
|
||||
alias(1, "foo"),
|
||||
alias(2, "foo"),
|
||||
alias(3, "foo"),
|
||||
))`
|
||||
r := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{1, 1, 1, 1, 1, 1},
|
||||
|
@ -5763,6 +5778,51 @@ func TestExecSuccess(t *testing.T) {
|
|||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`equal-list`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `time() == (100, 1000, 1400, 600)`
|
||||
r := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{1000, nan, 1400, nan, nan, nan},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`equal-list-reverse`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `(100, 1000, 1400, 600) == time()`
|
||||
r := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{1000, nan, 1400, nan, nan, nan},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`not-equal-list`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `alias(time(), "foobar") != UNIon(100, 1000, 1400, 600)`
|
||||
r := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{nan, 1200, nan, 1600, 1800, 2000},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r.MetricName.MetricGroup = []byte("foobar")
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`not-equal-list-reverse`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `(100, 1000, 1400, 600) != time()`
|
||||
r := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{nan, 1200, nan, 1600, 1800, 2000},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`quantiles_over_time(single_sample)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort_by_label(
|
||||
|
@ -9372,3 +9432,104 @@ func testAddLabels(t *testing.T, mn *storage.MetricName, labels ...string) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsSubQueryCompleteTrue(t *testing.T) {
|
||||
f := func(q string) {
|
||||
t.Helper()
|
||||
e, err := metricsql.Parse(q)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !isSubQueryComplete(e, false) {
|
||||
t.Fatalf("query should be complete: %s", e.AppendString(nil))
|
||||
}
|
||||
}
|
||||
|
||||
f("rate(http_total)")
|
||||
f("sum(http_total)")
|
||||
f("absent(http_total)")
|
||||
f("rate(http_total[1m])")
|
||||
f("avg_over_time(up[1m])")
|
||||
f("sum(http_total[1m])")
|
||||
f("sum(rate(http_total))")
|
||||
f("sum(sum(http_total))")
|
||||
f(`sum(sum_over_time(http_total[1m] )) by (instance)`)
|
||||
f("sum(up{cluster='a'}[1m] or up{cluster='b'}[1m])")
|
||||
f("(avg_over_time(alarm_test1[1m]) - avg_over_time(alarm_test1[1m] offset 5m)) > 0.1")
|
||||
f("http_total[1m] offset 1m")
|
||||
|
||||
// subquery
|
||||
f("rate(http_total)[5m:1m]")
|
||||
f("rate(sum(http_total)[5m:1m])")
|
||||
f("rate(rate(http_total)[5m:1m])")
|
||||
f("sum(rate(http_total[1m]))")
|
||||
f("sum(rate(sum(http_total)[5m:1m]))")
|
||||
f("rate(sum(rate(http_total))[5m:1m])")
|
||||
f("rate(sum(sum(http_total))[5m:1m])")
|
||||
f("rate(sum(rate(http_total))[5m:1m])")
|
||||
f("rate(sum(sum(http_total))[5m:1m])")
|
||||
f("avg_over_time(rate(http_total[5m])[5m:1m])")
|
||||
f("delta(avg_over_time(up[1m])[5m:1m]) > 0.1")
|
||||
f("avg_over_time(avg by (site) (metric)[2m:1m])")
|
||||
|
||||
f("sum(http_total)[5m:1m] offset 1m")
|
||||
f("round(sum(sum_over_time(http_total[1m])) by (instance)) [5m:1m] offset 1m")
|
||||
|
||||
f("rate(sum(http_total)[5m:1m]) - rate(sum(http_total)[5m:1m])")
|
||||
f("avg_over_time((rate(http_total)-rate(http_total))[5m:1m])")
|
||||
|
||||
f("sum_over_time((up{cluster='a'} or up{cluster='b'})[5m:1m])")
|
||||
f("sum_over_time((up{cluster='a'} or up{cluster='b'})[5m:1m])")
|
||||
f("sum(sum_over_time((up{cluster='a'} or up{cluster='b'})[5m:1m])) by (instance)")
|
||||
|
||||
// step (or resolution) is optional in subqueries
|
||||
f("max_over_time(rate(my_counter_total[5m])[1h:])")
|
||||
f("max_over_time(rate(my_counter_total[5m])[1h:1m])[5m:1m]")
|
||||
f("max_over_time(rate(my_counter_total[5m])[1h:])[5m:]")
|
||||
|
||||
f(`
|
||||
WITH (
|
||||
cpuSeconds = node_cpu_seconds_total{instance=~"$node:$port",job=~"$job"},
|
||||
cpuIdle = rate(cpuSeconds{mode='idle'}[5m])
|
||||
)
|
||||
max_over_time(cpuIdle[1h:])`)
|
||||
}
|
||||
|
||||
func TestIsSubQueryCompleteFalse(t *testing.T) {
|
||||
f := func(q string) {
|
||||
t.Helper()
|
||||
e, err := metricsql.Parse(q)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if isSubQueryComplete(e, false) {
|
||||
t.Fatalf("expect to detect incomplete subquery: %s", e.AppendString(nil))
|
||||
}
|
||||
}
|
||||
|
||||
f("rate(sum(http_total))")
|
||||
f("rate(rate(http_total))")
|
||||
f("sum(rate(sum(http_total)))")
|
||||
f("rate(sum(rate(http_total)))")
|
||||
f("rate(sum(sum(http_total)))")
|
||||
f("avg_over_time(rate(http_total[5m]))")
|
||||
|
||||
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3974
|
||||
f("sum(http_total) offset 1m")
|
||||
f(`round(sum(sum_over_time(http_total[1m])) by (instance)) offset 1m`)
|
||||
|
||||
f("rate(sum(http_total)) - rate(sum(http_total))")
|
||||
f("avg_over_time(rate(http_total)-rate(http_total))")
|
||||
|
||||
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3996
|
||||
f("sum_over_time(up{cluster='a'} or up{cluster='b'})")
|
||||
f("sum_over_time(up{cluster='a'}[1m] or up{cluster='b'}[1m])")
|
||||
f("sum(sum_over_time(up{cluster='a'}[1m] or up{cluster='b'}[1m])) by (instance)")
|
||||
|
||||
f(`
|
||||
WITH (
|
||||
cpuSeconds = node_cpu_seconds_total{instance=~"$node:$port",job=~"$job"},
|
||||
cpuIdle = rate(cpuSeconds{mode='idle'}[5m])
|
||||
)
|
||||
max_over_time(cpuIdle)`)
|
||||
}
|
||||
|
|
|
@ -1658,6 +1658,16 @@ func transformUnion(tfa *transformFuncArg) ([]*timeseries, error) {
|
|||
return evalNumber(tfa.ec, nan), nil
|
||||
}
|
||||
|
||||
if areAllArgsScalar(args) {
|
||||
// Special case for (v1,...,vN) where vX are scalars - return all the scalars as time series.
|
||||
// This is needed for "q == (v1,...,vN)" and "q != (v1,...,vN)" cases, where vX are numeric constants.
|
||||
rvs := make([]*timeseries, len(args))
|
||||
for i, arg := range args {
|
||||
rvs[i] = arg[0]
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
rvs := make([]*timeseries, 0, len(args[0]))
|
||||
m := make(map[string]bool, len(args[0]))
|
||||
bb := bbPool.Get()
|
||||
|
@ -1676,6 +1686,15 @@ func transformUnion(tfa *transformFuncArg) ([]*timeseries, error) {
|
|||
return rvs, nil
|
||||
}
|
||||
|
||||
func areAllArgsScalar(args [][]*timeseries) bool {
|
||||
for _, arg := range args {
|
||||
if !isScalar(arg) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func transformLabelKeep(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
args := tfa.args
|
||||
if len(args) < 1 {
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
{
|
||||
"files": {
|
||||
"main.css": "./static/css/main.a2ad4674.css",
|
||||
"main.js": "./static/js/main.f9cc1e6c.js",
|
||||
"main.css": "./static/css/main.d0b400e3.css",
|
||||
"main.js": "./static/js/main.59c17910.js",
|
||||
"static/js/685.bebe1265.chunk.js": "./static/js/685.bebe1265.chunk.js",
|
||||
"static/media/MetricsQL.md": "./static/media/MetricsQL.10add6e7bdf0f1d98cf7.md",
|
||||
"static/media/MetricsQL.md": "./static/media/MetricsQL.df7574389d8f8bbcf0c7.md",
|
||||
"index.html": "./index.html"
|
||||
},
|
||||
"entrypoints": [
|
||||
"static/css/main.a2ad4674.css",
|
||||
"static/js/main.f9cc1e6c.js"
|
||||
"static/css/main.d0b400e3.css",
|
||||
"static/js/main.59c17910.js"
|
||||
]
|
||||
}
|
|
@ -1,3 +1,3 @@
|
|||
## Predefined dashboards

See [this docs](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmui#predefined-dashboards)
See [this doc](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmui#predefined-dashboards)
|
|
@ -1 +1 @@
|
|||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.f9cc1e6c.js"></script><link href="./static/css/main.a2ad4674.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.59c17910.js"></script><link href="./static/css/main.d0b400e3.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -1,38 +0,0 @@
|
|||
/*!
|
||||
Copyright (c) 2018 Jed Watson.
|
||||
Licensed under the MIT License (MIT), see
|
||||
http://jedwatson.github.io/classnames
|
||||
*/
|
||||
|
||||
/**
|
||||
* @remix-run/router v1.15.1
|
||||
*
|
||||
* Copyright (c) Remix Software Inc.
|
||||
*
|
||||
* This source code is licensed under the MIT license found in the
|
||||
* LICENSE.md file in the root directory of this source tree.
|
||||
*
|
||||
* @license MIT
|
||||
*/
|
||||
|
||||
/**
|
||||
* React Router DOM v6.22.1
|
||||
*
|
||||
* Copyright (c) Remix Software Inc.
|
||||
*
|
||||
* This source code is licensed under the MIT license found in the
|
||||
* LICENSE.md file in the root directory of this source tree.
|
||||
*
|
||||
* @license MIT
|
||||
*/
|
||||
|
||||
/**
|
||||
* React Router v6.22.1
|
||||
*
|
||||
* Copyright (c) Remix Software Inc.
|
||||
*
|
||||
* This source code is licensed under the MIT license found in the
|
||||
* LICENSE.md file in the root directory of this source tree.
|
||||
*
|
||||
* @license MIT
|
||||
*/
|
|
@ -62,13 +62,13 @@ var (
|
|||
minFreeDiskSpaceBytes = flagutil.NewBytes("storage.minFreeDiskSpaceBytes", 10e6, "The minimum free disk space at -storageDataPath after which the storage stops accepting new data")
|
||||
|
||||
cacheSizeStorageTSID = flagutil.NewBytes("storage.cacheSizeStorageTSID", 0, "Overrides max size for storage/tsid cache. "+
|
||||
"See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning")
|
||||
"See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning")
|
||||
cacheSizeIndexDBIndexBlocks = flagutil.NewBytes("storage.cacheSizeIndexDBIndexBlocks", 0, "Overrides max size for indexdb/indexBlocks cache. "+
|
||||
"See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning")
|
||||
"See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning")
|
||||
cacheSizeIndexDBDataBlocks = flagutil.NewBytes("storage.cacheSizeIndexDBDataBlocks", 0, "Overrides max size for indexdb/dataBlocks cache. "+
|
||||
"See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning")
|
||||
"See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning")
|
||||
cacheSizeIndexDBTagFilters = flagutil.NewBytes("storage.cacheSizeIndexDBTagFilters", 0, "Overrides max size for indexdb/tagFiltersToMetricIDs cache. "+
|
||||
"See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning")
|
||||
"See https://docs.victoriametrics.com/single-server-victoriametrics/#cache-tuning")
|
||||
)
|
||||
|
||||
// CheckTimeRange returns true if the given tr is denied for querying.
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
FROM node:18-alpine3.17
|
||||
FROM node:20-alpine3.19
|
||||
|
||||
# Sets a custom location for the npm cache, preventing access errors in system directories
|
||||
ENV NPM_CONFIG_CACHE=/build/.npm
|
||||
|
||||
RUN apk update && apk upgrade
|
||||
RUN apk add --no-cache bash bash-doc bash-completion libtool autoconf automake nasm pkgconfig libpng gcc make g++ zlib-dev gawk
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk add --no-cache bash bash-doc bash-completion libtool autoconf automake nasm pkgconfig libpng gcc make g++ zlib-dev gawk && \
|
||||
mkdir -p /app
|
||||
|
||||
RUN mkdir -p /app
|
||||
WORKDIR /app
|
||||
|
|
|
@ -14,5 +14,5 @@ COPY --from=build-web-stage /build/web-windows /app/web-windows
|
|||
RUN adduser -S -D -u 1000 web && chown -R web /app
|
||||
|
||||
USER web
|
||||
EXPOSE 8080
|
||||
|
||||
ENTRYPOINT ["/app/web"]
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
## Predefined dashboards

See [this docs](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmui#predefined-dashboards)
See [this doc](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmui#predefined-dashboards)
|
|
@ -22,7 +22,7 @@ However, there are some [intentional differences](https://medium.com/@romanhavro
|
|||
[Standalone MetricsQL package](https://godoc.org/github.com/VictoriaMetrics/metricsql) can be used for parsing MetricsQL in external apps.
|
||||
|
||||
If you are unfamiliar with PromQL, then it is suggested reading [this tutorial for beginners](https://medium.com/@valyala/promql-tutorial-for-beginners-9ab455142085)
|
||||
and introduction into [basic querying via MetricsQL](https://docs.victoriametrics.com/keyConcepts.html#metricsql).
|
||||
and introduction into [basic querying via MetricsQL](https://docs.victoriametrics.com/keyconcepts/#metricsql).
|
||||
|
||||
The following functionality is implemented differently in MetricsQL compared to PromQL. This improves user experience:
|
||||
|
||||
|
@ -70,15 +70,17 @@ The list of MetricsQL features on top of PromQL:
|
|||
VictoriaMetrics can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
|
||||
See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
|
||||
* Lookbehind window in square brackets for [rollup functions](#rollup-functions) may be omitted. VictoriaMetrics automatically selects the lookbehind window
|
||||
depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)
|
||||
depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query)
|
||||
and the real interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) (aka `scrape_interval`).
|
||||
For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`.
|
||||
It is roughly equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
|
||||
The difference is documented in [rate() docs](#rate).
|
||||
* Numeric values can contain `_` delimiters for better readability. For example, `1_234_567_890` can be used in queries instead of `1234567890`.
|
||||
* [Series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
|
||||
* [Series selectors](https://docs.victoriametrics.com/keyconcepts/#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
|
||||
selects series with `{env="prod",job="a"}` or `{env="dev",job="b"}` labels.
|
||||
See [these docs](https://docs.victoriametrics.com/keyConcepts.html#filtering-by-multiple-or-filters) for details.
|
||||
See [these docs](https://docs.victoriametrics.com/keyconcepts/#filtering-by-multiple-or-filters) for details.
|
||||
* Support for matching against multiple numeric constants via `q == (C1, ..., CN)` and `q != (C1, ..., CN)` syntax. For example, `status_code == (300, 301, 304)`
|
||||
returns `status_code` metrics with one of `300`, `301` or `304` values.
|
||||
* Support for `group_left(*)` and `group_right(*)` for copying all the labels from time series on the `one` side
|
||||
of [many-to-one operations](https://prometheus.io/docs/prometheus/latest/querying/operators/#many-to-one-and-one-to-many-vector-matches).
|
||||
The copied label names may clash with the existing label names, so MetricsQL provides an ability to add prefix to the copied metric names
|
||||
|
@ -152,27 +154,27 @@ MetricsQL provides the following functions:
|
|||
|
||||
### Rollup functions
|
||||
|
||||
**Rollup functions** (aka range functions or window functions) calculate rollups over **raw samples**
|
||||
on the given lookbehind window for the [selected time series](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
For example, `avg_over_time(temperature[24h])` calculates the average temperature over raw samples for the last 24 hours.
|
||||
**Rollup functions** (aka range functions or window functions) calculate rollups over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window for the [selected time series](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
For example, `avg_over_time(temperature[24h])` calculates the average temperature over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the last 24 hours.
|
||||
|
||||
Additional details:
|
||||
|
||||
* If rollup functions are used for building graphs in Grafana, then the rollup is calculated independently per each point on the graph.
|
||||
For example, every point for `avg_over_time(temperature[24h])` graph shows the average temperature for the last 24 hours ending at this point.
|
||||
The interval between points is set as `step` query arg passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
* If the given [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) returns multiple time series,
|
||||
The interval between points is set as `step` query arg passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
|
||||
* If the given [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) returns multiple time series,
|
||||
then rollups are calculated individually per each returned series.
|
||||
* If lookbehind window in square brackets is missing, then it is automatically set to the following value:
|
||||
- To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
|
||||
- To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
|
||||
for all the [rollup functions](#rollup-functions) except of [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
|
||||
For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
|
||||
- To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
|
||||
* Every [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) in MetricsQL must be wrapped into a rollup function.
|
||||
* Every [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) in MetricsQL must be wrapped into a rollup function.
|
||||
Otherwise, it is automatically wrapped into [default_rollup](#default_rollup). For example, `foo{bar="baz"}`
|
||||
is automatically converted to `default_rollup(foo{bar="baz"})` before performing the calculations.
|
||||
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) is passed to rollup function,
|
||||
* If something other than [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) is passed to rollup function,
|
||||
then the inner arg is automatically converted to a [subquery](#subqueries).
|
||||
* All the rollup functions accept optional `keep_metric_names` modifier. If it is set, then the function keeps metric names in results.
|
||||
See [these docs](#keep_metric_names).
|
||||
|
@ -184,7 +186,7 @@ The list of supported rollup functions:
|
|||
#### absent_over_time
|
||||
|
||||
`absent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1
|
||||
if the given lookbehind window `d` doesn't contain raw samples. Otherwise, it returns an empty result.
|
||||
if the given lookbehind window `d` doesn't contain [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples). Otherwise, it returns an empty result.
|
||||
|
||||
This function is supported by PromQL.
|
||||
|
||||
|
@ -193,9 +195,9 @@ See also [present_over_time](#present_over_time).
|
|||
#### aggr_over_time
|
||||
|
||||
`aggr_over_time(("rollup_func1", "rollup_func2", ...), series_selector[d])` is a [rollup function](#rollup-functions),
|
||||
which calculates all the listed `rollup_func*` for raw samples on the given lookbehind window `d`.
|
||||
which calculates all the listed `rollup_func*` for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`.
|
||||
The calculations are performed individually per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
`rollup_func*` can contain any rollup function. For instance, `aggr_over_time(("min_over_time", "max_over_time", "rate"), m[d])`
|
||||
would calculate [min_over_time](#min_over_time), [max_over_time](#max_over_time) and [rate](#rate) for `m[d]`.
|
||||
|
@ -203,8 +205,8 @@ would calculate [min_over_time](#min_over_time), [max_over_time](#max_over_time)
|
|||
#### ascent_over_time
|
||||
|
||||
`ascent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates
|
||||
ascent of raw sample values on the given lookbehind window `d`. The calculations are performed individually
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
ascent of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples) values on the given lookbehind window `d`. The calculations are performed individually
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is useful for tracking height gains in GPS tracking. Metric names are stripped from the resulting rollups.
|
||||
|
||||
|
@ -215,8 +217,8 @@ See also [descent_over_time](#descent_over_time).
|
|||
#### avg_over_time
|
||||
|
||||
`avg_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average value
|
||||
over raw samples on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is supported by PromQL.
|
||||
|
||||
|
@ -225,8 +227,8 @@ See also [median_over_time](#median_over_time).
|
|||
#### changes
|
||||
|
||||
`changes(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of times
|
||||
the raw samples changed on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
the [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) changed on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Unlike `changes()` in Prometheus it takes into account the change from the last sample before the given lookbehind window `d`.
|
||||
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
|
||||
|
@ -240,8 +242,8 @@ See also [changes_prometheus](#changes_prometheus).
|
|||
#### changes_prometheus
|
||||
|
||||
`changes_prometheus(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of times
|
||||
the raw samples changed on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
the [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) changed on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
It doesn't take into account the change from the last sample before the given lookbehind window `d` in the same way as Prometheus does.
|
||||
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
|
||||
|
@ -254,9 +256,9 @@ See also [changes](#changes).
|
|||
|
||||
#### count_eq_over_time
|
||||
|
||||
`count_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
|
||||
`count_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which are equal to `eq`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -264,9 +266,9 @@ See also [count_over_time](#count_over_time), [share_eq_over_time](#share_eq_ove
|
|||
|
||||
#### count_gt_over_time
|
||||
|
||||
`count_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
|
||||
`count_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which are bigger than `gt`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
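For example, the following query (assuming a `node_load1` gauge such as the one exposed by node_exporter) counts how many load average samples exceeded 4 during the last hour per each instance:

```
count_gt_over_time(node_load1[1h], 4)
```
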
@ -274,9 +276,9 @@ See also [count_over_time](#count_over_time) and [share_gt_over_time](#share_gt_
|
|||
|
||||
#### count_le_over_time
|
||||
|
||||
`count_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
|
||||
`count_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which don't exceed `le`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -284,9 +286,9 @@ See also [count_over_time](#count_over_time) and [share_le_over_time](#share_le_
|
|||
|
||||
#### count_ne_over_time
|
||||
|
||||
`count_ne_over_time(series_selector[d], ne)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
|
||||
`count_ne_over_time(series_selector[d], ne)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which aren't equal to `ne`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -294,8 +296,8 @@ See also [count_over_time](#count_over_time).
|
|||
|
||||
#### count_over_time
|
||||
|
||||
`count_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`count_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -305,9 +307,9 @@ See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_
|
|||
|
||||
#### count_values_over_time
|
||||
|
||||
`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of raw samples
|
||||
`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
with the same value over the given lookbehind window and stores the counts in a time series with an additional `label`, which contains each initial value.
|
||||
The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
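For example, the following query (using a hypothetical `deployed_config_revision` gauge whose value holds the active configuration revision) returns how many raw samples carried each distinct revision during the last day, with the revision stored in the `revision` label:

```
count_values_over_time("revision", deployed_config_revision[1d])
```
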
@ -315,8 +317,8 @@ See also [count_eq_over_time](#count_eq_over_time), [count_values](#count_values
|
|||
|
||||
#### decreases_over_time
|
||||
|
||||
`decreases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw sample value decreases
|
||||
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`decreases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
value decreases over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -324,8 +326,8 @@ See also [increases_over_time](#increases_over_time).
|
|||
|
||||
#### default_rollup
|
||||
|
||||
`default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
value on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
|
||||
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
|
||||
|
@ -336,7 +338,7 @@ This allows avoiding unexpected gaps on the graph when `step` is smaller than th
|
|||
|
||||
`delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between
|
||||
the last sample before the given lookbehind window `d` and the last sample at the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
The behaviour of `delta()` function in MetricsQL is slightly different to the behaviour of `delta()` function in Prometheus.
|
||||
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
|
||||
|
@ -351,7 +353,7 @@ See also [increase](#increase) and [delta_prometheus](#delta_prometheus).
|
|||
|
||||
`delta_prometheus(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between
|
||||
the first and the last samples at the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
The behaviour of `delta_prometheus()` is close to the behaviour of `delta()` function in Prometheus.
|
||||
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
|
||||
|
@ -363,7 +365,7 @@ See also [delta](#delta).
|
|||
#### deriv
|
||||
|
||||
`deriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivative over the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
The derivative is calculated using linear regression.
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
@ -375,8 +377,8 @@ See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).
|
|||
#### deriv_fast
|
||||
|
||||
`deriv_fast(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivative
|
||||
using the first and the last raw samples on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
using the first and the last [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -384,9 +386,9 @@ See also [deriv](#deriv) and [ideriv](#ideriv).
|
|||
|
||||
#### descent_over_time
|
||||
|
||||
`descent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates descent of raw sample values
|
||||
on the given lookbehind window `d`. The calculations are performed individually per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`descent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates descent of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
values on the given lookbehind window `d`. The calculations are performed individually per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is useful for tracking height loss in GPS tracking.
|
||||
|
||||
|
@ -396,8 +398,8 @@ See also [ascent_over_time](#ascent_over_time).
|
|||
|
||||
#### distinct_over_time
|
||||
|
||||
`distinct_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the number of distinct raw sample values
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`distinct_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the number of unique [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
values on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -406,7 +408,7 @@ See also [count_values_over_time](#count_values_over_time).
|
|||
#### duration_over_time
|
||||
|
||||
`duration_over_time(series_selector[d], max_interval)` is a [rollup function](#rollup-functions), which returns the duration in seconds
|
||||
when time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) were present
|
||||
when time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering) were present
|
||||
over the given lookbehind window `d`. It is expected that intervals between adjacent samples per each series don't exceed the `max_interval`.
|
||||
Otherwise, such intervals are considered as gaps and aren't counted.
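For example, the following query returns the number of seconds during the last 24 hours when `up` series had samples spaced no more than 600 seconds (10 minutes) apart; this sketch assumes the `max_interval` argument is given as a number of seconds:

```
duration_over_time(up[24h], 600)
```
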
@ -416,26 +418,26 @@ See also [lifetime](#lifetime) and [lag](#lag).
|
|||
|
||||
#### first_over_time
|
||||
|
||||
`first_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the first raw sample value
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`first_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the first [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
value on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
See also [last_over_time](#last_over_time) and [tfirst_over_time](#tfirst_over_time).
|
||||
|
||||
#### geomean_over_time
|
||||
|
||||
`geomean_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [geometric mean](https://en.wikipedia.org/wiki/Geometric_mean)
|
||||
over raw samples on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
#### histogram_over_time
|
||||
|
||||
`histogram_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates
|
||||
[VictoriaMetrics histogram](https://godoc.org/github.com/VictoriaMetrics/metrics#Histogram) over raw samples on the given lookbehind window `d`.
|
||||
It is calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
[VictoriaMetrics histogram](https://godoc.org/github.com/VictoriaMetrics/metrics#Histogram) over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`. It is calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
The resulting histograms are useful to pass to [histogram_quantile](#histogram_quantile) for calculating quantiles
|
||||
over multiple [gauges](https://docs.victoriametrics.com/keyConcepts.html#gauge).
|
||||
over multiple [gauges](https://docs.victoriametrics.com/keyconcepts/#gauge).
|
||||
For example, the following query calculates median temperature by country over the last 24 hours:
|
||||
|
||||
`histogram_quantile(0.5, sum(histogram_over_time(temperature[24h])) by (vmrange,country))`.
|
||||
|
@ -457,10 +459,10 @@ See also [hoeffding_bound_lower](#hoeffding_bound_lower).
|
|||
#### holt_winters
|
||||
|
||||
`holt_winters(series_selector[d], sf, tf)` is a [rollup function](#rollup-functions), which calculates Holt-Winters value
|
||||
(aka [double exponential smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing#Double_exponential_smoothing)) for raw samples
|
||||
(aka [double exponential smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing#Double_exponential_smoothing)) for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
over the given lookbehind window `d` using the given smoothing factor `sf` and the given trend factor `tf`.
|
||||
Both `sf` and `tf` must be in the range `[0...1]`. It is expected that the [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
|
||||
returns time series of [gauge type](https://docs.victoriametrics.com/keyConcepts.html#gauge).
|
||||
Both `sf` and `tf` must be in the range `[0...1]`. It is expected that the [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering)
|
||||
returns time series of [gauge type](https://docs.victoriametrics.com/keyconcepts/#gauge).
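For example, a smoothed estimate for a hypothetical `temperature_celsius` gauge over the last hour, with a smoothing factor of 0.5 and a trend factor of 0.1, could look like this:

```
holt_winters(temperature_celsius[1h], 0.5, 0.1)
```
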
This function is supported by PromQL.
|
||||
|
||||
|
@ -468,8 +470,8 @@ See also [range_linear_regression](#range_linear_regression).
|
|||
|
||||
#### idelta
|
||||
|
||||
`idelta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between the last two raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`idelta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between the last two [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -479,9 +481,10 @@ See also [delta](#delta).
|
|||
|
||||
#### ideriv
|
||||
|
||||
`ideriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the per-second derivative based on the last two raw samples
|
||||
`ideriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the per-second derivative based
|
||||
on the last two [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
over the given lookbehind window `d`. The derivative is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -490,8 +493,8 @@ See also [deriv](#deriv).
|
|||
#### increase
|
||||
|
||||
`increase(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the increase over the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).
|
||||
|
||||
Unlike Prometheus, it takes into account the last sample before the given lookbehind window `d` when calculating the result.
|
||||
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
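For example, the following query (assuming an `http_requests_total` counter) returns the number of requests served during the last hour per each matching time series:

```
increase(http_requests_total[1h])
```
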
@ -505,8 +508,8 @@ See also [increase_pure](#increase_pure), [increase_prometheus](#increase_promet
|
|||
#### increase_prometheus
|
||||
|
||||
`increase_prometheus(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the increase
|
||||
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
|
||||
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).
|
||||
It doesn't take into account the last sample before the given lookbehind window `d` when calculating the result in the same way as Prometheus does.
|
||||
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
|
||||
|
||||
|
@ -517,13 +520,13 @@ See also [increase_pure](#increase_pure) and [increase](#increase).
|
|||
#### increase_pure
|
||||
|
||||
`increase_pure(series_selector[d])` is a [rollup function](#rollup-functions), which works the same as [increase](#increase) except
|
||||
of the following corner case - it assumes that [counters](https://docs.victoriametrics.com/keyConcepts.html#counter) always start from 0,
|
||||
for the following corner case - it assumes that [counters](https://docs.victoriametrics.com/keyconcepts/#counter) always start from 0,
|
||||
while [increase](#increase) ignores the first value in a series if it is too big.
|
||||
|
||||
#### increases_over_time
|
||||
|
||||
`increases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw sample value increases
|
||||
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`increases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
value increases over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -531,16 +534,17 @@ See also [decreases_over_time](#decreases_over_time).
|
|||
|
||||
#### integrate
|
||||
|
||||
`integrate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the integral over raw samples on the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`integrate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the integral over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
#### irate
|
||||
|
||||
`irate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the "instant" per-second increase rate over the last two raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
|
||||
`irate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the "instant" per-second increase rate over
|
||||
the last two [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -552,7 +556,7 @@ See also [rate](#rate) and [rollup_rate](#rollup_rate).
|
|||
|
||||
`lag(series_selector[d])` is a [rollup function](#rollup-functions), which returns the duration in seconds between the last sample
|
||||
on the given lookbehind window `d` and the timestamp of the current point. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -560,8 +564,8 @@ See also [lifetime](#lifetime) and [duration_over_time](#duration_over_time).
|
|||
|
||||
#### last_over_time
|
||||
|
||||
`last_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`last_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
value on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is supported by PromQL.
|
||||
|
||||
|
@ -570,7 +574,7 @@ See also [first_over_time](#first_over_time) and [tlast_over_time](#tlast_over_t
|
|||
#### lifetime
|
||||
|
||||
`lifetime(series_selector[d])` is a [rollup function](#rollup-functions), which returns the duration in seconds between the last and the first sample
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -579,14 +583,15 @@ See also [duration_over_time](#duration_over_time) and [lag](#lag).
|
|||
#### mad_over_time
|
||||
|
||||
`mad_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation)
|
||||
over raw samples on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
See also [mad](#mad), [range_mad](#range_mad) and [outlier_iqr_over_time](#outlier_iqr_over_time).
|
||||
|
||||
#### max_over_time
|
||||
|
||||
`max_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the maximum value over raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`max_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the maximum value over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is supported by PromQL.
|
||||
|
||||
|
@ -594,16 +599,16 @@ See also [tmax_over_time](#tmax_over_time).
|
|||
|
||||
#### median_over_time
|
||||
|
||||
`median_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates median value over raw samples
|
||||
`median_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates median value over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
See also [avg_over_time](#avg_over_time).
|
||||
|
||||
#### min_over_time
|
||||
|
||||
`min_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the minimum value over raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`min_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the minimum value over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is supported by PromQL.
|
||||
|
||||
|
@ -612,15 +617,16 @@ See also [tmin_over_time](#tmin_over_time).
|
|||
#### mode_over_time
|
||||
|
||||
`mode_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [mode](https://en.wikipedia.org/wiki/Mode_(statistics))
|
||||
for raw samples on the given lookbehind window `d`. It is calculated individually per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering). It is expected that raw sample values are discrete.
|
||||
for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`. It is calculated individually per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering). It is expected that [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
values are discrete.
|
||||
|
||||
#### outlier_iqr_over_time
|
||||
|
||||
`outlier_iqr_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last sample on the given lookbehind window `d`
|
||||
if its value is either smaller than `q25-1.5*iqr` or bigger than `q75+1.5*iqr`, where:
|
||||
- `iqr` is an [Interquartile range](https://en.wikipedia.org/wiki/Interquartile_range) over raw samples on the lookbehind window `d`
|
||||
- `q25` and `q75` are 25th and 75th [percentiles](https://en.wikipedia.org/wiki/Percentile) over raw samples on the lookbehind window `d`.
|
||||
- `iqr` is an [Interquartile range](https://en.wikipedia.org/wiki/Interquartile_range) over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the lookbehind window `d`
|
||||
- `q25` and `q75` are 25th and 75th [percentiles](https://en.wikipedia.org/wiki/Percentile) over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the lookbehind window `d`.
|
||||
|
||||
The `outlier_iqr_over_time()` is useful for detecting anomalies in gauge values based on the previous history of values.
|
||||
For example, `outlier_iqr_over_time(memory_usage_bytes[1h])` triggers when `memory_usage_bytes` suddenly goes outside the usual value range for the last hour.
|
||||
|
@ -630,8 +636,8 @@ See also [outliers_iqr](#outliers_iqr).
|
|||
#### predict_linear
|
||||
|
||||
`predict_linear(series_selector[d], t)` is a [rollup function](#rollup-functions), which calculates the value `t` seconds in the future using
|
||||
linear interpolation over raw samples on the given lookbehind window `d`. The predicted value is calculated individually per each time series
|
||||
returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
linear interpolation over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`.
|
||||
The predicted value is calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
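For example, the following query (assuming a `node_filesystem_avail_bytes` gauge such as the one exposed by node_exporter) estimates the amount of free disk space 4 hours in the future based on the last 6 hours of samples; comparing the result to 0 is a common way to catch disks that are about to fill up:

```
predict_linear(node_filesystem_avail_bytes[6h], 4 * 3600) < 0
```
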
This function is supported by PromQL.
|
||||
|
||||
|
@ -639,7 +645,7 @@ See also [range_linear_regression](#range_linear_regression).
|
|||
|
||||
#### present_over_time
|
||||
|
||||
`present_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1 if there is at least a single raw sample
|
||||
`present_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1 if there is at least a single [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`. Otherwise, an empty result is returned.
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
@ -648,8 +654,8 @@ This function is supported by PromQL.
|
|||
|
||||
#### quantile_over_time
|
||||
|
||||
`quantile_over_time(phi, series_selector[d])` is a [rollup function](#rollup-functions), which calculates `phi`-quantile over raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`quantile_over_time(phi, series_selector[d])` is a [rollup function](#rollup-functions), which calculates `phi`-quantile over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
The `phi` value must be in the range `[0...1]`.
|
||||
|
||||
This function is supported by PromQL.
|
||||
|
@ -659,16 +665,16 @@ See also [quantiles_over_time](#quantiles_over_time).
|
|||
#### quantiles_over_time
|
||||
|
||||
`quantiles_over_time("phiLabel", phi1, ..., phiN, series_selector[d])` is a [rollup function](#rollup-functions), which calculates `phi*`-quantiles
|
||||
over raw samples on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
The function returns individual series per each `phi*` with `{phiLabel="phi*"}` label. `phi*` values must be in the range `[0...1]`.
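For example, the following query (using a hypothetical `request_duration_seconds` gauge) returns the 50th, 95th and 99th percentiles of raw sample values over the last 10 minutes as three series distinguished by the `quantile` label:

```
quantiles_over_time("quantile", 0.5, 0.95, 0.99, request_duration_seconds[10m])
```
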
See also [quantile_over_time](#quantile_over_time).
|
||||
|
||||
#### range_over_time
|
||||
|
||||
`range_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates value range over raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`range_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates value range over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
E.g. it calculates `max_over_time(series_selector[d]) - min_over_time(series_selector[d])`.
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
@ -676,8 +682,8 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
|
|||
#### rate
|
||||
|
||||
`rate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average per-second increase rate
|
||||
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
|
||||
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).
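For example, the following query (assuming an `http_requests_total` counter) returns the average per-second request rate over the last 5 minutes per each matching time series:

```
rate(http_requests_total[5m])
```
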
If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
|
||||
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
|
||||
|
@ -692,18 +698,18 @@ See also [irate](#irate) and [rollup_rate](#rollup_rate).
|
|||
|
||||
#### rate_over_sum
|
||||
|
||||
`rate_over_sum(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second rate over the sum of raw samples
|
||||
`rate_over_sum(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second rate over the sum of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`. The calculations are performed individually per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
#### resets
|
||||
|
||||
`resets(series_selector[d])` is a [rollup function](#rollup-functions), which returns the number
|
||||
of [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) resets over the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).
|
||||
of [counter](https://docs.victoriametrics.com/keyconcepts/#counter) resets over the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
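For example, the following query (assuming an `http_requests_total` counter) returns how many times the counter was reset to zero, typically because of application restarts, during the last day:

```
resets(http_requests_total[1d])
```
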
|
@ -711,9 +717,9 @@ This function is supported by PromQL.
|
|||
|
||||
#### rollup
|
||||
|
||||
`rollup(series_selector[d])` is a [rollup function](#rollup-functions), which calculates `min`, `max` and `avg` values for raw samples
|
||||
`rollup(series_selector[d])` is a [rollup function](#rollup-functions), which calculates `min`, `max` and `avg` values for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
These values are calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
These values are calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
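For example, the following query (using a hypothetical `temperature_celsius` gauge) keeps only the maximum raw sample value per lookbehind window and doesn't add the `rollup` label:

```
rollup(temperature_celsius[1h], "max")
```
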
See also [label_match](#label_match).
|
||||
|
@ -721,19 +727,20 @@ See also [label_match](#label_match).
|
|||
#### rollup_candlestick
|
||||
|
||||
`rollup_candlestick(series_selector[d])` is a [rollup function](#rollup-functions), which calculates `open`, `high`, `low` and `close` values (aka OHLC)
|
||||
over raw samples on the given lookbehind window `d` and returns them in time series with `rollup="open"`, `rollup="high"`, `rollup="low"` and `rollup="close"` additional labels.
|
||||
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` and returns them in time series
|
||||
with `rollup="open"`, `rollup="high"`, `rollup="low"` and `rollup="close"` additional labels.
|
||||
The calculations are performed individually per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering). This function is useful for financial applications.
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering). This function is useful for financial applications.
|
||||
|
||||
Optional 2nd argument `"open"`, `"high"`, `"low"` or `"close"` can be passed to keep only one calculation result without adding a label.
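For example, the following query (using a hypothetical `stock_trade_price` gauge) keeps only the closing price per 5-minute candle:

```
rollup_candlestick(stock_trade_price[5m], "close")
```
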
See also [label_match](#label_match).
|
||||
|
||||
#### rollup_delta
|
||||
|
||||
`rollup_delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates differences between adjacent raw samples
|
||||
`rollup_delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates differences between adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated differences
|
||||
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
@ -745,9 +752,9 @@ See also [rollup_increase](#rollup_increase).
|
|||
#### rollup_deriv
|
||||
|
||||
`rollup_deriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivatives
|
||||
for adjacent raw samples on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated per-second derivatives
|
||||
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
for adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` and returns `min`, `max` and `avg` values
|
||||
for the calculated per-second derivatives and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
@ -756,10 +763,10 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
|
|||
|
||||
#### rollup_increase
|
||||
|
||||
`rollup_increase(series_selector[d])` is a [rollup function](#rollup-functions), which calculates increases for adjacent raw samples
|
||||
`rollup_increase(series_selector[d])` is a [rollup function](#rollup-functions), which calculates increases for adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated increases
|
||||
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
@ -768,7 +775,8 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
|
|||
|
||||
#### rollup_rate
|
||||
|
||||
`rollup_rate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second change rates for adjacent raw samples
|
||||
`rollup_rate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second change rates
|
||||
for adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated per-second change rates
|
||||
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
|
||||
|
@ -778,16 +786,16 @@ when to use `rollup_rate()`.
|
|||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
#### rollup_scrape_interval
|
||||
|
||||
`rollup_scrape_interval(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the interval in seconds between
|
||||
adjacent raw samples on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated interval
|
||||
adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated interval
|
||||
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
|
||||
See also [label_match](#label_match).
|
||||
|
@ -796,8 +804,9 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
|
|||
|
||||
#### scrape_interval
|
||||
|
||||
`scrape_interval(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average interval in seconds between raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`scrape_interval(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average interval in seconds
|
||||
between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -805,9 +814,10 @@ See also [rollup_scrape_interval](#rollup_scrape_interval).
|
|||
|
||||
#### share_gt_over_time
|
||||
|
||||
`share_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`) of raw samples
|
||||
`share_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`)
|
||||
of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which are bigger than `gt`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is useful for calculating SLI and SLO. Example: `share_gt_over_time(up[24h], 0)` - returns service availability for the last 24 hours.
|
||||
|
||||
|
@ -817,9 +827,10 @@ See also [share_le_over_time](#share_le_over_time) and [count_gt_over_time](#cou
|
|||
|
||||
#### share_le_over_time
|
||||
|
||||
`share_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`) of raw samples
|
||||
`share_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`)
|
||||
of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which are smaller than or equal to `le`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is useful for calculating SLI and SLO. Example: `share_le_over_time(memory_usage_bytes[24h], 100*1024*1024)` returns
|
||||
the share of time series values for the last 24 hours when memory usage was below or equal to 100MB.
|
||||
|
@ -830,9 +841,10 @@ See also [share_gt_over_time](#share_gt_over_time) and [count_le_over_time](#cou
|
|||
|
||||
#### share_eq_over_time
|
||||
|
||||
`share_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`) of raw samples
|
||||
`share_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`)
|
||||
of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which are equal to `eq`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@@ -841,15 +853,15 @@ See also [count_eq_over_time](#count_eq_over_time).
 #### stale_samples_over_time

 `stale_samples_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number
-of [staleness markers](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) on the given lookbehind window `d`
-per each time series matching the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+of [staleness markers](https://docs.victoriametrics.com/vmagent/#prometheus-staleness-markers) on the given lookbehind window `d`
+per each time series matching the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

 #### stddev_over_time

-`stddev_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard deviation over raw samples
-on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+`stddev_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard deviation over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
+on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
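Editorial sketch of `stddev_over_time` used as a dynamic threshold (not part of the diff; `process_resident_memory_bytes` is just a commonly available gauge, adjust to your metrics):

```metricsql
# Points deviating from the 1h average by more than 3 standard deviations.
abs(process_resident_memory_bytes - avg_over_time(process_resident_memory_bytes[1h]))
  > 3 * stddev_over_time(process_resident_memory_bytes[1h])
```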
@ -859,8 +871,8 @@ See also [stdvar_over_time](#stdvar_over_time).
|
|||
|
||||
#### stdvar_over_time
|
||||
|
||||
`stdvar_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard variance over raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`stdvar_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard variance over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -870,8 +882,8 @@ See also [stddev_over_time](#stddev_over_time).
|
|||
|
||||
#### sum_eq_over_time
|
||||
|
||||
`sum_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-function), which calculates the sum of raw sample values equal to `eq`
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`sum_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-function), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
values equal to `eq` on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -879,8 +891,8 @@ See also [sum_over_time](#sum_over_time) and [count_eq_over_time](#count_eq_over
|
|||
|
||||
#### sum_gt_over_time
|
||||
|
||||
`sum_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-function), which calculates the sum of raw sample values bigger than `gt`
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`sum_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-function), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
values bigger than `gt` on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -888,8 +900,8 @@ See also [sum_over_time](#sum_over_time) and [count_gt_over_time](#count_gt_over
|
|||
|
||||
#### sum_le_over_time
|
||||
|
||||
`sum_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-function), which calculates the sum of raw sample values smaller or equal to `le`
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`sum_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-function), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
values smaller or equal to `le` on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -897,8 +909,8 @@ See also [sum_over_time](#sum_over_time) and [count_le_over_time](#count_le_over
|
|||
|
||||
#### sum_over_time
|
||||
|
||||
`sum_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`sum_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples) values
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@@ -906,15 +918,16 @@ This function is supported by PromQL.

 #### sum2_over_time

-`sum2_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of squares for raw sample values
-on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+`sum2_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of squares for [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
+values on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

 #### timestamp

-`timestamp(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last raw sample
-on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+`timestamp(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
+for the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
+on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
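Editorial aside (not part of the diff): `sum2_over_time` pairs naturally with `sum_over_time` and `count_over_time` for deriving variance over a window, since var = E[x^2] - (E[x])^2. A sketch with a hypothetical gauge metric:

```metricsql
# Population variance of a gauge over the last hour,
# derived from the sum-of-squares, sum and count rollups.
  sum2_over_time(process_resident_memory_bytes[1h]) / count_over_time(process_resident_memory_bytes[1h])
- (sum_over_time(process_resident_memory_bytes[1h]) / count_over_time(process_resident_memory_bytes[1h]))^2
```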
@ -924,8 +937,9 @@ See also [time](#time) and [now](#now).
|
|||
|
||||
#### timestamp_with_name
|
||||
|
||||
`timestamp_with_name(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last raw sample
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`timestamp_with_name(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
|
||||
for the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are preserved in the resulting rollups.
|
||||
|
||||
|
@ -933,8 +947,9 @@ See also [timestamp](#timestamp) and [keep_metric_names](#keep_metric_names) mod
|
|||
|
||||
#### tfirst_over_time
|
||||
|
||||
`tfirst_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the first raw sample
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`tfirst_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
|
||||
for the first [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@@ -943,7 +958,7 @@ See also [first_over_time](#first_over_time).
 #### tlast_change_over_time

 `tlast_change_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last change
-per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) on the given lookbehind window `d`.
+per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering) on the given lookbehind window `d`.

 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
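Editorial sketch (hypothetical `queue_size` gauge): this function is handy for spotting series that have stopped changing:

```metricsql
# Series whose value has not changed during the last 15 minutes,
# looking back over a 1h window.
(time() - tlast_change_over_time(queue_size[1h])) > 15*60
```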
@ -957,9 +972,10 @@ See also [tlast_change_over_time](#tlast_change_over_time).
|
|||
|
||||
#### tmax_over_time
|
||||
|
||||
`tmax_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the raw sample
|
||||
`tmax_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
|
||||
for the [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
with the maximum value on the given lookbehind window `d`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@ -967,9 +983,10 @@ See also [max_over_time](#max_over_time).
|
|||
|
||||
#### tmin_over_time
|
||||
|
||||
`tmin_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the raw sample
|
||||
`tmin_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
|
||||
for the [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
with the minimum value on the given lookbehind window `d`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
|
@@ -978,8 +995,8 @@ See also [min_over_time](#min_over_time).
 #### zscore_over_time

 `zscore_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns [z-score](https://en.wikipedia.org/wiki/Standard_score)
-for raw samples on the given lookbehind window `d`. It is calculated independently per each time series returned
-from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
+for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`. It is calculated independently per each time series returned
+from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
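Editorial illustration (hypothetical gauge metric, not part of the diff):

```metricsql
# Series whose z-score over the last hour exceeds 3 in absolute value,
# a simple statistical outlier filter.
abs(zscore_over_time(process_resident_memory_bytes[1h])) > 3
```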
@ -994,7 +1011,7 @@ returned from the rollup `delta(temperature[24h])`.
|
|||
|
||||
Additional details:
|
||||
|
||||
* If transform function is applied directly to a [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
|
||||
* If transform function is applied directly to a [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering),
|
||||
then the [default_rollup()](#default_rollup) function is automatically applied before calculating the transformations.
|
||||
For example, `abs(temperature)` is implicitly transformed to `abs(default_rollup(temperature))`.
|
||||
* All the transform functions accept optional `keep_metric_names` modifier. If it is set,
|
||||
|
@ -1230,7 +1247,7 @@ by replacing all the values bigger or equal to 30 with 40.
|
|||
#### end
|
||||
|
||||
`end()` is a [transform function](#transform-functions), which returns the unix timestamp in seconds for the last point.
|
||||
It is known as `end` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
It is known as `end` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
|
||||
|
||||
See also [start](#start), [time](#time) and [now](#now).
|
||||
|
||||
|
@ -1653,14 +1670,14 @@ This function is supported by PromQL.
|
|||
|
||||
`start()` is a [transform function](#transform-functions), which returns unix timestamp in seconds for the first point.
|
||||
|
||||
It is known as `start` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
It is known as `start` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
|
||||
|
||||
See also [end](#end), [time](#time) and [now](#now).
|
||||
|
||||
#### step
|
||||
|
||||
`step()` is a [transform function](#transform-functions), which returns the step in seconds (aka interval) between the returned points.
|
||||
It is known as `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
It is known as `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
|
||||
|
||||
See also [start](#start) and [end](#end).
|
||||
|
||||
|
@ -1717,7 +1734,7 @@ This function is supported by PromQL.
|
|||
|
||||
Additional details:
|
||||
|
||||
* If label manipulation function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
|
||||
* If label manipulation function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering),
|
||||
then the [default_rollup()](#default_rollup) function is automatically applied before performing the label transformation.
|
||||
For example, `alias(temperature, "foo")` is implicitly transformed to `alias(default_rollup(temperature), "foo")`.
|
||||
|
||||
|
@ -1894,7 +1911,7 @@ Additional details:
|
|||
and calculate the [count](#count) aggregate function independently per each group, while `count(up) without (instance)`
|
||||
would group [rollup results](#rollup-functions) by all the labels except `instance` before calculating [count](#count) aggregate function independently per each group.
|
||||
Multiple labels can be put in `by` and `without` modifiers.
|
||||
* If the aggregate function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
|
||||
* If the aggregate function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering),
|
||||
then the [default_rollup()](#default_rollup) function is automatically applied before calculating the aggregate.
|
||||
For example, `count(up)` is implicitly transformed to `count(default_rollup(up))`.
|
||||
* Aggregate functions accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point
|
||||
|
@ -2104,7 +2121,7 @@ See also [quantile](#quantile).
|
|||
`share(q) by (group_labels)` is [aggregate function](#aggregate-functions), which returns shares in the range `[0..1]`
|
||||
for every non-negative points returned by `q` per each timestamp, so the sum of shares per each `group_labels` equals 1.
|
||||
|
||||
This function is useful for normalizing [histogram bucket](https://docs.victoriametrics.com/keyConcepts.html#histogram) shares
|
||||
This function is useful for normalizing [histogram bucket](https://docs.victoriametrics.com/keyconcepts/#histogram) shares
|
||||
into `[0..1]` range:
|
||||
|
||||
```metricsql
|
||||
|
@@ -2208,10 +2225,11 @@ See also [zscore_over_time](#zscore_over_time), [range_trim_zscore](#range_trim_

 ## Subqueries

 MetricsQL supports and extends PromQL subqueries. See [this article](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3) for details.
-Any [rollup function](#rollup-functions) for something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) form a subquery.
+Any [rollup function](#rollup-functions) for something other than [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) form a subquery.
 Nested rollup functions can be implicit thanks to the [implicit query conversions](#implicit-query-conversions).
 For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m))[1i:1i])`, so it becomes a subquery,
 since it contains [default_rollup](#default_rollup) nested into [delta](#delta).
+This behavior can be disabled or logged via cmd-line flags `-search.disableImplicitConversion` and `-search.logImplicitConversion` since v1.101.0.

 VictoriaMetrics performs subqueries in the following way:
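Editorial illustration of an explicit subquery (the same expression is used as the example in the next hunk):

```metricsql
# Peak 5-minute request rate observed during the last hour,
# with the inner rate() evaluated every 30 seconds.
max_over_time(rate(http_requests_total[5m])[1h:30s])
```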
@@ -2219,19 +2237,19 @@ VictoriaMetrics performs subqueries in the following way:
   For example, for expression `max_over_time(rate(http_requests_total[5m])[1h:30s])` the inner function `rate(http_requests_total[5m])`
   is calculated with `step=30s`. The resulting data points are aligned by the `step`.
 * It calculates the outer rollup function over the results of the inner rollup function using the `step` value
-  passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
+  passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).

 ## Implicit query conversions

 VictoriaMetrics performs the following implicit conversions for incoming queries before starting the calculations:

 * If lookbehind window in square brackets is missing inside [rollup function](#rollup-functions), then it is automatically set to the following value:
-  - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
+  - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
     for all the [rollup functions](#rollup-functions) except of [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
     For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
   - To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
     for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
-* All the [series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering),
+* All the [series selectors](https://docs.victoriametrics.com/keyconcepts/#filtering),
   which aren't wrapped into [rollup functions](#rollup-functions), are automatically wrapped into [default_rollup](#default_rollup) function.
   Examples:
   * `foo` is transformed to `default_rollup(foo)`
@@ -2242,6 +2260,7 @@ VictoriaMetrics performs the following implicit conversions for incoming queries
   it is [transform function](#transform-functions)
 * If `step` in square brackets is missing inside [subquery](#subqueries), then `1i` step is automatically added there.
   For example, `avg_over_time(rate(http_requests_total[5m])[1h])` is automatically converted to `avg_over_time(rate(http_requests_total[5m])[1h:1i])`.
-* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
+* If something other than [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering)
   is passed to [rollup function](#rollup-functions), then a [subquery](#subqueries) with `1i` lookbehind window and `1i` step is automatically formed.
   For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up)))[1i:1i])`.
+  This behavior can be disabled or logged via cmd-line flags `-search.disableImplicitConversion` and `-search.logImplicitConversion` since v1.101.0.
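To make the last conversion above concrete (editorial note, not part of the diff):

```metricsql
# Written by the user:
rate(sum(up))
# What VictoriaMetrics actually evaluates after the implicit conversions,
# unless -search.disableImplicitConversion is set:
rate((sum(default_rollup(up)))[1i:1i])
```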
|
@ -7,10 +7,11 @@ import "./style.scss";
|
|||
interface LegendProps {
|
||||
labels: LegendItemType[];
|
||||
query: string[];
|
||||
isAnomalyView?: boolean;
|
||||
onChange: (item: LegendItemType, metaKey: boolean) => void;
|
||||
}
|
||||
|
||||
const Legend: FC<LegendProps> = ({ labels, query, onChange }) => {
|
||||
const Legend: FC<LegendProps> = ({ labels, query, isAnomalyView, onChange }) => {
|
||||
const groups = useMemo(() => {
|
||||
return Array.from(new Set(labels.map(l => l.group)));
|
||||
}, [labels]);
|
||||
|
@ -39,6 +40,7 @@ const Legend: FC<LegendProps> = ({ labels, query, onChange }) => {
|
|||
<LegendItem
|
||||
key={legendItem.label}
|
||||
legend={legendItem}
|
||||
isAnomalyView={isAnomalyView}
|
||||
onChange={onChange}
|
||||
/>
|
||||
)}
|
||||
|
|
|
@ -11,9 +11,10 @@ interface LegendItemProps {
|
|||
legend: LegendItemType;
|
||||
onChange?: (item: LegendItemType, metaKey: boolean) => void;
|
||||
isHeatmap?: boolean;
|
||||
isAnomalyView?: boolean;
|
||||
}
|
||||
|
||||
const LegendItem: FC<LegendItemProps> = ({ legend, onChange, isHeatmap }) => {
|
||||
const LegendItem: FC<LegendItemProps> = ({ legend, onChange, isHeatmap, isAnomalyView }) => {
|
||||
const copyToClipboard = useCopyToClipboard();
|
||||
|
||||
const freeFormFields = useMemo(() => {
|
||||
|
@ -47,7 +48,7 @@ const LegendItem: FC<LegendItemProps> = ({ legend, onChange, isHeatmap }) => {
|
|||
})}
|
||||
onClick={createHandlerClick(legend)}
|
||||
>
|
||||
{!isHeatmap && (
|
||||
{!isAnomalyView && !isHeatmap && (
|
||||
<div
|
||||
className="vm-legend-item__marker"
|
||||
style={{ backgroundColor: legend.color }}
|
||||
|
|
|
@ -9,8 +9,8 @@ type Props = {
|
|||
|
||||
const titles: Partial<Record<ForecastType, string>> = {
|
||||
[ForecastType.yhat]: "yhat",
|
||||
[ForecastType.yhatLower]: "yhat_lower/_upper",
|
||||
[ForecastType.yhatUpper]: "yhat_lower/_upper",
|
||||
[ForecastType.yhatLower]: "yhat_upper - yhat_lower",
|
||||
[ForecastType.yhatUpper]: "yhat_upper - yhat_lower",
|
||||
[ForecastType.anomaly]: "anomalies",
|
||||
[ForecastType.training]: "training data",
|
||||
[ForecastType.actual]: "y"
|
||||
|
@ -42,9 +42,6 @@ const LegendAnomaly: FC<Props> = ({ series }) => {
|
|||
}));
|
||||
}, [series]);
|
||||
|
||||
const container = document.getElementById("legendAnomaly");
|
||||
if (!container) return null;
|
||||
|
||||
return <>
|
||||
<div className="vm-legend-anomaly">
|
||||
{/* TODO: remove .filter() after the correct training data has been added */}
|
||||
|
|
|
@ -40,7 +40,7 @@ export interface LineChartProps {
|
|||
setPeriod: ({ from, to }: { from: Date, to: Date }) => void;
|
||||
layoutSize: ElementSize;
|
||||
height?: number;
|
||||
anomalyView?: boolean;
|
||||
isAnomalyView?: boolean;
|
||||
spanGaps?: boolean;
|
||||
}
|
||||
|
||||
|
@ -54,7 +54,7 @@ const LineChart: FC<LineChartProps> = ({
|
|||
setPeriod,
|
||||
layoutSize,
|
||||
height,
|
||||
anomalyView,
|
||||
isAnomalyView,
|
||||
spanGaps = false
|
||||
}) => {
|
||||
const { isDarkTheme } = useAppState();
|
||||
|
@ -73,7 +73,7 @@ const LineChart: FC<LineChartProps> = ({
|
|||
seriesFocus,
|
||||
setCursor,
|
||||
resetTooltips
|
||||
} = useLineTooltip({ u: uPlotInst, metrics, series, unit, anomalyView });
|
||||
} = useLineTooltip({ u: uPlotInst, metrics, series, unit, isAnomalyView });
|
||||
|
||||
const options: uPlotOptions = {
|
||||
...getDefaultOptions({ width: layoutSize.width, height }),
|
||||
|
|
|
@ -12,8 +12,11 @@ import useBoolean from "../../../hooks/useBoolean";
|
|||
import useEventListener from "../../../hooks/useEventListener";
|
||||
import Tooltip from "../../Main/Tooltip/Tooltip";
|
||||
import { AUTOCOMPLETE_QUICK_KEY } from "../../Main/ShortcutKeys/constants/keyList";
|
||||
import { QueryConfiguratorProps } from "../../../pages/CustomPanel/QueryConfigurator/QueryConfigurator";
|
||||
|
||||
const AdditionalSettingsControls: FC<{isMobile?: boolean}> = ({ isMobile }) => {
|
||||
type Props = Pick<QueryConfiguratorProps, "hideButtons">;
|
||||
|
||||
const AdditionalSettingsControls: FC<Props & {isMobile?: boolean}> = ({ isMobile, hideButtons }) => {
|
||||
const { autocomplete } = useQueryState();
|
||||
const queryDispatch = useQueryDispatch();
|
||||
|
||||
|
@ -54,31 +57,35 @@ const AdditionalSettingsControls: FC<{isMobile?: boolean}> = ({ isMobile }) => {
|
|||
"vm-additional-settings_mobile": isMobile
|
||||
})}
|
||||
>
|
||||
<Tooltip title={<>Quick tip: {AUTOCOMPLETE_QUICK_KEY}</>}>
|
||||
<Switch
|
||||
label={"Autocomplete"}
|
||||
value={autocomplete}
|
||||
onChange={onChangeAutocomplete}
|
||||
fullWidth={isMobile}
|
||||
/>
|
||||
</Tooltip>
|
||||
{!hideButtons?.autocomplete && (
|
||||
<Tooltip title={<>Quick tip: {AUTOCOMPLETE_QUICK_KEY}</>}>
|
||||
<Switch
|
||||
label={"Autocomplete"}
|
||||
value={autocomplete}
|
||||
onChange={onChangeAutocomplete}
|
||||
fullWidth={isMobile}
|
||||
/>
|
||||
</Tooltip>
|
||||
)}
|
||||
<Switch
|
||||
label={"Disable cache"}
|
||||
value={nocache}
|
||||
onChange={onChangeCache}
|
||||
fullWidth={isMobile}
|
||||
/>
|
||||
<Switch
|
||||
label={"Trace query"}
|
||||
value={isTracingEnabled}
|
||||
onChange={onChangeQueryTracing}
|
||||
fullWidth={isMobile}
|
||||
/>
|
||||
{!hideButtons?.traceQuery && (
|
||||
<Switch
|
||||
label={"Trace query"}
|
||||
value={isTracingEnabled}
|
||||
onChange={onChangeQueryTracing}
|
||||
fullWidth={isMobile}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
const AdditionalSettings: FC = () => {
|
||||
const AdditionalSettings: FC<Props> = (props) => {
|
||||
const { isMobile } = useDeviceDetect();
|
||||
const targetRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
|
@ -106,13 +113,16 @@ const AdditionalSettings: FC = () => {
|
|||
onClose={handleCloseList}
|
||||
title={"Query settings"}
|
||||
>
|
||||
<AdditionalSettingsControls isMobile={isMobile}/>
|
||||
<AdditionalSettingsControls
|
||||
isMobile={isMobile}
|
||||
{...props}
|
||||
/>
|
||||
</Popper>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
return <AdditionalSettingsControls/>;
|
||||
return <AdditionalSettingsControls {...props}/>;
|
||||
};
|
||||
|
||||
export default AdditionalSettings;
|
||||
|
|
|
@ -9,6 +9,7 @@ import { partialWarning, seriesFetchedWarning } from "./warningText";
|
|||
import { AutocompleteOptions } from "../../Main/Autocomplete/Autocomplete";
|
||||
import useDeviceDetect from "../../../hooks/useDeviceDetect";
|
||||
import { useQueryState } from "../../../state/query/QueryStateContext";
|
||||
import debounce from "lodash.debounce";
|
||||
|
||||
export interface QueryEditorProps {
|
||||
onChange: (query: string) => void;
|
||||
|
@ -40,9 +41,12 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
|||
const { isMobile } = useDeviceDetect();
|
||||
|
||||
const [openAutocomplete, setOpenAutocomplete] = useState(false);
|
||||
const [caretPosition, setCaretPosition] = useState([0, 0]);
|
||||
const [caretPosition, setCaretPosition] = useState<[number, number]>([0, 0]);
|
||||
const autocompleteAnchorEl = useRef<HTMLInputElement>(null);
|
||||
|
||||
const [showAutocomplete, setShowAutocomplete] = useState(autocomplete);
|
||||
const debouncedSetShowAutocomplete = useRef(debounce(setShowAutocomplete, 500)).current;
|
||||
|
||||
const warning = [
|
||||
{
|
||||
show: stats?.seriesFetched === "0" && !stats.resultLength,
|
||||
|
@ -58,8 +62,9 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
|||
label = `${label} (${stats.executionTimeMsec || 0}ms)`;
|
||||
}
|
||||
|
||||
const handleSelect = (val: string) => {
|
||||
const handleSelect = (val: string, caretPosition: number) => {
|
||||
onChange(val);
|
||||
setCaretPosition([caretPosition, caretPosition]);
|
||||
};
|
||||
|
||||
const handleKeyDown = (e: KeyboardEvent) => {
|
||||
|
@ -100,14 +105,19 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
|||
setOpenAutocomplete(!!val.length);
|
||||
};
|
||||
|
||||
const handleChangeCaret = (val: number[]) => {
|
||||
setCaretPosition(val);
|
||||
const handleChangeCaret = (val: [number, number]) => {
|
||||
setCaretPosition(prev => prev[0] === val[0] && prev[1] === val[1] ? prev : val);
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
setOpenAutocomplete(autocomplete);
|
||||
}, [autocompleteQuick]);
|
||||
|
||||
useEffect(() => {
|
||||
setShowAutocomplete(false);
|
||||
debouncedSetShowAutocomplete(true);
|
||||
}, [caretPosition]);
|
||||
|
||||
return (
|
||||
<div
|
||||
className="vm-query-editor"
|
||||
|
@ -125,12 +135,14 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
|||
onChangeCaret={handleChangeCaret}
|
||||
disabled={disabled}
|
||||
inputmode={"search"}
|
||||
caretPosition={caretPosition}
|
||||
/>
|
||||
{autocomplete && (
|
||||
{showAutocomplete && autocomplete && (
|
||||
<QueryEditorAutocomplete
|
||||
value={value}
|
||||
anchorEl={autocompleteAnchorEl}
|
||||
caretPosition={caretPosition}
|
||||
hasHelperText={Boolean(warning || error)}
|
||||
onSelect={handleSelect}
|
||||
onFoundOptions={handleChangeFoundOptions}
|
||||
/>
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
import React, { FC, Ref, useState, useEffect, useMemo } from "preact/compat";
|
||||
import React, { FC, Ref, useState, useEffect, useMemo, useCallback } from "preact/compat";
|
||||
import Autocomplete, { AutocompleteOptions } from "../../Main/Autocomplete/Autocomplete";
|
||||
import { useFetchQueryOptions } from "../../../hooks/useFetchQueryOptions";
|
||||
import { getTextWidth } from "../../../utils/uplot";
|
||||
import { escapeRegexp, hasUnclosedQuotes } from "../../../utils/regexp";
|
||||
import useGetMetricsQL from "../../../hooks/useGetMetricsQL";
|
||||
import { QueryContextType } from "../../../types";
|
||||
|
@ -10,8 +9,9 @@ import { AUTOCOMPLETE_LIMITS } from "../../../constants/queryAutocomplete";
|
|||
interface QueryEditorAutocompleteProps {
|
||||
value: string;
|
||||
anchorEl: Ref<HTMLInputElement>;
|
||||
caretPosition: number[];
|
||||
onSelect: (val: string) => void;
|
||||
caretPosition: [number, number]; // [start, end]
|
||||
hasHelperText: boolean;
|
||||
onSelect: (val: string, caretPosition: number) => void;
|
||||
onFoundOptions: (val: AutocompleteOptions[]) => void;
|
||||
}
|
||||
|
||||
|
@ -19,16 +19,24 @@ const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
|
|||
value,
|
||||
anchorEl,
|
||||
caretPosition,
|
||||
hasHelperText,
|
||||
onSelect,
|
||||
onFoundOptions
|
||||
}) => {
|
||||
const [leftOffset, setLeftOffset] = useState(0);
|
||||
const [offsetPos, setOffsetPos] = useState({ top: 0, left: 0 });
|
||||
const metricsqlFunctions = useGetMetricsQL();
|
||||
|
||||
const values = useMemo(() => {
|
||||
if (caretPosition[0] !== caretPosition[1]) return { beforeCursor: value, afterCursor: "" };
|
||||
const beforeCursor = value.substring(0, caretPosition[0]);
|
||||
const afterCursor = value.substring(caretPosition[1]);
|
||||
return { beforeCursor, afterCursor };
|
||||
}, [value, caretPosition]);
|
||||
|
||||
const exprLastPart = useMemo(() => {
|
||||
const parts = value.split("}");
|
||||
const parts = values.beforeCursor.split("}");
|
||||
return parts[parts.length - 1];
|
||||
}, [value]);
|
||||
}, [values]);
|
||||
|
||||
const metric = useMemo(() => {
|
||||
const regexp = /\b[^{}(),\s]+(?={|$)/g;
|
||||
|
@ -43,7 +51,7 @@ const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
|
|||
}, [exprLastPart]);
|
||||
|
||||
const shouldSuppressAutoSuggestion = (value: string) => {
|
||||
const pattern = /([(),+\-*/^]|\b(?:or|and|unless|default|ifnot|if|group_left|group_right)\b)/;
|
||||
const pattern = /([{(),+\-*/^]|\b(?:or|and|unless|default|ifnot|if|group_left|group_right)\b)/;
|
||||
const parts = value.split(/\s+/);
|
||||
const partsCount = parts.length;
|
||||
const lastPart = parts[partsCount - 1];
|
||||
|
@ -55,7 +63,7 @@ const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
|
|||
};
|
||||
|
||||
const context = useMemo(() => {
|
||||
if (!value || value.endsWith("}") || shouldSuppressAutoSuggestion(value)) {
|
||||
if (!values.beforeCursor || values.beforeCursor.endsWith("}") || shouldSuppressAutoSuggestion(values.beforeCursor)) {
|
||||
return QueryContextType.empty;
|
||||
}
|
||||
|
||||
|
@ -63,19 +71,19 @@ const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
|
|||
const labelValueRegexp = new RegExp(`(${escapeRegexp(metric)})?{?.+${escapeRegexp(label)}(=|!=|=~|!~)"?([^"]*)$`, "g");
|
||||
|
||||
switch (true) {
|
||||
case labelValueRegexp.test(value):
|
||||
case labelValueRegexp.test(values.beforeCursor):
|
||||
return QueryContextType.labelValue;
|
||||
case labelRegexp.test(value):
|
||||
case labelRegexp.test(values.beforeCursor):
|
||||
return QueryContextType.label;
|
||||
default:
|
||||
return QueryContextType.metricsql;
|
||||
}
|
||||
}, [value, metric, label]);
|
||||
}, [values, metric, label]);
|
||||
|
||||
const valueByContext = useMemo(() => {
|
||||
const wordMatch = value.match(/([\w_\-.:/]+(?![},]))$/);
|
||||
const wordMatch = values.beforeCursor.match(/([\w_\-.:/]+(?![},]))$/);
|
||||
return wordMatch ? wordMatch[0] : "";
|
||||
}, [value]);
|
||||
}, [values.beforeCursor]);
|
||||
|
||||
const { metrics, labels, labelValues, loading } = useFetchQueryOptions({
|
||||
valueByContext,
|
||||
|
@ -97,8 +105,10 @@ const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
|
|||
}
|
||||
}, [context, metrics, labels, labelValues]);
|
||||
|
||||
const handleSelect = (insert: string) => {
|
||||
const handleSelect = useCallback((insert: string) => {
|
||||
// Find the start and end of valueByContext in the query string
|
||||
const value = values.beforeCursor;
|
||||
let valueAfterCursor = values.afterCursor;
|
||||
const startIndexOfValueByContext = value.lastIndexOf(valueByContext, caretPosition[0]);
|
||||
const endIndexOfValueByContext = startIndexOfValueByContext + valueByContext.length;
|
||||
|
||||
|
@ -110,26 +120,59 @@ const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
|
|||
if (context === QueryContextType.labelValue) {
|
||||
const quote = "\"";
|
||||
const needsQuote = /(?:=|!=|=~|!~)$/.test(beforeValueByContext);
|
||||
valueAfterCursor = valueAfterCursor.replace(/^[^\s"|},]*/, "");
|
||||
insert = `${needsQuote ? quote : ""}${insert}`;
|
||||
}
|
||||
|
||||
if (context === QueryContextType.label) {
|
||||
valueAfterCursor = valueAfterCursor.replace(/^[^\s=!,{}()"|+\-/*^]*/, "");
|
||||
}
|
||||
|
||||
if (context === QueryContextType.metricsql) {
|
||||
valueAfterCursor = valueAfterCursor.replace(/^[^\s[\]{}()"|+\-/*^]*/, "");
|
||||
}
|
||||
// Assemble the new value with the inserted text
|
||||
const newVal = `${beforeValueByContext}${insert}${afterValueByContext}`;
|
||||
onSelect(newVal);
|
||||
};
|
||||
const newVal = `${beforeValueByContext}${insert}${afterValueByContext}${valueAfterCursor}`;
|
||||
onSelect(newVal, beforeValueByContext.length + insert.length);
|
||||
}, [values]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!anchorEl.current) {
|
||||
setLeftOffset(0);
|
||||
setOffsetPos({ top: 0, left: 0 });
|
||||
return;
|
||||
}
|
||||
|
||||
const style = window.getComputedStyle(anchorEl.current);
|
||||
const element = anchorEl.current.querySelector("textarea") || anchorEl.current;
|
||||
const style = window.getComputedStyle(element);
|
||||
const fontSize = `${style.getPropertyValue("font-size")}`;
|
||||
const fontFamily = `${style.getPropertyValue("font-family")}`;
|
||||
const offset = getTextWidth(value, `${fontSize} ${fontFamily}`);
|
||||
setLeftOffset(offset);
|
||||
}, [anchorEl, caretPosition]);
|
||||
const lineHeight = parseInt(`${style.getPropertyValue("line-height")}`);
|
||||
|
||||
const span = document.createElement("div");
|
||||
span.style.font = `${fontSize} ${fontFamily}`;
|
||||
span.style.padding = style.getPropertyValue("padding");
|
||||
span.style.lineHeight = `${lineHeight}px`;
|
||||
span.style.width = `${element.offsetWidth}px`;
|
||||
span.style.maxWidth = `${element.offsetWidth}px`;
|
||||
span.style.whiteSpace = style.getPropertyValue("white-space");
|
||||
span.style.overflowWrap = style.getPropertyValue("overflow-wrap");
|
||||
|
||||
const marker = document.createElement("span");
|
||||
span.appendChild(document.createTextNode(values.beforeCursor));
|
||||
span.appendChild(marker);
|
||||
span.appendChild(document.createTextNode(values.afterCursor));
|
||||
document.body.appendChild(span);
|
||||
|
||||
const spanRect = span.getBoundingClientRect();
|
||||
const markerRect = marker.getBoundingClientRect();
|
||||
|
||||
const leftOffset = markerRect.left - spanRect.left;
|
||||
const topOffset = markerRect.bottom - spanRect.bottom - (hasHelperText ? lineHeight : 0);
|
||||
setOffsetPos({ top: topOffset, left: leftOffset });
|
||||
|
||||
span.remove();
|
||||
marker.remove();
|
||||
}, [anchorEl, caretPosition, hasHelperText]);
|
||||
|
||||
return (
|
||||
<>
|
||||
|
@ -140,7 +183,7 @@ const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
|
|||
options={options}
|
||||
anchor={anchorEl}
|
||||
minLength={0}
|
||||
offset={{ top: 0, left: leftOffset }}
|
||||
offset={offsetPos}
|
||||
onSelect={handleSelect}
|
||||
onFoundOptions={onFoundOptions}
|
||||
maxDisplayResults={{
|
||||
|
|
|
@ -2,4 +2,13 @@
|
|||
|
||||
.vm-query-editor {
|
||||
position: relative;
|
||||
|
||||
.marker-detection {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
pointer-events: none;
|
||||
z-index: -9999;
|
||||
visibility: hidden;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -120,7 +120,7 @@ const Autocomplete: FC<AutocompleteProps> = ({
|
|||
const handleKeyDown = useCallback((e: KeyboardEvent) => {
|
||||
const { key, ctrlKey, metaKey, shiftKey } = e;
|
||||
const modifiers = ctrlKey || metaKey || shiftKey;
|
||||
const hasOptions = foundOptions.length;
|
||||
const hasOptions = foundOptions.length && !hideFoundedOptions;
|
||||
|
||||
if (key === "ArrowUp" && !modifiers && hasOptions) {
|
||||
e.preventDefault();
|
||||
|
@ -148,7 +148,7 @@ const Autocomplete: FC<AutocompleteProps> = ({
|
|||
if (key === "Escape") {
|
||||
handleCloseAutocomplete();
|
||||
}
|
||||
}, [focusOption, foundOptions, handleCloseAutocomplete, onSelect, selected]);
|
||||
}, [focusOption, foundOptions, hideFoundedOptions, handleCloseAutocomplete, onSelect, selected]);
|
||||
|
||||
useEffect(() => {
|
||||
setOpenAutocomplete(value.length >= minLength);
|
||||
|
|
|
@ -72,6 +72,10 @@ const Select: FC<SelectProps> = ({
|
|||
setOpenList(true);
|
||||
};
|
||||
|
||||
const handleBlur = () => {
|
||||
list.includes(search) && onChange(search);
|
||||
};
|
||||
|
||||
const handleToggleList = (e: MouseEvent<HTMLDivElement>) => {
|
||||
if (e.target instanceof HTMLInputElement || disabled) return;
|
||||
setOpenList(prev => !prev);
|
||||
|
@ -142,6 +146,7 @@ const Select: FC<SelectProps> = ({
|
|||
placeholder={placeholder}
|
||||
onInput={handleChange}
|
||||
onFocus={handleFocus}
|
||||
onBlur={handleBlur}
|
||||
ref={inputRef}
|
||||
readOnly={isMobile || !searchable}
|
||||
/>
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
import React, {
|
||||
FC,
|
||||
useEffect,
|
||||
useState,
|
||||
useRef,
|
||||
useMemo,
|
||||
FormEvent,
|
||||
|
@ -28,12 +29,13 @@ interface TextFieldProps {
|
|||
autofocus?: boolean
|
||||
helperText?: string
|
||||
inputmode?: "search" | "text" | "email" | "tel" | "url" | "none" | "numeric" | "decimal"
|
||||
caretPosition?: [number, number]
|
||||
onChange?: (value: string) => void
|
||||
onEnter?: () => void
|
||||
onKeyDown?: (e: KeyboardEvent) => void
|
||||
onFocus?: () => void
|
||||
onBlur?: () => void
|
||||
onChangeCaret?: (position: number[]) => void
|
||||
onChangeCaret?: (position: [number, number]) => void
|
||||
}
|
||||
|
||||
const TextField: FC<TextFieldProps> = ({
|
||||
|
@ -49,6 +51,7 @@ const TextField: FC<TextFieldProps> = ({
|
|||
disabled = false,
|
||||
autofocus = false,
|
||||
inputmode = "text",
|
||||
caretPosition,
|
||||
onChange,
|
||||
onEnter,
|
||||
onKeyDown,
|
||||
|
@ -62,6 +65,7 @@ const TextField: FC<TextFieldProps> = ({
|
|||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
const textareaRef = useRef<HTMLTextAreaElement>(null);
|
||||
const fieldRef = useMemo(() => type === "textarea" ? textareaRef : inputRef, [type]);
|
||||
const [selectionPos, setSelectionPos] = useState<[start: number, end: number]>([0, 0]);
|
||||
|
||||
const inputClasses = classNames({
|
||||
"vm-text-field__input": true,
|
||||
|
@ -74,7 +78,7 @@ const TextField: FC<TextFieldProps> = ({
|
|||
|
||||
const updateCaretPosition = (target: HTMLInputElement | HTMLTextAreaElement) => {
|
||||
const { selectionStart, selectionEnd } = target;
|
||||
onChangeCaret && onChangeCaret([selectionStart || 0, selectionEnd || 0]);
|
||||
setSelectionPos([selectionStart || 0, selectionEnd || 0]);
|
||||
};
|
||||
|
||||
const handleMouseUp = (e: MouseEvent<HTMLInputElement | HTMLTextAreaElement>) => {
|
||||
|
@ -102,11 +106,6 @@ const TextField: FC<TextFieldProps> = ({
|
|||
updateCaretPosition(e.currentTarget);
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
if (!autofocus || isMobile) return;
|
||||
fieldRef?.current?.focus && fieldRef.current.focus();
|
||||
}, [fieldRef, autofocus]);
|
||||
|
||||
const handleFocus = () => {
|
||||
onFocus && onFocus();
|
||||
};
|
||||
|
@ -115,6 +114,31 @@ const TextField: FC<TextFieldProps> = ({
|
|||
onBlur && onBlur();
|
||||
};
|
||||
|
||||
const setSelectionRange = (range: [number, number]) => {
|
||||
try {
|
||||
fieldRef.current && fieldRef.current.setSelectionRange(range[0], range[1]);
|
||||
} catch (e) {
|
||||
return e;
|
||||
}
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
if (!autofocus || isMobile) return;
|
||||
fieldRef?.current?.focus && fieldRef.current.focus();
|
||||
}, [fieldRef, autofocus]);
|
||||
|
||||
useEffect(() => {
|
||||
onChangeCaret && onChangeCaret(selectionPos);
|
||||
}, [selectionPos]);
|
||||
|
||||
useEffect(() => {
|
||||
setSelectionRange(selectionPos);
|
||||
}, [value]);
|
||||
|
||||
useEffect(() => {
|
||||
caretPosition && setSelectionRange(caretPosition);
|
||||
}, [caretPosition]);
|
||||
|
||||
return <label
|
||||
className={classNames({
|
||||
"vm-text-field": true,
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
&_textarea:after {
|
||||
content: attr(data-replicated-value) " ";
|
||||
white-space: pre-wrap;
|
||||
word-wrap: break-word;
|
||||
overflow-wrap: break-word;
|
||||
visibility: hidden;
|
||||
}
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ import useDeviceDetect from "../../../hooks/useDeviceDetect";
|
|||
import useElementSize from "../../../hooks/useElementSize";
|
||||
import { ChartTooltipProps } from "../../Chart/ChartTooltip/ChartTooltip";
|
||||
import LegendAnomaly from "../../Chart/Line/LegendAnomaly/LegendAnomaly";
|
||||
import { groupByMultipleKeys } from "../../../utils/array";
|
||||
|
||||
export interface GraphViewProps {
|
||||
data?: MetricResult[];
|
||||
|
@ -40,7 +41,7 @@ export interface GraphViewProps {
|
|||
fullWidth?: boolean;
|
||||
height?: number;
|
||||
isHistogram?: boolean;
|
||||
anomalyView?: boolean;
|
||||
isAnomalyView?: boolean;
|
||||
spanGaps?: boolean;
|
||||
}
|
||||
|
||||
|
@ -58,7 +59,7 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
fullWidth = true,
|
||||
height,
|
||||
isHistogram,
|
||||
anomalyView,
|
||||
isAnomalyView,
|
||||
spanGaps
|
||||
}) => {
|
||||
const { isMobile } = useDeviceDetect();
|
||||
|
@ -74,8 +75,8 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
const [legendValue, setLegendValue] = useState<ChartTooltipProps | null>(null);
|
||||
|
||||
const getSeriesItem = useMemo(() => {
|
||||
return getSeriesItemContext(data, hideSeries, alias, anomalyView);
|
||||
}, [data, hideSeries, alias, anomalyView]);
|
||||
return getSeriesItemContext(data, hideSeries, alias, isAnomalyView);
|
||||
}, [data, hideSeries, alias, isAnomalyView]);
|
||||
|
||||
const setLimitsYaxis = (values: { [key: string]: number[] }) => {
|
||||
const limits = getLimitsYAxis(values, !isHistogram);
|
||||
|
@ -83,7 +84,7 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
};
|
||||
|
||||
const onChangeLegend = (legend: LegendItemType, metaKey: boolean) => {
|
||||
setHideSeries(getHideSeries({ hideSeries, legend, metaKey, series }));
|
||||
setHideSeries(getHideSeries({ hideSeries, legend, metaKey, series, isAnomalyView }));
|
||||
};
|
||||
|
||||
const prepareHistogramData = (data: (number | null)[][]) => {
|
||||
|
@ -108,6 +109,20 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
return [null, [xs, ys, counts]];
|
||||
};
|
||||
|
||||
const prepareAnomalyLegend = (legend: LegendItemType[]): LegendItemType[] => {
|
||||
if (!isAnomalyView) return legend;
|
||||
|
||||
// For vmanomaly: Only select the first series per group (due to API specs) and clear __name__ in freeFormFields.
|
||||
const grouped = groupByMultipleKeys(legend, ["group", "label"]);
|
||||
return grouped.map((group) => {
|
||||
const firstEl = group.values[0];
|
||||
return {
|
||||
...firstEl,
|
||||
freeFormFields: { ...firstEl.freeFormFields, __name__: "" }
|
||||
};
|
||||
});
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
const tempTimes: number[] = [];
|
||||
const tempValues: { [key: string]: number[] } = {};
|
||||
|
@ -153,14 +168,18 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
const range = getMinMaxBuffer(getMinFromArray(resultAsNumber), getMaxFromArray(resultAsNumber));
|
||||
const rangeStep = Math.abs(range[1] - range[0]);
|
||||
|
||||
return (avg > rangeStep * 1e10) && !anomalyView ? results.map(() => avg) : results;
|
||||
return (avg > rangeStep * 1e10) && !isAnomalyView ? results.map(() => avg) : results;
|
||||
});
|
||||
timeDataSeries.unshift(timeSeries);
|
||||
setLimitsYaxis(tempValues);
|
||||
const result = isHistogram ? prepareHistogramData(timeDataSeries) : timeDataSeries;
|
||||
setDataChart(result as uPlotData);
|
||||
setSeries(tempSeries);
|
||||
setLegend(tempLegend);
|
||||
const legend = prepareAnomalyLegend(tempLegend);
|
||||
setLegend(legend);
|
||||
if (isAnomalyView) {
|
||||
setHideSeries(legend.map(s => s.label || "").slice(1));
|
||||
}
|
||||
}, [data, timezone, isHistogram]);
|
||||
|
||||
useEffect(() => {
|
||||
|
@ -172,7 +191,7 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
tempLegend.push(getLegendItem(seriesItem, d.group));
|
||||
});
|
||||
setSeries(tempSeries);
|
||||
setLegend(tempLegend);
|
||||
setLegend(prepareAnomalyLegend(tempLegend));
|
||||
}, [hideSeries]);
|
||||
|
||||
const [containerRef, containerSize] = useElementSize();
|
||||
|
@ -197,7 +216,7 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
setPeriod={setPeriod}
|
||||
layoutSize={containerSize}
|
||||
height={height}
|
||||
anomalyView={anomalyView}
|
||||
isAnomalyView={isAnomalyView}
|
||||
spanGaps={spanGaps}
|
||||
/>
|
||||
)}
|
||||
|
@ -213,10 +232,12 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
onChangeLegend={setLegendValue}
|
||||
/>
|
||||
)}
|
||||
{!isHistogram && !anomalyView && showLegend && (
|
||||
{isAnomalyView && showLegend && (<LegendAnomaly series={series as SeriesItem[]}/>)}
|
||||
{!isHistogram && showLegend && (
|
||||
<Legend
|
||||
labels={legend}
|
||||
query={query}
|
||||
isAnomalyView={isAnomalyView}
|
||||
onChange={onChangeLegend}
|
||||
/>
|
||||
)}
|
||||
|
@ -228,11 +249,6 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
legendValue={legendValue}
|
||||
/>
|
||||
)}
|
||||
{anomalyView && showLegend && (
|
||||
<LegendAnomaly
|
||||
series={series as SeriesItem[]}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
|
|
@ -62,10 +62,6 @@ export const anomalyNavigation: NavigationItem[] = [
|
|||
{
|
||||
label: routerOptions[router.anomaly].title,
|
||||
value: router.home,
|
||||
},
|
||||
{
|
||||
label: routerOptions[router.home].title,
|
||||
value: router.query,
|
||||
}
|
||||
];
|
||||
|
||||
|
|
|
@ -14,10 +14,10 @@ interface LineTooltipHook {
|
|||
metrics: MetricResult[];
|
||||
series: uPlotSeries[];
|
||||
unit?: string;
|
||||
anomalyView?: boolean;
|
||||
isAnomalyView?: boolean;
|
||||
}
|
||||
|
||||
const useLineTooltip = ({ u, metrics, series, unit, anomalyView }: LineTooltipHook) => {
|
||||
const useLineTooltip = ({ u, metrics, series, unit, isAnomalyView }: LineTooltipHook) => {
|
||||
const [showTooltip, setShowTooltip] = useState(false);
|
||||
const [tooltipIdx, setTooltipIdx] = useState({ seriesIdx: -1, dataIdx: -1 });
|
||||
const [stickyTooltips, setStickyToolTips] = useState<ChartTooltipProps[]>([]);
|
||||
|
@ -61,14 +61,14 @@ const useLineTooltip = ({ u, metrics, series, unit, anomalyView }: LineTooltipHo
|
|||
point,
|
||||
u: u,
|
||||
id: `${seriesIdx}_${dataIdx}`,
|
||||
title: groups.size > 1 && !anomalyView ? `Query ${group}` : "",
|
||||
title: groups.size > 1 && !isAnomalyView ? `Query ${group}` : "",
|
||||
dates: [date ? dayjs(date * 1000).tz().format(DATE_FULL_TIMEZONE_FORMAT) : "-"],
|
||||
value: formatPrettyNumber(value, min, max),
|
||||
info: getMetricName(metricItem),
|
||||
statsFormatted: seriesItem?.statsFormatted,
|
||||
marker: `${seriesItem?.stroke}`,
|
||||
};
|
||||
}, [u, tooltipIdx, metrics, series, unit, anomalyView]);
|
||||
}, [u, tooltipIdx, metrics, series, unit, isAnomalyView]);
|
||||
|
||||
const handleClick = useCallback(() => {
|
||||
if (!showTooltip) return;
|
||||
|
|
|
@ -12,7 +12,8 @@ import { useTimeState } from "../state/time/TimeStateContext";
|
|||
import { useCustomPanelState } from "../state/customPanel/CustomPanelStateContext";
|
||||
import { isHistogramData } from "../utils/metric";
|
||||
import { useGraphState } from "../state/graph/GraphStateContext";
|
||||
import { getStepFromDuration } from "../utils/time";
|
||||
import { getSecondsFromDuration, getStepFromDuration } from "../utils/time";
|
||||
import { AppType } from "../types/appType";
|
||||
|
||||
interface FetchQueryParams {
|
||||
predefinedQuery?: string[]
|
||||
|
@ -47,13 +48,15 @@ interface FetchDataParams {
|
|||
hideQuery?: number[]
|
||||
}
|
||||
|
||||
const isAnomalyUI = AppType.anomaly === process.env.REACT_APP_TYPE;
|
||||
|
||||
export const useFetchQuery = ({
|
||||
predefinedQuery,
|
||||
visible,
|
||||
display,
|
||||
customStep,
|
||||
hideQuery,
|
||||
showAllSeries
|
||||
showAllSeries,
|
||||
}: FetchQueryParams): FetchQueryReturn => {
|
||||
const { query } = useQueryState();
|
||||
const { period } = useTimeState();
|
||||
|
@ -124,7 +127,7 @@ export const useFetchQuery = ({
|
|||
tempTraces.push(trace);
|
||||
}
|
||||
|
||||
isHistogramResult = isDisplayChart && isHistogramData(resp.data.result);
|
||||
isHistogramResult = !isAnomalyUI && isDisplayChart && isHistogramData(resp.data.result);
|
||||
seriesLimit = isHistogramResult ? Infinity : defaultLimit;
|
||||
const freeTempSize = seriesLimit - tempData.length;
|
||||
resp.data.result.slice(0, freeTempSize).forEach((d: MetricBase) => {
|
||||
|
@ -150,9 +153,17 @@ export const useFetchQuery = ({
|
|||
setTraces(tempTraces);
|
||||
setIsHistogram(prev => totalLength ? isHistogramResult : prev);
|
||||
} catch (e) {
|
||||
if (e instanceof Error && e.name !== "AbortError") {
|
||||
setError(`${e.name}: ${e.message}`);
|
||||
const error = e as Error;
|
||||
if (error.name === "AbortError") {
|
||||
// Aborts are expected, don't show an error for them.
|
||||
return;
|
||||
}
|
||||
const helperText = "Please check your serverURL settings and confirm server availability.";
|
||||
let text = `Error executing query: ${error.message}. ${helperText}`;
|
||||
if (error.message === "Unexpected end of JSON input") {
|
||||
text += "\nAdditionally, this error can occur if the server response is too large to process. Apply more specific filters to reduce the data volume.";
|
||||
}
|
||||
setError(text);
|
||||
}
|
||||
setIsLoading(false);
|
||||
};
|
||||
|
@ -172,7 +183,7 @@ export const useFetchQuery = ({
|
|||
setQueryErrors(expr.map(() => ErrorTypes.validQuery));
|
||||
} else if (isValidHttpUrl(serverUrl)) {
|
||||
const updatedPeriod = { ...period };
|
||||
updatedPeriod.step = customStep;
|
||||
updatedPeriod.step = isAnomalyUI ? `${getSecondsFromDuration(customStep)*1000}ms` : customStep;
|
||||
return expr.map(q => displayChart
|
||||
? getQueryRangeUrl(serverUrl, q, updatedPeriod, nocache, isTracingEnabled)
|
||||
: getQueryUrl(serverUrl, q, updatedPeriod, nocache, isTracingEnabled));
|
||||
|
|
|
@ -96,6 +96,7 @@ export const useFetchQueryOptions = ({ valueByContext, metric, label, context }:
|
|||
const cachedData = autocompleteCache.get(key);
|
||||
if (cachedData) {
|
||||
setter(processData(cachedData, type));
|
||||
setLoading(false);
|
||||
return;
|
||||
}
|
||||
const response = await fetch(`${serverUrl}/api/v1/${urlSuffix}?${params}`, { signal });
|
||||
|
@ -104,13 +105,13 @@ export const useFetchQueryOptions = ({ valueByContext, metric, label, context }:
|
|||
setter(processData(data, type));
|
||||
queryDispatch({ type: "SET_AUTOCOMPLETE_CACHE", payload: { key, value: data } });
|
||||
}
|
||||
setLoading(false);
|
||||
} catch (e) {
|
||||
if (e instanceof Error && e.name !== "AbortError") {
|
||||
queryDispatch({ type: "SET_AUTOCOMPLETE_CACHE", payload: { key, value: [] } });
|
||||
setLoading(false);
|
||||
console.error(e);
|
||||
}
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -14,10 +14,10 @@ type Props = {
|
|||
isHistogram: boolean;
|
||||
graphData: MetricResult[];
|
||||
controlsRef: React.RefObject<HTMLDivElement>;
|
||||
anomalyView?: boolean;
|
||||
isAnomalyView?: boolean;
|
||||
}
|
||||
|
||||
const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef, anomalyView }) => {
|
||||
const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef, isAnomalyView }) => {
|
||||
const { isMobile } = useDeviceDetect();
|
||||
|
||||
const { customStep, yaxis, spanGaps } = useGraphState();
|
||||
|
@ -68,7 +68,7 @@ const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef, anomalyView
|
|||
setPeriod={setPeriod}
|
||||
height={isMobile ? window.innerHeight * 0.5 : 500}
|
||||
isHistogram={isHistogram}
|
||||
anomalyView={anomalyView}
|
||||
isAnomalyView={isAnomalyView}
|
||||
spanGaps={spanGaps}
|
||||
/>
|
||||
</>
|
||||
|
|
|
@@ -30,8 +30,14 @@ export interface QueryConfiguratorProps {
setQueryErrors: StateUpdater<string[]>;
setHideError: StateUpdater<boolean>;
stats: QueryStats[];
onHideQuery: (queries: number[]) => void
onRunQuery: () => void
onHideQuery?: (queries: number[]) => void
onRunQuery: () => void;
hideButtons?: {
addQuery?: boolean;
prettify?: boolean;
autocomplete?: boolean;
traceQuery?: boolean;
}
}

const QueryConfigurator: FC<QueryConfiguratorProps> = ({

@@ -40,7 +46,8 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({
setHideError,
stats,
onHideQuery,
onRunQuery
onRunQuery,
hideButtons
}) => {

const { isMobile } = useDeviceDetect();

@@ -159,7 +166,7 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({
}, [stateQuery]);

useEffect(() => {
onHideQuery(hideQuery);
onHideQuery && onHideQuery(hideQuery);
}, [hideQuery]);

useEffect(() => {

@@ -188,40 +195,43 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({
>
<QueryEditor
value={stateQuery[i]}
autocomplete={autocomplete || autocompleteQuick}
autocomplete={!hideButtons?.autocomplete && (autocomplete || autocompleteQuick)}
error={queryErrors[i]}
stats={stats[i]}
onArrowUp={createHandlerArrow(-1, i)}
onArrowDown={createHandlerArrow(1, i)}
onEnter={handleRunQuery}
onChange={createHandlerChangeQuery(i)}
label={`Query ${i + 1}`}
label={`Query ${stateQuery.length > 1 ? i + 1 : ""}`}
disabled={hideQuery.includes(i)}
/>
<Tooltip title={hideQuery.includes(i) ? "Enable query" : "Disable query"}>
<div className="vm-query-configurator-list-row__button">
<Button
variant={"text"}
color={"gray"}
startIcon={hideQuery.includes(i) ? <VisibilityOffIcon/> : <VisibilityIcon/>}
onClick={createHandlerHideQuery(i)}
ariaLabel="visibility query"
/>
</div>
</Tooltip>
{onHideQuery && (
<Tooltip title={hideQuery.includes(i) ? "Enable query" : "Disable query"}>
<div className="vm-query-configurator-list-row__button">
<Button
variant={"text"}
color={"gray"}
startIcon={hideQuery.includes(i) ? <VisibilityOffIcon/> : <VisibilityIcon/>}
onClick={createHandlerHideQuery(i)}
ariaLabel="visibility query"
/>
</div>
</Tooltip>
)}

<Tooltip title={"Prettify query"}>
<div className="vm-query-configurator-list-row__button">
<Button
variant={"text"}
color={"gray"}
startIcon={<Prettify/>}
onClick={async () => await handlePrettifyQuery(i)}
className="prettify"
ariaLabel="prettify the query"
/>
</div>
</Tooltip>
{!hideButtons?.prettify && (
<Tooltip title={"Prettify query"}>
<div className="vm-query-configurator-list-row__button">
<Button
variant={"text"}
color={"gray"}
startIcon={<Prettify/>}
onClick={async () => await handlePrettifyQuery(i)}
className="prettify"
ariaLabel="prettify the query"
/>
</div>
</Tooltip>)}

{stateQuery.length > 1 && (
<Tooltip title="Remove Query">

@@ -240,10 +250,10 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({
))}
</div>
<div className="vm-query-configurator-settings">
<AdditionalSettings/>
<AdditionalSettings hideButtons={hideButtons}/>
<div className="vm-query-configurator-settings__buttons">
<QueryHistory handleSelectQuery={handleSelectHistory}/>
{stateQuery.length < MAX_QUERY_FIELDS && (
{!hideButtons?.addQuery && stateQuery.length < MAX_QUERY_FIELDS && (
<Button
variant="outlined"
onClick={handleAddQuery}

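The `QueryConfiguratorProps` interface now treats `onHideQuery` as optional and accepts a `hideButtons` object, so embedding pages can drop toolbar controls they do not need. A minimal usage sketch follows, mirroring how the anomaly explorer further down passes `hideButtons`; the import path is taken from the diff, while the component name and handler stubs are placeholders.

  // Usage sketch: embed QueryConfigurator with the extra toolbar controls hidden.
  import React, { FC } from "preact/compat";
  import QueryConfigurator from "../CustomPanel/QueryConfigurator/QueryConfigurator";

  const MinimalPanel: FC = () => (
    <QueryConfigurator
      queryErrors={[]}
      setQueryErrors={() => {}}
      setHideError={() => {}}
      stats={[]}
      onRunQuery={() => {}}
      // onHideQuery is omitted, so the per-query visibility toggle is not rendered
      hideButtons={{ addQuery: true, prettify: true, autocomplete: true, traceQuery: true }}
    />
  );

  export default MinimalPanel;
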
@@ -33,7 +33,7 @@
position: relative;
display: flex;
align-items: center;
justify-content: flex-start;
justify-content: space-between;
font-size: $font-size-small;
margin: -$padding-medium 0-$padding-medium $padding-medium;
padding: 0 $padding-medium;

@@ -1,60 +1,63 @@
import React, { FC, useMemo, useRef } from "preact/compat";
import React, { FC, useMemo, useRef, useState } from "preact/compat";
import classNames from "classnames";
import useDeviceDetect from "../../hooks/useDeviceDetect";
import useEventListener from "../../hooks/useEventListener";
import { ForecastType } from "../../types";
import { useSetQueryParams } from "../CustomPanel/hooks/useSetQueryParams";
import QueryConfigurator from "../CustomPanel/QueryConfigurator/QueryConfigurator";
import "../CustomPanel/style.scss";
import ExploreAnomalyHeader from "./ExploreAnomalyHeader/ExploreAnomalyHeader";
import Alert from "../../components/Main/Alert/Alert";
import { extractFields, isForecast } from "../../utils/uplot";
import { useQueryState } from "../../state/query/QueryStateContext";
import { useFetchQuery } from "../../hooks/useFetchQuery";
import Spinner from "../../components/Main/Spinner/Spinner";
import GraphTab from "../CustomPanel/CustomPanelTabs/GraphTab";
import { useGraphState } from "../../state/graph/GraphStateContext";
import Spinner from "../../components/Main/Spinner/Spinner";
import Alert from "../../components/Main/Alert/Alert";
import WarningLimitSeries from "../CustomPanel/WarningLimitSeries/WarningLimitSeries";
import GraphTab from "../CustomPanel/CustomPanelTabs/GraphTab";
import { extractFields, isForecast } from "../../utils/uplot";
import { MetricResult } from "../../api/types";
import { promValueToNumber } from "../../utils/metric";
import { ForecastType } from "../../types";
import { useFetchAnomalySeries } from "./hooks/useFetchAnomalySeries";
import { useQueryDispatch } from "../../state/query/QueryStateContext";
import { useTimeDispatch } from "../../state/time/TimeStateContext";

const anomalySeries = [
ForecastType.yhat,
ForecastType.yhatUpper,
ForecastType.yhatLower,
ForecastType.anomalyScore
];

// Hardcoded to 1.0 for now; consider adding a UI slider for threshold adjustment in the future.
const ANOMALY_SCORE_THRESHOLD = 1;

const ExploreAnomaly: FC = () => {
useSetQueryParams();
const { isMobile } = useDeviceDetect();

const queryDispatch = useQueryDispatch();
const timeDispatch = useTimeDispatch();
const { series, error: errorSeries, isLoading: isAnomalySeriesLoading } = useFetchAnomalySeries();
const queries = useMemo(() => series ? Object.keys(series) : [], [series]);

const controlsRef = useRef<HTMLDivElement>(null);
const { query } = useQueryState();
const { customStep } = useGraphState();

const { graphData, error, queryErrors, isHistogram, isLoading: isGraphDataLoading } = useFetchQuery({
const controlsRef = useRef<HTMLDivElement>(null);

const [hideQuery] = useState<number[]>([]);
const [hideError, setHideError] = useState(!query[0]);
const [showAllSeries, setShowAllSeries] = useState(false);

const {
isLoading,
graphData,
error,
queryErrors,
setQueryErrors,
queryStats,
warning,
} = useFetchQuery({
visible: true,
customStep,
showAllSeries: true,
hideQuery,
showAllSeries
});

const data = useMemo(() => {
if (!graphData) return;
if (!graphData) return [];
const detectedData = graphData.map(d => ({ ...isForecast(d.metric), ...d }));
const realData = detectedData.filter(d => d.value === null);
const anomalyScoreData = detectedData.filter(d => d.isAnomalyScore);
const realData = detectedData.filter(d => d.value === ForecastType.actual);
const anomalyScoreData = detectedData.filter(d => d.value === ForecastType.anomaly);
const anomalyData: MetricResult[] = realData.map((d) => {
const id = extractFields(d.metric);
const anomalyScoreDataByLabels = anomalyScoreData.find(du => extractFields(du.metric) === id);

return {
group: queries.length + 1,
group: 1,
metric: { ...d.metric, __name__: ForecastType.anomaly },
values: d.values.filter(([t]) => {
if (!anomalyScoreDataByLabels) return false;

@@ -63,23 +66,14 @@ const ExploreAnomaly: FC = () => {
})
};
});
return graphData.filter(d => d.group !== anomalyScoreData[0]?.group).concat(anomalyData);
const filterData = detectedData.filter(d => (d.value !== ForecastType.anomaly) && d.value) as MetricResult[];
return filterData.concat(anomalyData);
}, [graphData]);

const onChangeFilter = (expr: Record<string, string>) => {
const { __name__ = "", ...labelValue } = expr;
let prefix = __name__.replace(/y|_y/, "");
if (prefix) prefix += "_";
const metrics = [__name__, ...anomalySeries];
const filters = Object.entries(labelValue).map(([key, value]) => `${key}="${value}"`).join(",");
const queries = metrics.map((m, i) => `${i ? prefix : ""}${m}{${filters}}`);
queryDispatch({ type: "SET_QUERY", payload: queries });
timeDispatch({ type: "RUN_QUERY" });
const handleRunQuery = () => {
setHideError(false);
};

const handleChangePopstate = () => window.location.reload();
useEventListener("popstate", handleChangePopstate);

return (
<div
className={classNames({

@@ -87,14 +81,23 @@ const ExploreAnomaly: FC = () => {
"vm-custom-panel_mobile": isMobile,
})}
>
<ExploreAnomalyHeader
queries={queries}
series={series}
onChange={onChangeFilter}
<QueryConfigurator
queryErrors={!hideError ? queryErrors : []}
setQueryErrors={setQueryErrors}
setHideError={setHideError}
stats={queryStats}
onRunQuery={handleRunQuery}
hideButtons={{ addQuery: true, prettify: true, autocomplete: true, traceQuery: true }}
/>
{(isGraphDataLoading || isAnomalySeriesLoading) && <Spinner />}
{(error || errorSeries) && <Alert variant="error">{error || errorSeries}</Alert>}
{!error && !errorSeries && queryErrors?.[0] && <Alert variant="error">{queryErrors[0]}</Alert>}
{isLoading && <Spinner/>}
{(!hideError && error) && <Alert variant="error">{error}</Alert>}
{warning && (
<WarningLimitSeries
warning={warning}
query={query}
onChange={setShowAllSeries}
/>
)}
<div
className={classNames({
"vm-custom-panel-body": true,

@@ -112,9 +115,9 @@ const ExploreAnomaly: FC = () => {
{data && (
<GraphTab
graphData={data}
isHistogram={isHistogram}
isHistogram={false}
controlsRef={controlsRef}
anomalyView={true}
isAnomalyView={true}
/>
)}
</div>

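The rewritten anomaly explorer builds a synthetic series of detected anomalies: each real series is matched to its anomaly_score counterpart by label set (via `extractFields`), and only points whose score crosses `ANOMALY_SCORE_THRESHOLD` are kept. The filter body is elided by the hunk, so the comparison in the sketch below is an assumption; names and types are simplified stand-ins rather than the vmui implementation.

  // Sketch: pair each "actual" series with its anomaly_score series and keep
  // only the points whose score exceeds the threshold (assumed comparison).
  const ANOMALY_SCORE_THRESHOLD = 1;

  type Series = { metric: Record<string, string>; values: [number, string][] };

  const buildAnomalyPoints = (
    realData: Series[],
    anomalyScoreData: Series[],
    labelsKey: (m: Record<string, string>) => string // e.g. extractFields
  ): Series[] =>
    realData.map((d) => {
      const score = anomalyScoreData.find(s => labelsKey(s.metric) === labelsKey(d.metric));
      return {
        metric: { ...d.metric, __name__: "anomaly" },
        values: d.values.filter(([t]) => {
          if (!score) return false;
          // keep the real value only where the matching score at the same timestamp is above the threshold
          const pair = score.values.find(([ts]) => ts === t);
          return pair ? Number(pair[1]) > ANOMALY_SCORE_THRESHOLD : false;
        }),
      };
    });
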
@@ -1,112 +0,0 @@
import React, { FC, useMemo, useState } from "preact/compat";
import classNames from "classnames";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import Select from "../../../components/Main/Select/Select";
import "./style.scss";
import usePrevious from "../../../hooks/usePrevious";
import { useEffect } from "react";
import { arrayEquals } from "../../../utils/array";
import { getQueryStringValue } from "../../../utils/query-string";
import { useSetQueryParams } from "../hooks/useSetQueryParams";

type Props = {
queries: string[];
series?: Record<string, {[p: string]: string}[]>
onChange: (expr: Record<string, string>) => void;
}

const ExploreAnomalyHeader: FC<Props> = ({ queries, series, onChange }) => {
const { isMobile } = useDeviceDetect();
const [alias, setAlias] = useState(queries[0]);
const [selectedValues, setSelectedValues] = useState<Record<string, string>>({});
useSetQueryParams({ alias: alias, ...selectedValues });

const uniqueKeysWithValues = useMemo(() => {
if (!series) return {};
return series[alias]?.reduce((accumulator, currentSeries) => {
const metric = Object.entries(currentSeries);
if (!metric.length) return accumulator;
const excludeMetrics = ["__name__", "for"];
for (const [key, value] of metric) {
if (excludeMetrics.includes(key) || accumulator[key]?.includes(value)) continue;

if (!accumulator[key]) {
accumulator[key] = [];
}

accumulator[key].push(value);
}
return accumulator;
}, {} as Record<string, string[]>) || {};
}, [alias, series]);
const prevUniqueKeysWithValues = usePrevious(uniqueKeysWithValues);

const createHandlerChangeSelect = (key: string) => (value: string) => {
setSelectedValues((prev) => ({ ...prev, [key]: value }));
};

useEffect(() => {
const nextValues = Object.values(uniqueKeysWithValues).flat();
const prevValues = Object.values(prevUniqueKeysWithValues || {}).flat();
if (arrayEquals(prevValues, nextValues)) return;
const newSelectedValues: Record<string, string> = {};
Object.keys(uniqueKeysWithValues).forEach((key) => {
const value = getQueryStringValue(key, "") as string;
newSelectedValues[key] = value || uniqueKeysWithValues[key]?.[0];
});
setSelectedValues(newSelectedValues);
}, [uniqueKeysWithValues, prevUniqueKeysWithValues]);

useEffect(() => {
if (!alias || !Object.keys(selectedValues).length) return;
const __name__ = series?.[alias]?.[0]?.__name__ || "";
onChange({ ...selectedValues, for: alias, __name__ });
}, [selectedValues, alias]);

useEffect(() => {
setAlias(getQueryStringValue("alias", queries[0]) as string);
}, [series]);

return (
<div
id="legendAnomaly"
className={classNames({
"vm-explore-anomaly-header": true,
"vm-explore-anomaly-header_mobile": isMobile,
"vm-block": true,
"vm-block_mobile": isMobile,
})}
>
<div className="vm-explore-anomaly-header-main">
<div className="vm-explore-anomaly-header__select">
<Select
value={alias}
list={queries}
label="Query"
placeholder="Please select query"
onChange={setAlias}
searchable
/>
</div>
</div>
{Object.entries(uniqueKeysWithValues).map(([key, values]) => (
<div
className="vm-explore-anomaly-header__values"
key={key}
>
<Select
value={selectedValues[key] || ""}
list={values}
label={key}
placeholder={`Please select ${key}`}
onChange={createHandlerChangeSelect(key)}
searchable={values.length > 2}
disabled={values.length === 1}
/>
</div>
))}
</div>
);
};

export default ExploreAnomalyHeader;

@@ -1,37 +0,0 @@
@use "src/styles/variables" as *;

.vm-explore-anomaly-header {
display: flex;
flex-wrap: wrap;
align-items: center;
justify-content: flex-start;
gap: $padding-global calc($padding-small + 10px);
max-width: calc(100vw - var(--scrollbar-width));

&_mobile {
flex-direction: column;
align-items: stretch;
}

&-main {
display: grid;
gap: $padding-large;
align-items: center;
justify-items: center;
flex-grow: 1;
width: 100%;

&__config {
text-transform: lowercase;
}
}

&__select {
flex-grow: 1;
min-width: 100%;
}

&__values {
flex-grow: 1;
}
}

@@ -1,75 +0,0 @@
import { useMemo, useState } from "preact/compat";
import { useAppState } from "../../../state/common/StateContext";
import { ErrorTypes } from "../../../types";
import { useEffect } from "react";
import { MetricBase } from "../../../api/types";
import { useTimeState } from "../../../state/time/TimeStateContext";
import dayjs from "dayjs";

// TODO: Change the method of retrieving aliases from the configuration after the API has been added
const seriesQuery = `{
for!="",
__name__!~".*yhat.*|.*trend.*|.*anomaly_score.*|.*daily.*|.*additive_terms.*|.*multiplicative_terms.*|.*weekly.*"
}`;

export const useFetchAnomalySeries = () => {
const { serverUrl } = useAppState();
const { period: { start, end } } = useTimeState();

const [series, setSeries] = useState<Record<string, MetricBase["metric"][]>>();
const [isLoading, setIsLoading] = useState(false);
const [error, setError] = useState<ErrorTypes | string>();

// TODO add cached metrics by date
const fetchUrl = useMemo(() => {
const startDay = dayjs(start * 1000).startOf("day").valueOf() / 1000;
const endDay = dayjs(end * 1000).endOf("day").valueOf() / 1000;

const params = new URLSearchParams({
"match[]": seriesQuery,
start: `${startDay}`,
end: `${endDay}`
});

return `${serverUrl}/api/v1/series?${params}`;
}, [serverUrl, start, end]);

useEffect(() => {
const fetchSeries = async () => {
setError("");
setIsLoading(true);
try {
const response = await fetch(fetchUrl);
const resp = await response.json();
const data = (resp?.data || []) as MetricBase["metric"][];
const groupedByFor = data.reduce<{ [key: string]: MetricBase["metric"][] }>((acc, item) => {
const forKey = item["for"];
if (!acc[forKey]) acc[forKey] = [];
acc[forKey].push(item);
return acc;
}, {});
setSeries(groupedByFor);

if (!response.ok) {
const errorType = resp.errorType ? `${resp.errorType}\r\n` : "";
setError(`${errorType}${resp?.error || resp?.message}`);
}
} catch (e) {
if (e instanceof Error && e.name !== "AbortError") {
const message = e.name === "SyntaxError" ? ErrorTypes.unknownType : `${e.name}: ${e.message}`;
setError(`${message}`);
}
} finally {
setIsLoading(false);
}
};

fetchSeries();
}, [fetchUrl]);

return {
error,
series,
isLoading,
};
};
Some files were not shown because too many files have changed in this diff.