mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
This commit is contained in:
commit
6a64823581
141 changed files with 2957 additions and 2000 deletions
6
Makefile
6
Makefile
|
@ -24,6 +24,8 @@ all: \
|
||||||
|
|
||||||
include app/*/Makefile
|
include app/*/Makefile
|
||||||
include deployment/*/Makefile
|
include deployment/*/Makefile
|
||||||
|
include snap/local/Makefile
|
||||||
|
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf bin/*
|
rm -rf bin/*
|
||||||
|
@ -84,9 +86,6 @@ vmutils-windows-amd64: \
|
||||||
vmauth-windows-amd64 \
|
vmauth-windows-amd64 \
|
||||||
vmctl-windows-amd64
|
vmctl-windows-amd64
|
||||||
|
|
||||||
release-snap:
|
|
||||||
snapcraft
|
|
||||||
snapcraft upload "victoriametrics_$(PKG_TAG)_multi.snap" --release beta,edge,candidate
|
|
||||||
|
|
||||||
publish-release:
|
publish-release:
|
||||||
git checkout $(TAG) && $(MAKE) release publish && \
|
git checkout $(TAG) && $(MAKE) release publish && \
|
||||||
|
@ -180,6 +179,7 @@ release-vmutils-windows-generic: \
|
||||||
vmctl-windows-$(GOARCH)-prod.exe \
|
vmctl-windows-$(GOARCH)-prod.exe \
|
||||||
> vmutils-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
|
> vmutils-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
|
||||||
|
|
||||||
|
|
||||||
pprof-cpu:
|
pprof-cpu:
|
||||||
go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics@ $(PPROF_FILE)
|
go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics@ $(PPROF_FILE)
|
||||||
|
|
||||||
|
|
114
README.md
114
README.md
|
@ -13,46 +13,13 @@
|
||||||
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
||||||
|
|
||||||
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||||
in [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), in [Snap packages](https://snapcraft.io/victoriametrics)
|
[Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
|
||||||
and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics follow [these instructions](#how-to-start-victoriametrics).
|
and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and follow [these instructions](#how-to-start-victoriametrics).
|
||||||
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
||||||
|
|
||||||
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||||
|
|
||||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
|
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/enterprise.html). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||||
See [features available in enterprise package](https://victoriametrics.com/enterprise.html).
|
|
||||||
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
|
||||||
|
|
||||||
|
|
||||||
## Case studies and talks
|
|
||||||
|
|
||||||
Case studies:
|
|
||||||
|
|
||||||
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
|
||||||
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
|
||||||
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
|
||||||
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
|
||||||
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
|
||||||
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
|
||||||
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
|
||||||
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
|
||||||
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
|
||||||
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
|
||||||
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
|
||||||
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
|
||||||
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
|
||||||
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
|
||||||
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
|
||||||
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
|
||||||
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
|
||||||
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
|
||||||
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
|
||||||
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
|
||||||
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
|
||||||
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
|
||||||
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
|
||||||
|
|
||||||
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
|
||||||
|
|
||||||
|
|
||||||
## Prominent features
|
## Prominent features
|
||||||
|
@ -95,6 +62,37 @@ VictoriaMetrics has the following prominent features:
|
||||||
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
||||||
|
|
||||||
|
|
||||||
|
## Case studies and talks
|
||||||
|
|
||||||
|
Case studies:
|
||||||
|
|
||||||
|
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
||||||
|
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
||||||
|
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
||||||
|
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
||||||
|
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
||||||
|
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
||||||
|
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
||||||
|
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
||||||
|
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
||||||
|
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
||||||
|
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
||||||
|
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
||||||
|
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
||||||
|
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
||||||
|
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
||||||
|
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
||||||
|
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
||||||
|
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
||||||
|
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
||||||
|
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
||||||
|
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
||||||
|
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
||||||
|
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
||||||
|
|
||||||
|
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
||||||
|
|
||||||
|
|
||||||
## Operation
|
## Operation
|
||||||
|
|
||||||
## How to start VictoriaMetrics
|
## How to start VictoriaMetrics
|
||||||
|
@ -418,9 +416,15 @@ The `/api/v1/export` endpoint should return the following response:
|
||||||
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
||||||
|
|
||||||
* [Graphite API](#graphite-api-usage)
|
* [Graphite API](#graphite-api-usage)
|
||||||
* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
* [Prometheus querying API](#prometheus-querying-api-usage). See also [selecting Graphite metrics](#selecting-graphite-metrics).
|
||||||
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
||||||
|
|
||||||
|
## Selecting Graphite metrics
|
||||||
|
|
||||||
|
VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See [docs for Graphite paths and wildcards](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
||||||
|
|
||||||
|
The `__graphite__` pseudo-label supports e.g. alternate regexp filters such as `(value1|...|valueN)`. They are transparently converted to `{value1,...,valueN}` syntax [used in Graphite](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). This allows using [multi-value template variables in Grafana](https://grafana.com/docs/grafana/latest/variables/formatting-multi-value-variables/) inside `__graphite__` pseudo-label. For example, Grafana expands `{__graphite__=~"foo.($bar).baz"}` into `{__graphite__=~"foo.(x|y).baz"}` if `$bar` template variable contains `x` and `y` values. In this case the query is automatically converted into `{__graphite__=~"foo.{x,y}.baz"}` before execution.
|
||||||
|
|
||||||
## How to send data from OpenTSDB-compatible agents
|
## How to send data from OpenTSDB-compatible agents
|
||||||
|
|
||||||
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
||||||
|
@ -517,9 +521,10 @@ All the Prometheus querying API handlers can be prepended with `/prometheus` pre
|
||||||
### Prometheus querying API enhancements
|
### Prometheus querying API enhancements
|
||||||
|
|
||||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
|
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
|
||||||
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
|
`/api/v1/query_range?extra_label=user_id=123&extra_label=group_id=456&query=<query>` would automatically add `{user_id="123",group_id="456"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||||
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
|
|
||||||
in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
VictoriaMetrics accepts optional `extra_filters[]=series_selector` query arg, which can be used for enforcing arbitrary label filters for queries. For example,
|
||||||
|
`/api/v1/query_range?extra_filters[]={env=~"prod|staging",user="xyz"}&query=<query>` would automatically add `{env=~"prod|staging",user="xyz"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_filters[]` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||||
|
|
||||||
VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
|
VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
|
||||||
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
|
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
|
||||||
|
@ -556,12 +561,11 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap
|
||||||
|
|
||||||
All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.
|
All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.
|
||||||
|
|
||||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg for all the Graphite APIs. This arg can be used for limiting the scope of time series
|
VictoriaMetrics accepts optional query args: `extra_label=<label_name>=<label_value>` and `extra_filters[]=series_selector` query args for all the Graphite APIs. These args can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||||
visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics.
|
|
||||||
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
||||||
|
|
||||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
|
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). See [these docs](#selecting-graphite-metrics).
|
||||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function.
|
|
||||||
|
|
||||||
|
|
||||||
### Graphite Render API usage
|
### Graphite Render API usage
|
||||||
|
@ -612,6 +616,10 @@ Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressin
|
||||||
|
|
||||||
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful disabling response cache by clicking `Enable cache` checkbox.
|
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful disabling response cache by clicking `Enable cache` checkbox.
|
||||||
|
|
||||||
|
VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by clickhing `Override step value` checkbox.
|
||||||
|
|
||||||
|
VMUI allows investigating correlations between two queries on the same graph. Just click `+Query` button, enter the second query in the newly appeared input field and press `Ctrl+Enter`. Results for both queries should be displayed simultaneously on the same graph. Every query has its own vertical scale, which is displayed on the left and the right side of the graph. Lines for the second query are dashed.
|
||||||
|
|
||||||
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
|
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
|
||||||
|
|
||||||
|
|
||||||
|
@ -1025,6 +1033,7 @@ VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-sc
|
||||||
|
|
||||||
VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
|
VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
|
||||||
to a file containing a list of [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) entries.
|
to a file containing a list of [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) entries.
|
||||||
|
The `-relabelConfig` also can point to http or https url. For example, `-relabelConfig=https://config-server/relabel_config.yml`.
|
||||||
See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
|
See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
|
||||||
|
|
||||||
Example contents for `-relabelConfig` file:
|
Example contents for `-relabelConfig` file:
|
||||||
|
@ -1217,7 +1226,8 @@ Consider setting the following command-line flags:
|
||||||
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
||||||
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
||||||
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
||||||
* `-configAuthKey` for pretecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
* `-configAuthKey` for protecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||||
|
- `-pprofAuthKey` for protecting `/debug/pprof/*` endpoints, which can be used for [profiling](#profiling).
|
||||||
|
|
||||||
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
||||||
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
||||||
|
@ -1372,9 +1382,7 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
|
||||||
This prevents from ingesting metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total`
|
This prevents from ingesting metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total`
|
||||||
metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload.
|
metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload.
|
||||||
|
|
||||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then use `{__graphite__="foo.*.baz"}` syntax for selecting such metrics.
|
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then `{__graphite__="foo.*.baz"}` filter can be used for selecting such metrics. See [these docs](#selecting-graphite-metrics) for details.
|
||||||
This expression is equivalent to `{__name__=~"foo[.][^.]*[.]baz"}`, but it works faster and it is easier to use when migrating from Graphite.
|
|
||||||
See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function, which allows extracting the given groups from Graphite metric names.
|
|
||||||
|
|
||||||
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
||||||
|
|
||||||
|
@ -1493,9 +1501,11 @@ Contact us with any questions regarding VictoriaMetrics at [info@victoriametrics
|
||||||
Feel free asking any questions regarding VictoriaMetrics:
|
Feel free asking any questions regarding VictoriaMetrics:
|
||||||
|
|
||||||
* [slack](https://slack.victoriametrics.com/)
|
* [slack](https://slack.victoriametrics.com/)
|
||||||
|
* [linkedin](https://www.linkedin.com/company/victoriametrics/)
|
||||||
* [reddit](https://www.reddit.com/r/VictoriaMetrics/)
|
* [reddit](https://www.reddit.com/r/VictoriaMetrics/)
|
||||||
* [telegram-en](https://t.me/VictoriaMetrics_en)
|
* [telegram-en](https://t.me/VictoriaMetrics_en)
|
||||||
* [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
* [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
||||||
|
* [articles and talks about VictoriaMetrics in Russian](https://github.com/denisgolius/victoriametrics-ru-links)
|
||||||
* [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
* [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
||||||
|
|
||||||
If you like VictoriaMetrics and want to contribute, then we need the following:
|
If you like VictoriaMetrics and want to contribute, then we need the following:
|
||||||
|
@ -1650,8 +1660,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-maxInsertRequestSize size
|
-maxInsertRequestSize size
|
||||||
The maximum size in bytes of a single Prometheus remote_write API request
|
The maximum size in bytes of a single Prometheus remote_write API request
|
||||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
||||||
|
-maxLabelValueLen int
|
||||||
|
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
|
||||||
-maxLabelsPerTimeseries int
|
-maxLabelsPerTimeseries int
|
||||||
The maximum number of labels accepted per time series. Superfluous labels are dropped (default 30)
|
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
|
||||||
-memory.allowedBytes size
|
-memory.allowedBytes size
|
||||||
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
||||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||||
|
@ -1681,7 +1693,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-promscrape.cluster.replicationFactor int
|
-promscrape.cluster.replicationFactor int
|
||||||
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
||||||
-promscrape.config string
|
-promscrape.config string
|
||||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. The path can point to local file and to http url. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||||
-promscrape.config.dryRun
|
-promscrape.config.dryRun
|
||||||
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
||||||
-promscrape.config.strictParse
|
-promscrape.config.strictParse
|
||||||
|
@ -1748,7 +1760,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-promscrape.suppressScrapeErrors
|
-promscrape.suppressScrapeErrors
|
||||||
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
|
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
|
||||||
-relabelConfig string
|
-relabelConfig string
|
||||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||||
-relabelDebug
|
-relabelDebug
|
||||||
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
||||||
-retentionPeriod value
|
-retentionPeriod value
|
||||||
|
|
|
@ -46,7 +46,7 @@ to `vmagent` such as the ability to push metrics instead of pulling them. We did
|
||||||
Please download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
|
Please download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
|
||||||
and configure the following flags to the `vmagent` binary in order to start scraping Prometheus targets:
|
and configure the following flags to the `vmagent` binary in order to start scraping Prometheus targets:
|
||||||
|
|
||||||
* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`)
|
* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`). The path can point either to local file or to http url.
|
||||||
* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics, the `-remoteWrite.url` argument can be specified multiple times to replicate data concurrently to an arbitrary number of remote storage systems.
|
* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics, the `-remoteWrite.url` argument can be specified multiple times to replicate data concurrently to an arbitrary number of remote storage systems.
|
||||||
|
|
||||||
Example command line:
|
Example command line:
|
||||||
|
@ -214,15 +214,16 @@ The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders w
|
||||||
|
|
||||||
## Loading scrape configs from multiple files
|
## Loading scrape configs from multiple files
|
||||||
|
|
||||||
`vmagent` supports loading scrape configs from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory plus a `single_scrape_config.yml` file:
|
`vmagent` supports loading scrape configs from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file and from `https://config-server/scrape_config.yml` url:
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
scrape_config_files:
|
scrape_config_files:
|
||||||
- configs/*.yml
|
- configs/*.yml
|
||||||
- single_scrape_config.yml
|
- single_scrape_config.yml
|
||||||
|
- https://config-server/scrape_config.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
Every referred file can contain arbitrary number of any [supported scrape configs](#how-to-collect-metrics-in-prometheus-format). There is no need in specifying top-level `scrape_configs` section in these files. For example:
|
Every referred file can contain arbitrary number of [supported scrape configs](#how-to-collect-metrics-in-prometheus-format). There is no need in specifying top-level `scrape_configs` section in these files. For example:
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
- job_name: foo
|
- job_name: foo
|
||||||
|
@ -279,7 +280,7 @@ The relabeling can be defined in the following places:
|
||||||
|
|
||||||
* At the `scrape_config -> relabel_configs` section in `-promscrape.config` file. This relabeling is applied to target labels. This relabeling can be debugged by passing `relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs target labels before and after the relabeling and then drops the logged target.
|
* At the `scrape_config -> relabel_configs` section in `-promscrape.config` file. This relabeling is applied to target labels. This relabeling can be debugged by passing `relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs target labels before and after the relabeling and then drops the logged target.
|
||||||
* At the `scrape_config -> metric_relabel_configs` section in `-promscrape.config` file. This relabeling is applied to all the scraped metrics in the given `scrape_config`. This relabeling can be debugged by passing `metric_relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs metrics before and after the relabeling and then drops the logged metrics.
|
* At the `scrape_config -> metric_relabel_configs` section in `-promscrape.config` file. This relabeling is applied to all the scraped metrics in the given `scrape_config`. This relabeling can be debugged by passing `metric_relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs metrics before and after the relabeling and then drops the logged metrics.
|
||||||
* At the `-remoteWrite.relabelConfig` file. This relabeling is aplied to all the collected metrics before sending them to remote storage. This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
|
* At the `-remoteWrite.relabelConfig` file. This relabeling is applied to all the collected metrics before sending them to remote storage. This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
|
||||||
* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is applied to metrics before sending them to the corresponding `-remoteWrite.url`. This relabeling can be debugged by passing `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.
|
* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is applied to metrics before sending them to the corresponding `-remoteWrite.url`. This relabeling can be debugged by passing `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.
|
||||||
|
|
||||||
You can read more about relabeling in the following articles:
|
You can read more about relabeling in the following articles:
|
||||||
|
@ -806,7 +807,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
||||||
-promscrape.cluster.replicationFactor int
|
-promscrape.cluster.replicationFactor int
|
||||||
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
||||||
-promscrape.config string
|
-promscrape.config string
|
||||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. The path can point to local file and to http url. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||||
-promscrape.config.dryRun
|
-promscrape.config.dryRun
|
||||||
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
||||||
-promscrape.config.strictParse
|
-promscrape.config.strictParse
|
||||||
|
@ -931,7 +932,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
||||||
Optional rate limit in bytes per second for data sent to -remoteWrite.url. By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data is sent after temporary unavailability of the remote storage
|
Optional rate limit in bytes per second for data sent to -remoteWrite.url. By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data is sent after temporary unavailability of the remote storage
|
||||||
Supports array of values separated by comma or specified via multiple flags.
|
Supports array of values separated by comma or specified via multiple flags.
|
||||||
-remoteWrite.relabelConfig string
|
-remoteWrite.relabelConfig string
|
||||||
Optional path to file with relabel_config entries. These entries are applied to all the metrics before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details
|
Optional path to file with relabel_config entries. The path can point either to local file or to http url. These entries are applied to all the metrics before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details
|
||||||
-remoteWrite.relabelDebug
|
-remoteWrite.relabelDebug
|
||||||
Whether to log metrics before and after relabeling with -remoteWrite.relabelConfig. If the -remoteWrite.relabelDebug is enabled, then the metrics aren't sent to remote storage. This is useful for debugging the relabeling configs
|
Whether to log metrics before and after relabeling with -remoteWrite.relabelConfig. If the -remoteWrite.relabelDebug is enabled, then the metrics aren't sent to remote storage. This is useful for debugging the relabeling configs
|
||||||
-remoteWrite.roundDigits array
|
-remoteWrite.roundDigits array
|
||||||
|
@ -966,7 +967,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
||||||
Remote storage URL to write data to. It must support Prometheus remote_write API. It is recommended using VictoriaMetrics as remote storage. Example url: http://<victoriametrics-host>:8428/api/v1/write . Pass multiple -remoteWrite.url flags in order to replicate data to multiple remote storage systems. See also -remoteWrite.multitenantURL
|
Remote storage URL to write data to. It must support Prometheus remote_write API. It is recommended using VictoriaMetrics as remote storage. Example url: http://<victoriametrics-host>:8428/api/v1/write . Pass multiple -remoteWrite.url flags in order to replicate data to multiple remote storage systems. See also -remoteWrite.multitenantURL
|
||||||
Supports an array of values separated by comma or specified via multiple flags.
|
Supports an array of values separated by comma or specified via multiple flags.
|
||||||
-remoteWrite.urlRelabelConfig array
|
-remoteWrite.urlRelabelConfig array
|
||||||
Optional path to relabel config for the corresponding -remoteWrite.url
|
Optional path to relabel config for the corresponding -remoteWrite.url. The path can point either to local file or to http url
|
||||||
Supports an array of values separated by comma or specified via multiple flags.
|
Supports an array of values separated by comma or specified via multiple flags.
|
||||||
-remoteWrite.urlRelabelDebug array
|
-remoteWrite.urlRelabelDebug array
|
||||||
Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. If the -remoteWrite.urlRelabelDebug is enabled, then the metrics aren't sent to the corresponding -remoteWrite.url. This is useful for debugging the relabeling configs
|
Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. If the -remoteWrite.urlRelabelDebug is enabled, then the metrics aren't sent to the corresponding -remoteWrite.url. This is useful for debugging the relabeling configs
|
||||||
|
|
|
@ -15,11 +15,13 @@ import (
|
||||||
var (
|
var (
|
||||||
unparsedLabelsGlobal = flagutil.NewArray("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. "+
|
unparsedLabelsGlobal = flagutil.NewArray("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. "+
|
||||||
"Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage")
|
"Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage")
|
||||||
relabelConfigPathGlobal = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabel_config entries. These entries are applied to all the metrics "+
|
relabelConfigPathGlobal = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabel_config entries. "+
|
||||||
|
"The path can point either to local file or to http url. These entries are applied to all the metrics "+
|
||||||
"before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details")
|
"before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details")
|
||||||
relabelDebugGlobal = flag.Bool("remoteWrite.relabelDebug", false, "Whether to log metrics before and after relabeling with -remoteWrite.relabelConfig. "+
|
relabelDebugGlobal = flag.Bool("remoteWrite.relabelDebug", false, "Whether to log metrics before and after relabeling with -remoteWrite.relabelConfig. "+
|
||||||
"If the -remoteWrite.relabelDebug is enabled, then the metrics aren't sent to remote storage. This is useful for debugging the relabeling configs")
|
"If the -remoteWrite.relabelDebug is enabled, then the metrics aren't sent to remote storage. This is useful for debugging the relabeling configs")
|
||||||
relabelConfigPaths = flagutil.NewArray("remoteWrite.urlRelabelConfig", "Optional path to relabel config for the corresponding -remoteWrite.url")
|
relabelConfigPaths = flagutil.NewArray("remoteWrite.urlRelabelConfig", "Optional path to relabel config for the corresponding -remoteWrite.url. "+
|
||||||
|
"The path can point either to local file or to http url")
|
||||||
relabelDebug = flagutil.NewArrayBool("remoteWrite.urlRelabelDebug", "Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. "+
|
relabelDebug = flagutil.NewArrayBool("remoteWrite.urlRelabelDebug", "Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. "+
|
||||||
"If the -remoteWrite.urlRelabelDebug is enabled, then the metrics aren't sent to the corresponding -remoteWrite.url. "+
|
"If the -remoteWrite.urlRelabelDebug is enabled, then the metrics aren't sent to the corresponding -remoteWrite.url. "+
|
||||||
"This is useful for debugging the relabeling configs")
|
"This is useful for debugging the relabeling configs")
|
||||||
|
|
|
@ -153,6 +153,13 @@ func (ar *AlertingRule) ExecRange(ctx context.Context, start, end time.Time) ([]
|
||||||
return nil, fmt.Errorf("`query` template isn't supported in replay mode")
|
return nil, fmt.Errorf("`query` template isn't supported in replay mode")
|
||||||
}
|
}
|
||||||
for _, s := range series {
|
for _, s := range series {
|
||||||
|
// set additional labels to identify group and rule name
|
||||||
|
if ar.Name != "" {
|
||||||
|
s.SetLabel(alertNameLabel, ar.Name)
|
||||||
|
}
|
||||||
|
if !*disableAlertGroupLabel && ar.GroupName != "" {
|
||||||
|
s.SetLabel(alertGroupNameLabel, ar.GroupName)
|
||||||
|
}
|
||||||
// extra labels could contain templates, so we expand them first
|
// extra labels could contain templates, so we expand them first
|
||||||
labels, err := expandLabels(s, qFn, ar)
|
labels, err := expandLabels(s, qFn, ar)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -163,13 +170,6 @@ func (ar *AlertingRule) ExecRange(ctx context.Context, start, end time.Time) ([]
|
||||||
// so the hash key will be consistent on restore
|
// so the hash key will be consistent on restore
|
||||||
s.SetLabel(k, v)
|
s.SetLabel(k, v)
|
||||||
}
|
}
|
||||||
// set additional labels to identify group and rule name
|
|
||||||
if ar.Name != "" {
|
|
||||||
s.SetLabel(alertNameLabel, ar.Name)
|
|
||||||
}
|
|
||||||
if !*disableAlertGroupLabel && ar.GroupName != "" {
|
|
||||||
s.SetLabel(alertGroupNameLabel, ar.GroupName)
|
|
||||||
}
|
|
||||||
a, err := ar.newAlert(s, time.Time{}, qFn) // initial alert
|
a, err := ar.newAlert(s, time.Time{}, qFn) // initial alert
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to create alert: %s", err)
|
return nil, fmt.Errorf("failed to create alert: %s", err)
|
||||||
|
@ -225,6 +225,13 @@ func (ar *AlertingRule) Exec(ctx context.Context) ([]prompbmarshal.TimeSeries, e
|
||||||
updated := make(map[uint64]struct{})
|
updated := make(map[uint64]struct{})
|
||||||
// update list of active alerts
|
// update list of active alerts
|
||||||
for _, m := range qMetrics {
|
for _, m := range qMetrics {
|
||||||
|
// set additional labels to identify group and rule name
|
||||||
|
if ar.Name != "" {
|
||||||
|
m.SetLabel(alertNameLabel, ar.Name)
|
||||||
|
}
|
||||||
|
if !*disableAlertGroupLabel && ar.GroupName != "" {
|
||||||
|
m.SetLabel(alertGroupNameLabel, ar.GroupName)
|
||||||
|
}
|
||||||
// extra labels could contain templates, so we expand them first
|
// extra labels could contain templates, so we expand them first
|
||||||
labels, err := expandLabels(m, qFn, ar)
|
labels, err := expandLabels(m, qFn, ar)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -235,14 +242,6 @@ func (ar *AlertingRule) Exec(ctx context.Context) ([]prompbmarshal.TimeSeries, e
|
||||||
// so the hash key will be consistent on restore
|
// so the hash key will be consistent on restore
|
||||||
m.SetLabel(k, v)
|
m.SetLabel(k, v)
|
||||||
}
|
}
|
||||||
// set additional labels to identify group and rule name
|
|
||||||
// set additional labels to identify group and rule name
|
|
||||||
if ar.Name != "" {
|
|
||||||
m.SetLabel(alertNameLabel, ar.Name)
|
|
||||||
}
|
|
||||||
if !*disableAlertGroupLabel && ar.GroupName != "" {
|
|
||||||
m.SetLabel(alertGroupNameLabel, ar.GroupName)
|
|
||||||
}
|
|
||||||
h := hash(m)
|
h := hash(m)
|
||||||
if _, ok := updated[h]; ok {
|
if _, ok := updated[h]; ok {
|
||||||
// duplicate may be caused by extra labels
|
// duplicate may be caused by extra labels
|
||||||
|
|
|
@ -715,6 +715,44 @@ func TestAlertingRule_Template(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
&AlertingRule{
|
||||||
|
Name: "ExtraTemplating",
|
||||||
|
GroupName: "Testing",
|
||||||
|
Labels: map[string]string{
|
||||||
|
"name": "alert_{{ $labels.alertname }}",
|
||||||
|
"group": "group_{{ $labels.alertgroup }}",
|
||||||
|
"instance": "{{ $labels.instance }}",
|
||||||
|
},
|
||||||
|
Annotations: map[string]string{
|
||||||
|
"summary": `Alert "{{ $labels.alertname }}({{ $labels.alertgroup }})" for instance {{ $labels.instance }}`,
|
||||||
|
"description": `Alert "{{ $labels.name }}({{ $labels.group }})" for instance {{ $labels.instance }}`,
|
||||||
|
},
|
||||||
|
alerts: make(map[uint64]*notifier.Alert),
|
||||||
|
},
|
||||||
|
[]datasource.Metric{
|
||||||
|
metricWithValueAndLabels(t, 1, "instance", "foo"),
|
||||||
|
},
|
||||||
|
map[uint64]*notifier.Alert{
|
||||||
|
hash(metricWithLabels(t, alertNameLabel, "ExtraTemplating",
|
||||||
|
"name", "alert_ExtraTemplating",
|
||||||
|
alertGroupNameLabel, "Testing",
|
||||||
|
"group", "group_Testing",
|
||||||
|
"instance", "foo")): {
|
||||||
|
Labels: map[string]string{
|
||||||
|
alertNameLabel: "ExtraTemplating",
|
||||||
|
"name": "alert_ExtraTemplating",
|
||||||
|
alertGroupNameLabel: "Testing",
|
||||||
|
"group": "group_Testing",
|
||||||
|
"instance": "foo",
|
||||||
|
},
|
||||||
|
Annotations: map[string]string{
|
||||||
|
"summary": `Alert "ExtraTemplating(Testing)" for instance foo`,
|
||||||
|
"description": `Alert "alert_ExtraTemplating(group_Testing)" for instance foo`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
fakeGroup := Group{Name: "TestRule_Exec"}
|
fakeGroup := Group{Name: "TestRule_Exec"}
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
|
|
|
@ -46,8 +46,6 @@ type Group struct {
|
||||||
XXX map[string]interface{} `yaml:",inline"`
|
XXX map[string]interface{} `yaml:",inline"`
|
||||||
}
|
}
|
||||||
|
|
||||||
const extraLabelParam = "extra_label"
|
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
type group Group
|
type group Group
|
||||||
|
@ -68,8 +66,14 @@ func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
if g.Params == nil {
|
if g.Params == nil {
|
||||||
g.Params = url.Values{}
|
g.Params = url.Values{}
|
||||||
}
|
}
|
||||||
|
// Sort extraFilters for consistent order for query args across runs.
|
||||||
|
extraFilters := make([]string, 0, len(g.ExtraFilterLabels))
|
||||||
for k, v := range g.ExtraFilterLabels {
|
for k, v := range g.ExtraFilterLabels {
|
||||||
g.Params.Add(extraLabelParam, fmt.Sprintf("%s=%s", k, v))
|
extraFilters = append(extraFilters, fmt.Sprintf("%s=%s", k, v))
|
||||||
|
}
|
||||||
|
sort.Strings(extraFilters)
|
||||||
|
for _, extraFilter := range extraFilters {
|
||||||
|
g.Params.Add("extra_label", extraFilter)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -538,7 +538,7 @@ extra_filter_labels:
|
||||||
rules:
|
rules:
|
||||||
- alert: ExampleAlertAlwaysFiring
|
- alert: ExampleAlertAlwaysFiring
|
||||||
expr: sum by(job) (up == 1)
|
expr: sum by(job) (up == 1)
|
||||||
`, url.Values{extraLabelParam: {"job=victoriametrics", "env=prod"}})
|
`, url.Values{"extra_label": {"env=prod", "job=victoriametrics"}})
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("extra labels and params", func(t *testing.T) {
|
t.Run("extra labels and params", func(t *testing.T) {
|
||||||
|
@ -552,6 +552,6 @@ params:
|
||||||
rules:
|
rules:
|
||||||
- alert: ExampleAlertAlwaysFiring
|
- alert: ExampleAlertAlwaysFiring
|
||||||
expr: sum by(job) (up == 1)
|
expr: sum by(job) (up == 1)
|
||||||
`, url.Values{"nocache": {"1"}, extraLabelParam: {"env=prod", "job=victoriametrics"}})
|
`, url.Values{"nocache": {"1"}, "extra_label": {"env=prod", "job=victoriametrics"}})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,7 +3,7 @@
|
||||||
`vmauth` is a simple auth proxy, router and [load balancer](#load-balancing) for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
`vmauth` is a simple auth proxy, router and [load balancer](#load-balancing) for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It reads auth credentials from `Authorization` http header ([Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication) and `Bearer token` is supported),
|
It reads auth credentials from `Authorization` http header ([Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication) and `Bearer token` is supported),
|
||||||
matches them against configs pointed by [-auth.config](#auth-config) command-line flag and proxies incoming HTTP requests to the configured per-user `url_prefix` on successful match.
|
matches them against configs pointed by [-auth.config](#auth-config) command-line flag and proxies incoming HTTP requests to the configured per-user `url_prefix` on successful match.
|
||||||
|
The `-auth.config` can point to either local file or to http url.
|
||||||
|
|
||||||
## Quick start
|
## Quick start
|
||||||
|
|
||||||
|
@ -26,12 +26,10 @@ Pass `-help` to `vmauth` in order to see all the supported command-line flags wi
|
||||||
Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML,
|
Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML,
|
||||||
accounting and rate limiting such as [vmgateway](https://docs.victoriametrics.com/vmgateway.html).
|
accounting and rate limiting such as [vmgateway](https://docs.victoriametrics.com/vmgateway.html).
|
||||||
|
|
||||||
|
|
||||||
## Load balancing
|
## Load balancing
|
||||||
|
|
||||||
Each `url_prefix` in the [-auth.config](#auth-config) may contain either a single url or a list of urls. In the latter case `vmauth` balances load among the configured urls in a round-robin manner. This feature is useful for balancing the load among multiple `vmselect` and/or `vminsert` nodes in [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
Each `url_prefix` in the [-auth.config](#auth-config) may contain either a single url or a list of urls. In the latter case `vmauth` balances load among the configured urls in a round-robin manner. This feature is useful for balancing the load among multiple `vmselect` and/or `vminsert` nodes in [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||||
|
|
||||||
|
|
||||||
## Auth config
|
## Auth config
|
||||||
|
|
||||||
`-auth.config` is represented in the following simple `yml` format:
|
`-auth.config` is represented in the following simple `yml` format:
|
||||||
|
@ -124,7 +122,6 @@ users:
|
||||||
The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
|
The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
|
||||||
This may be useful for passing secrets to the config.
|
This may be useful for passing secrets to the config.
|
||||||
|
|
||||||
|
|
||||||
## Security
|
## Security
|
||||||
|
|
||||||
Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable https. This can be done by passing the following `-tls*` command-line flags to `vmauth`:
|
Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable https. This can be done by passing the following `-tls*` command-line flags to `vmauth`:
|
||||||
|
@ -142,7 +139,6 @@ Alternatively, [https termination proxy](https://en.wikipedia.org/wiki/TLS_termi
|
||||||
|
|
||||||
It is recommended protecting `/-/reload` endpoint with `-reloadAuthKey` command-line flag, so external users couldn't trigger config reload.
|
It is recommended protecting `/-/reload` endpoint with `-reloadAuthKey` command-line flag, so external users couldn't trigger config reload.
|
||||||
|
|
||||||
|
|
||||||
## Monitoring
|
## Monitoring
|
||||||
|
|
||||||
`vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page
|
`vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page
|
||||||
|
@ -161,7 +157,6 @@ users:
|
||||||
|
|
||||||
It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there.
|
It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there.
|
||||||
|
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
|
||||||
|
@ -187,7 +182,6 @@ by setting it via `<ROOT_IMAGE>` environment variable. For example, the followin
|
||||||
ROOT_IMAGE=scratch make package-vmauth
|
ROOT_IMAGE=scratch make package-vmauth
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## Profiling
|
## Profiling
|
||||||
|
|
||||||
`vmauth` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
|
`vmauth` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
|
||||||
|
@ -208,7 +202,6 @@ The command for collecting CPU profile waits for 30 seconds before returning.
|
||||||
|
|
||||||
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
||||||
|
|
||||||
|
|
||||||
## Advanced usage
|
## Advanced usage
|
||||||
|
|
||||||
Pass `-help` command-line arg to `vmauth` in order to see all the configuration options:
|
Pass `-help` command-line arg to `vmauth` in order to see all the configuration options:
|
||||||
|
@ -221,7 +214,7 @@ vmauth authenticates and authorizes incoming requests and proxies them to Victor
|
||||||
See the docs at https://docs.victoriametrics.com/vmauth.html .
|
See the docs at https://docs.victoriametrics.com/vmauth.html .
|
||||||
|
|
||||||
-auth.config string
|
-auth.config string
|
||||||
Path to auth config. See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config
|
Path to auth config. It can point either to local file or to http url. See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config
|
||||||
-enableTCP6
|
-enableTCP6
|
||||||
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
|
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
|
||||||
-envflag.enable
|
-envflag.enable
|
||||||
|
@ -249,7 +242,7 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
||||||
-httpListenAddr string
|
-httpListenAddr string
|
||||||
TCP address to listen for http connections (default ":8427")
|
TCP address to listen for http connections (default ":8427")
|
||||||
-logInvalidAuthTokens
|
-logInvalidAuthTokens
|
||||||
Whether to log requests with invalid auth tokens. Such requests are always counted at vmagent_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
|
Whether to log requests with invalid auth tokens. Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
|
||||||
-loggerDisableTimestamps
|
-loggerDisableTimestamps
|
||||||
Whether to disable writing timestamps in logs
|
Whether to disable writing timestamps in logs
|
||||||
-loggerErrorsPerSecondLimit int
|
-loggerErrorsPerSecondLimit int
|
||||||
|
@ -272,9 +265,9 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
||||||
-memory.allowedPercent float
|
-memory.allowedPercent float
|
||||||
Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
|
Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
|
||||||
-metricsAuthKey string
|
-metricsAuthKey string
|
||||||
Auth key for /metrics. It overrides httpAuth settings
|
Auth key for /metrics. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||||
-pprofAuthKey string
|
-pprofAuthKey string
|
||||||
Auth key for /debug/pprof. It overrides httpAuth settings
|
Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||||
-reloadAuthKey string
|
-reloadAuthKey string
|
||||||
Auth key for /-/reload http endpoint. It must be passed as authKey=...
|
Auth key for /-/reload http endpoint. It must be passed as authKey=...
|
||||||
-tls
|
-tls
|
||||||
|
|
|
@ -4,7 +4,6 @@ import (
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
@ -14,6 +13,7 @@ import (
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
|
||||||
"github.com/VictoriaMetrics/metrics"
|
"github.com/VictoriaMetrics/metrics"
|
||||||
|
@ -21,8 +21,8 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
authConfigPath = flag.String("auth.config", "", "Path to auth config. See https://docs.victoriametrics.com/vmauth.html "+
|
authConfigPath = flag.String("auth.config", "", "Path to auth config. It can point either to local file or to http url. "+
|
||||||
"for details on the format of this auth config")
|
"See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config")
|
||||||
)
|
)
|
||||||
|
|
||||||
// AuthConfig represents auth config.
|
// AuthConfig represents auth config.
|
||||||
|
@ -237,9 +237,9 @@ var authConfigWG sync.WaitGroup
|
||||||
var stopCh chan struct{}
|
var stopCh chan struct{}
|
||||||
|
|
||||||
func readAuthConfig(path string) (map[string]*UserInfo, error) {
|
func readAuthConfig(path string) (map[string]*UserInfo, error) {
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := fs.ReadFileOrHTTP(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read %q: %w", path, err)
|
return nil, err
|
||||||
}
|
}
|
||||||
m, err := parseAuthConfig(data)
|
m, err := parseAuthConfig(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -73,7 +73,6 @@ func main() {
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Infof("starting http server for exporting metrics at http://%q/metrics", *httpListenAddr)
|
|
||||||
go httpserver.Serve(*httpListenAddr, nil)
|
go httpserver.Serve(*httpListenAddr, nil)
|
||||||
|
|
||||||
srcFS, err := newSrcFS()
|
srcFS, err := newSrcFS()
|
||||||
|
|
|
@ -43,7 +43,8 @@ var (
|
||||||
"Usually :4242 must be set. Doesn't work if empty")
|
"Usually :4242 must be set. Doesn't work if empty")
|
||||||
opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpentTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty")
|
opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpentTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty")
|
||||||
configAuthKey = flag.String("configAuthKey", "", "Authorization key for accessing /config page. It must be passed via authKey query arg")
|
configAuthKey = flag.String("configAuthKey", "", "Authorization key for accessing /config page. It must be passed via authKey query arg")
|
||||||
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped")
|
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented")
|
||||||
|
maxLabelValueLen = flag.Int("maxLabelValueLen", 16*1024, "The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented")
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -57,6 +58,7 @@ var (
|
||||||
func Init() {
|
func Init() {
|
||||||
relabel.Init()
|
relabel.Init()
|
||||||
storage.SetMaxLabelsPerTimeseries(*maxLabelsPerTimeseries)
|
storage.SetMaxLabelsPerTimeseries(*maxLabelsPerTimeseries)
|
||||||
|
storage.SetMaxLabelValueLen(*maxLabelValueLen)
|
||||||
common.StartUnmarshalWorkers()
|
common.StartUnmarshalWorkers()
|
||||||
writeconcurrencylimiter.Init()
|
writeconcurrencylimiter.Init()
|
||||||
if len(*graphiteListenAddr) > 0 {
|
if len(*graphiteListenAddr) > 0 {
|
||||||
|
|
|
@ -16,6 +16,7 @@ import (
|
||||||
|
|
||||||
var (
|
var (
|
||||||
relabelConfig = flag.String("relabelConfig", "", "Optional path to a file with relabeling rules, which are applied to all the ingested metrics. "+
|
relabelConfig = flag.String("relabelConfig", "", "Optional path to a file with relabeling rules, which are applied to all the ingested metrics. "+
|
||||||
|
"The path can point either to local file or to http url. "+
|
||||||
"See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal")
|
"See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal")
|
||||||
relabelDebug = flag.Bool("relabelDebug", false, "Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, "+
|
relabelDebug = flag.Bool("relabelDebug", false, "Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, "+
|
||||||
"then the metrics aren't sent to storage. This is useful for debugging the relabeling configs")
|
"then the metrics aren't sent to storage. This is useful for debugging the relabeling configs")
|
||||||
|
|
|
@ -36,7 +36,6 @@ func main() {
|
||||||
buildinfo.Init()
|
buildinfo.Init()
|
||||||
logger.Init()
|
logger.Init()
|
||||||
|
|
||||||
logger.Infof("starting http server for exporting metrics at http://%q/metrics", *httpListenAddr)
|
|
||||||
go httpserver.Serve(*httpListenAddr, nil)
|
go httpserver.Serve(*httpListenAddr, nil)
|
||||||
|
|
||||||
srcFS, err := newSrcFS()
|
srcFS, err := newSrcFS()
|
||||||
|
|
|
@ -32,7 +32,7 @@ func TagsDelSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Re
|
||||||
var row graphiteparser.Row
|
var row graphiteparser.Row
|
||||||
var tagsPool []graphiteparser.Tag
|
var tagsPool []graphiteparser.Tag
|
||||||
ct := startTime.UnixNano() / 1e6
|
ct := startTime.UnixNano() / 1e6
|
||||||
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot setup tag filters: %w", err)
|
return fmt.Errorf("cannot setup tag filters: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -53,8 +53,8 @@ func TagsDelSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Re
|
||||||
Value: []byte(tag.Value),
|
Value: []byte(tag.Value),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
tfs = append(tfs, etfs...)
|
tfss := joinTagFilterss(tfs, etfs)
|
||||||
sq := storage.NewSearchQuery(0, ct, [][]storage.TagFilter{tfs})
|
sq := storage.NewSearchQuery(0, ct, tfss)
|
||||||
n, err := netstorage.DeleteSeries(sq, deadline)
|
n, err := netstorage.DeleteSeries(sq, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot delete series for %q: %w", sq, err)
|
return fmt.Errorf("cannot delete series for %q: %w", sq, err)
|
||||||
|
@ -181,7 +181,7 @@ func TagsAutoCompleteValuesHandler(startTime time.Time, w http.ResponseWriter, r
|
||||||
valuePrefix := r.FormValue("valuePrefix")
|
valuePrefix := r.FormValue("valuePrefix")
|
||||||
exprs := r.Form["expr"]
|
exprs := r.Form["expr"]
|
||||||
var tagValues []string
|
var tagValues []string
|
||||||
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot setup tag filters: %w", err)
|
return fmt.Errorf("cannot setup tag filters: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -266,7 +266,7 @@ func TagsAutoCompleteTagsHandler(startTime time.Time, w http.ResponseWriter, r *
|
||||||
tagPrefix := r.FormValue("tagPrefix")
|
tagPrefix := r.FormValue("tagPrefix")
|
||||||
exprs := r.Form["expr"]
|
exprs := r.Form["expr"]
|
||||||
var labels []string
|
var labels []string
|
||||||
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot setup tag filters: %w", err)
|
return fmt.Errorf("cannot setup tag filters: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -345,7 +345,7 @@ func TagsFindSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.R
|
||||||
if len(exprs) == 0 {
|
if len(exprs) == 0 {
|
||||||
return fmt.Errorf("expecting at least one `expr` query arg")
|
return fmt.Errorf("expecting at least one `expr` query arg")
|
||||||
}
|
}
|
||||||
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot setup tag filters: %w", err)
|
return fmt.Errorf("cannot setup tag filters: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -474,14 +474,14 @@ func getInt(r *http.Request, argName string) (int, error) {
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getSearchQueryForExprs(startTime time.Time, etfs []storage.TagFilter, exprs []string) (*storage.SearchQuery, error) {
|
func getSearchQueryForExprs(startTime time.Time, etfs [][]storage.TagFilter, exprs []string) (*storage.SearchQuery, error) {
|
||||||
tfs, err := exprsToTagFilters(exprs)
|
tfs, err := exprsToTagFilters(exprs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
ct := startTime.UnixNano() / 1e6
|
ct := startTime.UnixNano() / 1e6
|
||||||
tfs = append(tfs, etfs...)
|
tfss := joinTagFilterss(tfs, etfs)
|
||||||
sq := storage.NewSearchQuery(0, ct, [][]storage.TagFilter{tfs})
|
sq := storage.NewSearchQuery(0, ct, tfss)
|
||||||
return sq, nil
|
return sq, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -524,3 +524,7 @@ func parseFilterExpr(s string) (*storage.TagFilter, error) {
|
||||||
IsRegexp: isRegexp,
|
IsRegexp: isRegexp,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func joinTagFilterss(tfs []storage.TagFilter, extraFilters [][]storage.TagFilter) [][]storage.TagFilter {
|
||||||
|
return searchutils.JoinTagFilterss([][]storage.TagFilter{tfs}, extraFilters)
|
||||||
|
}
|
||||||
|
|
|
@ -283,11 +283,11 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
if start >= end {
|
if start >= end {
|
||||||
end = start + defaultStep
|
end = start + defaultStep
|
||||||
}
|
}
|
||||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := exportHandler(w, matches, etf, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil {
|
if err := exportHandler(w, matches, etfs, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil {
|
||||||
return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err)
|
return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -295,7 +295,7 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
|
|
||||||
var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)
|
var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)
|
||||||
|
|
||||||
func exportHandler(w http.ResponseWriter, matches []string, etf []storage.TagFilter, start, end int64, format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error {
|
func exportHandler(w http.ResponseWriter, matches []string, etfs [][]storage.TagFilter, start, end int64, format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error {
|
||||||
writeResponseFunc := WriteExportStdResponse
|
writeResponseFunc := WriteExportStdResponse
|
||||||
writeLineFunc := func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
|
writeLineFunc := func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
|
||||||
bb := quicktemplate.AcquireByteBuffer()
|
bb := quicktemplate.AcquireByteBuffer()
|
||||||
|
@ -352,7 +352,7 @@ func exportHandler(w http.ResponseWriter, matches []string, etf []storage.TagFil
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
|
||||||
|
|
||||||
sq := storage.NewSearchQuery(start, end, tagFilterss)
|
sq := storage.NewSearchQuery(start, end, tagFilterss)
|
||||||
w.Header().Set("Content-Type", contentType)
|
w.Header().Set("Content-Type", contentType)
|
||||||
|
@ -478,13 +478,13 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
return fmt.Errorf("cannot parse form values: %w", err)
|
return fmt.Errorf("cannot parse form values: %w", err)
|
||||||
}
|
}
|
||||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
matches := getMatchesFromRequest(r)
|
matches := getMatchesFromRequest(r)
|
||||||
var labelValues []string
|
var labelValues []string
|
||||||
if len(matches) == 0 && len(etf) == 0 {
|
if len(matches) == 0 && len(etfs) == 0 {
|
||||||
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
||||||
var err error
|
var err error
|
||||||
labelValues, err = netstorage.GetLabelValues(labelName, deadline)
|
labelValues, err = netstorage.GetLabelValues(labelName, deadline)
|
||||||
|
@ -527,7 +527,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
labelValues, err = labelValuesWithMatches(labelName, matches, etf, start, end, deadline)
|
labelValues, err = labelValuesWithMatches(labelName, matches, etfs, start, end, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
|
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
|
||||||
}
|
}
|
||||||
|
@ -543,7 +543,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func labelValuesWithMatches(labelName string, matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
|
func labelValuesWithMatches(labelName string, matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
|
||||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -564,7 +564,7 @@ func labelValuesWithMatches(labelName string, matches []string, etf []storage.Ta
|
||||||
if start >= end {
|
if start >= end {
|
||||||
end = start + defaultStep
|
end = start + defaultStep
|
||||||
}
|
}
|
||||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
|
||||||
if len(tagFilterss) == 0 {
|
if len(tagFilterss) == 0 {
|
||||||
logger.Panicf("BUG: tagFilterss must be non-empty")
|
logger.Panicf("BUG: tagFilterss must be non-empty")
|
||||||
}
|
}
|
||||||
|
@ -648,7 +648,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
return fmt.Errorf("cannot parse form values: %w", err)
|
return fmt.Errorf("cannot parse form values: %w", err)
|
||||||
}
|
}
|
||||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -679,13 +679,13 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
||||||
topN = n
|
topN = n
|
||||||
}
|
}
|
||||||
var status *storage.TSDBStatus
|
var status *storage.TSDBStatus
|
||||||
if len(matches) == 0 && len(etf) == 0 {
|
if len(matches) == 0 && len(etfs) == 0 {
|
||||||
status, err = netstorage.GetTSDBStatusForDate(deadline, date, topN)
|
status, err = netstorage.GetTSDBStatusForDate(deadline, date, topN)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
|
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
status, err = tsdbStatusWithMatches(matches, etf, date, topN, deadline)
|
status, err = tsdbStatusWithMatches(matches, etfs, date, topN, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain tsdb status with matches for date=%d, topN=%d: %w", date, topN, err)
|
return fmt.Errorf("cannot obtain tsdb status with matches for date=%d, topN=%d: %w", date, topN, err)
|
||||||
}
|
}
|
||||||
|
@ -700,12 +700,12 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func tsdbStatusWithMatches(matches []string, etf []storage.TagFilter, date uint64, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
|
func tsdbStatusWithMatches(matches []string, etfs [][]storage.TagFilter, date uint64, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
|
||||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
|
||||||
if len(tagFilterss) == 0 {
|
if len(tagFilterss) == 0 {
|
||||||
logger.Panicf("BUG: tagFilterss must be non-empty")
|
logger.Panicf("BUG: tagFilterss must be non-empty")
|
||||||
}
|
}
|
||||||
|
@ -731,13 +731,13 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
return fmt.Errorf("cannot parse form values: %w", err)
|
return fmt.Errorf("cannot parse form values: %w", err)
|
||||||
}
|
}
|
||||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
matches := getMatchesFromRequest(r)
|
matches := getMatchesFromRequest(r)
|
||||||
var labels []string
|
var labels []string
|
||||||
if len(matches) == 0 && len(etf) == 0 {
|
if len(matches) == 0 && len(etfs) == 0 {
|
||||||
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
||||||
var err error
|
var err error
|
||||||
labels, err = netstorage.GetLabels(deadline)
|
labels, err = netstorage.GetLabels(deadline)
|
||||||
|
@ -778,7 +778,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
labels, err = labelsWithMatches(matches, etf, start, end, deadline)
|
labels, err = labelsWithMatches(matches, etfs, start, end, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
|
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
|
||||||
}
|
}
|
||||||
|
@ -794,7 +794,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func labelsWithMatches(matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
|
func labelsWithMatches(matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
|
||||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -802,7 +802,7 @@ func labelsWithMatches(matches []string, etf []storage.TagFilter, start, end int
|
||||||
if start >= end {
|
if start >= end {
|
||||||
end = start + defaultStep
|
end = start + defaultStep
|
||||||
}
|
}
|
||||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
|
||||||
if len(tagFilterss) == 0 {
|
if len(tagFilterss) == 0 {
|
||||||
logger.Panicf("BUG: tagFilterss must be non-empty")
|
logger.Panicf("BUG: tagFilterss must be non-empty")
|
||||||
}
|
}
|
||||||
|
@ -999,7 +999,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
||||||
if len(query) > maxQueryLen.N {
|
if len(query) > maxQueryLen.N {
|
||||||
return fmt.Errorf("too long query; got %d bytes; mustn't exceed `-search.maxQueryLen=%d` bytes", len(query), maxQueryLen.N)
|
return fmt.Errorf("too long query; got %d bytes; mustn't exceed `-search.maxQueryLen=%d` bytes", len(query), maxQueryLen.N)
|
||||||
}
|
}
|
||||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -1014,7 +1014,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
||||||
if end < start {
|
if end < start {
|
||||||
end = start
|
end = start
|
||||||
}
|
}
|
||||||
if err := exportHandler(w, []string{childQuery}, etf, start, end, "promapi", 0, false, deadline); err != nil {
|
if err := exportHandler(w, []string{childQuery}, etfs, start, end, "promapi", 0, false, deadline); err != nil {
|
||||||
return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
|
return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
|
||||||
}
|
}
|
||||||
queryDuration.UpdateDuration(startTime)
|
queryDuration.UpdateDuration(startTime)
|
||||||
|
@ -1030,7 +1030,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
||||||
start -= offset
|
start -= offset
|
||||||
end := start
|
end := start
|
||||||
start = end - window
|
start = end - window
|
||||||
if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct, etf); err != nil {
|
if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct, etfs); err != nil {
|
||||||
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
|
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
|
||||||
}
|
}
|
||||||
queryDuration.UpdateDuration(startTime)
|
queryDuration.UpdateDuration(startTime)
|
||||||
|
@ -1055,7 +1055,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
||||||
Deadline: deadline,
|
Deadline: deadline,
|
||||||
LookbackDelta: lookbackDelta,
|
LookbackDelta: lookbackDelta,
|
||||||
RoundDigits: getRoundDigits(r),
|
RoundDigits: getRoundDigits(r),
|
||||||
EnforcedTagFilters: etf,
|
EnforcedTagFilterss: etfs,
|
||||||
}
|
}
|
||||||
result, err := promql.Exec(&ec, query, true)
|
result, err := promql.Exec(&ec, query, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1105,17 +1105,17 @@ func QueryRangeHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct, etf); err != nil {
|
if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct, etfs); err != nil {
|
||||||
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
|
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etf []storage.TagFilter) error {
|
func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etfs [][]storage.TagFilter) error {
|
||||||
deadline := searchutils.GetDeadlineForQuery(r, startTime)
|
deadline := searchutils.GetDeadlineForQuery(r, startTime)
|
||||||
mayCache := !searchutils.GetBool(r, "nocache")
|
mayCache := !searchutils.GetBool(r, "nocache")
|
||||||
lookbackDelta, err := getMaxLookback(r)
|
lookbackDelta, err := getMaxLookback(r)
|
||||||
|
@ -1146,7 +1146,7 @@ func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string,
|
||||||
MayCache: mayCache,
|
MayCache: mayCache,
|
||||||
LookbackDelta: lookbackDelta,
|
LookbackDelta: lookbackDelta,
|
||||||
RoundDigits: getRoundDigits(r),
|
RoundDigits: getRoundDigits(r),
|
||||||
EnforcedTagFilters: etf,
|
EnforcedTagFilterss: etfs,
|
||||||
}
|
}
|
||||||
result, err := promql.Exec(&ec, query, false)
|
result, err := promql.Exec(&ec, query, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1254,24 +1254,12 @@ func getMaxLookback(r *http.Request) (int64, error) {
|
||||||
return searchutils.GetDuration(r, "max_lookback", d)
|
return searchutils.GetDuration(r, "max_lookback", d)
|
||||||
}
|
}
|
||||||
|
|
||||||
func addEnforcedFiltersToTagFilterss(dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter) [][]storage.TagFilter {
|
|
||||||
if len(dstTfss) == 0 {
|
|
||||||
return [][]storage.TagFilter{
|
|
||||||
enforcedFilters,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i := range dstTfss {
|
|
||||||
dstTfss[i] = append(dstTfss[i], enforcedFilters...)
|
|
||||||
}
|
|
||||||
return dstTfss
|
|
||||||
}
|
|
||||||
|
|
||||||
func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error) {
|
func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error) {
|
||||||
tagFilterss := make([][]storage.TagFilter, 0, len(matches))
|
tagFilterss := make([][]storage.TagFilter, 0, len(matches))
|
||||||
for _, match := range matches {
|
for _, match := range matches {
|
||||||
tagFilters, err := promql.ParseMetricSelector(match)
|
tagFilters, err := searchutils.ParseMetricSelector(match)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse %q: %w", match, err)
|
return nil, fmt.Errorf("cannot parse matches[]=%s: %w", match, err)
|
||||||
}
|
}
|
||||||
tagFilterss = append(tagFilterss, tagFilters)
|
tagFilterss = append(tagFilterss, tagFilters)
|
||||||
}
|
}
|
||||||
|
@ -1287,11 +1275,11 @@ func getTagFilterssFromRequest(r *http.Request) ([][]storage.TagFilter, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
|
||||||
return tagFilterss, nil
|
return tagFilterss, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -6,7 +6,6 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestRemoveEmptyValuesAndTimeseries(t *testing.T) {
|
func TestRemoveEmptyValuesAndTimeseries(t *testing.T) {
|
||||||
|
@ -196,38 +195,3 @@ func TestAdjustLastPoints(t *testing.T) {
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// helper for tests
|
|
||||||
func tfFromKV(k, v string) storage.TagFilter {
|
|
||||||
return storage.TagFilter{
|
|
||||||
Key: []byte(k),
|
|
||||||
Value: []byte(v),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_addEnforcedFiltersToTagFilterss(t *testing.T) {
|
|
||||||
f := func(t *testing.T, dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter, want [][]storage.TagFilter) {
|
|
||||||
t.Helper()
|
|
||||||
got := addEnforcedFiltersToTagFilterss(dstTfss, enforcedFilters)
|
|
||||||
if !reflect.DeepEqual(got, want) {
|
|
||||||
t.Fatalf("unxpected result for addEnforcedFiltersToTagFilterss, \ngot: %v,\n want: %v", want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f(t, [][]storage.TagFilter{{tfFromKV("label", "value")}},
|
|
||||||
nil,
|
|
||||||
[][]storage.TagFilter{{tfFromKV("label", "value")}})
|
|
||||||
|
|
||||||
f(t, nil,
|
|
||||||
[]storage.TagFilter{tfFromKV("ext-label", "ext-value")},
|
|
||||||
[][]storage.TagFilter{{tfFromKV("ext-label", "ext-value")}})
|
|
||||||
|
|
||||||
f(t, [][]storage.TagFilter{
|
|
||||||
{tfFromKV("l1", "v1")},
|
|
||||||
{tfFromKV("l2", "v2")},
|
|
||||||
},
|
|
||||||
[]storage.TagFilter{tfFromKV("ext-l1", "v2")},
|
|
||||||
[][]storage.TagFilter{
|
|
||||||
{tfFromKV("l1", "v1"), tfFromKV("ext-l1", "v2")},
|
|
||||||
{tfFromKV("l2", "v2"), tfFromKV("ext-l1", "v2")},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
|
@ -29,7 +29,6 @@ var aggrFuncs = map[string]aggrFunc{
|
||||||
"geomean": newAggrFunc(aggrFuncGeomean),
|
"geomean": newAggrFunc(aggrFuncGeomean),
|
||||||
"group": newAggrFunc(aggrFuncGroup),
|
"group": newAggrFunc(aggrFuncGroup),
|
||||||
"histogram": newAggrFunc(aggrFuncHistogram),
|
"histogram": newAggrFunc(aggrFuncHistogram),
|
||||||
"limit_offset": aggrFuncLimitOffset,
|
|
||||||
"limitk": aggrFuncLimitK,
|
"limitk": aggrFuncLimitK,
|
||||||
"mad": newAggrFunc(aggrFuncMAD),
|
"mad": newAggrFunc(aggrFuncMAD),
|
||||||
"max": newAggrFunc(aggrFuncMax),
|
"max": newAggrFunc(aggrFuncMax),
|
||||||
|
@ -1005,37 +1004,12 @@ func aggrFuncLimitK(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||||
if len(limits) > 0 {
|
if len(limits) > 0 {
|
||||||
limit = int(limits[0])
|
limit = int(limits[0])
|
||||||
}
|
}
|
||||||
afe := newLimitOffsetAggrFunc(limit, 0)
|
|
||||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, afa.ae.Limit, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func aggrFuncLimitOffset(afa *aggrFuncArg) ([]*timeseries, error) {
|
|
||||||
args := afa.args
|
|
||||||
if err := expectTransformArgsNum(args, 3); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
limit, err := getIntNumber(args[0], 0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("cannot obtain limit arg: %w", err)
|
|
||||||
}
|
|
||||||
offset, err := getIntNumber(args[1], 1)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("cannot obtain offset arg: %w", err)
|
|
||||||
}
|
|
||||||
afe := newLimitOffsetAggrFunc(limit, offset)
|
|
||||||
return aggrFuncExt(afe, args[2], &afa.ae.Modifier, afa.ae.Limit, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newLimitOffsetAggrFunc(limit, offset int) func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
|
|
||||||
if offset < 0 {
|
|
||||||
offset = 0
|
|
||||||
}
|
|
||||||
if limit < 0 {
|
if limit < 0 {
|
||||||
limit = 0
|
limit = 0
|
||||||
}
|
}
|
||||||
return func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
|
afe := func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
|
||||||
// Sort series by metricName hash in order to get consistent set of output series
|
// Sort series by metricName hash in order to get consistent set of output series
|
||||||
// across multiple calls to limitk() and limit_offset() functions.
|
// across multiple calls to limitk() function.
|
||||||
// Sort series by hash in order to guarantee uniform selection across series.
|
// Sort series by hash in order to guarantee uniform selection across series.
|
||||||
type hashSeries struct {
|
type hashSeries struct {
|
||||||
h uint64
|
h uint64
|
||||||
|
@ -1056,15 +1030,12 @@ func newLimitOffsetAggrFunc(limit, offset int) func(tss []*timeseries, modifier
|
||||||
for i, hs := range hss {
|
for i, hs := range hss {
|
||||||
tss[i] = hs.ts
|
tss[i] = hs.ts
|
||||||
}
|
}
|
||||||
if offset > len(tss) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
tss = tss[offset:]
|
|
||||||
if limit < len(tss) {
|
if limit < len(tss) {
|
||||||
tss = tss[:limit]
|
tss = tss[:limit]
|
||||||
}
|
}
|
||||||
return tss
|
return tss
|
||||||
}
|
}
|
||||||
|
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, afa.ae.Limit, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getHash(d *xxhash.Digest, mn *storage.MetricName) uint64 {
|
func getHash(d *xxhash.Digest, mn *storage.MetricName) uint64 {
|
||||||
|
|
|
@ -104,8 +104,8 @@ type EvalConfig struct {
|
||||||
// How many decimal digits after the point to leave in response.
|
// How many decimal digits after the point to leave in response.
|
||||||
RoundDigits int
|
RoundDigits int
|
||||||
|
|
||||||
// EnforcedTagFilters used for apply additional label filters to query.
|
// EnforcedTagFilterss may contain additional label filters to use in the query.
|
||||||
EnforcedTagFilters []storage.TagFilter
|
EnforcedTagFilterss [][]storage.TagFilter
|
||||||
|
|
||||||
timestamps []int64
|
timestamps []int64
|
||||||
timestampsOnce sync.Once
|
timestampsOnce sync.Once
|
||||||
|
@ -121,7 +121,7 @@ func newEvalConfig(src *EvalConfig) *EvalConfig {
|
||||||
ec.MayCache = src.MayCache
|
ec.MayCache = src.MayCache
|
||||||
ec.LookbackDelta = src.LookbackDelta
|
ec.LookbackDelta = src.LookbackDelta
|
||||||
ec.RoundDigits = src.RoundDigits
|
ec.RoundDigits = src.RoundDigits
|
||||||
ec.EnforcedTagFilters = src.EnforcedTagFilters
|
ec.EnforcedTagFilterss = src.EnforcedTagFilterss
|
||||||
|
|
||||||
// do not copy src.timestamps - they must be generated again.
|
// do not copy src.timestamps - they must be generated again.
|
||||||
return &ec
|
return &ec
|
||||||
|
@ -672,16 +672,15 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fetch the remaining part of the result.
|
// Fetch the remaining part of the result.
|
||||||
tfs := toTagFilters(me.LabelFilters)
|
tfs := searchutils.ToTagFilters(me.LabelFilters)
|
||||||
// append external filters.
|
tfss := searchutils.JoinTagFilterss([][]storage.TagFilter{tfs}, ec.EnforcedTagFilterss)
|
||||||
tfs = append(tfs, ec.EnforcedTagFilters...)
|
|
||||||
minTimestamp := start - maxSilenceInterval
|
minTimestamp := start - maxSilenceInterval
|
||||||
if window > ec.Step {
|
if window > ec.Step {
|
||||||
minTimestamp -= window
|
minTimestamp -= window
|
||||||
} else {
|
} else {
|
||||||
minTimestamp -= ec.Step
|
minTimestamp -= ec.Step
|
||||||
}
|
}
|
||||||
sq := storage.NewSearchQuery(minTimestamp, ec.End, [][]storage.TagFilter{tfs})
|
sq := storage.NewSearchQuery(minTimestamp, ec.End, tfss)
|
||||||
rss, err := netstorage.ProcessSearchQuery(sq, true, ec.Deadline)
|
rss, err := netstorage.ProcessSearchQuery(sq, true, ec.Deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -877,26 +876,6 @@ func mulNoOverflow(a, b int64) int64 {
|
||||||
return a * b
|
return a * b
|
||||||
}
|
}
|
||||||
|
|
||||||
func toTagFilters(lfs []metricsql.LabelFilter) []storage.TagFilter {
|
|
||||||
tfs := make([]storage.TagFilter, len(lfs))
|
|
||||||
for i := range lfs {
|
|
||||||
toTagFilter(&tfs[i], &lfs[i])
|
|
||||||
}
|
|
||||||
return tfs
|
|
||||||
}
|
|
||||||
|
|
||||||
func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
|
|
||||||
if src.Label != "__name__" {
|
|
||||||
dst.Key = []byte(src.Label)
|
|
||||||
} else {
|
|
||||||
// This is required for storage.Search.
|
|
||||||
dst.Key = nil
|
|
||||||
}
|
|
||||||
dst.Value = []byte(src.Value)
|
|
||||||
dst.IsRegexp = src.IsRegexp
|
|
||||||
dst.IsNegative = src.IsNegative
|
|
||||||
}
|
|
||||||
|
|
||||||
func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]float64, []int64) {
|
func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]float64, []int64) {
|
||||||
if *noStaleMarkers || funcName == "default_rollup" {
|
if *noStaleMarkers || funcName == "default_rollup" {
|
||||||
// Do not drop Prometheus staleness marks (aka stale NaNs) for default_rollup() function,
|
// Do not drop Prometheus staleness marks (aka stale NaNs) for default_rollup() function,
|
||||||
|
|
|
@ -2055,6 +2055,24 @@ func TestExecSuccess(t *testing.T) {
|
||||||
resultExpected := []netstorage.Result{r1, r2, r3}
|
resultExpected := []netstorage.Result{r1, r2, r3}
|
||||||
f(q, resultExpected)
|
f(q, resultExpected)
|
||||||
})
|
})
|
||||||
|
t.Run(`limit_offset`, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
q := `limit_offset(1, 1, sort_by_label((
|
||||||
|
label_set(time()*1, "foo", "y"),
|
||||||
|
label_set(time()*2, "foo", "a"),
|
||||||
|
label_set(time()*3, "foo", "x"),
|
||||||
|
), "foo"))`
|
||||||
|
r := netstorage.Result{
|
||||||
|
Values: []float64{3000, 3600, 4200, 4800, 5400, 6000},
|
||||||
|
Timestamps: timestampsExpected,
|
||||||
|
}
|
||||||
|
r.MetricName.Tags = []storage.Tag{{
|
||||||
|
Key: []byte("foo"),
|
||||||
|
Value: []byte("x"),
|
||||||
|
}}
|
||||||
|
resultExpected := []netstorage.Result{r}
|
||||||
|
f(q, resultExpected)
|
||||||
|
})
|
||||||
t.Run(`sum(label_graphite_group)`, func(t *testing.T) {
|
t.Run(`sum(label_graphite_group)`, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
q := `sort(sum by (__name__) (
|
q := `sort(sum by (__name__) (
|
||||||
|
@ -5161,21 +5179,6 @@ func TestExecSuccess(t *testing.T) {
|
||||||
resultExpected := []netstorage.Result{r1}
|
resultExpected := []netstorage.Result{r1}
|
||||||
f(q, resultExpected)
|
f(q, resultExpected)
|
||||||
})
|
})
|
||||||
t.Run(`limit_offset()`, func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
q := `limit_offset(1, 0, (label_set(10, "foo", "bar"), label_set(time()/150, "xbaz", "sss")))`
|
|
||||||
r1 := netstorage.Result{
|
|
||||||
MetricName: metricNameExpected,
|
|
||||||
Values: []float64{10, 10, 10, 10, 10, 10},
|
|
||||||
Timestamps: timestampsExpected,
|
|
||||||
}
|
|
||||||
r1.MetricName.Tags = []storage.Tag{{
|
|
||||||
Key: []byte("foo"),
|
|
||||||
Value: []byte("bar"),
|
|
||||||
}}
|
|
||||||
resultExpected := []netstorage.Result{r1}
|
|
||||||
f(q, resultExpected)
|
|
||||||
})
|
|
||||||
t.Run(`limitk(10)`, func(t *testing.T) {
|
t.Run(`limitk(10)`, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
q := `sort(limitk(10, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss")))`
|
q := `sort(limitk(10, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss")))`
|
||||||
|
|
|
@ -1,9 +1,6 @@
|
||||||
package promql
|
package promql
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
|
||||||
"github.com/VictoriaMetrics/metricsql"
|
"github.com/VictoriaMetrics/metricsql"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -43,21 +40,3 @@ func IsMetricSelectorWithRollup(s string) (childQuery string, window, offset *me
|
||||||
wrappedQuery := me.AppendString(nil)
|
wrappedQuery := me.AppendString(nil)
|
||||||
return string(wrappedQuery), re.Window, re.Offset
|
return string(wrappedQuery), re.Window, re.Offset
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseMetricSelector parses s containing PromQL metric selector
|
|
||||||
// and returns the corresponding LabelFilters.
|
|
||||||
func ParseMetricSelector(s string) ([]storage.TagFilter, error) {
|
|
||||||
expr, err := parsePromQLWithCache(s)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
me, ok := expr.(*metricsql.MetricExpr)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("expecting metricSelector; got %q", expr.AppendString(nil))
|
|
||||||
}
|
|
||||||
if len(me.LabelFilters) == 0 {
|
|
||||||
return nil, fmt.Errorf("labelFilters cannot be empty")
|
|
||||||
}
|
|
||||||
tfs := toTagFilters(me.LabelFilters)
|
|
||||||
return tfs, nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,50 +0,0 @@
|
||||||
package promql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestParseMetricSelectorSuccess(t *testing.T) {
|
|
||||||
f := func(s string) {
|
|
||||||
t.Helper()
|
|
||||||
tfs, err := ParseMetricSelector(s)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error when parsing %q: %s", s, err)
|
|
||||||
}
|
|
||||||
if tfs == nil {
|
|
||||||
t.Fatalf("expecting non-nil tfs when parsing %q", s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f("foo")
|
|
||||||
f(":foo")
|
|
||||||
f(" :fo:bar.baz")
|
|
||||||
f(`a{}`)
|
|
||||||
f(`{foo="bar"}`)
|
|
||||||
f(`{:f:oo=~"bar.+"}`)
|
|
||||||
f(`foo {bar != "baz"}`)
|
|
||||||
f(` foo { bar !~ "^ddd(x+)$", a="ss", __name__="sffd"} `)
|
|
||||||
f(`(foo)`)
|
|
||||||
f(`\п\р\и\в\е\т{\ы="111"}`)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParseMetricSelectorError(t *testing.T) {
|
|
||||||
f := func(s string) {
|
|
||||||
t.Helper()
|
|
||||||
tfs, err := ParseMetricSelector(s)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
|
||||||
}
|
|
||||||
if tfs != nil {
|
|
||||||
t.Fatalf("expecting nil tfs when parsing %q", s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f("")
|
|
||||||
f(`{}`)
|
|
||||||
f(`foo bar`)
|
|
||||||
f(`foo+bar`)
|
|
||||||
f(`sum(bar)`)
|
|
||||||
f(`x{y}`)
|
|
||||||
f(`x{y+z}`)
|
|
||||||
f(`foo[5m]`)
|
|
||||||
f(`foo offset 5m`)
|
|
||||||
}
|
|
|
@ -194,7 +194,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
defer bbPool.Put(bb)
|
defer bbPool.Put(bb)
|
||||||
|
|
||||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilters)
|
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
|
||||||
metainfoBuf := rrc.c.Get(nil, bb.B)
|
metainfoBuf := rrc.c.Get(nil, bb.B)
|
||||||
if len(metainfoBuf) == 0 {
|
if len(metainfoBuf) == 0 {
|
||||||
return nil, ec.Start
|
return nil, ec.Start
|
||||||
|
@ -214,7 +214,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
|
||||||
if len(compressedResultBuf.B) == 0 {
|
if len(compressedResultBuf.B) == 0 {
|
||||||
mi.RemoveKey(key)
|
mi.RemoveKey(key)
|
||||||
metainfoBuf = mi.Marshal(metainfoBuf[:0])
|
metainfoBuf = mi.Marshal(metainfoBuf[:0])
|
||||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilters)
|
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
|
||||||
rrc.c.Set(bb.B, metainfoBuf)
|
rrc.c.Set(bb.B, metainfoBuf)
|
||||||
return nil, ec.Start
|
return nil, ec.Start
|
||||||
}
|
}
|
||||||
|
@ -317,7 +317,7 @@ func (rrc *rollupResultCache) Put(ec *EvalConfig, expr metricsql.Expr, window in
|
||||||
bb.B = key.Marshal(bb.B[:0])
|
bb.B = key.Marshal(bb.B[:0])
|
||||||
rrc.c.SetBig(bb.B, compressedResultBuf.B)
|
rrc.c.SetBig(bb.B, compressedResultBuf.B)
|
||||||
|
|
||||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilters)
|
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
|
||||||
metainfoBuf := rrc.c.Get(nil, bb.B)
|
metainfoBuf := rrc.c.Get(nil, bb.B)
|
||||||
var mi rollupResultCacheMetainfo
|
var mi rollupResultCacheMetainfo
|
||||||
if len(metainfoBuf) > 0 {
|
if len(metainfoBuf) > 0 {
|
||||||
|
@ -347,15 +347,20 @@ var tooBigRollupResults = metrics.NewCounter("vm_too_big_rollup_results_total")
|
||||||
// Increment this value every time the format of the cache changes.
|
// Increment this value every time the format of the cache changes.
|
||||||
const rollupResultCacheVersion = 8
|
const rollupResultCacheVersion = 8
|
||||||
|
|
||||||
func marshalRollupResultCacheKey(dst []byte, expr metricsql.Expr, window, step int64, filters []storage.TagFilter) []byte {
|
func marshalRollupResultCacheKey(dst []byte, expr metricsql.Expr, window, step int64, etfs [][]storage.TagFilter) []byte {
|
||||||
dst = append(dst, rollupResultCacheVersion)
|
dst = append(dst, rollupResultCacheVersion)
|
||||||
dst = encoding.MarshalUint64(dst, rollupResultCacheKeyPrefix)
|
dst = encoding.MarshalUint64(dst, rollupResultCacheKeyPrefix)
|
||||||
dst = encoding.MarshalInt64(dst, window)
|
dst = encoding.MarshalInt64(dst, window)
|
||||||
dst = encoding.MarshalInt64(dst, step)
|
dst = encoding.MarshalInt64(dst, step)
|
||||||
dst = expr.AppendString(dst)
|
dst = expr.AppendString(dst)
|
||||||
for _, f := range filters {
|
for i, etf := range etfs {
|
||||||
|
for _, f := range etf {
|
||||||
dst = f.Marshal(dst)
|
dst = f.Marshal(dst)
|
||||||
}
|
}
|
||||||
|
if i+1 < len(etfs) {
|
||||||
|
dst = append(dst, '|')
|
||||||
|
}
|
||||||
|
}
|
||||||
return dst
|
return dst
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -11,6 +11,7 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||||
|
@ -69,6 +70,7 @@ var transformFuncs = map[string]transformFunc{
|
||||||
"label_transform": transformLabelTransform,
|
"label_transform": transformLabelTransform,
|
||||||
"label_uppercase": transformLabelUppercase,
|
"label_uppercase": transformLabelUppercase,
|
||||||
"label_value": transformLabelValue,
|
"label_value": transformLabelValue,
|
||||||
|
"limit_offset": transformLimitOffset,
|
||||||
"ln": newTransformFuncOneArg(transformLn),
|
"ln": newTransformFuncOneArg(transformLn),
|
||||||
"log2": newTransformFuncOneArg(transformLog2),
|
"log2": newTransformFuncOneArg(transformLog2),
|
||||||
"log10": newTransformFuncOneArg(transformLog10),
|
"log10": newTransformFuncOneArg(transformLog10),
|
||||||
|
@ -218,7 +220,7 @@ func getAbsentTimeseries(ec *EvalConfig, arg metricsql.Expr) []*timeseries {
|
||||||
if !ok {
|
if !ok {
|
||||||
return rvs
|
return rvs
|
||||||
}
|
}
|
||||||
tfs := toTagFilters(me.LabelFilters)
|
tfs := searchutils.ToTagFilters(me.LabelFilters)
|
||||||
for i := range tfs {
|
for i := range tfs {
|
||||||
tf := &tfs[i]
|
tf := &tfs[i]
|
||||||
if len(tf.Key) == 0 {
|
if len(tf.Key) == 0 {
|
||||||
|
@ -1770,6 +1772,29 @@ func transformLabelGraphiteGroup(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
|
|
||||||
var dotSeparator = []byte(".")
|
var dotSeparator = []byte(".")
|
||||||
|
|
||||||
|
func transformLimitOffset(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
|
args := tfa.args
|
||||||
|
if err := expectTransformArgsNum(args, 3); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
limit, err := getIntNumber(args[0], 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot obtain limit arg: %w", err)
|
||||||
|
}
|
||||||
|
offset, err := getIntNumber(args[1], 1)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot obtain offset arg: %w", err)
|
||||||
|
}
|
||||||
|
rvs := args[2]
|
||||||
|
if len(rvs) >= offset {
|
||||||
|
rvs = rvs[offset:]
|
||||||
|
}
|
||||||
|
if len(rvs) > limit {
|
||||||
|
rvs = rvs[:limit]
|
||||||
|
}
|
||||||
|
return rvs, nil
|
||||||
|
}
|
||||||
|
|
||||||
func transformLn(v float64) float64 {
|
func transformLn(v float64) float64 {
|
||||||
return math.Log(v)
|
return math.Log(v)
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,9 +9,8 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||||
"github.com/VictoriaMetrics/metricsql"
|
"github.com/VictoriaMetrics/metricsql"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -198,15 +197,17 @@ func (d *Deadline) String() string {
|
||||||
return fmt.Sprintf("%.3f seconds (elapsed %.3f seconds); the timeout can be adjusted with `%s` command-line flag", d.timeout.Seconds(), elapsed.Seconds(), d.flagHint)
|
return fmt.Sprintf("%.3f seconds (elapsed %.3f seconds); the timeout can be adjusted with `%s` command-line flag", d.timeout.Seconds(), elapsed.Seconds(), d.flagHint)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetEnforcedTagFiltersFromRequest returns additional filters from request.
|
// GetExtraTagFilters returns additional label filters from request.
|
||||||
func GetEnforcedTagFiltersFromRequest(r *http.Request) ([]storage.TagFilter, error) {
|
//
|
||||||
// fast path.
|
// Label filters can be present in extra_label and extra_filters[] query args.
|
||||||
extraLabels := r.Form["extra_label"]
|
// They are combined. For example, the following query args:
|
||||||
if len(extraLabels) == 0 {
|
// extra_label=t1=v1&extra_label=t2=v2&extra_filters[]={env="prod",team="devops"}&extra_filters={env=~"dev|staging",team!="devops"}
|
||||||
return nil, nil
|
// should be translated to the following filters joined with "or":
|
||||||
}
|
// {env="prod",team="devops",t1="v1",t2="v2"}
|
||||||
tagFilters := make([]storage.TagFilter, 0, len(extraLabels))
|
// {env=~"dev|staging",team!="devops",t1="v1",t2="v2"}
|
||||||
for _, match := range extraLabels {
|
func GetExtraTagFilters(r *http.Request) ([][]storage.TagFilter, error) {
|
||||||
|
var tagFilters []storage.TagFilter
|
||||||
|
for _, match := range r.Form["extra_label"] {
|
||||||
tmp := strings.SplitN(match, "=", 2)
|
tmp := strings.SplitN(match, "=", 2)
|
||||||
if len(tmp) != 2 {
|
if len(tmp) != 2 {
|
||||||
return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match)
|
return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match)
|
||||||
|
@ -216,5 +217,79 @@ func GetEnforcedTagFiltersFromRequest(r *http.Request) ([]storage.TagFilter, err
|
||||||
Value: []byte(tmp[1]),
|
Value: []byte(tmp[1]),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
return tagFilters, nil
|
extraFilters := r.Form["extra_filters"]
|
||||||
|
extraFilters = append(extraFilters, r.Form["extra_filters[]"]...)
|
||||||
|
if len(extraFilters) == 0 {
|
||||||
|
if len(tagFilters) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return [][]storage.TagFilter{tagFilters}, nil
|
||||||
|
}
|
||||||
|
var etfs [][]storage.TagFilter
|
||||||
|
for _, extraFilter := range extraFilters {
|
||||||
|
tfs, err := ParseMetricSelector(extraFilter)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot parse extra_filters=%s: %w", extraFilter, err)
|
||||||
|
}
|
||||||
|
tfs = append(tfs, tagFilters...)
|
||||||
|
etfs = append(etfs, tfs)
|
||||||
|
}
|
||||||
|
return etfs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// JoinTagFilterss adds etfs to every src filter and returns the result.
|
||||||
|
func JoinTagFilterss(src, etfs [][]storage.TagFilter) [][]storage.TagFilter {
|
||||||
|
if len(src) == 0 {
|
||||||
|
return etfs
|
||||||
|
}
|
||||||
|
if len(etfs) == 0 {
|
||||||
|
return src
|
||||||
|
}
|
||||||
|
var dst [][]storage.TagFilter
|
||||||
|
for _, tf := range src {
|
||||||
|
for _, etf := range etfs {
|
||||||
|
tfs := append([]storage.TagFilter{}, tf...)
|
||||||
|
tfs = append(tfs, etf...)
|
||||||
|
dst = append(dst, tfs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseMetricSelector parses s containing PromQL metric selector and returns the corresponding LabelFilters.
|
||||||
|
func ParseMetricSelector(s string) ([]storage.TagFilter, error) {
|
||||||
|
expr, err := metricsql.Parse(s)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
me, ok := expr.(*metricsql.MetricExpr)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("expecting metricSelector; got %q", expr.AppendString(nil))
|
||||||
|
}
|
||||||
|
if len(me.LabelFilters) == 0 {
|
||||||
|
return nil, fmt.Errorf("labelFilters cannot be empty")
|
||||||
|
}
|
||||||
|
tfs := ToTagFilters(me.LabelFilters)
|
||||||
|
return tfs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToTagFilters converts lfs to a slice of storage.TagFilter
|
||||||
|
func ToTagFilters(lfs []metricsql.LabelFilter) []storage.TagFilter {
|
||||||
|
tfs := make([]storage.TagFilter, len(lfs))
|
||||||
|
for i := range lfs {
|
||||||
|
toTagFilter(&tfs[i], &lfs[i])
|
||||||
|
}
|
||||||
|
return tfs
|
||||||
|
}
|
||||||
|
|
||||||
|
func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
|
||||||
|
if src.Label != "__name__" {
|
||||||
|
dst.Key = []byte(src.Label)
|
||||||
|
} else {
|
||||||
|
// This is required for storage.Search.
|
||||||
|
dst.Key = nil
|
||||||
|
}
|
||||||
|
dst.Value = []byte(src.Value)
|
||||||
|
dst.IsRegexp = src.IsRegexp
|
||||||
|
dst.IsNegative = src.IsNegative
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||||
|
@ -80,47 +81,238 @@ func TestGetTimeError(t *testing.T) {
|
||||||
f("292277025-08-18T07:12:54.999999998Z")
|
f("292277025-08-18T07:12:54.999999998Z")
|
||||||
}
|
}
|
||||||
|
|
||||||
// helper for tests
|
func TestGetExtraTagFilters(t *testing.T) {
|
||||||
func tfFromKV(k, v string) storage.TagFilter {
|
httpReqWithForm := func(qs string) *http.Request {
|
||||||
return storage.TagFilter{
|
q, err := url.ParseQuery(qs)
|
||||||
Key: []byte(k),
|
if err != nil {
|
||||||
Value: []byte(v),
|
t.Fatalf("unexpected error: %s", err)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetEnforcedTagFiltersFromRequest(t *testing.T) {
|
|
||||||
httpReqWithForm := func(tfs []string) *http.Request {
|
|
||||||
return &http.Request{
|
return &http.Request{
|
||||||
Form: map[string][]string{
|
Form: q,
|
||||||
"extra_label": tfs,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
f := func(t *testing.T, r *http.Request, want []storage.TagFilter, wantErr bool) {
|
f := func(t *testing.T, r *http.Request, want []string, wantErr bool) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
got, err := GetEnforcedTagFiltersFromRequest(r)
|
result, err := GetExtraTagFilters(r)
|
||||||
if (err != nil) != wantErr {
|
if (err != nil) != wantErr {
|
||||||
t.Fatalf("unexpected error: %v", err)
|
t.Fatalf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
|
got := tagFilterssToStrings(result)
|
||||||
if !reflect.DeepEqual(got, want) {
|
if !reflect.DeepEqual(got, want) {
|
||||||
t.Fatalf("unxpected result for getEnforcedTagFiltersFromRequest, \ngot: %v,\n want: %v", want, got)
|
t.Fatalf("unxpected result for GetExtraTagFilters\ngot: %s\nwant: %s", got, want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
f(t, httpReqWithForm("extra_label=label=value"),
|
||||||
f(t, httpReqWithForm([]string{"label=value"}),
|
[]string{`{label="value"}`},
|
||||||
[]storage.TagFilter{
|
|
||||||
tfFromKV("label", "value"),
|
|
||||||
},
|
|
||||||
false)
|
|
||||||
|
|
||||||
f(t, httpReqWithForm([]string{"job=vmagent", "dc=gce"}),
|
|
||||||
[]storage.TagFilter{tfFromKV("job", "vmagent"), tfFromKV("dc", "gce")},
|
|
||||||
false,
|
false,
|
||||||
)
|
)
|
||||||
f(t, httpReqWithForm([]string{"bad_filter"}),
|
f(t, httpReqWithForm("extra_label=job=vmagent&extra_label=dc=gce"),
|
||||||
|
[]string{`{job="vmagent",dc="gce"}`},
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
f(t, httpReqWithForm(`extra_filters={foo="bar"}`),
|
||||||
|
[]string{`{foo="bar"}`},
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
f(t, httpReqWithForm(`extra_filters={foo="bar"}&extra_filters[]={baz!~"aa",x=~"y"}`),
|
||||||
|
[]string{
|
||||||
|
`{foo="bar"}`,
|
||||||
|
`{baz!~"aa",x=~"y"}`,
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
f(t, httpReqWithForm(`extra_label=job=vmagent&extra_label=dc=gce&extra_filters={foo="bar"}`),
|
||||||
|
[]string{`{foo="bar",job="vmagent",dc="gce"}`},
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
f(t, httpReqWithForm(`extra_label=job=vmagent&extra_label=dc=gce&extra_filters[]={foo="bar"}&extra_filters[]={x=~"y|z",a="b"}`),
|
||||||
|
[]string{
|
||||||
|
`{foo="bar",job="vmagent",dc="gce"}`,
|
||||||
|
`{x=~"y|z",a="b",job="vmagent",dc="gce"}`,
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
f(t, httpReqWithForm("extra_label=bad_filter"),
|
||||||
nil,
|
nil,
|
||||||
true,
|
true,
|
||||||
)
|
)
|
||||||
f(t, &http.Request{},
|
f(t, httpReqWithForm(`extra_filters={bad_filter}`),
|
||||||
nil, false)
|
nil,
|
||||||
|
true,
|
||||||
|
)
|
||||||
|
f(t, httpReqWithForm(`extra_filters[]={bad_filter}`),
|
||||||
|
nil,
|
||||||
|
true,
|
||||||
|
)
|
||||||
|
f(t, httpReqWithForm(""),
|
||||||
|
nil,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseMetricSelectorSuccess(t *testing.T) {
|
||||||
|
f := func(s string) {
|
||||||
|
t.Helper()
|
||||||
|
tfs, err := ParseMetricSelector(s)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error when parsing %q: %s", s, err)
|
||||||
|
}
|
||||||
|
if tfs == nil {
|
||||||
|
t.Fatalf("expecting non-nil tfs when parsing %q", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f("foo")
|
||||||
|
f(":foo")
|
||||||
|
f(" :fo:bar.baz")
|
||||||
|
f(`a{}`)
|
||||||
|
f(`{foo="bar"}`)
|
||||||
|
f(`{:f:oo=~"bar.+"}`)
|
||||||
|
f(`foo {bar != "baz"}`)
|
||||||
|
f(` foo { bar !~ "^ddd(x+)$", a="ss", __name__="sffd"} `)
|
||||||
|
f(`(foo)`)
|
||||||
|
f(`\п\р\и\в\е\т{\ы="111"}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseMetricSelectorError(t *testing.T) {
|
||||||
|
f := func(s string) {
|
||||||
|
t.Helper()
|
||||||
|
tfs, err := ParseMetricSelector(s)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||||
|
}
|
||||||
|
if tfs != nil {
|
||||||
|
t.Fatalf("expecting nil tfs when parsing %q", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f("")
|
||||||
|
f(`{}`)
|
||||||
|
f(`foo bar`)
|
||||||
|
f(`foo+bar`)
|
||||||
|
f(`sum(bar)`)
|
||||||
|
f(`x{y}`)
|
||||||
|
f(`x{y+z}`)
|
||||||
|
f(`foo[5m]`)
|
||||||
|
f(`foo offset 5m`)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestJoinTagFilterss(t *testing.T) {
|
||||||
|
f := func(t *testing.T, src, etfs [][]storage.TagFilter, want []string) {
|
||||||
|
t.Helper()
|
||||||
|
result := JoinTagFilterss(src, etfs)
|
||||||
|
got := tagFilterssToStrings(result)
|
||||||
|
if !reflect.DeepEqual(got, want) {
|
||||||
|
t.Fatalf("unxpected result for JoinTagFilterss\ngot: %s\nwant: %v", got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Single tag filter
|
||||||
|
f(t, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||||
|
}, nil, []string{
|
||||||
|
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
|
||||||
|
})
|
||||||
|
// Miltiple tag filters
|
||||||
|
f(t, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||||
|
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||||
|
}, nil, []string{
|
||||||
|
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
|
||||||
|
`{k5=~"v5"}`,
|
||||||
|
})
|
||||||
|
// Single extra filter
|
||||||
|
f(t, nil, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||||
|
}, []string{
|
||||||
|
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
|
||||||
|
})
|
||||||
|
// Multiple extra filters
|
||||||
|
f(t, nil, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||||
|
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||||
|
}, []string{
|
||||||
|
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
|
||||||
|
`{k5=~"v5"}`,
|
||||||
|
})
|
||||||
|
// Single tag filter and a single extra filter
|
||||||
|
f(t, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||||
|
}, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||||
|
}, []string{
|
||||||
|
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k5=~"v5"}`,
|
||||||
|
})
|
||||||
|
// Multiple tag filters and a single extra filter
|
||||||
|
f(t, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||||
|
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||||
|
}, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k6=~"v6"}`),
|
||||||
|
}, []string{
|
||||||
|
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
|
||||||
|
`{k5=~"v5",k6=~"v6"}`,
|
||||||
|
})
|
||||||
|
// Single tag filter and multiple extra filters
|
||||||
|
f(t, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||||
|
}, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||||
|
mustParseMetricSelector(`{k6=~"v6"}`),
|
||||||
|
}, []string{
|
||||||
|
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k5=~"v5"}`,
|
||||||
|
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
|
||||||
|
})
|
||||||
|
// Multiple tag filters and multiple extra filters
|
||||||
|
f(t, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||||
|
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||||
|
}, [][]storage.TagFilter{
|
||||||
|
mustParseMetricSelector(`{k6=~"v6"}`),
|
||||||
|
mustParseMetricSelector(`{k7=~"v7"}`),
|
||||||
|
}, []string{
|
||||||
|
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
|
||||||
|
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k7=~"v7"}`,
|
||||||
|
`{k5=~"v5",k6=~"v6"}`,
|
||||||
|
`{k5=~"v5",k7=~"v7"}`,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustParseMetricSelector(s string) []storage.TagFilter {
|
||||||
|
tf, err := ParseMetricSelector(s)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Errorf("cannot parse %q: %w", s, err))
|
||||||
|
}
|
||||||
|
return tf
|
||||||
|
}
|
||||||
|
|
||||||
|
func tagFilterssToStrings(tfss [][]storage.TagFilter) []string {
|
||||||
|
var a []string
|
||||||
|
for _, tfs := range tfss {
|
||||||
|
a = append(a, tagFiltersToString(tfs))
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
func tagFiltersToString(tfs []storage.TagFilter) string {
|
||||||
|
b := []byte("{")
|
||||||
|
for i, tf := range tfs {
|
||||||
|
b = append(b, tf.Key...)
|
||||||
|
if tf.IsNegative {
|
||||||
|
if tf.IsRegexp {
|
||||||
|
b = append(b, "!~"...)
|
||||||
|
} else {
|
||||||
|
b = append(b, "!="...)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if tf.IsRegexp {
|
||||||
|
b = append(b, "=~"...)
|
||||||
|
} else {
|
||||||
|
b = append(b, "="...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b = strconv.AppendQuote(b, string(tf.Value))
|
||||||
|
if i+1 < len(tfs) {
|
||||||
|
b = append(b, ',')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b = append(b, '}')
|
||||||
|
return string(b)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,19 +1,19 @@
|
||||||
{
|
{
|
||||||
"files": {
|
"files": {
|
||||||
"main.css": "./static/css/main.674f8c98.chunk.css",
|
"main.css": "./static/css/main.83d9ae2d.chunk.css",
|
||||||
"main.js": "./static/js/main.f4cab8bc.chunk.js",
|
"main.js": "./static/js/main.6651c49c.chunk.js",
|
||||||
"runtime-main.js": "./static/js/runtime-main.f698388d.js",
|
"runtime-main.js": "./static/js/runtime-main.c4b656b8.js",
|
||||||
"static/css/2.77671664.chunk.css": "./static/css/2.77671664.chunk.css",
|
"static/css/2.77671664.chunk.css": "./static/css/2.77671664.chunk.css",
|
||||||
"static/js/2.bfcf9c30.chunk.js": "./static/js/2.bfcf9c30.chunk.js",
|
"static/js/2.ef1db8c8.chunk.js": "./static/js/2.ef1db8c8.chunk.js",
|
||||||
"static/js/3.e51afffb.chunk.js": "./static/js/3.e51afffb.chunk.js",
|
"static/js/3.65648506.chunk.js": "./static/js/3.65648506.chunk.js",
|
||||||
"index.html": "./index.html",
|
"index.html": "./index.html",
|
||||||
"static/js/2.bfcf9c30.chunk.js.LICENSE.txt": "./static/js/2.bfcf9c30.chunk.js.LICENSE.txt"
|
"static/js/2.ef1db8c8.chunk.js.LICENSE.txt": "./static/js/2.ef1db8c8.chunk.js.LICENSE.txt"
|
||||||
},
|
},
|
||||||
"entrypoints": [
|
"entrypoints": [
|
||||||
"static/js/runtime-main.f698388d.js",
|
"static/js/runtime-main.c4b656b8.js",
|
||||||
"static/css/2.77671664.chunk.css",
|
"static/css/2.77671664.chunk.css",
|
||||||
"static/js/2.bfcf9c30.chunk.js",
|
"static/js/2.ef1db8c8.chunk.js",
|
||||||
"static/css/main.674f8c98.chunk.css",
|
"static/css/main.83d9ae2d.chunk.css",
|
||||||
"static/js/main.f4cab8bc.chunk.js"
|
"static/js/main.6651c49c.chunk.js"
|
||||||
]
|
]
|
||||||
}
|
}
|
|
@ -1 +1 @@
|
||||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/2.77671664.chunk.css" rel="stylesheet"><link href="./static/css/main.674f8c98.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],f=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(l&&l(r);p.length;)p.shift()();return u.push.apply(u,f||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"e51afffb"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(f);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": 
"+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var f=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var f=0;f<a.length;f++)r(a[f]);var l=c;t()}([])</script><script src="./static/js/2.bfcf9c30.chunk.js"></script><script src="./static/js/main.f4cab8bc.chunk.js"></script></body></html>
|
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/2.77671664.chunk.css" rel="stylesheet"><link href="./static/css/main.83d9ae2d.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"65648506"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": 
"+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([])</script><script src="./static/js/2.ef1db8c8.chunk.js"></script><script src="./static/js/main.6651c49c.chunk.js"></script></body></html>
|
|
@ -1 +0,0 @@
|
||||||
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,"Courier New",monospace}.MuiAccordionSummary-content{margin:10px 0!important}.cm-activeLine{background-color:inherit!important}.cm-editor{border-radius:4px;border:1px solid #b9b9b9;font-size:10px}.one-line-scroll .cm-editor{height:24px}.cm-gutters{border-radius:4px 0 0 4px;height:100%}.multi-line-scroll .cm-content,.multi-line-scroll .cm-gutters{min-height:64px!important}.one-line-scroll .cm-content,.one-line-scroll .cm-gutters{min-height:auto}.u-tooltip{position:absolute;display:none;grid-gap:12px;max-width:300px;padding:8px;border-radius:4px;background:rgba(57,57,57,.9);color:#fff;font-size:10px;line-height:1.4em;font-weight:500;word-wrap:break-word;font-family:monospace;pointer-events:none;z-index:100}.u-tooltip-data{display:flex;flex-wrap:wrap;align-items:center;font-size:11px;line-height:150%}.u-tooltip-data__value{padding:4px;font-weight:700}.u-tooltip__info{display:grid;grid-gap:4px}.u-tooltip__marker{width:12px;height:12px;margin-right:4px}.legendWrapper{margin-top:20px}.legendItem{display:inline-grid;grid-template-columns:auto auto;grid-gap:4px;align-items:center;justify-content:start;padding:5px 10px;background-color:#fff;cursor:pointer;transition:.2s ease}.legendItemHide{text-decoration:line-through;opacity:.5}.legendItem:hover{background-color:rgba(0,0,0,.1)}.legendMarker{width:12px;height:12px;border-width:2px;border-style:solid;box-sizing:border-box;transition:.2s ease}.legendLabel{font-size:12px;font-weight:600}
|
|
1
app/vmselect/vmui/static/css/main.83d9ae2d.chunk.css
Normal file
1
app/vmselect/vmui/static/css/main.83d9ae2d.chunk.css
Normal file
|
@ -0,0 +1 @@
|
||||||
|
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,"Courier New",monospace}.MuiAccordionSummary-content{margin:0!important}.cm-activeLine{background-color:inherit!important}.cm-editor{border:none;border-radius:4px;font-size:10px}.cm-gutters{border-radius:4px 0 0 4px;height:100%;overflow:hidden;border:none!important}.cm-activeLineGutter,.cm-gutters{background-color:#fff!important}.query-editor .cm-scroller{align-items:center!important}.query-editor .cm-editor.cm-focused{outline:none}.query-editor-container{position:relative;padding:12px;border:1px solid #b9b9b9;border-radius:4px}.query-editor-container_focus{border:1px solid #3f51b5}.query-editor-container_error{border-color:#ff4141}.query-editor-container-one-line .query-editor .cm-editor{height:22px}.query-editor-container-one-line{padding:6px}.query-editor-label{font-weight:400;font-size:12px;line-height:1;letter-spacing:normal;color:rgba(0,0,0,.6);padding:0 5px;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;max-width:calc(133% - 24px);position:absolute;left:4px;top:-.71875em;z-index:1;background-color:#fff;-webkit-transform:scale(.75);transform:scale(.75)}.query-editor-container_error 
.query-editor-label{color:#ff4141}.u-tooltip{position:absolute;display:none;grid-gap:12px;max-width:300px;padding:8px;border-radius:4px;background:rgba(57,57,57,.9);color:#fff;font-size:10px;line-height:1.4em;font-weight:500;word-wrap:break-word;font-family:monospace;pointer-events:none;z-index:100}.u-tooltip-data{display:flex;flex-wrap:wrap;align-items:center;font-size:11px;line-height:150%}.u-tooltip-data__value{padding:4px;font-weight:700}.u-tooltip__info{display:grid;grid-gap:4px}.u-tooltip__marker{width:12px;height:12px;margin-right:4px}.legendWrapper{display:grid;grid-template-columns:repeat(auto-fit,minmax(400px,1fr));grid-gap:20px;margin-top:20px;cursor:default}.legendGroup{margin-bottom:24px}.legendGroupTitle{display:flex;align-items:center;padding:10px 0 5px;font-size:11px}.legendGroupLine{margin:0 10px}.legendItem{display:inline-grid;grid-template-columns:auto auto;grid-gap:6px;align-items:start;justify-content:start;padding:5px 10px;background-color:#fff;cursor:pointer;transition:.2s ease}.legendItemHide{text-decoration:line-through;opacity:.5}.legendItem:hover{background-color:rgba(0,0,0,.1)}.legendMarker{width:12px;height:12px;border-width:2px;border-style:solid;box-sizing:border-box;transition:.2s ease;margin:3px 0}.legendLabel{font-size:11px;font-weight:400}
|
File diff suppressed because one or more lines are too long
2
app/vmselect/vmui/static/js/2.ef1db8c8.chunk.js
Normal file
2
app/vmselect/vmui/static/js/2.ef1db8c8.chunk.js
Normal file
File diff suppressed because one or more lines are too long
|
@ -1 +1 @@
|
||||||
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{351:function(e,t,n){"use strict";n.r(t),n.d(t,"getCLS",(function(){return y})),n.d(t,"getFCP",(function(){return g})),n.d(t,"getFID",(function(){return C})),n.d(t,"getLCP",(function(){return k})),n.d(t,"getTTFB",(function(){return D}));var i,r,a,o,u=function(e,t){return{name:e,value:void 0===t?-1:t,delta:0,entries:[],id:"v2-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)}},c=function(e,t){try{if(PerformanceObserver.supportedEntryTypes.includes(e)){if("first-input"===e&&!("PerformanceEventTiming"in self))return;var n=new PerformanceObserver((function(e){return e.getEntries().map(t)}));return n.observe({type:e,buffered:!0}),n}}catch(e){}},f=function(e,t){var n=function n(i){"pagehide"!==i.type&&"hidden"!==document.visibilityState||(e(i),t&&(removeEventListener("visibilitychange",n,!0),removeEventListener("pagehide",n,!0)))};addEventListener("visibilitychange",n,!0),addEventListener("pagehide",n,!0)},s=function(e){addEventListener("pageshow",(function(t){t.persisted&&e(t)}),!0)},m=function(e,t,n){var i;return function(r){t.value>=0&&(r||n)&&(t.delta=t.value-(i||0),(t.delta||void 0===i)&&(i=t.value,e(t)))}},v=-1,p=function(){return"hidden"===document.visibilityState?0:1/0},d=function(){f((function(e){var t=e.timeStamp;v=t}),!0)},l=function(){return v<0&&(v=p(),d(),s((function(){setTimeout((function(){v=p(),d()}),0)}))),{get firstHiddenTime(){return v}}},g=function(e,t){var 
n,i=l(),r=u("FCP"),a=function(e){"first-contentful-paint"===e.name&&(f&&f.disconnect(),e.startTime<i.firstHiddenTime&&(r.value=e.startTime,r.entries.push(e),n(!0)))},o=window.performance&&performance.getEntriesByName&&performance.getEntriesByName("first-contentful-paint")[0],f=o?null:c("paint",a);(o||f)&&(n=m(e,r,t),o&&a(o),s((function(i){r=u("FCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,n(!0)}))}))})))},h=!1,T=-1,y=function(e,t){h||(g((function(e){T=e.value})),h=!0);var n,i=function(t){T>-1&&e(t)},r=u("CLS",0),a=0,o=[],v=function(e){if(!e.hadRecentInput){var t=o[0],i=o[o.length-1];a&&e.startTime-i.startTime<1e3&&e.startTime-t.startTime<5e3?(a+=e.value,o.push(e)):(a=e.value,o=[e]),a>r.value&&(r.value=a,r.entries=o,n())}},p=c("layout-shift",v);p&&(n=m(i,r,t),f((function(){p.takeRecords().map(v),n(!0)})),s((function(){a=0,T=-1,r=u("CLS",0),n=m(i,r,t)})))},E={passive:!0,capture:!0},w=new Date,L=function(e,t){i||(i=t,r=e,a=new Date,F(removeEventListener),S())},S=function(){if(r>=0&&r<a-w){var e={entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+r};o.forEach((function(t){t(e)})),o=[]}},b=function(e){if(e.cancelable){var t=(e.timeStamp>1e12?new Date:performance.now())-e.timeStamp;"pointerdown"==e.type?function(e,t){var n=function(){L(e,t),r()},i=function(){r()},r=function(){removeEventListener("pointerup",n,E),removeEventListener("pointercancel",i,E)};addEventListener("pointerup",n,E),addEventListener("pointercancel",i,E)}(t,e):L(t,e)}},F=function(e){["mousedown","keydown","touchstart","pointerdown"].forEach((function(t){return e(t,b,E)}))},C=function(e,t){var n,a=l(),v=u("FID"),p=function(e){e.startTime<a.firstHiddenTime&&(v.value=e.processingStart-e.startTime,v.entries.push(e),n(!0))},d=c("first-input",p);n=m(e,v,t),d&&f((function(){d.takeRecords().map(p),d.disconnect()}),!0),d&&s((function(){var 
a;v=u("FID"),n=m(e,v,t),o=[],r=-1,i=null,F(addEventListener),a=p,o.push(a),S()}))},P={},k=function(e,t){var n,i=l(),r=u("LCP"),a=function(e){var t=e.startTime;t<i.firstHiddenTime&&(r.value=t,r.entries.push(e)),n()},o=c("largest-contentful-paint",a);if(o){n=m(e,r,t);var v=function(){P[r.id]||(o.takeRecords().map(a),o.disconnect(),P[r.id]=!0,n(!0))};["keydown","click"].forEach((function(e){addEventListener(e,v,{once:!0,capture:!0})})),f(v,!0),s((function(i){r=u("LCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,P[r.id]=!0,n(!0)}))}))}))}},D=function(e){var t,n=u("TTFB");t=function(){try{var t=performance.getEntriesByType("navigation")[0]||function(){var e=performance.timing,t={entryType:"navigation",startTime:0};for(var n in e)"navigationStart"!==n&&"toJSON"!==n&&(t[n]=Math.max(e[n]-e.navigationStart,0));return t}();if(n.value=n.delta=t.responseStart,n.value<0||n.value>performance.now())return;n.entries=[t],e(n)}catch(e){}},"complete"===document.readyState?setTimeout(t,0):addEventListener("pageshow",t)}}}]);
|
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{356:function(e,t,n){"use strict";n.r(t),n.d(t,"getCLS",(function(){return y})),n.d(t,"getFCP",(function(){return g})),n.d(t,"getFID",(function(){return C})),n.d(t,"getLCP",(function(){return k})),n.d(t,"getTTFB",(function(){return D}));var i,r,a,o,u=function(e,t){return{name:e,value:void 0===t?-1:t,delta:0,entries:[],id:"v2-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)}},c=function(e,t){try{if(PerformanceObserver.supportedEntryTypes.includes(e)){if("first-input"===e&&!("PerformanceEventTiming"in self))return;var n=new PerformanceObserver((function(e){return e.getEntries().map(t)}));return n.observe({type:e,buffered:!0}),n}}catch(e){}},f=function(e,t){var n=function n(i){"pagehide"!==i.type&&"hidden"!==document.visibilityState||(e(i),t&&(removeEventListener("visibilitychange",n,!0),removeEventListener("pagehide",n,!0)))};addEventListener("visibilitychange",n,!0),addEventListener("pagehide",n,!0)},s=function(e){addEventListener("pageshow",(function(t){t.persisted&&e(t)}),!0)},m=function(e,t,n){var i;return function(r){t.value>=0&&(r||n)&&(t.delta=t.value-(i||0),(t.delta||void 0===i)&&(i=t.value,e(t)))}},v=-1,p=function(){return"hidden"===document.visibilityState?0:1/0},d=function(){f((function(e){var t=e.timeStamp;v=t}),!0)},l=function(){return v<0&&(v=p(),d(),s((function(){setTimeout((function(){v=p(),d()}),0)}))),{get firstHiddenTime(){return v}}},g=function(e,t){var 
n,i=l(),r=u("FCP"),a=function(e){"first-contentful-paint"===e.name&&(f&&f.disconnect(),e.startTime<i.firstHiddenTime&&(r.value=e.startTime,r.entries.push(e),n(!0)))},o=window.performance&&performance.getEntriesByName&&performance.getEntriesByName("first-contentful-paint")[0],f=o?null:c("paint",a);(o||f)&&(n=m(e,r,t),o&&a(o),s((function(i){r=u("FCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,n(!0)}))}))})))},h=!1,T=-1,y=function(e,t){h||(g((function(e){T=e.value})),h=!0);var n,i=function(t){T>-1&&e(t)},r=u("CLS",0),a=0,o=[],v=function(e){if(!e.hadRecentInput){var t=o[0],i=o[o.length-1];a&&e.startTime-i.startTime<1e3&&e.startTime-t.startTime<5e3?(a+=e.value,o.push(e)):(a=e.value,o=[e]),a>r.value&&(r.value=a,r.entries=o,n())}},p=c("layout-shift",v);p&&(n=m(i,r,t),f((function(){p.takeRecords().map(v),n(!0)})),s((function(){a=0,T=-1,r=u("CLS",0),n=m(i,r,t)})))},E={passive:!0,capture:!0},w=new Date,L=function(e,t){i||(i=t,r=e,a=new Date,F(removeEventListener),S())},S=function(){if(r>=0&&r<a-w){var e={entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+r};o.forEach((function(t){t(e)})),o=[]}},b=function(e){if(e.cancelable){var t=(e.timeStamp>1e12?new Date:performance.now())-e.timeStamp;"pointerdown"==e.type?function(e,t){var n=function(){L(e,t),r()},i=function(){r()},r=function(){removeEventListener("pointerup",n,E),removeEventListener("pointercancel",i,E)};addEventListener("pointerup",n,E),addEventListener("pointercancel",i,E)}(t,e):L(t,e)}},F=function(e){["mousedown","keydown","touchstart","pointerdown"].forEach((function(t){return e(t,b,E)}))},C=function(e,t){var n,a=l(),v=u("FID"),p=function(e){e.startTime<a.firstHiddenTime&&(v.value=e.processingStart-e.startTime,v.entries.push(e),n(!0))},d=c("first-input",p);n=m(e,v,t),d&&f((function(){d.takeRecords().map(p),d.disconnect()}),!0),d&&s((function(){var 
a;v=u("FID"),n=m(e,v,t),o=[],r=-1,i=null,F(addEventListener),a=p,o.push(a),S()}))},P={},k=function(e,t){var n,i=l(),r=u("LCP"),a=function(e){var t=e.startTime;t<i.firstHiddenTime&&(r.value=t,r.entries.push(e)),n()},o=c("largest-contentful-paint",a);if(o){n=m(e,r,t);var v=function(){P[r.id]||(o.takeRecords().map(a),o.disconnect(),P[r.id]=!0,n(!0))};["keydown","click"].forEach((function(e){addEventListener(e,v,{once:!0,capture:!0})})),f(v,!0),s((function(i){r=u("LCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,P[r.id]=!0,n(!0)}))}))}))}},D=function(e){var t,n=u("TTFB");t=function(){try{var t=performance.getEntriesByType("navigation")[0]||function(){var e=performance.timing,t={entryType:"navigation",startTime:0};for(var n in e)"navigationStart"!==n&&"toJSON"!==n&&(t[n]=Math.max(e[n]-e.navigationStart,0));return t}();if(n.value=n.delta=t.responseStart,n.value<0||n.value>performance.now())return;n.entries=[t],e(n)}catch(e){}},"complete"===document.readyState?setTimeout(t,0):addEventListener("pageshow",t)}}}]);
|
1
app/vmselect/vmui/static/js/main.6651c49c.chunk.js
Normal file
1
app/vmselect/vmui/static/js/main.6651c49c.chunk.js
Normal file
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -1 +1 @@
|
||||||
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],f=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(l&&l(r);p.length;)p.shift()();return u.push.apply(u,f||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"e51afffb"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(f);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var f=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return 
e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var f=0;f<a.length;f++)r(a[f]);var l=c;t()}([]);
|
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"65648506"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return 
e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([]);
|
|
@ -2,19 +2,6 @@
|
||||||
|
|
||||||
Web UI for VictoriaMetrics
|
Web UI for VictoriaMetrics
|
||||||
|
|
||||||
Features:
|
|
||||||
|
|
||||||
- configurable Server URL
|
|
||||||
- configurable time range - every variant have own resolution to show around 30 data points
|
|
||||||
- query editor has basic highlighting and can be multi-line
|
|
||||||
- chart is responsive by width
|
|
||||||
- color assignment for series is automatic
|
|
||||||
- legend with reduced naming
|
|
||||||
- tooltips for closest data point
|
|
||||||
- auto-refresh mode with several time interval presets
|
|
||||||
- table and raw JSON Query viewer
|
|
||||||
|
|
||||||
|
|
||||||
## Docker image build
|
## Docker image build
|
||||||
|
|
||||||
Run the following command from the root of VictoriaMetrics repository in order to build `victoriametrics/vmui` Docker image:
|
Run the following command from the root of VictoriaMetrics repository in order to build `victoriametrics/vmui` Docker image:
|
||||||
|
@ -65,4 +52,4 @@ Then run the built binary with the following command:
|
||||||
bin/victoria-metrics -selfScrapeInterval=5s
|
bin/victoria-metrics -selfScrapeInterval=5s
|
||||||
```
|
```
|
||||||
|
|
||||||
Then navigate to `http://localhost:8428/vmui/`
|
Then navigate to `http://localhost:8428/vmui/`. See [these docs](https://docs.victoriametrics.com/#vmui) for more details.
|
||||||
|
|
1053
app/vmui/packages/vmui/package-lock.json
generated
1053
app/vmui/packages/vmui/package-lock.json
generated
File diff suppressed because it is too large
Load diff
|
@ -6,25 +6,25 @@
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@codemirror/autocomplete": "^0.19.9",
|
"@codemirror/autocomplete": "^0.19.9",
|
||||||
"@codemirror/basic-setup": "^0.19.0",
|
"@codemirror/basic-setup": "^0.19.0",
|
||||||
"@codemirror/commands": "^0.19.5",
|
"@codemirror/commands": "^0.19.6",
|
||||||
"@codemirror/highlight": "^0.19.6",
|
"@codemirror/highlight": "^0.19.6",
|
||||||
"@codemirror/state": "^0.19.6",
|
"@codemirror/state": "^0.19.6",
|
||||||
"@codemirror/view": "^0.19.21",
|
"@codemirror/view": "^0.19.29",
|
||||||
"@date-io/dayjs": "^2.11.0",
|
"@date-io/dayjs": "^2.11.0",
|
||||||
"@emotion/react": "^11.7.0",
|
"@emotion/react": "^11.7.1",
|
||||||
"@emotion/styled": "^11.6.0",
|
"@emotion/styled": "^11.6.0",
|
||||||
"@mui/icons-material": "^5.2.0",
|
"@mui/icons-material": "^5.2.1",
|
||||||
"@mui/lab": "^5.0.0-alpha.58",
|
"@mui/lab": "^5.0.0-alpha.59",
|
||||||
"@mui/material": "^5.2.2",
|
"@mui/material": "^5.2.3",
|
||||||
"@mui/styles": "^5.2.2",
|
"@mui/styles": "^5.2.3",
|
||||||
"@testing-library/jest-dom": "^5.15.1",
|
"@testing-library/jest-dom": "^5.16.1",
|
||||||
"@testing-library/react": "^12.1.2",
|
"@testing-library/react": "^12.1.2",
|
||||||
"@testing-library/user-event": "^13.5.0",
|
"@testing-library/user-event": "^13.5.0",
|
||||||
"@types/jest": "^27.0.3",
|
"@types/jest": "^27.0.3",
|
||||||
"@types/lodash.debounce": "^4.0.6",
|
"@types/lodash.debounce": "^4.0.6",
|
||||||
"@types/lodash.get": "^4.4.6",
|
"@types/lodash.get": "^4.4.6",
|
||||||
"@types/lodash.throttle": "^4.1.6",
|
"@types/lodash.throttle": "^4.1.6",
|
||||||
"@types/node": "^16.11.10",
|
"@types/node": "^16.11.12",
|
||||||
"@types/numeral": "^2.0.2",
|
"@types/numeral": "^2.0.2",
|
||||||
"@types/qs": "^6.9.7",
|
"@types/qs": "^6.9.7",
|
||||||
"@types/react": "^17.0.37",
|
"@types/react": "^17.0.37",
|
||||||
|
@ -36,12 +36,13 @@
|
||||||
"lodash.get": "^4.4.2",
|
"lodash.get": "^4.4.2",
|
||||||
"lodash.throttle": "^4.1.1",
|
"lodash.throttle": "^4.1.1",
|
||||||
"numeral": "^2.0.6",
|
"numeral": "^2.0.6",
|
||||||
"qs": "^6.10.1",
|
"qs": "^6.10.2",
|
||||||
"react": "^17.0.2",
|
"react": "^17.0.2",
|
||||||
"react-dom": "^17.0.2",
|
"react-dom": "^17.0.2",
|
||||||
|
"react-draggable": "^4.4.4",
|
||||||
"react-measure": "^2.5.2",
|
"react-measure": "^2.5.2",
|
||||||
"react-scripts": "4.0.3",
|
"react-scripts": "4.0.3",
|
||||||
"typescript": "~4.5.2",
|
"typescript": "~4.5.3",
|
||||||
"uplot": "^1.6.17",
|
"uplot": "^1.6.17",
|
||||||
"web-vitals": "^2.1.2"
|
"web-vitals": "^2.1.2"
|
||||||
},
|
},
|
||||||
|
@ -73,8 +74,8 @@
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.0",
|
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.0",
|
||||||
"@typescript-eslint/eslint-plugin": "^5.4.0",
|
"@typescript-eslint/eslint-plugin": "^5.6.0",
|
||||||
"@typescript-eslint/parser": "^5.4.0",
|
"@typescript-eslint/parser": "^5.6.0",
|
||||||
"customize-cra": "^1.0.0",
|
"customize-cra": "^1.0.0",
|
||||||
"eslint-plugin-react": "^7.27.1",
|
"eslint-plugin-react": "^7.27.1",
|
||||||
"react-app-rewired": "^2.1.8"
|
"react-app-rewired": "^2.1.8"
|
||||||
|
|
|
@ -4,44 +4,15 @@ import HomeLayout from "./components/Home/HomeLayout";
|
||||||
import {StateProvider} from "./state/common/StateContext";
|
import {StateProvider} from "./state/common/StateContext";
|
||||||
import {AuthStateProvider} from "./state/auth/AuthStateContext";
|
import {AuthStateProvider} from "./state/auth/AuthStateContext";
|
||||||
import {GraphStateProvider} from "./state/graph/GraphStateContext";
|
import {GraphStateProvider} from "./state/graph/GraphStateContext";
|
||||||
import { ThemeProvider, Theme, StyledEngineProvider, createTheme } from "@mui/material/styles";
|
import { ThemeProvider, StyledEngineProvider } from "@mui/material/styles";
|
||||||
|
import THEME from "./theme/theme";
|
||||||
import CssBaseline from "@mui/material/CssBaseline";
|
import CssBaseline from "@mui/material/CssBaseline";
|
||||||
|
|
||||||
import LocalizationProvider from "@mui/lab/LocalizationProvider";
|
import LocalizationProvider from "@mui/lab/LocalizationProvider";
|
||||||
// pick a date util library
|
|
||||||
import DayjsUtils from "@date-io/dayjs";
|
import DayjsUtils from "@date-io/dayjs";
|
||||||
|
|
||||||
|
|
||||||
declare module "@mui/styles/defaultTheme" {
|
|
||||||
// eslint-disable-next-line @typescript-eslint/no-empty-interface
|
|
||||||
interface DefaultTheme extends Theme {}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
const App: FC = () => {
|
const App: FC = () => {
|
||||||
|
|
||||||
const THEME = createTheme({
|
|
||||||
palette: {
|
|
||||||
primary: {
|
|
||||||
main: "#3F51B5"
|
|
||||||
},
|
|
||||||
secondary: {
|
|
||||||
main: "#F50057"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
components: {
|
|
||||||
MuiSwitch: {
|
|
||||||
defaultProps: {
|
|
||||||
color: "secondary"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
typography: {
|
|
||||||
"fontSize": 10
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
return <>
|
return <>
|
||||||
<CssBaseline /> {/* CSS Baseline: kind of normalize.css made by materialUI team - can be scoped */}
|
<CssBaseline /> {/* CSS Baseline: kind of normalize.css made by materialUI team - can be scoped */}
|
||||||
<LocalizationProvider dateAdapter={DayjsUtils}> {/* Allows datepicker to work with DayJS */}
|
<LocalizationProvider dateAdapter={DayjsUtils}> {/* Allows datepicker to work with DayJS */}
|
||||||
|
|
|
@ -1,4 +1,5 @@
|
||||||
export interface MetricBase {
|
export interface MetricBase {
|
||||||
|
group: number;
|
||||||
metric: {
|
metric: {
|
||||||
[key: string]: string;
|
[key: string]: string;
|
||||||
};
|
};
|
||||||
|
|
|
@ -26,8 +26,8 @@ import TabPanel from "./AuthTabPanel";
|
||||||
import PersonIcon from "@mui/icons-material/Person";
|
import PersonIcon from "@mui/icons-material/Person";
|
||||||
import LockIcon from "@mui/icons-material/Lock";
|
import LockIcon from "@mui/icons-material/Lock";
|
||||||
import makeStyles from "@mui/styles/makeStyles";
|
import makeStyles from "@mui/styles/makeStyles";
|
||||||
import {useAuthDispatch, useAuthState} from "../../../state/auth/AuthStateContext";
|
import {useAuthDispatch, useAuthState} from "../../../../state/auth/AuthStateContext";
|
||||||
import {AUTH_METHOD, WithCheckbox} from "../../../state/auth/reducer";
|
import {AUTH_METHOD, WithCheckbox} from "../../../../state/auth/reducer";
|
||||||
|
|
||||||
// TODO: make generic when creating second dialog
|
// TODO: make generic when creating second dialog
|
||||||
export interface DialogProps {
|
export interface DialogProps {
|
|
@ -0,0 +1,42 @@
|
||||||
|
import React, {FC, useCallback, useMemo} from "react";
|
||||||
|
import {Box, FormControlLabel, TextField} from "@mui/material";
|
||||||
|
import {useGraphDispatch, useGraphState} from "../../../../state/graph/GraphStateContext";
|
||||||
|
import debounce from "lodash.debounce";
|
||||||
|
import BasicSwitch from "../../../../theme/switch";
|
||||||
|
|
||||||
|
const AxesLimitsConfigurator: FC = () => {
|
||||||
|
|
||||||
|
const { yaxis } = useGraphState();
|
||||||
|
const graphDispatch = useGraphDispatch();
|
||||||
|
const axes = useMemo(() => Object.keys(yaxis.limits.range), [yaxis.limits.range]);
|
||||||
|
|
||||||
|
const onChangeYaxisLimits = () => { graphDispatch({type: "TOGGLE_ENABLE_YAXIS_LIMITS"}); };
|
||||||
|
|
||||||
|
const onChangeLimit = (e: React.ChangeEvent<HTMLInputElement | HTMLTextAreaElement>, axis: string, index: number) => {
|
||||||
|
const newLimits = yaxis.limits.range;
|
||||||
|
newLimits[axis][index] = +e.target.value;
|
||||||
|
graphDispatch({type: "SET_YAXIS_LIMITS", payload: newLimits});
|
||||||
|
};
|
||||||
|
const debouncedOnChangeLimit = useCallback(debounce(onChangeLimit, 500), [yaxis.limits.range]);
|
||||||
|
|
||||||
|
return <Box display="grid" alignItems="center" gap={2}>
|
||||||
|
<FormControlLabel
|
||||||
|
control={<BasicSwitch checked={yaxis.limits.enable} onChange={onChangeYaxisLimits}/>}
|
||||||
|
label="Fix the limits for y-axis"
|
||||||
|
/>
|
||||||
|
<Box display="grid" alignItems="center" gap={2}>
|
||||||
|
{axes.map(axis => <Box display="grid" gridTemplateColumns="120px 120px" gap={1} key={axis}>
|
||||||
|
<TextField label={`Min ${axis}`} type="number" size="small" variant="outlined"
|
||||||
|
disabled={!yaxis.limits.enable}
|
||||||
|
defaultValue={yaxis.limits.range[axis][0]}
|
||||||
|
onChange={(e) => debouncedOnChangeLimit(e, axis, 0)}/>
|
||||||
|
<TextField label={`Max ${axis}`} type="number" size="small" variant="outlined"
|
||||||
|
disabled={!yaxis.limits.enable}
|
||||||
|
defaultValue={yaxis.limits.range[axis][1]}
|
||||||
|
onChange={(e) => debouncedOnChangeLimit(e, axis, 1)} />
|
||||||
|
</Box>)}
|
||||||
|
</Box>
|
||||||
|
</Box>;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default AxesLimitsConfigurator;
|
|
@ -0,0 +1,64 @@
|
||||||
|
import SettingsIcon from "@mui/icons-material/Settings";
|
||||||
|
import React, {FC, useState, useRef} from "react";
|
||||||
|
import AxesLimitsConfigurator from "./AxesLimitsConfigurator";
|
||||||
|
import {Box, Button, IconButton, Paper, Typography} from "@mui/material";
|
||||||
|
import Draggable from "react-draggable";
|
||||||
|
import makeStyles from "@mui/styles/makeStyles";
|
||||||
|
import CloseIcon from "@mui/icons-material/Close";
|
||||||
|
|
||||||
|
const useStyles = makeStyles({
|
||||||
|
popover: {
|
||||||
|
position: "absolute",
|
||||||
|
display: "grid",
|
||||||
|
gridGap: "16px",
|
||||||
|
padding: "0 0 25px",
|
||||||
|
zIndex: 2,
|
||||||
|
},
|
||||||
|
popoverHeader: {
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
justifyContent: "space-between",
|
||||||
|
background: "#3F51B5",
|
||||||
|
padding: "6px 6px 6px 12px",
|
||||||
|
borderRadius: "4px 4px 0 0",
|
||||||
|
color: "#FFF",
|
||||||
|
cursor: "move",
|
||||||
|
},
|
||||||
|
popoverBody: {
|
||||||
|
display: "grid",
|
||||||
|
gridGap: "6px",
|
||||||
|
padding: "0 14px",
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
const GraphSettings: FC = () => {
|
||||||
|
const [open, setOpen] = useState(false);
|
||||||
|
const draggableRef = useRef<HTMLDivElement>(null);
|
||||||
|
const position = { x: 173, y: 0 };
|
||||||
|
|
||||||
|
const classes = useStyles();
|
||||||
|
|
||||||
|
return <Box display="flex" px={2}>
|
||||||
|
<Button onClick={() => setOpen((old) => !old)} variant="outlined">
|
||||||
|
<SettingsIcon sx={{fontSize: 16, marginRight: "4px"}}/>
|
||||||
|
<span style={{lineHeight: 1, paddingTop: "1px"}}>{open ? "Hide" : "Show"} graph settings</span>
|
||||||
|
</Button>
|
||||||
|
{open && (
|
||||||
|
<Draggable nodeRef={draggableRef} defaultPosition={position} handle="#handle">
|
||||||
|
<Paper elevation={3} className={classes.popover} ref={draggableRef}>
|
||||||
|
<div id="handle" className={classes.popoverHeader}>
|
||||||
|
<Typography variant="body1"><b>Graph Settings</b></Typography>
|
||||||
|
<IconButton size="small" onClick={() => setOpen(false)}>
|
||||||
|
<CloseIcon style={{color: "white"}}/>
|
||||||
|
</IconButton>
|
||||||
|
</div>
|
||||||
|
<Box className={classes.popoverBody}>
|
||||||
|
<AxesLimitsConfigurator/>
|
||||||
|
</Box>
|
||||||
|
</Paper>
|
||||||
|
</Draggable>
|
||||||
|
)}
|
||||||
|
</Box>;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default GraphSettings;
|
|
@ -0,0 +1,40 @@
|
||||||
|
import React, {FC} from "react";
|
||||||
|
import {Box, FormControlLabel} from "@mui/material";
|
||||||
|
import {saveToStorage} from "../../../../utils/storage";
|
||||||
|
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
|
||||||
|
import BasicSwitch from "../../../../theme/switch";
|
||||||
|
import StepConfigurator from "./StepConfigurator";
|
||||||
|
|
||||||
|
const AdditionalSettings: FC = () => {
|
||||||
|
|
||||||
|
const {queryControls: {autocomplete, nocache}} = useAppState();
|
||||||
|
const dispatch = useAppDispatch();
|
||||||
|
|
||||||
|
const onChangeAutocomplete = () => {
|
||||||
|
dispatch({type: "TOGGLE_AUTOCOMPLETE"});
|
||||||
|
saveToStorage("AUTOCOMPLETE", !autocomplete);
|
||||||
|
};
|
||||||
|
|
||||||
|
const onChangeCache = () => {
|
||||||
|
dispatch({type: "NO_CACHE"});
|
||||||
|
saveToStorage("NO_CACHE", !nocache);
|
||||||
|
};
|
||||||
|
|
||||||
|
return <Box display="flex" alignItems="center">
|
||||||
|
<Box>
|
||||||
|
<FormControlLabel label="Enable autocomplete"
|
||||||
|
control={<BasicSwitch checked={autocomplete} onChange={onChangeAutocomplete}/>}
|
||||||
|
/>
|
||||||
|
</Box>
|
||||||
|
<Box ml={2}>
|
||||||
|
<FormControlLabel label="Enable cache"
|
||||||
|
control={<BasicSwitch checked={!nocache} onChange={onChangeCache}/>}
|
||||||
|
/>
|
||||||
|
</Box>
|
||||||
|
<Box ml={2}>
|
||||||
|
<StepConfigurator/>
|
||||||
|
</Box>
|
||||||
|
</Box>;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default AdditionalSettings;
|
|
@ -0,0 +1,137 @@
|
||||||
|
import React, {FC, useEffect, useRef, useState} from "react";
|
||||||
|
import {
|
||||||
|
Accordion, AccordionDetails, AccordionSummary, Box, Grid, IconButton, Typography, Tooltip, Button
|
||||||
|
} from "@mui/material";
|
||||||
|
import QueryEditor from "./QueryEditor";
|
||||||
|
import {TimeSelector} from "../Time/TimeSelector";
|
||||||
|
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
|
||||||
|
import ExpandMoreIcon from "@mui/icons-material/ExpandMore";
|
||||||
|
import HighlightOffIcon from "@mui/icons-material/HighlightOff";
|
||||||
|
import AddIcon from "@mui/icons-material/Add";
|
||||||
|
import PlayCircleOutlineIcon from "@mui/icons-material/PlayCircleOutline";
|
||||||
|
import Portal from "@mui/material/Portal";
|
||||||
|
import ServerConfigurator from "./ServerConfigurator";
|
||||||
|
import AdditionalSettings from "./AdditionalSettings";
|
||||||
|
import {ErrorTypes} from "../../../../types";
|
||||||
|
|
||||||
|
export interface QueryConfiguratorProps {
|
||||||
|
error?: ErrorTypes | string;
|
||||||
|
}
|
||||||
|
|
||||||
|
const QueryConfigurator: FC<QueryConfiguratorProps> = ({error}) => {
|
||||||
|
|
||||||
|
const {serverUrl, query, queryHistory, time: {duration}, queryControls: {autocomplete}} = useAppState();
|
||||||
|
const dispatch = useAppDispatch();
|
||||||
|
const [expanded, setExpanded] = useState(true);
|
||||||
|
const queryContainer = useRef<HTMLDivElement>(null);
|
||||||
|
const queryRef = useRef(query);
|
||||||
|
useEffect(() => {
|
||||||
|
queryRef.current = query;
|
||||||
|
}, [query]);
|
||||||
|
|
||||||
|
const onSetDuration = (dur: string) => dispatch({type: "SET_DURATION", payload: dur});
|
||||||
|
|
||||||
|
const updateHistory = () => {
|
||||||
|
dispatch({
|
||||||
|
type: "SET_QUERY_HISTORY", payload: query.map((q, i) => {
|
||||||
|
const h = queryHistory[i] || {values: []};
|
||||||
|
const queryEqual = q === h.values[h.values.length - 1];
|
||||||
|
return {
|
||||||
|
index: h.values.length - Number(queryEqual),
|
||||||
|
values: !queryEqual && q ? [...h.values, q] : h.values
|
||||||
|
};
|
||||||
|
})
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
const onRunQuery = () => {
|
||||||
|
updateHistory();
|
||||||
|
dispatch({type: "SET_QUERY", payload: query});
|
||||||
|
dispatch({type: "RUN_QUERY"});
|
||||||
|
};
|
||||||
|
|
||||||
|
const onAddQuery = () => dispatch({type: "SET_QUERY", payload: [...queryRef.current, ""]});
|
||||||
|
|
||||||
|
const onRemoveQuery = (index: number) => {
|
||||||
|
const newQuery = [...queryRef.current];
|
||||||
|
newQuery.splice(index, 1);
|
||||||
|
dispatch({type: "SET_QUERY", payload: newQuery});
|
||||||
|
};
|
||||||
|
|
||||||
|
const onSetQuery = (value: string, index: number) => {
|
||||||
|
const newQuery = [...queryRef.current];
|
||||||
|
newQuery[index] = value;
|
||||||
|
dispatch({type: "SET_QUERY", payload: newQuery});
|
||||||
|
};
|
||||||
|
|
||||||
|
const setHistoryIndex = (step: number, indexQuery: number) => {
|
||||||
|
const {index, values} = queryHistory[indexQuery];
|
||||||
|
const newIndexHistory = index + step;
|
||||||
|
if (newIndexHistory < 0 || newIndexHistory >= values.length) return;
|
||||||
|
onSetQuery(values[newIndexHistory] || "", indexQuery);
|
||||||
|
dispatch({
|
||||||
|
type: "SET_QUERY_HISTORY_BY_INDEX",
|
||||||
|
payload: {value: {values, index: newIndexHistory}, queryNumber: indexQuery}
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
return <>
|
||||||
|
<Accordion expanded={expanded} onChange={() => setExpanded(prev => !prev)}>
|
||||||
|
<AccordionSummary
|
||||||
|
expandIcon={<IconButton><ExpandMoreIcon/></IconButton>}
|
||||||
|
aria-controls="panel1a-content"
|
||||||
|
id="panel1a-header"
|
||||||
|
sx={{alignItems: "flex-start", padding: "15px"}}
|
||||||
|
>
|
||||||
|
<Box mr={2}>
|
||||||
|
<Typography variant="h6" component="h2">Query Configuration</Typography>
|
||||||
|
</Box>
|
||||||
|
<Box flexGrow={1} onClick={e => e.stopPropagation()} onFocusCapture={e => e.stopPropagation()}>
|
||||||
|
<Portal disablePortal={!expanded} container={queryContainer.current}>
|
||||||
|
{query.map((q, i) =>
|
||||||
|
<Box key={i} display="grid" gridTemplateColumns="1fr auto" gap="4px" width="100%"
|
||||||
|
mb={i === query.length - 1 ? 0 : 2}>
|
||||||
|
<QueryEditor server={serverUrl} query={query[i]} index={i} oneLiner={!expanded}
|
||||||
|
autocomplete={autocomplete} queryHistory={queryHistory[i]} error={error}
|
||||||
|
setHistoryIndex={setHistoryIndex} runQuery={onRunQuery}
|
||||||
|
setQuery={onSetQuery}/>
|
||||||
|
{i === 0 && <Tooltip title="Execute Query">
|
||||||
|
<IconButton onClick={onRunQuery}>
|
||||||
|
<PlayCircleOutlineIcon/>
|
||||||
|
</IconButton>
|
||||||
|
</Tooltip>}
|
||||||
|
{i > 0 && <Tooltip title="Remove Query">
|
||||||
|
<IconButton onClick={() => onRemoveQuery(i)}>
|
||||||
|
<HighlightOffIcon/>
|
||||||
|
</IconButton>
|
||||||
|
</Tooltip>}
|
||||||
|
</Box>)}
|
||||||
|
</Portal>
|
||||||
|
</Box>
|
||||||
|
</AccordionSummary>
|
||||||
|
<AccordionDetails>
|
||||||
|
<Grid container columnSpacing={2}>
|
||||||
|
<Grid item xs={6} minWidth={400}>
|
||||||
|
<ServerConfigurator error={error}/>
|
||||||
|
{/* for portal QueryEditor */}
|
||||||
|
<div ref={queryContainer}/>
|
||||||
|
{query.length < 2 && <Box display="inline-block" minHeight="40px" mt={2}>
|
||||||
|
<Button onClick={onAddQuery} variant="outlined">
|
||||||
|
<AddIcon sx={{fontSize: 16, marginRight: "4px"}}/>
|
||||||
|
<span style={{lineHeight: 1, paddingTop: "1px"}}>Query</span>
|
||||||
|
</Button>
|
||||||
|
</Box>}
|
||||||
|
</Grid>
|
||||||
|
<Grid item xs>
|
||||||
|
<TimeSelector setDuration={onSetDuration} duration={duration}/>
|
||||||
|
</Grid>
|
||||||
|
<Grid item xs={12} pt={1}>
|
||||||
|
<AdditionalSettings/>
|
||||||
|
</Grid>
|
||||||
|
</Grid>
|
||||||
|
</AccordionDetails>
|
||||||
|
</Accordion>
|
||||||
|
</>;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default QueryConfigurator;
|
|
@ -2,28 +2,41 @@ import {EditorState} from "@codemirror/state";
|
||||||
import {EditorView, keymap} from "@codemirror/view";
|
import {EditorView, keymap} from "@codemirror/view";
|
||||||
import {defaultKeymap} from "@codemirror/commands";
|
import {defaultKeymap} from "@codemirror/commands";
|
||||||
import React, {FC, useEffect, useRef, useState} from "react";
|
import React, {FC, useEffect, useRef, useState} from "react";
|
||||||
import { PromQLExtension } from "codemirror-promql";
|
import {PromQLExtension} from "codemirror-promql";
|
||||||
import { basicSetup } from "@codemirror/basic-setup";
|
import {basicSetup} from "@codemirror/basic-setup";
|
||||||
import {QueryHistory} from "../../../state/common/reducer";
|
import {QueryHistory} from "../../../../state/common/reducer";
|
||||||
|
import {ErrorTypes} from "../../../../types";
|
||||||
|
|
||||||
export interface QueryEditorProps {
|
export interface QueryEditorProps {
|
||||||
setHistoryIndex: (step: number) => void;
|
setHistoryIndex: (step: number, index: number) => void;
|
||||||
setQuery: (query: string) => void;
|
setQuery: (query: string, index: number) => void;
|
||||||
runQuery: () => void;
|
runQuery: () => void;
|
||||||
query: string;
|
query: string;
|
||||||
|
index: number;
|
||||||
queryHistory: QueryHistory;
|
queryHistory: QueryHistory;
|
||||||
server: string;
|
server: string;
|
||||||
oneLiner?: boolean;
|
oneLiner?: boolean;
|
||||||
autocomplete: boolean
|
autocomplete: boolean;
|
||||||
|
error?: ErrorTypes | string;
|
||||||
}
|
}
|
||||||
|
|
||||||
const QueryEditor: FC<QueryEditorProps> = ({
|
const QueryEditor: FC<QueryEditorProps> = ({
|
||||||
query, queryHistory, setHistoryIndex, setQuery, runQuery, server, oneLiner = false, autocomplete
|
index,
|
||||||
|
query,
|
||||||
|
queryHistory,
|
||||||
|
setHistoryIndex,
|
||||||
|
setQuery,
|
||||||
|
runQuery,
|
||||||
|
server,
|
||||||
|
oneLiner = false,
|
||||||
|
autocomplete,
|
||||||
|
error
|
||||||
}) => {
|
}) => {
|
||||||
|
|
||||||
const ref = useRef<HTMLDivElement>(null);
|
const ref = useRef<HTMLDivElement>(null);
|
||||||
|
|
||||||
const [editorView, setEditorView] = useState<EditorView>();
|
const [editorView, setEditorView] = useState<EditorView>();
|
||||||
|
const [focusEditor, setFocusEditor] = useState(false);
|
||||||
|
|
||||||
// init editor view on load
|
// init editor view on load
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
|
@ -41,11 +54,14 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
const promQL = new PromQLExtension();
|
const promQL = new PromQLExtension();
|
||||||
promQL.activateCompletion(autocomplete);
|
promQL.activateCompletion(autocomplete);
|
||||||
promQL.setComplete({ remote: { url: server } });
|
promQL.setComplete({remote: {url: server}});
|
||||||
|
|
||||||
const listenerExtension = EditorView.updateListener.of(editorUpdate => {
|
const listenerExtension = EditorView.updateListener.of(editorUpdate => {
|
||||||
|
if (editorUpdate.focusChanged) {
|
||||||
|
setFocusEditor(editorView?.hasFocus || false);
|
||||||
|
}
|
||||||
if (editorUpdate.docChanged) {
|
if (editorUpdate.docChanged) {
|
||||||
setQuery(editorUpdate.state.doc.toJSON().map(el => el.trim()).join(""));
|
setQuery(editorUpdate.state.doc.toJSON().map(el => el.trim()).join(""), index);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -66,18 +82,20 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||||
if (key === "Enter" && ctrlMetaKey) {
|
if (key === "Enter" && ctrlMetaKey) {
|
||||||
runQuery();
|
runQuery();
|
||||||
} else if (key === "ArrowUp" && ctrlMetaKey) {
|
} else if (key === "ArrowUp" && ctrlMetaKey) {
|
||||||
setHistoryIndex(-1);
|
setHistoryIndex(-1, index);
|
||||||
} else if (key === "ArrowDown" && ctrlMetaKey) {
|
} else if (key === "ArrowDown" && ctrlMetaKey) {
|
||||||
setHistoryIndex(1);
|
setHistoryIndex(1, index);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
return (
|
return <div className={`query-editor-container
|
||||||
<>
|
${focusEditor ? "query-editor-container_focus" : ""}
|
||||||
|
query-editor-container-${oneLiner ? "one-line" : "multi-line"}
|
||||||
|
${error === ErrorTypes.validQuery ? "query-editor-container_error" : ""}`}>
|
||||||
{/*Class one-line-scroll and other codemirror styles are declared in index.css*/}
|
{/*Class one-line-scroll and other codemirror styles are declared in index.css*/}
|
||||||
<div ref={ref} className={oneLiner ? "one-line-scroll" : "multi-line-scroll"} onKeyUp={onKeyUp}/>
|
<label className="query-editor-label">Query</label>
|
||||||
</>
|
<div className="query-editor" ref={ref} onKeyUp={onKeyUp}/>
|
||||||
);
|
</div>;
|
||||||
};
|
};
|
||||||
|
|
||||||
export default QueryEditor;
|
export default QueryEditor;
|
|
@ -0,0 +1,40 @@
|
||||||
|
import React, {FC, useState} from "react";
|
||||||
|
import {Box, TextField, Tooltip, IconButton} from "@mui/material";
|
||||||
|
import SecurityIcon from "@mui/icons-material/Security";
|
||||||
|
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
|
||||||
|
import {AuthDialog} from "../Auth/AuthDialog";
|
||||||
|
import {ErrorTypes} from "../../../../types";
|
||||||
|
|
||||||
|
export interface ServerConfiguratorProps {
|
||||||
|
error?: ErrorTypes | string;
|
||||||
|
}
|
||||||
|
|
||||||
|
const ServerConfigurator: FC<ServerConfiguratorProps> = ({error}) => {
|
||||||
|
|
||||||
|
const {serverUrl} = useAppState();
|
||||||
|
const dispatch = useAppDispatch();
|
||||||
|
|
||||||
|
const onSetServer = ({target: {value}}: {target: {value: string}}) => {
|
||||||
|
dispatch({type: "SET_SERVER", payload: value});
|
||||||
|
};
|
||||||
|
const [dialogOpen, setDialogOpen] = useState(false);
|
||||||
|
|
||||||
|
return <>
|
||||||
|
<Box display="grid" gridTemplateColumns="1fr auto" gap="4px" alignItems="center" width="100%" mb={2} minHeight={50}>
|
||||||
|
<TextField variant="outlined" fullWidth label="Server URL" value={serverUrl}
|
||||||
|
error={error === ErrorTypes.validServer || error === ErrorTypes.emptyServer}
|
||||||
|
inputProps={{style: {fontFamily: "Monospace"}}}
|
||||||
|
onChange={onSetServer}/>
|
||||||
|
<Box>
|
||||||
|
<Tooltip title="Request Auth Settings">
|
||||||
|
<IconButton onClick={() => setDialogOpen(true)}>
|
||||||
|
<SecurityIcon/>
|
||||||
|
</IconButton>
|
||||||
|
</Tooltip>
|
||||||
|
</Box>
|
||||||
|
</Box>
|
||||||
|
<AuthDialog open={dialogOpen} onClose={() => setDialogOpen(false)}/>
|
||||||
|
</>;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default ServerConfigurator;
|
|
@ -0,0 +1,54 @@
|
||||||
|
import React, {FC, useCallback, useEffect, useState} from "react";
|
||||||
|
import {Box, FormControlLabel, TextField} from "@mui/material";
|
||||||
|
import BasicSwitch from "../../../../theme/switch";
|
||||||
|
import {useGraphDispatch, useGraphState} from "../../../../state/graph/GraphStateContext";
|
||||||
|
import {useAppState} from "../../../../state/common/StateContext";
|
||||||
|
import debounce from "lodash.debounce";
|
||||||
|
|
||||||
|
const StepConfigurator: FC = () => {
|
||||||
|
const {customStep} = useGraphState();
|
||||||
|
const graphDispatch = useGraphDispatch();
|
||||||
|
const [error, setError] = useState(false);
|
||||||
|
const {time: {period: {step}, duration}} = useAppState();
|
||||||
|
|
||||||
|
const onChangeStep = (e: React.ChangeEvent<HTMLInputElement | HTMLTextAreaElement>) => {
|
||||||
|
const value = +e.target.value;
|
||||||
|
if (value > 0) {
|
||||||
|
graphDispatch({type: "SET_CUSTOM_STEP", payload: value});
|
||||||
|
setError(false);
|
||||||
|
} else {
|
||||||
|
setError(true);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const debouncedOnChangeStep = useCallback(debounce(onChangeStep, 500), [customStep.value]);
|
||||||
|
|
||||||
|
const onChangeEnableStep = () => {
|
||||||
|
setError(false);
|
||||||
|
graphDispatch({type: "TOGGLE_CUSTOM_STEP"});
|
||||||
|
};
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (customStep.enable) onChangeEnableStep();
|
||||||
|
}, [duration]);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (!customStep.enable) graphDispatch({type: "SET_CUSTOM_STEP", payload: step || 1});
|
||||||
|
}, [step]);
|
||||||
|
|
||||||
|
return <Box display="grid" gridTemplateColumns="auto 120px" alignItems="center">
|
||||||
|
<FormControlLabel
|
||||||
|
control={<BasicSwitch checked={customStep.enable} onChange={onChangeEnableStep}/>}
|
||||||
|
label="Override step value"
|
||||||
|
/>
|
||||||
|
{customStep.enable &&
|
||||||
|
<TextField label="Step value" type="number" size="small" variant="outlined"
|
||||||
|
defaultValue={customStep.value}
|
||||||
|
error={error}
|
||||||
|
helperText={error ? "step is out of allowed range" : " "}
|
||||||
|
onChange={debouncedOnChangeStep}/>
|
||||||
|
}
|
||||||
|
</Box>;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default StepConfigurator;
|
|
@ -1,26 +1,28 @@
|
||||||
import {useEffect, useMemo, useState} from "react";
|
import {useEffect, useMemo, useState} from "react";
|
||||||
import {getQueryRangeUrl, getQueryUrl} from "../../../api/query-range";
|
import {getQueryRangeUrl, getQueryUrl} from "../../../../api/query-range";
|
||||||
import {useAppState} from "../../../state/common/StateContext";
|
import {useAppState} from "../../../../state/common/StateContext";
|
||||||
import {InstantMetricResult, MetricResult} from "../../../api/types";
|
import {InstantMetricResult, MetricBase, MetricResult} from "../../../../api/types";
|
||||||
import {isValidHttpUrl} from "../../../utils/url";
|
import {isValidHttpUrl} from "../../../../utils/url";
|
||||||
import {useAuthState} from "../../../state/auth/AuthStateContext";
|
import {useAuthState} from "../../../../state/auth/AuthStateContext";
|
||||||
import {TimeParams} from "../../../types";
|
import {ErrorTypes, TimeParams} from "../../../../types";
|
||||||
|
import {useGraphState} from "../../../../state/graph/GraphStateContext";
|
||||||
|
|
||||||
export const useFetchQuery = (): {
|
export const useFetchQuery = (): {
|
||||||
fetchUrl?: string,
|
fetchUrl?: string[],
|
||||||
isLoading: boolean,
|
isLoading: boolean,
|
||||||
graphData?: MetricResult[],
|
graphData?: MetricResult[],
|
||||||
liveData?: InstantMetricResult[],
|
liveData?: InstantMetricResult[],
|
||||||
error?: string,
|
error?: ErrorTypes | string,
|
||||||
} => {
|
} => {
|
||||||
const {query, displayType, serverUrl, time: {period}, queryControls: {nocache}} = useAppState();
|
const {query, displayType, serverUrl, time: {period}, queryControls: {nocache}} = useAppState();
|
||||||
|
|
||||||
const {basicData, bearerData, authMethod} = useAuthState();
|
const {basicData, bearerData, authMethod} = useAuthState();
|
||||||
|
const {customStep} = useGraphState();
|
||||||
|
|
||||||
const [isLoading, setIsLoading] = useState(false);
|
const [isLoading, setIsLoading] = useState(false);
|
||||||
const [graphData, setGraphData] = useState<MetricResult[]>();
|
const [graphData, setGraphData] = useState<MetricResult[]>();
|
||||||
const [liveData, setLiveData] = useState<InstantMetricResult[]>();
|
const [liveData, setLiveData] = useState<InstantMetricResult[]>();
|
||||||
const [error, setError] = useState<string>();
|
const [error, setError] = useState<ErrorTypes | string>();
|
||||||
const [prevPeriod, setPrevPeriod] = useState<TimeParams>();
|
const [prevPeriod, setPrevPeriod] = useState<TimeParams>();
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
|
@ -40,7 +42,7 @@ export const useFetchQuery = (): {
|
||||||
}, [period]);
|
}, [period]);
|
||||||
|
|
||||||
const fetchData = async () => {
|
const fetchData = async () => {
|
||||||
if (!fetchUrl) return;
|
if (!fetchUrl?.length) return;
|
||||||
setIsLoading(true);
|
setIsLoading(true);
|
||||||
setPrevPeriod(period);
|
setPrevPeriod(period);
|
||||||
|
|
||||||
|
@ -53,16 +55,25 @@ export const useFetchQuery = (): {
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const response = await fetch(fetchUrl, { headers });
|
const responses = await Promise.all(fetchUrl.map(url => fetch(url, {headers})));
|
||||||
if (response.ok) {
|
const tempData = [];
|
||||||
|
let counter = 1;
|
||||||
|
for await (const response of responses) {
|
||||||
const resp = await response.json();
|
const resp = await response.json();
|
||||||
|
if (response.ok) {
|
||||||
setError(undefined);
|
setError(undefined);
|
||||||
displayType === "chart" ? setGraphData(resp.data.result) : setLiveData(resp.data.result);
|
tempData.push(...resp.data.result.map((d: MetricBase) => {
|
||||||
|
d.group = counter;
|
||||||
|
return d;
|
||||||
|
}));
|
||||||
|
counter++;
|
||||||
} else {
|
} else {
|
||||||
setError((await response.json())?.error);
|
setError(`${resp.errorType}\r\n${resp?.error}`);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
displayType === "chart" ? setGraphData(tempData) : setLiveData(tempData);
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
if (e instanceof Error) setError(e.message);
|
if (e instanceof Error) setError(`${e.name}: ${e.message}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
setIsLoading(false);
|
setIsLoading(false);
|
||||||
|
@ -71,20 +82,21 @@ export const useFetchQuery = (): {
|
||||||
const fetchUrl = useMemo(() => {
|
const fetchUrl = useMemo(() => {
|
||||||
if (!period) return;
|
if (!period) return;
|
||||||
if (!serverUrl) {
|
if (!serverUrl) {
|
||||||
setError("Please enter Server URL");
|
setError(ErrorTypes.emptyServer);
|
||||||
} else if (!query.trim()) {
|
} else if (query.every(q => !q.trim())) {
|
||||||
setError("Please enter a valid Query and execute it");
|
setError(ErrorTypes.validQuery);
|
||||||
} else if (isValidHttpUrl(serverUrl)) {
|
} else if (isValidHttpUrl(serverUrl)) {
|
||||||
const duration = (period.end - period.start) / 2;
|
const duration = (period.end - period.start) / 2;
|
||||||
const bufferPeriod = {...period, start: period.start - duration, end: period.end + duration};
|
const bufferPeriod = {...period, start: period.start - duration, end: period.end + duration};
|
||||||
return displayType === "chart"
|
if (customStep.enable) bufferPeriod.step = customStep.value;
|
||||||
? getQueryRangeUrl(serverUrl, query, bufferPeriod, nocache)
|
return query.filter(q => q.trim()).map(q => displayType === "chart"
|
||||||
: getQueryUrl(serverUrl, query, period);
|
? getQueryRangeUrl(serverUrl, q, bufferPeriod, nocache)
|
||||||
|
: getQueryUrl(serverUrl, q, period));
|
||||||
} else {
|
} else {
|
||||||
setError("Please provide a valid URL");
|
setError(ErrorTypes.validServer);
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
[serverUrl, period, displayType]);
|
[serverUrl, period, displayType, customStep]);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
setPrevPeriod(undefined);
|
setPrevPeriod(undefined);
|
||||||
|
@ -94,7 +106,7 @@ export const useFetchQuery = (): {
|
||||||
// Doing it on each query change - looks to be a bad idea. Probably can be done on blur
|
// Doing it on each query change - looks to be a bad idea. Probably can be done on blur
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
fetchData();
|
fetchData();
|
||||||
}, [serverUrl, displayType]);
|
}, [serverUrl, displayType, customStep]);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (needUpdateData) {
|
if (needUpdateData) {
|
|
@ -1,148 +0,0 @@
|
||||||
import React, {FC, useRef, useState} from "react";
|
|
||||||
import { Accordion, AccordionDetails, AccordionSummary, Box, Grid, IconButton, TextField, Typography, FormControlLabel,
|
|
||||||
Tooltip, Switch } from "@mui/material";
|
|
||||||
import QueryEditor from "./QueryEditor";
|
|
||||||
import {TimeSelector} from "./TimeSelector";
|
|
||||||
import {useAppDispatch, useAppState} from "../../../state/common/StateContext";
|
|
||||||
import ExpandMoreIcon from "@mui/icons-material/ExpandMore";
|
|
||||||
import SecurityIcon from "@mui/icons-material/Security";
|
|
||||||
import {AuthDialog} from "./AuthDialog";
|
|
||||||
import PlayCircleOutlineIcon from "@mui/icons-material/PlayCircleOutline";
|
|
||||||
import Portal from "@mui/material/Portal";
|
|
||||||
import {saveToStorage} from "../../../utils/storage";
|
|
||||||
import {useGraphDispatch, useGraphState} from "../../../state/graph/GraphStateContext";
|
|
||||||
import debounce from "lodash.debounce";
|
|
||||||
|
|
||||||
const QueryConfigurator: FC = () => {
|
|
||||||
const {serverUrl, query, queryHistory, time: {duration}, queryControls: {autocomplete, nocache}} = useAppState();
|
|
||||||
const dispatch = useAppDispatch();
|
|
||||||
|
|
||||||
const onChangeAutocomplete = () => {
|
|
||||||
dispatch({type: "TOGGLE_AUTOCOMPLETE"});
|
|
||||||
saveToStorage("AUTOCOMPLETE", !autocomplete);
|
|
||||||
};
|
|
||||||
const onChangeCache = () => {
|
|
||||||
dispatch({type: "NO_CACHE"});
|
|
||||||
saveToStorage("NO_CACHE", !nocache);
|
|
||||||
};
|
|
||||||
|
|
||||||
const { yaxis } = useGraphState();
|
|
||||||
const graphDispatch = useGraphDispatch();
|
|
||||||
|
|
||||||
const onChangeYaxisLimits = () => { graphDispatch({type: "TOGGLE_ENABLE_YAXIS_LIMITS"}); };
|
|
||||||
|
|
||||||
const setMinLimit = ({target: {value}}: {target: {value: string}}) => {
|
|
||||||
graphDispatch({type: "SET_YAXIS_LIMITS", payload: [+value, yaxis.limits.range[1]]});
|
|
||||||
};
|
|
||||||
const setMaxLimit = ({target: {value}}: {target: {value: string}}) => {
|
|
||||||
graphDispatch({type: "SET_YAXIS_LIMITS", payload: [yaxis.limits.range[0], +value]});
|
|
||||||
};
|
|
||||||
|
|
||||||
const [dialogOpen, setDialogOpen] = useState(false);
|
|
||||||
const [expanded, setExpanded] = useState(true);
|
|
||||||
|
|
||||||
const queryContainer = useRef<HTMLDivElement>(null);
|
|
||||||
|
|
||||||
const onSetDuration = (dur: string) => dispatch({type: "SET_DURATION", payload: dur});
|
|
||||||
|
|
||||||
const onRunQuery = () => {
|
|
||||||
const { values } = queryHistory;
|
|
||||||
dispatch({type: "RUN_QUERY"});
|
|
||||||
if (query === values[values.length - 1]) return;
|
|
||||||
dispatch({type: "SET_QUERY_HISTORY_INDEX", payload: values.length});
|
|
||||||
dispatch({type: "SET_QUERY_HISTORY_VALUES", payload: [...values, query]});
|
|
||||||
};
|
|
||||||
const onSetQuery = (newQuery: string) => {
|
|
||||||
if (query === newQuery) return;
|
|
||||||
dispatch({type: "SET_QUERY", payload: newQuery});
|
|
||||||
};
|
|
||||||
const setHistoryIndex = (step: number) => {
|
|
||||||
const index = queryHistory.index + step;
|
|
||||||
if (index < -1 || index > queryHistory.values.length) return;
|
|
||||||
dispatch({type: "SET_QUERY_HISTORY_INDEX", payload: index});
|
|
||||||
onSetQuery(queryHistory.values[index] || "");
|
|
||||||
};
|
|
||||||
const onSetServer = ({target: {value}}: {target: {value: string}}) => {
|
|
||||||
dispatch({type: "SET_SERVER", payload: value});
|
|
||||||
};
|
|
||||||
|
|
||||||
return <>
|
|
||||||
<Accordion expanded={expanded} onChange={() => setExpanded(prev => !prev)}>
|
|
||||||
<AccordionSummary
|
|
||||||
expandIcon={<ExpandMoreIcon/>}
|
|
||||||
aria-controls="panel1a-content"
|
|
||||||
id="panel1a-header"
|
|
||||||
>
|
|
||||||
<Box display="flex" alignItems="center" mr={2}><Typography variant="h6" component="h2">Query Configuration</Typography></Box>
|
|
||||||
<Box flexGrow={1} onClick={e => e.stopPropagation()} onFocusCapture={e => e.stopPropagation()}>
|
|
||||||
<Portal disablePortal={!expanded} container={queryContainer.current}>
|
|
||||||
<Box display="flex" alignItems="center">
|
|
||||||
<Box width="100%">
|
|
||||||
<QueryEditor server={serverUrl} query={query} oneLiner={!expanded} autocomplete={autocomplete}
|
|
||||||
queryHistory={queryHistory} setHistoryIndex={setHistoryIndex} runQuery={onRunQuery} setQuery={onSetQuery}/>
|
|
||||||
</Box>
|
|
||||||
<Tooltip title="Execute Query">
|
|
||||||
<IconButton onClick={onRunQuery} size="large"><PlayCircleOutlineIcon /></IconButton>
|
|
||||||
</Tooltip>
|
|
||||||
</Box>
|
|
||||||
</Portal>
|
|
||||||
</Box>
|
|
||||||
</AccordionSummary>
|
|
||||||
<AccordionDetails>
|
|
||||||
<Grid container spacing={2}>
|
|
||||||
<Grid item xs={12} md={6}>
|
|
||||||
<Box display="grid" gap={2} gridTemplateRows="auto 1fr">
|
|
||||||
<Box display="flex" alignItems="center">
|
|
||||||
<TextField variant="outlined" fullWidth label="Server URL" value={serverUrl}
|
|
||||||
inputProps={{style: {fontFamily: "Monospace"}}}
|
|
||||||
onChange={onSetServer}/>
|
|
||||||
<Box>
|
|
||||||
<Tooltip title="Request Auth Settings">
|
|
||||||
<IconButton onClick={() => setDialogOpen(true)} size="large"><SecurityIcon/></IconButton>
|
|
||||||
</Tooltip>
|
|
||||||
</Box>
|
|
||||||
</Box>
|
|
||||||
<Box flexGrow={1} ><div ref={queryContainer} />{/* for portal QueryEditor */}</Box>
|
|
||||||
</Box>
|
|
||||||
</Grid>
|
|
||||||
<Grid item xs={8} md={6} >
|
|
||||||
<Box style={{
|
|
||||||
minHeight: "128px",
|
|
||||||
padding: "10px 0",
|
|
||||||
borderRadius: "4px",
|
|
||||||
borderColor: "#b9b9b9",
|
|
||||||
borderStyle: "solid",
|
|
||||||
borderWidth: "1px"}}>
|
|
||||||
<TimeSelector setDuration={onSetDuration} duration={duration}/>
|
|
||||||
</Box>
|
|
||||||
</Grid>
|
|
||||||
<Grid item xs={12}>
|
|
||||||
<Box px={1} display="flex" alignItems="center" minHeight={52}>
|
|
||||||
<Box><FormControlLabel label="Enable autocomplete"
|
|
||||||
control={<Switch size="small" checked={autocomplete} onChange={onChangeAutocomplete}/>}
|
|
||||||
/></Box>
|
|
||||||
<Box ml={2}><FormControlLabel label="Enable cache"
|
|
||||||
control={<Switch size="small" checked={!nocache} onChange={onChangeCache}/>}
|
|
||||||
/></Box>
|
|
||||||
<Box ml={2} display="flex" alignItems="center">
|
|
||||||
<FormControlLabel
|
|
||||||
control={<Switch size="small" checked={yaxis.limits.enable} onChange={onChangeYaxisLimits}/>}
|
|
||||||
label="Fix the limits for y-axis"
|
|
||||||
/>
|
|
||||||
{yaxis.limits.enable && <Box display="grid" gridTemplateColumns="120px 120px" gap={1}>
|
|
||||||
<TextField label="Min" type="number" size="small" variant="outlined"
|
|
||||||
defaultValue={yaxis.limits.range[0]} onChange={debounce(setMinLimit, 750)}/>
|
|
||||||
<TextField label="Max" type="number" size="small" variant="outlined"
|
|
||||||
defaultValue={yaxis.limits.range[1]} onChange={debounce(setMaxLimit, 750)}/>
|
|
||||||
</Box>}
|
|
||||||
</Box>
|
|
||||||
</Box>
|
|
||||||
</Grid>
|
|
||||||
</Grid>
|
|
||||||
</AccordionDetails>
|
|
||||||
</Accordion>
|
|
||||||
<AuthDialog open={dialogOpen} onClose={() => setDialogOpen(false)}/>
|
|
||||||
</>;
|
|
||||||
};
|
|
||||||
|
|
||||||
export default QueryConfigurator;
|
|
|
@ -1,9 +1,10 @@
|
||||||
import React, {FC, useEffect, useState} from "react";
|
import React, {FC, useEffect, useState} from "react";
|
||||||
import {Box, FormControlLabel, IconButton, Switch, Tooltip} from "@mui/material";
|
import {Box, FormControlLabel, IconButton, Tooltip} from "@mui/material";
|
||||||
import EqualizerIcon from "@mui/icons-material/Equalizer";
|
import EqualizerIcon from "@mui/icons-material/Equalizer";
|
||||||
import {useAppDispatch, useAppState} from "../../../state/common/StateContext";
|
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
|
||||||
import CircularProgressWithLabel from "../../common/CircularProgressWithLabel";
|
import CircularProgressWithLabel from "../../../common/CircularProgressWithLabel";
|
||||||
import makeStyles from "@mui/styles/makeStyles";
|
import makeStyles from "@mui/styles/makeStyles";
|
||||||
|
import BasicSwitch from "../../../../theme/switch";
|
||||||
|
|
||||||
const useStyles = makeStyles({
|
const useStyles = makeStyles({
|
||||||
colorizing: {
|
colorizing: {
|
||||||
|
@ -69,7 +70,7 @@ export const ExecutionControls: FC = () => {
|
||||||
|
|
||||||
return <Box display="flex" alignItems="center">
|
return <Box display="flex" alignItems="center">
|
||||||
{<FormControlLabel
|
{<FormControlLabel
|
||||||
control={<Switch size="small" className={classes.colorizing} checked={autoRefresh} onChange={handleChange} />}
|
control={<BasicSwitch className={classes.colorizing} checked={autoRefresh} onChange={handleChange} />}
|
||||||
label="Auto-refresh"
|
label="Auto-refresh"
|
||||||
/>}
|
/>}
|
||||||
|
|
||||||
|
@ -78,7 +79,9 @@ export const ExecutionControls: FC = () => {
|
||||||
onClick={() => {iterateDelays();}} />
|
onClick={() => {iterateDelays();}} />
|
||||||
<Tooltip title="Change delay refresh">
|
<Tooltip title="Change delay refresh">
|
||||||
<Box ml={1}>
|
<Box ml={1}>
|
||||||
<IconButton onClick={() => {iterateDelays();}} size="large"><EqualizerIcon style={{color: "white"}} /></IconButton>
|
<IconButton onClick={() => {iterateDelays();}}>
|
||||||
|
<EqualizerIcon style={{color: "white"}} />
|
||||||
|
</IconButton>
|
||||||
</Box>
|
</Box>
|
||||||
</Tooltip>
|
</Tooltip>
|
||||||
</>}
|
</>}
|
|
@ -1,6 +1,6 @@
|
||||||
import React, {FC} from "react";
|
import React, {FC} from "react";
|
||||||
import {Paper, Table, TableBody, TableCell, TableContainer, TableHead, TableRow} from "@mui/material";
|
import {Paper, Table, TableBody, TableCell, TableContainer, TableHead, TableRow} from "@mui/material";
|
||||||
import {supportedDurations} from "../../../utils/time";
|
import {supportedDurations} from "../../../../utils/time";
|
||||||
|
|
||||||
export const TimeDurationPopover: FC = () => {
|
export const TimeDurationPopover: FC = () => {
|
||||||
|
|
|
@ -2,17 +2,33 @@ import React, {FC, useEffect, useState} from "react";
|
||||||
import {Box, Popover, TextField, Typography} from "@mui/material";
|
import {Box, Popover, TextField, Typography} from "@mui/material";
|
||||||
import DateTimePicker from "@mui/lab/DateTimePicker";
|
import DateTimePicker from "@mui/lab/DateTimePicker";
|
||||||
import {TimeDurationPopover} from "./TimeDurationPopover";
|
import {TimeDurationPopover} from "./TimeDurationPopover";
|
||||||
import {useAppDispatch, useAppState} from "../../../state/common/StateContext";
|
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
|
||||||
import {checkDurationLimit, dateFromSeconds, formatDateForNativeInput} from "../../../utils/time";
|
import {checkDurationLimit, dateFromSeconds, formatDateForNativeInput} from "../../../../utils/time";
|
||||||
import {InlineBtn} from "../../common/InlineBtn";
|
import {InlineBtn} from "../../../common/InlineBtn";
|
||||||
|
import makeStyles from "@mui/styles/makeStyles";
|
||||||
|
|
||||||
interface TimeSelectorProps {
|
interface TimeSelectorProps {
|
||||||
setDuration: (str: string) => void;
|
setDuration: (str: string) => void;
|
||||||
duration: string;
|
duration: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const useStyles = makeStyles({
|
||||||
|
container: {
|
||||||
|
display: "grid",
|
||||||
|
gridTemplateColumns: "auto auto",
|
||||||
|
height: "100%",
|
||||||
|
padding: "18px 14px",
|
||||||
|
borderRadius: "4px",
|
||||||
|
borderColor: "#b9b9b9",
|
||||||
|
borderStyle: "solid",
|
||||||
|
borderWidth: "1px"
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
|
export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
|
||||||
|
|
||||||
|
const classes = useStyles();
|
||||||
|
|
||||||
const [durationStringFocused, setFocused] = useState(false);
|
const [durationStringFocused, setFocused] = useState(false);
|
||||||
const [anchorEl, setAnchorEl] = React.useState<Element | null>(null);
|
const [anchorEl, setAnchorEl] = React.useState<Element | null>(null);
|
||||||
const [until, setUntil] = useState<string>();
|
const [until, setUntil] = useState<string>();
|
||||||
|
@ -60,7 +76,7 @@ export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
|
||||||
|
|
||||||
const open = Boolean(anchorEl);
|
const open = Boolean(anchorEl);
|
||||||
|
|
||||||
return <Box m={1} flexDirection="row" display="flex">
|
return <Box className={classes.container}>
|
||||||
{/*setup duration*/}
|
{/*setup duration*/}
|
||||||
<Box px={1}>
|
<Box px={1}>
|
||||||
<Box>
|
<Box>
|
||||||
|
@ -72,7 +88,7 @@ export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
|
||||||
onFocus={() => {setFocused(true);}}
|
onFocus={() => {setFocused(true);}}
|
||||||
/>
|
/>
|
||||||
</Box>
|
</Box>
|
||||||
<Box my={2}>
|
<Box mt={2}>
|
||||||
<Typography variant="body2">
|
<Typography variant="body2">
|
||||||
<span aria-owns={open ? "mouse-over-popover" : undefined}
|
<span aria-owns={open ? "mouse-over-popover" : undefined}
|
||||||
aria-haspopup="true"
|
aria-haspopup="true"
|
||||||
|
@ -119,7 +135,7 @@ export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
|
||||||
/>
|
/>
|
||||||
</Box>
|
</Box>
|
||||||
|
|
||||||
<Box my={2}>
|
<Box mt={2}>
|
||||||
<Typography variant="body2">
|
<Typography variant="body2">
|
||||||
Will be changed to current time for auto-refresh mode.
|
Will be changed to current time for auto-refresh mode.
|
||||||
<InlineBtn handler={() => dispatch({type: "RUN_QUERY_TO_NOW"})} text="Switch to now"/>
|
<InlineBtn handler={() => dispatch({type: "RUN_QUERY_TO_NOW"})} text="Switch to now"/>
|
|
@ -1,20 +1,19 @@
|
||||||
import React, {FC} from "react";
|
import React, {FC} from "react";
|
||||||
import {Alert, AppBar, Box, CircularProgress, Fade, Link, Toolbar, Typography} from "@mui/material";
|
import {Alert, AppBar, Box, CircularProgress, Fade, Link, Toolbar, Typography} from "@mui/material";
|
||||||
import {ExecutionControls} from "./Configurator/ExecutionControls";
|
import {ExecutionControls} from "./Configurator/Time/ExecutionControls";
|
||||||
import {DisplayTypeSwitch} from "./Configurator/DisplayTypeSwitch";
|
import {DisplayTypeSwitch} from "./Configurator/DisplayTypeSwitch";
|
||||||
import GraphView from "./Views/GraphView";
|
import GraphView from "./Views/GraphView";
|
||||||
import TableView from "./Views/TableView";
|
import TableView from "./Views/TableView";
|
||||||
import {useAppState} from "../../state/common/StateContext";
|
import {useAppState} from "../../state/common/StateContext";
|
||||||
import QueryConfigurator from "./Configurator/QueryConfigurator";
|
import QueryConfigurator from "./Configurator/Query/QueryConfigurator";
|
||||||
import {useFetchQuery} from "./Configurator/useFetchQuery";
|
import {useFetchQuery} from "./Configurator/Query/useFetchQuery";
|
||||||
import JsonView from "./Views/JsonView";
|
import JsonView from "./Views/JsonView";
|
||||||
import {UrlCopy} from "./UrlCopy";
|
|
||||||
|
|
||||||
const HomeLayout: FC = () => {
|
const HomeLayout: FC = () => {
|
||||||
|
|
||||||
const {displayType, time: {period}} = useAppState();
|
const {displayType, time: {period}} = useAppState();
|
||||||
|
|
||||||
const {fetchUrl, isLoading, liveData, graphData, error} = useFetchQuery();
|
const {isLoading, liveData, graphData, error} = useFetchQuery();
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<>
|
<>
|
||||||
|
@ -46,12 +45,11 @@ const HomeLayout: FC = () => {
|
||||||
<ExecutionControls/>
|
<ExecutionControls/>
|
||||||
</Box>
|
</Box>
|
||||||
<DisplayTypeSwitch/>
|
<DisplayTypeSwitch/>
|
||||||
<UrlCopy url={fetchUrl}/>
|
|
||||||
</Toolbar>
|
</Toolbar>
|
||||||
</AppBar>
|
</AppBar>
|
||||||
<Box p={2} display="grid" gridTemplateRows="auto 1fr" gap={"20px"} style={{minHeight: "calc(100vh - 64px)"}}>
|
<Box p={4} display="grid" gridTemplateRows="auto 1fr" gap={"20px"} style={{minHeight: "calc(100vh - 64px)"}}>
|
||||||
<Box>
|
<Box>
|
||||||
<QueryConfigurator/>
|
<QueryConfigurator error={error}/>
|
||||||
</Box>
|
</Box>
|
||||||
<Box height={"100%"}>
|
<Box height={"100%"}>
|
||||||
{isLoading && <Fade in={isLoading} style={{
|
{isLoading && <Fade in={isLoading} style={{
|
||||||
|
@ -68,9 +66,9 @@ const HomeLayout: FC = () => {
|
||||||
<CircularProgress/>
|
<CircularProgress/>
|
||||||
</Box>
|
</Box>
|
||||||
</Fade>}
|
</Fade>}
|
||||||
{<Box height={"100%"} p={3} bgcolor={"#fff"}>
|
{<Box height={"100%"} bgcolor={"#fff"}>
|
||||||
{error &&
|
{error &&
|
||||||
<Alert color="error" severity="error" style={{fontSize: "14px"}}>
|
<Alert color="error" severity="error" style={{fontSize: "14px", whiteSpace: "pre-wrap"}}>
|
||||||
{error}
|
{error}
|
||||||
</Alert>}
|
</Alert>}
|
||||||
{graphData && period && (displayType === "chart") &&
|
{graphData && period && (displayType === "chart") &&
|
||||||
|
|
|
@ -2,50 +2,51 @@ import React, {FC, useEffect, useState} from "react";
|
||||||
import {MetricResult} from "../../../api/types";
|
import {MetricResult} from "../../../api/types";
|
||||||
import LineChart from "../../LineChart/LineChart";
|
import LineChart from "../../LineChart/LineChart";
|
||||||
import {AlignedData as uPlotData, Series as uPlotSeries} from "uplot";
|
import {AlignedData as uPlotData, Series as uPlotSeries} from "uplot";
|
||||||
import {Legend, LegendItem} from "../../Legend/Legend";
|
import Legend from "../../Legend/Legend";
|
||||||
import {useGraphDispatch, useGraphState} from "../../../state/graph/GraphStateContext";
|
import {useGraphDispatch} from "../../../state/graph/GraphStateContext";
|
||||||
import {getHideSeries, getLegendItem, getLimitsYAxis, getSeriesItem, getTimeSeries} from "../../../utils/uPlot";
|
import {getHideSeries, getLegendItem, getSeriesItem} from "../../../utils/uplot/series";
|
||||||
|
import {getLimitsYAxis, getTimeSeries} from "../../../utils/uplot/axes";
|
||||||
|
import {LegendItem} from "../../../utils/uplot/types";
|
||||||
|
import {AxisRange} from "../../../state/graph/reducer";
|
||||||
|
import GraphSettings from "../Configurator/Graph/GraphSettings";
|
||||||
|
|
||||||
export interface GraphViewProps {
|
export interface GraphViewProps {
|
||||||
data?: MetricResult[];
|
data?: MetricResult[];
|
||||||
}
|
}
|
||||||
|
|
||||||
const GraphView: FC<GraphViewProps> = ({data = []}) => {
|
const GraphView: FC<GraphViewProps> = ({data = []}) => {
|
||||||
const { yaxis } = useGraphState();
|
|
||||||
const graphDispatch = useGraphDispatch();
|
const graphDispatch = useGraphDispatch();
|
||||||
|
|
||||||
const [dataChart, setDataChart] = useState<uPlotData>([[]]);
|
const [dataChart, setDataChart] = useState<uPlotData>([[]]);
|
||||||
const [series, setSeries] = useState<uPlotSeries[]>([]);
|
const [series, setSeries] = useState<uPlotSeries[]>([]);
|
||||||
const [legend, setLegend] = useState<LegendItem[]>([]);
|
const [legend, setLegend] = useState<LegendItem[]>([]);
|
||||||
const [hideSeries, setHideSeries] = useState<string[]>([]);
|
const [hideSeries, setHideSeries] = useState<string[]>([]);
|
||||||
const [valuesLimit, setValuesLimit] = useState<[number, number]>([0, 1]);
|
const [valuesLimit, setValuesLimit] = useState<AxisRange>({"1": [0, 1]});
|
||||||
|
|
||||||
const setLimitsYaxis = (values: number[]) => {
|
const setLimitsYaxis = (values: {[key: string]: number[]}) => {
|
||||||
if (!yaxis.limits.enable || (yaxis.limits.range.every(item => !item))) {
|
|
||||||
const limits = getLimitsYAxis(values);
|
const limits = getLimitsYAxis(values);
|
||||||
setValuesLimit(limits);
|
setValuesLimit(limits);
|
||||||
graphDispatch({type: "SET_YAXIS_LIMITS", payload: limits});
|
graphDispatch({type: "SET_YAXIS_LIMITS", payload: limits});
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
const onChangeLegend = (label: string, metaKey: boolean) => {
|
const onChangeLegend = (legend: LegendItem, metaKey: boolean) => {
|
||||||
setHideSeries(getHideSeries({hideSeries, label, metaKey, series}));
|
setHideSeries(getHideSeries({hideSeries, legend, metaKey, series}));
|
||||||
};
|
};
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
const tempTimes: number[] = [];
|
const tempTimes: number[] = [];
|
||||||
const tempValues: number[] = [];
|
const tempValues: {[key: string]: number[]} = {};
|
||||||
const tempLegend: LegendItem[] = [];
|
const tempLegend: LegendItem[] = [];
|
||||||
const tempSeries: uPlotSeries[] = [];
|
const tempSeries: uPlotSeries[] = [];
|
||||||
|
|
||||||
data?.forEach(d => {
|
data?.forEach((d) => {
|
||||||
const seriesItem = getSeriesItem(d, hideSeries);
|
const seriesItem = getSeriesItem(d, hideSeries);
|
||||||
tempSeries.push(seriesItem);
|
tempSeries.push(seriesItem);
|
||||||
tempLegend.push(getLegendItem(seriesItem));
|
tempLegend.push(getLegendItem(seriesItem, d.group));
|
||||||
|
|
||||||
d.values.forEach(v => {
|
d.values.forEach(v => {
|
||||||
tempTimes.push(v[0]);
|
tempTimes.push(v[0]);
|
||||||
tempValues.push(+v[1]);
|
tempValues[d.group] ? tempValues[d.group].push(+v[1]) : tempValues[d.group] = [+v[1]];
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -68,7 +69,7 @@ const GraphView: FC<GraphViewProps> = ({data = []}) => {
|
||||||
data?.forEach(d => {
|
data?.forEach(d => {
|
||||||
const seriesItem = getSeriesItem(d, hideSeries);
|
const seriesItem = getSeriesItem(d, hideSeries);
|
||||||
tempSeries.push(seriesItem);
|
tempSeries.push(seriesItem);
|
||||||
tempLegend.push(getLegendItem(seriesItem));
|
tempLegend.push(getLegendItem(seriesItem, d.group));
|
||||||
});
|
});
|
||||||
setSeries([{}, ...tempSeries]);
|
setSeries([{}, ...tempSeries]);
|
||||||
setLegend(tempLegend);
|
setLegend(tempLegend);
|
||||||
|
@ -77,6 +78,7 @@ const GraphView: FC<GraphViewProps> = ({data = []}) => {
|
||||||
return <>
|
return <>
|
||||||
{(data.length > 0)
|
{(data.length > 0)
|
||||||
? <div>
|
? <div>
|
||||||
|
<GraphSettings/>
|
||||||
<LineChart data={dataChart} series={series} metrics={data} limits={valuesLimit}/>
|
<LineChart data={dataChart} series={series} metrics={data} limits={valuesLimit}/>
|
||||||
<Legend labels={legend} onChange={onChangeLegend}/>
|
<Legend labels={legend} onChange={onChangeLegend}/>
|
||||||
</div>
|
</div>
|
||||||
|
|
|
@ -1,31 +1,48 @@
|
||||||
import React, {FC} from "react";
|
import React, {FC, useMemo} from "react";
|
||||||
import {hexToRGB} from "../../utils/color";
|
import {hexToRGB} from "../../utils/color";
|
||||||
|
import {useAppState} from "../../state/common/StateContext";
|
||||||
|
import {LegendItem} from "../../utils/uplot/types";
|
||||||
import "./legend.css";
|
import "./legend.css";
|
||||||
|
import {getDashLine} from "../../utils/uplot/helpers";
|
||||||
export interface LegendItem {
|
|
||||||
label: string;
|
|
||||||
color: string;
|
|
||||||
checked: boolean;
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface LegendProps {
|
export interface LegendProps {
|
||||||
labels: LegendItem[];
|
labels: LegendItem[];
|
||||||
onChange: (legend: string, metaKey: boolean) => void;
|
onChange: (item: LegendItem, metaKey: boolean) => void;
|
||||||
}
|
}
|
||||||
|
|
||||||
export const Legend: FC<LegendProps> = ({labels, onChange}) => {
|
const Legend: FC<LegendProps> = ({labels, onChange}) => {
|
||||||
|
const {query} = useAppState();
|
||||||
|
|
||||||
|
const groups = useMemo(() => {
|
||||||
|
return Array.from(new Set(labels.map(l => l.group)));
|
||||||
|
}, [labels]);
|
||||||
|
|
||||||
return <div className="legendWrapper">
|
return <div className="legendWrapper">
|
||||||
{labels.map((legendItem: LegendItem) =>
|
{groups.map((group) => <div className="legendGroup" key={group}>
|
||||||
|
<div className="legendGroupTitle">
|
||||||
|
<svg className="legendGroupLine" width="33" height="3" version="1.1" xmlns="http://www.w3.org/2000/svg">
|
||||||
|
<line strokeWidth="3" x1="0" y1="0" x2="33" y2="0" stroke="#363636"
|
||||||
|
strokeDasharray={getDashLine(group).join(",")}
|
||||||
|
/>
|
||||||
|
</svg>
|
||||||
|
<b>"{query[group - 1]}"</b>:
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
{labels.filter(l => l.group === group).map((legendItem: LegendItem) =>
|
||||||
<div className={legendItem.checked ? "legendItem" : "legendItem legendItemHide"}
|
<div className={legendItem.checked ? "legendItem" : "legendItem legendItemHide"}
|
||||||
key={legendItem.label}
|
key={`${legendItem.group}.${legendItem.label}`}
|
||||||
onClick={(e) => onChange(legendItem.label, e.ctrlKey || e.metaKey)}>
|
onClick={(e) => onChange(legendItem, e.ctrlKey || e.metaKey)}>
|
||||||
<div className="legendMarker"
|
<div className="legendMarker"
|
||||||
style={{
|
style={{
|
||||||
borderColor: legendItem.color,
|
borderColor: legendItem.color,
|
||||||
backgroundColor: `rgba(${hexToRGB(legendItem.color)}, 0.1)`
|
backgroundColor: `rgba(${hexToRGB(legendItem.color)}, 0.1)`
|
||||||
}}/>
|
}}/>
|
||||||
<div className="legendLabel">{legendItem.checked} {legendItem.label}</div>
|
<div className="legendLabel">{legendItem.label}</div>
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
</div>
|
||||||
|
</div>)}
|
||||||
</div>;
|
</div>;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
export default Legend;
|
|
@ -1,12 +1,31 @@
|
||||||
.legendWrapper {
|
.legendWrapper {
|
||||||
|
display: grid;
|
||||||
|
grid-template-columns: repeat(auto-fit, minmax(400px, 1fr));
|
||||||
|
grid-gap: 20px;
|
||||||
margin-top: 20px;
|
margin-top: 20px;
|
||||||
|
cursor: default;
|
||||||
|
}
|
||||||
|
|
||||||
|
.legendGroup {
|
||||||
|
margin-bottom: 24px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.legendGroupTitle {
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
padding: 10px 0 5px;
|
||||||
|
font-size: 11px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.legendGroupLine {
|
||||||
|
margin: 0 10px;
|
||||||
}
|
}
|
||||||
|
|
||||||
.legendItem {
|
.legendItem {
|
||||||
display: inline-grid;
|
display: inline-grid;
|
||||||
grid-template-columns: auto auto;
|
grid-template-columns: auto auto;
|
||||||
grid-gap: 4px;
|
grid-gap: 6px;
|
||||||
align-items: center;
|
align-items: start;
|
||||||
justify-content: start;
|
justify-content: start;
|
||||||
padding: 5px 10px;
|
padding: 5px 10px;
|
||||||
background-color: #FFF;
|
background-color: #FFF;
|
||||||
|
@ -30,9 +49,10 @@
|
||||||
border-style: solid;
|
border-style: solid;
|
||||||
box-sizing: border-box;
|
box-sizing: border-box;
|
||||||
transition: 0.2s ease;
|
transition: 0.2s ease;
|
||||||
|
margin: 3px 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
.legendLabel {
|
.legendLabel {
|
||||||
font-size: 12px;
|
font-size: 11px;
|
||||||
font-weight: 600;
|
font-weight: normal;
|
||||||
}
|
}
|
|
@ -1,45 +1,46 @@
|
||||||
import React, {FC, useCallback, useEffect, useRef, useState} from "react";
|
import React, {FC, useCallback, useEffect, useRef, useState} from "react";
|
||||||
import {useAppDispatch, useAppState} from "../../state/common/StateContext";
|
import {useAppDispatch, useAppState} from "../../state/common/StateContext";
|
||||||
import uPlot, {AlignedData as uPlotData, Options as uPlotOptions, Series as uPlotSeries, Range} from "uplot";
|
import uPlot, {AlignedData as uPlotData, Options as uPlotOptions, Series as uPlotSeries, Range, Scales, Scale} from "uplot";
|
||||||
import {useGraphState} from "../../state/graph/GraphStateContext";
|
import {useGraphState} from "../../state/graph/GraphStateContext";
|
||||||
import {defaultOptions, dragChart, setTooltip} from "../../utils/uPlot";
|
import {defaultOptions} from "../../utils/uplot/helpers";
|
||||||
|
import {dragChart} from "../../utils/uplot/events";
|
||||||
|
import {getAxes} from "../../utils/uplot/axes";
|
||||||
|
import {setTooltip} from "../../utils/uplot/tooltip";
|
||||||
import {MetricResult} from "../../api/types";
|
import {MetricResult} from "../../api/types";
|
||||||
import {limitsDurations} from "../../utils/time";
|
import {limitsDurations} from "../../utils/time";
|
||||||
import throttle from "lodash.throttle";
|
import throttle from "lodash.throttle";
|
||||||
import "uplot/dist/uPlot.min.css";
|
import "uplot/dist/uPlot.min.css";
|
||||||
import "./tooltip.css";
|
import "./tooltip.css";
|
||||||
|
import {AxisRange} from "../../state/graph/reducer";
|
||||||
|
|
||||||
export interface LineChartProps {
|
export interface LineChartProps {
|
||||||
metrics: MetricResult[]
|
metrics: MetricResult[];
|
||||||
data: uPlotData;
|
data: uPlotData;
|
||||||
series: uPlotSeries[],
|
series: uPlotSeries[];
|
||||||
limits: [number, number]
|
limits: AxisRange;
|
||||||
}
|
}
|
||||||
|
enum typeChartUpdate {xRange = "xRange", yRange = "yRange", data = "data"}
|
||||||
enum typeChartUpdate { xRange = "xRange", yRange = "yRange", data = "data" }
|
|
||||||
|
|
||||||
const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) => {
|
const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) => {
|
||||||
const dispatch = useAppDispatch();
|
const dispatch = useAppDispatch();
|
||||||
const {time: {period}} = useAppState();
|
const {time: {period}} = useAppState();
|
||||||
const { yaxis } = useGraphState();
|
const {yaxis} = useGraphState();
|
||||||
const containerRef = useRef<HTMLDivElement>(null);
|
const containerRef = useRef<HTMLDivElement>(null);
|
||||||
const uPlotRef = useRef<HTMLDivElement>(null);
|
const uPlotRef = useRef<HTMLDivElement>(null);
|
||||||
const [isPanning, setPanning] = useState(false);
|
const [isPanning, setPanning] = useState(false);
|
||||||
const [zoomPos, setZoomPos] = useState(0);
|
const [xRange, setXRange] = useState({min: period.start, max: period.end});
|
||||||
const [xRange, setXRange] = useState({ min: period.start, max: period.end });
|
|
||||||
const [uPlotInst, setUPlotInst] = useState<uPlot>();
|
const [uPlotInst, setUPlotInst] = useState<uPlot>();
|
||||||
|
|
||||||
const tooltip = document.createElement("div");
|
const tooltip = document.createElement("div");
|
||||||
tooltip.className = "u-tooltip";
|
tooltip.className = "u-tooltip";
|
||||||
const tooltipIdx = { seriesIdx: 1, dataIdx: 0 };
|
const tooltipIdx = {seriesIdx: 1, dataIdx: 0};
|
||||||
const tooltipOffset = { left: 0, top: 0 };
|
const tooltipOffset = {left: 0, top: 0};
|
||||||
|
|
||||||
const setScale = ({min, max}: {min: number, max: number}): void => {
|
const setScale = ({min, max}: { min: number, max: number }): void => {
|
||||||
dispatch({type: "SET_PERIOD", payload: {from: new Date(min * 1000), to: new Date(max * 1000)}});
|
dispatch({type: "SET_PERIOD", payload: {from: new Date(min * 1000), to: new Date(max * 1000)}});
|
||||||
};
|
};
|
||||||
const throttledSetScale = useCallback(throttle(setScale, 500), []);
|
const throttledSetScale = useCallback(throttle(setScale, 500), []);
|
||||||
|
const setPlotScale = ({u, min, max}: { u: uPlot, min: number, max: number }) => {
|
||||||
const setPlotScale = ({u, min, max}: {u: uPlot, min: number, max: number}) => {
|
|
||||||
const delta = (max - min) * 1000;
|
const delta = (max - min) * 1000;
|
||||||
if ((delta < limitsDurations.min) || (delta > limitsDurations.max)) return;
|
if ((delta < limitsDurations.min) || (delta > limitsDurations.max)) return;
|
||||||
u.setScale("x", {min, max});
|
u.setScale("x", {min, max});
|
||||||
|
@ -52,22 +53,18 @@ const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) =>
|
||||||
tooltipOffset.left = parseFloat(u.over.style.left);
|
tooltipOffset.left = parseFloat(u.over.style.left);
|
||||||
tooltipOffset.top = parseFloat(u.over.style.top);
|
tooltipOffset.top = parseFloat(u.over.style.top);
|
||||||
u.root.querySelector(".u-wrap")?.appendChild(tooltip);
|
u.root.querySelector(".u-wrap")?.appendChild(tooltip);
|
||||||
|
|
||||||
// wheel drag pan
|
// wheel drag pan
|
||||||
u.over.addEventListener("mousedown", e => {
|
u.over.addEventListener("mousedown", e => dragChart({u, e, setPanning, setPlotScale, factor}));
|
||||||
dragChart({u, e, setPanning, setPlotScale, factor});
|
|
||||||
});
|
|
||||||
|
|
||||||
// wheel scroll zoom
|
// wheel scroll zoom
|
||||||
u.over.addEventListener("wheel", e => {
|
u.over.addEventListener("wheel", e => {
|
||||||
if (!e.ctrlKey && !e.metaKey) return;
|
if (!e.ctrlKey && !e.metaKey) return;
|
||||||
e.preventDefault();
|
e.preventDefault();
|
||||||
const {width} = u.over.getBoundingClientRect();
|
const {width} = u.over.getBoundingClientRect();
|
||||||
if (u.cursor.left && u.cursor.left > 0) setZoomPos(u.cursor.left);
|
const zoomPos = u.cursor.left && u.cursor.left > 0 ? u.cursor.left : 0;
|
||||||
const xVal = u.posToVal(zoomPos, "x");
|
const xVal = u.posToVal(zoomPos, "x");
|
||||||
const oxRange = (u.scales.x.max || 0) - (u.scales.x.min || 0);
|
const oxRange = (u.scales.x.max || 0) - (u.scales.x.min || 0);
|
||||||
const nxRange = e.deltaY < 0 ? oxRange * factor : oxRange / factor;
|
const nxRange = e.deltaY < 0 ? oxRange * factor : oxRange / factor;
|
||||||
const min = xVal - (zoomPos/width) * nxRange;
|
const min = xVal - (zoomPos / width) * nxRange;
|
||||||
const max = min + nxRange;
|
const max = min + nxRange;
|
||||||
u.batch(() => setPlotScale({u, min, max}));
|
u.batch(() => setPlotScale({u, min, max}));
|
||||||
});
|
});
|
||||||
|
@ -88,25 +85,27 @@ const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) =>
|
||||||
? setTooltip({u, tooltipIdx, metrics, series, tooltip, tooltipOffset})
|
? setTooltip({u, tooltipIdx, metrics, series, tooltip, tooltipOffset})
|
||||||
: tooltip.style.display = "none";
|
: tooltip.style.display = "none";
|
||||||
};
|
};
|
||||||
|
const getRangeX = (): Range.MinMax => [xRange.min, xRange.max];
|
||||||
const getRangeY = (u: uPlot, min = 0, max = 1): Range.MinMax => {
|
const getRangeY = (u: uPlot, min = 0, max = 1, axis: string): Range.MinMax => {
|
||||||
if (yaxis.limits.enable) return yaxis.limits.range;
|
if (yaxis.limits.enable) return yaxis.limits.range[axis];
|
||||||
return min && max ? [min - (min * 0.05), max + (max * 0.05)] : limits;
|
return min && max ? [min - (min * 0.05), max + (max * 0.05)] : limits[axis];
|
||||||
};
|
};
|
||||||
|
|
||||||
const getRangeX = (): Range.MinMax => {
|
const getScales = (): Scales => {
|
||||||
return [xRange.min, xRange.max];
|
const scales: { [key: string]: { range: Scale.Range } } = {x: {range: getRangeX}};
|
||||||
|
Object.keys(yaxis.limits.range).forEach(axis => {
|
||||||
|
scales[axis] = {range: (u: uPlot, min = 0, max = 1) => getRangeY(u, min, max, axis)};
|
||||||
|
});
|
||||||
|
return scales;
|
||||||
};
|
};
|
||||||
|
|
||||||
const options: uPlotOptions = {
|
const options: uPlotOptions = {
|
||||||
...defaultOptions,
|
...defaultOptions,
|
||||||
width: containerRef.current ? containerRef.current.offsetWidth : 400,
|
|
||||||
series,
|
series,
|
||||||
plugins: [{ hooks: { ready: onReadyChart, setCursor, setSeries: seriesFocus }}],
|
axes: getAxes(series),
|
||||||
scales: {
|
scales: {...getScales()},
|
||||||
x: { range: getRangeX },
|
width: containerRef.current ? containerRef.current.offsetWidth : 400,
|
||||||
y: { range: getRangeY }
|
plugins: [{hooks: {ready: onReadyChart, setCursor, setSeries: seriesFocus}}],
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
const updateChart = (type: typeChartUpdate): void => {
|
const updateChart = (type: typeChartUpdate): void => {
|
||||||
|
@ -116,7 +115,10 @@ const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) =>
|
||||||
uPlotInst.scales.x.range = getRangeX;
|
uPlotInst.scales.x.range = getRangeX;
|
||||||
break;
|
break;
|
||||||
case typeChartUpdate.yRange:
|
case typeChartUpdate.yRange:
|
||||||
uPlotInst.scales.y.range = getRangeY;
|
Object.keys(yaxis.limits.range).forEach(axis => {
|
||||||
|
if (!uPlotInst.scales[axis]) return;
|
||||||
|
uPlotInst.scales[axis].range = (u: uPlot, min = 0, max = 1) => getRangeY(u, min, max, axis);
|
||||||
|
});
|
||||||
break;
|
break;
|
||||||
case typeChartUpdate.data:
|
case typeChartUpdate.data:
|
||||||
uPlotInst.setData(data);
|
uPlotInst.setData(data);
|
||||||
|
@ -125,13 +127,13 @@ const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) =>
|
||||||
uPlotInst.redraw();
|
uPlotInst.redraw();
|
||||||
};
|
};
|
||||||
|
|
||||||
useEffect(() => setXRange({ min: period.start, max: period.end }), [period]);
|
useEffect(() => setXRange({min: period.start, max: period.end}), [period]);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (!uPlotRef.current) return;
|
if (!uPlotRef.current) return;
|
||||||
const u = new uPlot(options, data, uPlotRef.current);
|
const u = new uPlot(options, data, uPlotRef.current);
|
||||||
setUPlotInst(u);
|
setUPlotInst(u);
|
||||||
setXRange({ min: period.start, max: period.end });
|
setXRange({min: period.start, max: period.end});
|
||||||
return u.destroy;
|
return u.destroy;
|
||||||
}, [uPlotRef.current, series]);
|
}, [uPlotRef.current, series]);
|
||||||
|
|
||||||
|
|
|
@ -7,43 +7,89 @@ body {
|
||||||
}
|
}
|
||||||
|
|
||||||
code {
|
code {
|
||||||
font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
|
font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', monospace;
|
||||||
monospace;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*Material UI global classes*/
|
/*Material UI global classes*/
|
||||||
|
|
||||||
.MuiAccordionSummary-content {
|
.MuiAccordionSummary-content {
|
||||||
margin: 10px 0 !important;
|
margin: 0 !important;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*Codemirror classes*/
|
||||||
|
|
||||||
/* TODO: find better way to override codemirror styles */
|
/* TODO: find better way to override codemirror styles */
|
||||||
.cm-activeLine {
|
.cm-activeLine {
|
||||||
background-color: inherit !important;
|
background-color: inherit !important;
|
||||||
}
|
}
|
||||||
.cm-editor {
|
.cm-editor {
|
||||||
|
border: none;
|
||||||
border-radius: 4px;
|
border-radius: 4px;
|
||||||
border-color: #b9b9b9;
|
|
||||||
border-style: solid;
|
|
||||||
border-width: 1px;
|
|
||||||
font-size: 10px;
|
font-size: 10px;
|
||||||
}
|
}
|
||||||
|
|
||||||
.one-line-scroll .cm-editor {
|
|
||||||
height: 24px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.cm-gutters {
|
.cm-gutters {
|
||||||
border-radius: 4px 0 0 4px;
|
border-radius: 4px 0 0 4px;
|
||||||
height: 100%;
|
height: 100%;
|
||||||
|
overflow: hidden;
|
||||||
|
background-color: #FFFFFF !important;
|
||||||
|
border: none !important;
|
||||||
}
|
}
|
||||||
|
|
||||||
.multi-line-scroll .cm-content,
|
.cm-activeLineGutter {
|
||||||
.multi-line-scroll .cm-gutters {
|
background-color: #FFFFFF !important;
|
||||||
min-height: 64px !important;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
.one-line-scroll .cm-content,
|
.query-editor .cm-scroller {
|
||||||
.one-line-scroll .cm-gutters {
|
align-items: center !important;
|
||||||
min-height: auto;
|
}
|
||||||
|
|
||||||
|
.query-editor .cm-editor.cm-focused {
|
||||||
|
outline: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
.query-editor-container {
|
||||||
|
position: relative;
|
||||||
|
padding: 12px;
|
||||||
|
border: 1px solid #b9b9b9;
|
||||||
|
border-radius: 4px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.query-editor-container_focus {
|
||||||
|
border: 1px solid #3F51B5;
|
||||||
|
}
|
||||||
|
|
||||||
|
.query-editor-container_error {
|
||||||
|
border-color: #FF4141;
|
||||||
|
}
|
||||||
|
|
||||||
|
.query-editor-container-one-line .query-editor .cm-editor {
|
||||||
|
height: 22px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.query-editor-container-one-line {
|
||||||
|
padding: 6px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.query-editor-label {
|
||||||
|
font-weight: 400;
|
||||||
|
font-size: 12px;
|
||||||
|
line-height: 1;
|
||||||
|
letter-spacing: normal;
|
||||||
|
color: rgba(0, 0, 0, 0.6);
|
||||||
|
padding: 0 5px;
|
||||||
|
white-space: nowrap;
|
||||||
|
overflow: hidden;
|
||||||
|
text-overflow: ellipsis;
|
||||||
|
max-width: calc(133% - 24px);
|
||||||
|
position: absolute;
|
||||||
|
left: 4px;
|
||||||
|
top: -0.71875em;
|
||||||
|
z-index: 1;
|
||||||
|
background-color: #FFFFFF;
|
||||||
|
transform: scale(0.75);
|
||||||
|
}
|
||||||
|
|
||||||
|
.query-editor-container_error .query-editor-label {
|
||||||
|
color: #FF4141;
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,7 +4,7 @@ import {TimeParams, TimePeriod} from "../../types";
|
||||||
import {dateFromSeconds, formatDateToLocal, getDateNowUTC, getDurationFromPeriod, getTimeperiodForDuration} from "../../utils/time";
|
import {dateFromSeconds, formatDateToLocal, getDateNowUTC, getDurationFromPeriod, getTimeperiodForDuration} from "../../utils/time";
|
||||||
import {getFromStorage} from "../../utils/storage";
|
import {getFromStorage} from "../../utils/storage";
|
||||||
import {getDefaultServer} from "../../utils/default-server-url";
|
import {getDefaultServer} from "../../utils/default-server-url";
|
||||||
import {getQueryStringValue} from "../../utils/query-string";
|
import {getQueryArray, getQueryStringValue} from "../../utils/query-string";
|
||||||
|
|
||||||
export interface TimeState {
|
export interface TimeState {
|
||||||
duration: string;
|
duration: string;
|
||||||
|
@ -19,9 +19,9 @@ export interface QueryHistory {
|
||||||
export interface AppState {
|
export interface AppState {
|
||||||
serverUrl: string;
|
serverUrl: string;
|
||||||
displayType: DisplayType;
|
displayType: DisplayType;
|
||||||
query: string;
|
query: string[];
|
||||||
time: TimeState;
|
time: TimeState;
|
||||||
queryHistory: QueryHistory,
|
queryHistory: QueryHistory[],
|
||||||
queryControls: {
|
queryControls: {
|
||||||
autoRefresh: boolean;
|
autoRefresh: boolean;
|
||||||
autocomplete: boolean,
|
autocomplete: boolean,
|
||||||
|
@ -32,9 +32,9 @@ export interface AppState {
|
||||||
export type Action =
|
export type Action =
|
||||||
| { type: "SET_DISPLAY_TYPE", payload: DisplayType }
|
| { type: "SET_DISPLAY_TYPE", payload: DisplayType }
|
||||||
| { type: "SET_SERVER", payload: string }
|
| { type: "SET_SERVER", payload: string }
|
||||||
| { type: "SET_QUERY", payload: string }
|
| { type: "SET_QUERY", payload: string[] }
|
||||||
| { type: "SET_QUERY_HISTORY_INDEX", payload: number }
|
| { type: "SET_QUERY_HISTORY_BY_INDEX", payload: {value: QueryHistory, queryNumber: number} }
|
||||||
| { type: "SET_QUERY_HISTORY_VALUES", payload: string[] }
|
| { type: "SET_QUERY_HISTORY", payload: QueryHistory[] }
|
||||||
| { type: "SET_DURATION", payload: string }
|
| { type: "SET_DURATION", payload: string }
|
||||||
| { type: "SET_UNTIL", payload: Date }
|
| { type: "SET_UNTIL", payload: Date }
|
||||||
| { type: "SET_PERIOD", payload: TimePeriod }
|
| { type: "SET_PERIOD", payload: TimePeriod }
|
||||||
|
@ -46,13 +46,13 @@ export type Action =
|
||||||
|
|
||||||
const duration = getQueryStringValue("g0.range_input", "1h") as string;
|
const duration = getQueryStringValue("g0.range_input", "1h") as string;
|
||||||
const endInput = formatDateToLocal(getQueryStringValue("g0.end_input", getDateNowUTC()) as Date);
|
const endInput = formatDateToLocal(getQueryStringValue("g0.end_input", getDateNowUTC()) as Date);
|
||||||
const query = getQueryStringValue("g0.expr", "") as string;
|
const query = getQueryArray();
|
||||||
|
|
||||||
export const initialState: AppState = {
|
export const initialState: AppState = {
|
||||||
serverUrl: getDefaultServer(),
|
serverUrl: getDefaultServer(),
|
||||||
displayType: getQueryStringValue("tab", "chart") as DisplayType,
|
displayType: getQueryStringValue("tab", "chart") as DisplayType,
|
||||||
query: query, // demo_memory_usage_bytes
|
query: query, // demo_memory_usage_bytes
|
||||||
queryHistory: { index: 0, values: [query] },
|
queryHistory: query.map(q => ({index: 0, values: [q]})),
|
||||||
time: {
|
time: {
|
||||||
duration,
|
duration,
|
||||||
period: getTimeperiodForDuration(duration, new Date(endInput))
|
period: getTimeperiodForDuration(duration, new Date(endInput))
|
||||||
|
@ -81,21 +81,16 @@ export function reducer(state: AppState, action: Action): AppState {
|
||||||
...state,
|
...state,
|
||||||
query: action.payload
|
query: action.payload
|
||||||
};
|
};
|
||||||
case "SET_QUERY_HISTORY_INDEX":
|
case "SET_QUERY_HISTORY":
|
||||||
return {
|
return {
|
||||||
...state,
|
...state,
|
||||||
queryHistory: {
|
queryHistory: action.payload
|
||||||
...state.queryHistory,
|
|
||||||
index: action.payload
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
case "SET_QUERY_HISTORY_VALUES":
|
case "SET_QUERY_HISTORY_BY_INDEX":
|
||||||
|
state.queryHistory.splice(action.payload.queryNumber, 1, action.payload.value);
|
||||||
return {
|
return {
|
||||||
...state,
|
...state,
|
||||||
queryHistory: {
|
queryHistory: state.queryHistory
|
||||||
...state.queryHistory,
|
|
||||||
values: action.payload
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
case "SET_DURATION":
|
case "SET_DURATION":
|
||||||
return {
|
return {
|
||||||
|
|
|
@ -1,21 +1,34 @@
|
||||||
|
export interface AxisRange {
|
||||||
|
[key: string]: [number, number]
|
||||||
|
}
|
||||||
|
|
||||||
export interface YaxisState {
|
export interface YaxisState {
|
||||||
limits: {
|
limits: {
|
||||||
enable: boolean,
|
enable: boolean,
|
||||||
range: [number, number]
|
range: AxisRange
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export interface CustomStep {
|
||||||
|
enable: boolean,
|
||||||
|
value: number
|
||||||
|
}
|
||||||
|
|
||||||
export interface GraphState {
|
export interface GraphState {
|
||||||
|
customStep: CustomStep
|
||||||
yaxis: YaxisState
|
yaxis: YaxisState
|
||||||
}
|
}
|
||||||
|
|
||||||
export type GraphAction =
|
export type GraphAction =
|
||||||
| { type: "TOGGLE_ENABLE_YAXIS_LIMITS" }
|
| { type: "TOGGLE_ENABLE_YAXIS_LIMITS" }
|
||||||
| { type: "SET_YAXIS_LIMITS", payload: [number, number] }
|
| { type: "SET_YAXIS_LIMITS", payload: { [key: string]: [number, number] } }
|
||||||
|
| { type: "TOGGLE_CUSTOM_STEP" }
|
||||||
|
| { type: "SET_CUSTOM_STEP", payload: number}
|
||||||
|
|
||||||
export const initialGraphState: GraphState = {
|
export const initialGraphState: GraphState = {
|
||||||
|
customStep: {enable: false, value: 1},
|
||||||
yaxis: {
|
yaxis: {
|
||||||
limits: {enable: false, range: [0, 0]}
|
limits: {enable: false, range: {"1": [0, 0]}}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -32,6 +45,22 @@ export function reducer(state: GraphState, action: GraphAction): GraphState {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
case "TOGGLE_CUSTOM_STEP":
|
||||||
|
return {
|
||||||
|
...state,
|
||||||
|
customStep: {
|
||||||
|
...state.customStep,
|
||||||
|
enable: !state.customStep.enable
|
||||||
|
}
|
||||||
|
};
|
||||||
|
case "SET_CUSTOM_STEP":
|
||||||
|
return {
|
||||||
|
...state,
|
||||||
|
customStep: {
|
||||||
|
...state.customStep,
|
||||||
|
value: action.payload
|
||||||
|
}
|
||||||
|
};
|
||||||
case "SET_YAXIS_LIMITS":
|
case "SET_YAXIS_LIMITS":
|
||||||
return {
|
return {
|
||||||
...state,
|
...state,
|
||||||
|
|
25
app/vmui/packages/vmui/src/theme/switch.ts
Normal file
25
app/vmui/packages/vmui/src/theme/switch.ts
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
import {styled} from "@mui/material/styles";
|
||||||
|
import Switch from "@mui/material/Switch";
|
||||||
|
|
||||||
|
const BasicSwitch = styled(Switch)(() => ({
|
||||||
|
padding: 10,
|
||||||
|
"& .MuiSwitch-track": {
|
||||||
|
borderRadius: 14,
|
||||||
|
"&:before, &:after": {
|
||||||
|
content: "\"\"",
|
||||||
|
position: "absolute",
|
||||||
|
top: "50%",
|
||||||
|
transform: "translateY(-50%)",
|
||||||
|
width: 14,
|
||||||
|
height: 14,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"& .MuiSwitch-thumb": {
|
||||||
|
boxShadow: "none",
|
||||||
|
width: 12,
|
||||||
|
height: 12,
|
||||||
|
margin: 4,
|
||||||
|
},
|
||||||
|
}));
|
||||||
|
|
||||||
|
export default BasicSwitch;
|
87
app/vmui/packages/vmui/src/theme/theme.ts
Normal file
87
app/vmui/packages/vmui/src/theme/theme.ts
Normal file
|
@ -0,0 +1,87 @@
|
||||||
|
import {createTheme} from "@mui/material/styles";
|
||||||
|
|
||||||
|
const THEME = createTheme({
|
||||||
|
palette: {
|
||||||
|
primary: {
|
||||||
|
main: "#3F51B5"
|
||||||
|
},
|
||||||
|
secondary: {
|
||||||
|
main: "#F50057"
|
||||||
|
},
|
||||||
|
error: {
|
||||||
|
main: "#FF4141"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
components: {
|
||||||
|
MuiFormHelperText: {
|
||||||
|
styleOverrides: {
|
||||||
|
root: {
|
||||||
|
position: "absolute",
|
||||||
|
top: "36px",
|
||||||
|
left: "2px",
|
||||||
|
margin: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
MuiInputLabel: {
|
||||||
|
styleOverrides: {
|
||||||
|
root: {
|
||||||
|
fontSize: "12px",
|
||||||
|
letterSpacing: "normal",
|
||||||
|
lineHeight: "1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
MuiInputBase: {
|
||||||
|
styleOverrides: {
|
||||||
|
"root": {
|
||||||
|
"&.Mui-focused fieldset": {
|
||||||
|
"borderWidth": "1px !important"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
MuiSwitch: {
|
||||||
|
defaultProps: {
|
||||||
|
color: "secondary"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
MuiAccordion: {
|
||||||
|
styleOverrides: {
|
||||||
|
root: {
|
||||||
|
boxShadow: "rgba(0, 0, 0, 0.16) 0px 1px 4px;"
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
MuiPaper: {
|
||||||
|
styleOverrides: {
|
||||||
|
elevation3: {
|
||||||
|
boxShadow: "rgba(0, 0, 0, 0.2) 0px 3px 8px;"
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
MuiIconButton: {
|
||||||
|
defaultProps: {
|
||||||
|
size: "large",
|
||||||
|
},
|
||||||
|
styleOverrides: {
|
||||||
|
sizeLarge: {
|
||||||
|
borderRadius: "20%",
|
||||||
|
height: "40px",
|
||||||
|
width: "41px"
|
||||||
|
},
|
||||||
|
sizeMedium: {
|
||||||
|
borderRadius: "20%",
|
||||||
|
},
|
||||||
|
sizeSmall: {
|
||||||
|
borderRadius: "20%",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
typography: {
|
||||||
|
"fontSize": 10
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
export default THEME;
|
|
@ -24,8 +24,13 @@ export interface DataSeries extends MetricBase{
|
||||||
values: DataValue[]; // sorted by key which is timestamp
|
values: DataValue[]; // sorted by key which is timestamp
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
export interface InstantDataSeries {
|
export interface InstantDataSeries {
|
||||||
metadata: string[]; // just ordered columns
|
metadata: string[]; // just ordered columns
|
||||||
value: string;
|
value: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export enum ErrorTypes {
|
||||||
|
emptyServer = "Please enter Server URL",
|
||||||
|
validServer = "Please provide a valid Server URL",
|
||||||
|
validQuery = "Please enter a valid Query and execute it"
|
||||||
|
}
|
|
@ -2,10 +2,9 @@ import qs from "qs";
|
||||||
import get from "lodash.get";
|
import get from "lodash.get";
|
||||||
|
|
||||||
const stateToUrlParams = {
|
const stateToUrlParams = {
|
||||||
"query": "g0.expr",
|
"time.duration": "range_input",
|
||||||
"time.duration": "g0.range_input",
|
"time.period.date": "end_input",
|
||||||
"time.period.date": "g0.end_input",
|
"time.period.step": "step_input",
|
||||||
"time.period.step": "g0.step_input",
|
|
||||||
"displayType": "tab"
|
"displayType": "tab"
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -39,15 +38,19 @@ export const setQueryStringWithoutPageReload = (qsValue: string): void => {
|
||||||
|
|
||||||
export const setQueryStringValue = (newValue: Record<string, unknown>): void => {
|
export const setQueryStringValue = (newValue: Record<string, unknown>): void => {
|
||||||
const queryMap = new Map(Object.entries(stateToUrlParams));
|
const queryMap = new Map(Object.entries(stateToUrlParams));
|
||||||
|
const query = get(newValue, "query", "") as string[];
|
||||||
const newQsValue: string[] = [];
|
const newQsValue: string[] = [];
|
||||||
|
query.forEach((q, i) => {
|
||||||
queryMap.forEach((queryKey, stateKey) => {
|
queryMap.forEach((queryKey, stateKey) => {
|
||||||
// const queryKeyEncoded = encodeURIComponent(queryKey);
|
|
||||||
const value = get(newValue, stateKey, "") as string;
|
const value = get(newValue, stateKey, "") as string;
|
||||||
if (value) {
|
if (value) {
|
||||||
const valueEncoded = encodeURIComponent(value);
|
const valueEncoded = encodeURIComponent(value);
|
||||||
newQsValue.push(`${queryKey}=${valueEncoded}`);
|
newQsValue.push(`g${i}.${queryKey}=${valueEncoded}`);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
newQsValue.push(`g${i}.expr=${q}`);
|
||||||
|
});
|
||||||
|
|
||||||
setQueryStringWithoutPageReload(newQsValue.join("&"));
|
setQueryStringWithoutPageReload(newQsValue.join("&"));
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -59,3 +62,10 @@ export const getQueryStringValue = (
|
||||||
const values = qs.parse(queryString, { ignoreQueryPrefix: true });
|
const values = qs.parse(queryString, { ignoreQueryPrefix: true });
|
||||||
return get(values, key, defaultValue || "");
|
return get(values, key, defaultValue || "");
|
||||||
};
|
};
|
||||||
|
|
||||||
|
export const getQueryArray = (): string[] => {
|
||||||
|
const queryLength = window.location.search.match(/g\d+.expr/gmi)?.length || 1;
|
||||||
|
return new Array(queryLength).fill(1).map((q, i) => {
|
||||||
|
return getQueryStringValue(`g${i}.expr`, "") as string;
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
|
@ -1,147 +0,0 @@
|
||||||
import uPlot, {Series as uPlotSeries, Series} from "uplot";
|
|
||||||
import {getColorFromString} from "./color";
|
|
||||||
import dayjs from "dayjs";
|
|
||||||
import {MetricResult} from "../api/types";
|
|
||||||
import {LegendItem} from "../components/Legend/Legend";
|
|
||||||
import {getNameForMetric} from "./metric";
|
|
||||||
import {getMaxFromArray, getMinFromArray} from "./math";
|
|
||||||
import {roundTimeSeconds} from "./time";
|
|
||||||
import numeral from "numeral";
|
|
||||||
|
|
||||||
interface SetupTooltip {
|
|
||||||
u: uPlot,
|
|
||||||
metrics: MetricResult[],
|
|
||||||
series: Series[],
|
|
||||||
tooltip: HTMLDivElement,
|
|
||||||
tooltipOffset: {left: number, top: number},
|
|
||||||
tooltipIdx: {seriesIdx: number, dataIdx: number}
|
|
||||||
}
|
|
||||||
|
|
||||||
interface HideSeriesArgs {
|
|
||||||
hideSeries: string[],
|
|
||||||
label: string,
|
|
||||||
metaKey: boolean,
|
|
||||||
series: Series[]
|
|
||||||
}
|
|
||||||
|
|
||||||
interface DragArgs {
|
|
||||||
e: MouseEvent,
|
|
||||||
u: uPlot,
|
|
||||||
factor: number,
|
|
||||||
setPanning: (enable: boolean) => void,
|
|
||||||
setPlotScale: ({u, min, max}: {u: uPlot, min: number, max: number}) => void
|
|
||||||
}
|
|
||||||
|
|
||||||
const stub = (): null => null;
|
|
||||||
|
|
||||||
export const defaultOptions = {
|
|
||||||
height: 500,
|
|
||||||
legend: { show: false },
|
|
||||||
axes: [
|
|
||||||
{ space: 80 },
|
|
||||||
{
|
|
||||||
show: true,
|
|
||||||
font: "10px Arial",
|
|
||||||
values: (self: uPlot, ticks: number[]): (string | number)[] => ticks.map(n => n > 1000 ? numeral(n).format("0.0a") : n)
|
|
||||||
}
|
|
||||||
],
|
|
||||||
cursor: {
|
|
||||||
drag: { x: false, y: false },
|
|
||||||
focus: { prox: 30 },
|
|
||||||
bind: { mouseup: stub, mousedown: stub, click: stub, dblclick: stub, mouseenter: stub }
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
export const setTooltip = ({ u, tooltipIdx, metrics, series, tooltip, tooltipOffset }: SetupTooltip) : void => {
|
|
||||||
const {seriesIdx, dataIdx} = tooltipIdx;
|
|
||||||
const dataSeries = u.data[seriesIdx][dataIdx];
|
|
||||||
const dataTime = u.data[0][dataIdx];
|
|
||||||
const metric = metrics[seriesIdx - 1]?.metric || {};
|
|
||||||
const color = getColorFromString(series[seriesIdx].label || "");
|
|
||||||
|
|
||||||
const {width, height} = u.over.getBoundingClientRect();
|
|
||||||
const top = u.valToPos((dataSeries || 0), "y");
|
|
||||||
const lft = u.valToPos(dataTime, "x");
|
|
||||||
const {width: tooltipWidth, height: tooltipHeight} = tooltip.getBoundingClientRect();
|
|
||||||
const overflowX = lft + tooltipWidth >= width;
|
|
||||||
const overflowY = top + tooltipHeight >= height;
|
|
||||||
|
|
||||||
tooltip.style.display = "grid";
|
|
||||||
tooltip.style.top = `${tooltipOffset.top + top + 10 - (overflowY ? tooltipHeight + 10 : 0)}px`;
|
|
||||||
tooltip.style.left = `${tooltipOffset.left + lft + 10 - (overflowX ? tooltipWidth + 20 : 0)}px`;
|
|
||||||
const date = dayjs(new Date(dataTime * 1000)).format("YYYY-MM-DD HH:mm:ss:SSS (Z)");
|
|
||||||
const info = Object.keys(metric).filter(k => k !== "__name__").map(k => `<div><b>${k}</b>: ${metric[k]}</div>`).join("");
|
|
||||||
const marker = `<div class="u-tooltip__marker" style="background: ${color}"></div>`;
|
|
||||||
tooltip.innerHTML = `<div>${date}</div>
|
|
||||||
<div class="u-tooltip-data">
|
|
||||||
${marker}${metric.__name__ || ""}: <b class="u-tooltip-data__value">${dataSeries}</b>
|
|
||||||
</div>
|
|
||||||
<div class="u-tooltip__info">${info}</div>`;
|
|
||||||
};
|
|
||||||
|
|
||||||
export const getHideSeries = ({hideSeries, label, metaKey, series}: HideSeriesArgs): string[] => {
|
|
||||||
const include = hideSeries.includes(label);
|
|
||||||
const labels = series.map(s => s.label || "").filter(l => l);
|
|
||||||
if (metaKey && include) {
|
|
||||||
return [...labels.filter(l => l !== label)];
|
|
||||||
} else if (metaKey && !include) {
|
|
||||||
return hideSeries.length === series.length - 2 ? [] : [...labels.filter(l => l !== label)];
|
|
||||||
}
|
|
||||||
return include ? hideSeries.filter(l => l !== label) : [...hideSeries, label];
|
|
||||||
};
|
|
||||||
|
|
||||||
export const getTimeSeries = (times: number[]): number[] => {
|
|
||||||
const allTimes = Array.from(new Set(times)).sort((a,b) => a-b);
|
|
||||||
const step = getMinFromArray(allTimes.map((t, i) => allTimes[i + 1] - t));
|
|
||||||
const length = allTimes.length;
|
|
||||||
const startTime = allTimes[0] || 0;
|
|
||||||
return new Array(length).fill(startTime).map((d, i) => roundTimeSeconds(d + (step * i)));
|
|
||||||
};
|
|
||||||
|
|
||||||
export const getLimitsYAxis = (values: number[]): [number, number] => {
|
|
||||||
const min = getMinFromArray(values);
|
|
||||||
const max = getMaxFromArray(values);
|
|
||||||
return [min - (min * 0.05), max + (max * 0.05)];
|
|
||||||
};
|
|
||||||
|
|
||||||
export const getSeriesItem = (d: MetricResult, hideSeries: string[]): Series => {
|
|
||||||
const label = getNameForMetric(d);
|
|
||||||
return {
|
|
||||||
label,
|
|
||||||
width: 1.5,
|
|
||||||
stroke: getColorFromString(label),
|
|
||||||
show: !hideSeries.includes(label),
|
|
||||||
scale: "y"
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
export const getLegendItem = (s: uPlotSeries): LegendItem => ({
|
|
||||||
label: s.label || "",
|
|
||||||
color: s.stroke as string,
|
|
||||||
checked: s.show || false
|
|
||||||
});
|
|
||||||
|
|
||||||
export const dragChart = ({e, factor = 0.85, u, setPanning, setPlotScale}: DragArgs): void => {
|
|
||||||
if (e.button !== 0) return;
|
|
||||||
e.preventDefault();
|
|
||||||
setPanning(true);
|
|
||||||
const leftStart = e.clientX;
|
|
||||||
const xUnitsPerPx = u.posToVal(1, "x") - u.posToVal(0, "x");
|
|
||||||
const scXMin = u.scales.x.min || 0;
|
|
||||||
const scXMax = u.scales.x.max || 0;
|
|
||||||
|
|
||||||
const mouseMove = (e: MouseEvent) => {
|
|
||||||
e.preventDefault();
|
|
||||||
const dx = xUnitsPerPx * ((e.clientX - leftStart) * factor);
|
|
||||||
setPlotScale({u, min: scXMin - dx, max: scXMax - dx});
|
|
||||||
};
|
|
||||||
|
|
||||||
const mouseUp = () => {
|
|
||||||
setPanning(false);
|
|
||||||
document.removeEventListener("mousemove", mouseMove);
|
|
||||||
document.removeEventListener("mouseup", mouseUp);
|
|
||||||
};
|
|
||||||
|
|
||||||
document.addEventListener("mousemove", mouseMove);
|
|
||||||
document.addEventListener("mouseup", mouseUp);
|
|
||||||
};
|
|
30
app/vmui/packages/vmui/src/utils/uplot/axes.ts
Normal file
30
app/vmui/packages/vmui/src/utils/uplot/axes.ts
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
import {Axis, Series} from "uplot";
|
||||||
|
import {getMaxFromArray, getMinFromArray} from "../math";
|
||||||
|
import {roundTimeSeconds} from "../time";
|
||||||
|
import {AxisRange} from "../../state/graph/reducer";
|
||||||
|
import {formatTicks} from "./helpers";
|
||||||
|
|
||||||
|
export const getAxes = (series: Series[]): Axis[] => Array.from(new Set(series.map(s => s.scale))).map(a => {
|
||||||
|
const axis = {scale: a, show: true, font: "10px Arial", values: formatTicks};
|
||||||
|
if (!a) return {space: 80};
|
||||||
|
if (!(Number(a) % 2)) return {...axis, side: 1};
|
||||||
|
return axis;
|
||||||
|
});
|
||||||
|
|
||||||
|
export const getTimeSeries = (times: number[]): number[] => {
|
||||||
|
const allTimes = Array.from(new Set(times)).sort((a, b) => a - b);
|
||||||
|
const step = getMinFromArray(allTimes.map((t, i) => allTimes[i + 1] - t));
|
||||||
|
const startTime = allTimes[0] || 0;
|
||||||
|
return new Array(allTimes.length).fill(startTime).map((d, i) => roundTimeSeconds(d + (step * i)));
|
||||||
|
};
|
||||||
|
|
||||||
|
export const getLimitsYAxis = (values: { [key: string]: number[] }): AxisRange => {
|
||||||
|
const result: AxisRange = {};
|
||||||
|
for (const key in values) {
|
||||||
|
const numbers = values[key];
|
||||||
|
const min = getMinFromArray(numbers);
|
||||||
|
const max = getMaxFromArray(numbers);
|
||||||
|
result[key] = [min - (min * 0.05), max + (max * 0.05)];
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
};
|
25
app/vmui/packages/vmui/src/utils/uplot/events.ts
Normal file
25
app/vmui/packages/vmui/src/utils/uplot/events.ts
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
import {DragArgs} from "./types";
|
||||||
|
|
||||||
|
export const dragChart = ({e, factor = 0.85, u, setPanning, setPlotScale}: DragArgs): void => {
|
||||||
|
if (e.button !== 0) return;
|
||||||
|
e.preventDefault();
|
||||||
|
setPanning(true);
|
||||||
|
const leftStart = e.clientX;
|
||||||
|
const xUnitsPerPx = u.posToVal(1, "x") - u.posToVal(0, "x");
|
||||||
|
const scXMin = u.scales.x.min || 0;
|
||||||
|
const scXMax = u.scales.x.max || 0;
|
||||||
|
|
||||||
|
const mouseMove = (e: MouseEvent) => {
|
||||||
|
e.preventDefault();
|
||||||
|
const dx = xUnitsPerPx * ((e.clientX - leftStart) * factor);
|
||||||
|
setPlotScale({u, min: scXMin - dx, max: scXMax - dx});
|
||||||
|
};
|
||||||
|
const mouseUp = () => {
|
||||||
|
setPanning(false);
|
||||||
|
document.removeEventListener("mousemove", mouseMove);
|
||||||
|
document.removeEventListener("mouseup", mouseUp);
|
||||||
|
};
|
||||||
|
|
||||||
|
document.addEventListener("mousemove", mouseMove);
|
||||||
|
document.addEventListener("mouseup", mouseUp);
|
||||||
|
};
|
34
app/vmui/packages/vmui/src/utils/uplot/helpers.ts
Normal file
34
app/vmui/packages/vmui/src/utils/uplot/helpers.ts
Normal file
|
@ -0,0 +1,34 @@
|
||||||
|
import uPlot from "uplot";
|
||||||
|
import numeral from "numeral";
|
||||||
|
import {getColorFromString} from "../color";
|
||||||
|
|
||||||
|
export const defaultOptions = {
|
||||||
|
height: 500,
|
||||||
|
legend: {
|
||||||
|
show: false
|
||||||
|
},
|
||||||
|
cursor: {
|
||||||
|
drag: {
|
||||||
|
x: false,
|
||||||
|
y: false
|
||||||
|
},
|
||||||
|
focus: {
|
||||||
|
prox: 30
|
||||||
|
},
|
||||||
|
bind: {
|
||||||
|
mouseup: (): null => null,
|
||||||
|
mousedown: (): null => null,
|
||||||
|
click: (): null => null,
|
||||||
|
dblclick: (): null => null,
|
||||||
|
mouseenter: (): null => null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
export const formatTicks = (u: uPlot, ticks: number[]): (string | number)[] => {
|
||||||
|
return ticks.map(n => n > 1000 ? numeral(n).format("0.0a") : n);
|
||||||
|
};
|
||||||
|
|
||||||
|
/** Derives a stable per-series line color from the scale/label combination. */
export const getColorLine = (scale: number, label: string): string => {
  return getColorFromString(`${scale}${label}`);
};
|
||||||
|
|
||||||
|
/**
 * Dash pattern for a series line: groups 0 and 1 are drawn solid (empty
 * pattern); higher groups get a dash whose length grows with the group number
 * so lines from different queries can be told apart.
 */
export const getDashLine = (group: number): number[] => {
  if (group <= 1) {
    return [];
  }
  return [group * 4, group * 1.2];
};
|
41
app/vmui/packages/vmui/src/utils/uplot/series.ts
Normal file
41
app/vmui/packages/vmui/src/utils/uplot/series.ts
Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
import {MetricResult} from "../../api/types";
|
||||||
|
import {Series} from "uplot";
|
||||||
|
import {getNameForMetric} from "../metric";
|
||||||
|
import {LegendItem} from "./types";
|
||||||
|
import {getColorLine, getDashLine} from "./helpers";
|
||||||
|
import {HideSeriesArgs} from "./types";
|
||||||
|
|
||||||
|
export const getSeriesItem = (d: MetricResult, hideSeries: string[]): Series => {
|
||||||
|
const label = getNameForMetric(d);
|
||||||
|
return {
|
||||||
|
label,
|
||||||
|
dash: getDashLine(d.group),
|
||||||
|
width: 1.5,
|
||||||
|
stroke: getColorLine(d.group, label),
|
||||||
|
show: !includesHideSeries(label, d.group, hideSeries),
|
||||||
|
scale: String(d.group)
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
export const getLegendItem = (s: Series, group: number): LegendItem => ({
|
||||||
|
group,
|
||||||
|
label: s.label || "",
|
||||||
|
color: s.stroke as string,
|
||||||
|
checked: s.show || false
|
||||||
|
});
|
||||||
|
|
||||||
|
export const getHideSeries = ({hideSeries, legend, metaKey, series}: HideSeriesArgs): string[] => {
|
||||||
|
const label = `${legend.group}.${legend.label}`;
|
||||||
|
const include = includesHideSeries(legend.label, legend.group, hideSeries);
|
||||||
|
const labels = series.map(s => `${s.scale}.${s.label}`);
|
||||||
|
if (metaKey && include) {
|
||||||
|
return [...labels.filter(l => l !== label)];
|
||||||
|
} else if (metaKey && !include) {
|
||||||
|
return hideSeries.length >= series.length - 1 ? [] : [...labels.filter(l => l !== label)];
|
||||||
|
}
|
||||||
|
return include ? hideSeries.filter(l => l !== label) : [...hideSeries, label];
|
||||||
|
};
|
||||||
|
|
||||||
|
export const includesHideSeries = (label: string, group: string | number, hideSeries: string[]): boolean => {
|
||||||
|
return hideSeries.includes(`${group}.${label}`);
|
||||||
|
};
|
30
app/vmui/packages/vmui/src/utils/uplot/tooltip.ts
Normal file
30
app/vmui/packages/vmui/src/utils/uplot/tooltip.ts
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
import dayjs from "dayjs";
|
||||||
|
import {SetupTooltip} from "./types";
|
||||||
|
import {getColorLine} from "./helpers";
|
||||||
|
|
||||||
|
// Positions and fills the chart tooltip for the currently hovered data point.
// Flips the tooltip to the other side of the cursor when it would overflow
// the right or bottom edge of the plot area.
export const setTooltip = ({u, tooltipIdx, metrics, series, tooltip, tooltipOffset}: SetupTooltip): void => {
  const {seriesIdx, dataIdx} = tooltipIdx;
  // Hovered value and its timestamp (u.data[0] is the x/time column).
  const dataSeries = u.data[seriesIdx][dataIdx];
  const dataTime = u.data[0][dataIdx];
  // seriesIdx 0 is the x-axis, so metric metadata is offset by one.
  const metric = metrics[seriesIdx - 1]?.metric || {};
  const color = getColorLine(Number(series[seriesIdx].scale || 0), series[seriesIdx].label || "");

  const {width, height} = u.over.getBoundingClientRect();
  // Pixel position of the hovered point inside the plot area.
  const top = u.valToPos((dataSeries || 0), series[seriesIdx]?.scale || "1");
  const lft = u.valToPos(dataTime, "x");
  const {width: tooltipWidth, height: tooltipHeight} = tooltip.getBoundingClientRect();
  // Would the tooltip spill past the right/bottom edge of the plot?
  const overflowX = lft + tooltipWidth >= width;
  const overflowY = top + tooltipHeight >= height;

  tooltip.style.display = "grid";
  // Offset 10px from the point; on overflow, flip to the other side.
  tooltip.style.top = `${tooltipOffset.top + top + 10 - (overflowY ? tooltipHeight + 10 : 0)}px`;
  tooltip.style.left = `${tooltipOffset.left + lft + 10 - (overflowX ? tooltipWidth + 20 : 0)}px`;
  // dataTime appears to be in seconds (multiplied by 1000 for Date) — confirm.
  const date = dayjs(new Date(dataTime * 1000)).format("YYYY-MM-DD HH:mm:ss:SSS (Z)");
  // Label rows for every label except the metric name itself.
  const info = Object.keys(metric).filter(k => k !== "__name__").map(k => `<div><b>${k}</b>: ${metric[k]}</div>`).join("");
  const marker = `<div class="u-tooltip__marker" style="background: ${color}"></div>`;
  tooltip.innerHTML = `<div>${date}</div>
                      <div class="u-tooltip-data">
                        ${marker}${metric.__name__ || ""}: <b class="u-tooltip-data__value">${dataSeries}</b>
                      </div>
                      <div class="u-tooltip__info">${info}</div>`;
};
|
39
app/vmui/packages/vmui/src/utils/uplot/types.ts
Normal file
39
app/vmui/packages/vmui/src/utils/uplot/types.ts
Normal file
|
@ -0,0 +1,39 @@
|
||||||
|
import uPlot, {Series} from "uplot";
|
||||||
|
import {MetricResult} from "../../api/types";
|
||||||
|
|
||||||
|
/** Arguments for positioning and filling the chart tooltip. */
export interface SetupTooltip {
  u: uPlot,
  metrics: MetricResult[],
  series: Series[],
  tooltip: HTMLDivElement,
  // Offset of the plot area, used to convert plot-local coordinates
  // into tooltip CSS positions.
  tooltipOffset: {
    left: number,
    top: number
  },
  // Location of the hovered point: which series and which data index.
  tooltipIdx: {
    seriesIdx: number,
    dataIdx: number
  }
}

/** Arguments for computing the new hidden-series list after a legend click. */
export interface HideSeriesArgs {
  hideSeries: string[],
  legend: LegendItem,
  // True when the meta/ctrl modifier was held during the click.
  metaKey: boolean,
  series: Series[]
}

/** Arguments for dragChart: the initiating mousedown plus pan callbacks. */
export interface DragArgs {
  e: MouseEvent,
  u: uPlot,
  // Damping factor applied to mouse travel while panning.
  factor: number,
  setPanning: (enable: boolean) => void,
  setPlotScale: ({u, min, max}: { u: uPlot, min: number, max: number }) => void
}

/** One entry in the custom chart legend. */
export interface LegendItem {
  group: number;
  label: string;
  color: string;
  checked: boolean;
}
|
|
@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics
|
||||||
|
|
||||||
ROOT_IMAGE ?= alpine:3.15.0
|
ROOT_IMAGE ?= alpine:3.15.0
|
||||||
CERTS_IMAGE := alpine:3.15.0
|
CERTS_IMAGE := alpine:3.15.0
|
||||||
GO_BUILDER_IMAGE := golang:1.17.3-alpine
|
GO_BUILDER_IMAGE := golang:1.17.5-alpine
|
||||||
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)
|
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)
|
||||||
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
|
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
|
||||||
|
|
||||||
|
|
|
@ -39,7 +39,7 @@ services:
|
||||||
restart: always
|
restart: always
|
||||||
grafana:
|
grafana:
|
||||||
container_name: grafana
|
container_name: grafana
|
||||||
image: grafana/grafana:8.2.2
|
image: grafana/grafana:8.3.2
|
||||||
depends_on:
|
depends_on:
|
||||||
- "victoriametrics"
|
- "victoriametrics"
|
||||||
ports:
|
ports:
|
||||||
|
|
|
@ -6,6 +6,21 @@ sort: 15
|
||||||
|
|
||||||
## tip
|
## tip
|
||||||
|
|
||||||
|
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add ability to analyze the correlation between two queries on a single graph. Just click `+Query` button, enter the second query in the newly appeared input field and press `Ctrl+Enter`. Results for both queries should be displayed simultaneously on the same graph. Every query has its own vertical scale, which is displayed on the left and the right side of the graph. Lines for the second query are dashed. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1916).
|
||||||
|
* FEATURE: accept optional `extra_filters[]=series_selector` query args at Prometheus query APIs additionally to `extra_label` query args. This allows enforcing additional filters for all the Prometheus query APIs by using [vmgateway](https://docs.victoriametrics.com/vmgateway.html) or [vmauth](https://docs.victoriametrics.com/vmauth.html). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1863).
|
||||||
|
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): allow specifying `http` and `https` urls in `-auth.config` command-line flag. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1898). Thanks for @TFM93 .
|
||||||
|
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow specifying `http` and `https` urls in the following command-line flags: `-promscrape.config`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`.
|
||||||
|
* FEATURE: vminsert: allow specifying `http` and `https` urls in `-relabelConfig` command-line flag.
|
||||||
|
* FEATURE: vminsert: add `-maxLabelValueLen` command-line flag for the ability to configure the maximum length of label value. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1908).
|
||||||
|
* FEATURE: preserve the order of time series passed to [limit_offset](https://docs.victoriametrics.com/MetricsQL.html#limit_offset) function. This allows implementing series paging via `limit_offset(limit, offset, sort_by_label(...))`. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1920) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/951) issues.
|
||||||
|
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add ability to override the interval between returned datapoints. By default it is automatically calculated depending on the selected time range and horizontal resolution of the graph. Now it is possible to override it with custom values. This may be useful during data exploration and debugging.
|
||||||
|
* FEATURE: automatically convert `(value1|...|valueN)` into `{value1,...,valueN}` inside `__graphite__` pseudo-label. This allows using [Grafana multi-value template variables](https://grafana.com/docs/grafana/latest/variables/formatting-multi-value-variables/) inside `__graphite__` pseudo-label. For example, `{__graphite__=~"foo.($bar)"}` is expanded to `{__graphite__=~"foo.{x,y}"}` if both `x` and `y` are selected for `$bar` template variable. See [these docs](https://docs.victoriametrics.com/#selecting-graphite-metrics) for details.
|
||||||
|
|
||||||
|
* BUGFIX: fix `unaligned 64-bit atomic operation` panic on 32-bit architectures, which has been introduced in v1.70.0. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1944).
|
||||||
|
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): restore the ability to use `$labels.alertname` in labels templating. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1921).
|
||||||
|
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): add missing `query` caption to the input field for the query. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1900).
|
||||||
|
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix navigation over query history with `Ctrl+up/down` and fix zoom relatively to the cursor position. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1936).
|
||||||
|
|
||||||
|
|
||||||
## [v1.70.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.70.0)
|
## [v1.70.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.70.0)
|
||||||
|
|
||||||
|
@ -14,7 +29,7 @@ sort: 15
|
||||||
* FEATURE: vmauth: allow using optional `name` field in configs. This field is then used as `username` label value for `vmauth_user_requests_total` metric. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1805).
|
* FEATURE: vmauth: allow using optional `name` field in configs. This field is then used as `username` label value for `vmauth_user_requests_total` metric. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1805).
|
||||||
* FEATURE: vmagent: export `vm_persistentqueue_read_duration_seconds_total` and `vm_persistentqueue_write_duration_seconds_total` metrics, which can be used for detecting persistent queue saturation with `rate(vm_persistentqueue_write_duration_seconds_total) > 0.9` alerting rule.
|
* FEATURE: vmagent: export `vm_persistentqueue_read_duration_seconds_total` and `vm_persistentqueue_write_duration_seconds_total` metrics, which can be used for detecting persistent queue saturation with `rate(vm_persistentqueue_write_duration_seconds_total) > 0.9` alerting rule.
|
||||||
* FEATURE: export `vm_filestream_read_duration_seconds_total` and `vm_filestream_write_duration_seconds_total` metrics, which can be used for detecting persistent disk saturation with `rate(vm_filestream_read_duration_seconds_total) > 0.9` alerting rule.
|
* FEATURE: export `vm_filestream_read_duration_seconds_total` and `vm_filestream_write_duration_seconds_total` metrics, which can be used for detecting persistent disk saturation with `rate(vm_filestream_read_duration_seconds_total) > 0.9` alerting rule.
|
||||||
* FEATURE: export `vm_cache_size_max_bytes` metrics, which show capacity for various caches. These metrics can be used for determining caches reaches its capacity with `vm_cache_size_bytes / vm_cache_size_max_bytes > 0.9` query.
|
* FEATURE: export `vm_cache_size_max_bytes` metrics, which show capacity for various caches. These metrics can be used for determining when caches reach their capacity with `vm_cache_size_bytes / vm_cache_size_max_bytes > 0.9` query.
|
||||||
* FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html), [vmrestore](https://docs.victoriametrics.com/vmrestore.html): add `-s3ForcePathStyle` command-line flag, which can be used for making backups to [Aliyun OSS](https://www.aliyun.com/product/oss). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1802).
|
* FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html), [vmrestore](https://docs.victoriametrics.com/vmrestore.html): add `-s3ForcePathStyle` command-line flag, which can be used for making backups to [Aliyun OSS](https://www.aliyun.com/product/oss). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1802).
|
||||||
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): improve data migration from OpenTSDB. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1809). Thanks to @johnseekins .
|
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): improve data migration from OpenTSDB. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1809). Thanks to @johnseekins .
|
||||||
* FEATURE: suppress `connection reset by peer` errors when remote client resets TCP connection to VictoriaMetrics / vmagent while ingesting the data via InfluxDB line protocol, Graphite protocol or OpenTSDB protocol. This error is expected, so there is no need in logging it.
|
* FEATURE: suppress `connection reset by peer` errors when remote client resets TCP connection to VictoriaMetrics / vmagent while ingesting the data via InfluxDB line protocol, Graphite protocol or OpenTSDB protocol. This error is expected, so there is no need in logging it.
|
||||||
|
@ -22,7 +37,7 @@ sort: 15
|
||||||
* FEATURE: vmalert: make `-notifier.url` command-line flag optional. This flag can be omitted if `vmalert` is used solely for recording rules and doesn't evaluate alerting rules. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1870).
|
* FEATURE: vmalert: make `-notifier.url` command-line flag optional. This flag can be omitted if `vmalert` is used solely for recording rules and doesn't evaluate alerting rules. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1870).
|
||||||
* FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html), [vmrestore](https://docs.victoriametrics.com/vmrestore.html): export internal metrics at `http://vmbackup:8420/metrics` and `http://vmrestore:8421/metrics` for better visibility of the backup/restore process.
|
* FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html), [vmrestore](https://docs.victoriametrics.com/vmrestore.html): export internal metrics at `http://vmbackup:8420/metrics` and `http://vmrestore:8421/metrics` for better visibility of the backup/restore process.
|
||||||
* FEATURE: allow trailing whitespace after the timestamp when [parsing Graphite plaintext lines](https://docs.victoriametrics.com/#how-to-send-data-from-graphite-compatible-agents-such-as-statsd). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1865).
|
* FEATURE: allow trailing whitespace after the timestamp when [parsing Graphite plaintext lines](https://docs.victoriametrics.com/#how-to-send-data-from-graphite-compatible-agents-such-as-statsd). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1865).
|
||||||
* FEATURE: expose `/-/healthy` and `/-/ready` endpoints as Prometheus does. This is needed for improving integration with third-party solutions, which rely on these endpoints. See [tis issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1833).
|
* FEATURE: expose `/-/healthy` and `/-/ready` endpoints as Prometheus does. This is needed for improving integration with third-party solutions, which rely on these endpoints. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1833).
|
||||||
|
|
||||||
* BUGFIX: vmagent: prevent from scraping duplicate targets if `-promscrape.dropOriginalLabels` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1830). Thanks to @guidao for the fix.
|
* BUGFIX: vmagent: prevent from scraping duplicate targets if `-promscrape.dropOriginalLabels` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1830). Thanks to @guidao for the fix.
|
||||||
* BUGFIX: vmstorage [enterprise](https://victoriametrics.com/enterprise.html): added missing `vm_tenant_used_tenant_bytes` metric, which shows the approximate per-tenant disk usage. See [these docs](https://docs.victoriametrics.com/PerTenantStatistic.html) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1605).
|
* BUGFIX: vmstorage [enterprise](https://victoriametrics.com/enterprise.html): added missing `vm_tenant_used_tenant_bytes` metric, which shows the approximate per-tenant disk usage. See [these docs](https://docs.victoriametrics.com/PerTenantStatistic.html) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1605).
|
||||||
|
|
|
@ -544,8 +544,10 @@ Below is the output for `/path/to/vminsert -help`:
|
||||||
-maxInsertRequestSize size
|
-maxInsertRequestSize size
|
||||||
The maximum size in bytes of a single Prometheus remote_write API request
|
The maximum size in bytes of a single Prometheus remote_write API request
|
||||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
||||||
|
-maxLabelValueLen int
|
||||||
|
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
|
||||||
-maxLabelsPerTimeseries int
|
-maxLabelsPerTimeseries int
|
||||||
The maximum number of labels accepted per time series. Superfluous labels are dropped (default 30)
|
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
|
||||||
-memory.allowedBytes size
|
-memory.allowedBytes size
|
||||||
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
||||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||||
|
@ -563,7 +565,7 @@ Below is the output for `/path/to/vminsert -help`:
|
||||||
-opentsdbhttpTrimTimestamp duration
|
-opentsdbhttpTrimTimestamp duration
|
||||||
Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||||
-relabelConfig string
|
-relabelConfig string
|
||||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||||
-relabelDebug
|
-relabelDebug
|
||||||
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
||||||
-replicationFactor int
|
-replicationFactor int
|
||||||
|
|
|
@ -30,7 +30,7 @@ MetricsQL implements [PromQL](https://medium.com/@valyala/promql-tutorial-for-be
|
||||||
|
|
||||||
This functionality can be evaluated at [an editable Grafana dashboard](https://play-grafana.victoriametrics.com/d/4ome8yJmz/node-exporter-on-victoriametrics-demo) or at your own [VictoriaMetrics instance](https://docs.victoriametrics.com/#how-to-start-victoriametrics).
|
This functionality can be evaluated at [an editable Grafana dashboard](https://play-grafana.victoriametrics.com/d/4ome8yJmz/node-exporter-on-victoriametrics-demo) or at your own [VictoriaMetrics instance](https://docs.victoriametrics.com/#how-to-start-victoriametrics).
|
||||||
|
|
||||||
- Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax. This is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but usually works faster and is easier to use when migrating from Graphite. VictoriaMetrics also can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details. See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
|
- Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax. See [these docs](https://docs.victoriametrics.com/#selecting-graphite-metrics). VictoriaMetrics also can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details. See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
|
||||||
- Lookbehind window in square brackets may be omitted. VictoriaMetrics automatically selects the lookbehind window depending on the current step used for building the graph (e.g. `step` query arg passed to [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries)). For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`. It is equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
|
- Lookbehind window in square brackets may be omitted. VictoriaMetrics automatically selects the lookbehind window depending on the current step used for building the graph (e.g. `step` query arg passed to [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries)). For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`. It is equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
|
||||||
- [Aggregate functions](#aggregate-functions) accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point across time series returned by `q1`, `q2` and `q3`.
|
- [Aggregate functions](#aggregate-functions) accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point across time series returned by `q1`, `q2` and `q3`.
|
||||||
- [offset](https://prometheus.io/docs/prometheus/latest/querying/basics/#offset-modifier), lookbehind window in square brackets and `step` value for [subquery](#subqueries) may refer to the current step aka `$__interval` value from Grafana with `[Ni]` syntax. For instance, `rate(metric[10i] offset 5i)` would return per-second rate over a range covering 10 previous steps with the offset of 5 steps.
|
- [offset](https://prometheus.io/docs/prometheus/latest/querying/basics/#offset-modifier), lookbehind window in square brackets and `step` value for [subquery](#subqueries) may refer to the current step aka `$__interval` value from Grafana with `[Ni]` syntax. For instance, `rate(metric[10i] offset 5i)` would return per-second rate over a range covering 10 previous steps with the offset of 5 steps.
|
||||||
|
@ -487,6 +487,10 @@ See also [implicit query conversions](#implicit-query-conversions).
|
||||||
|
|
||||||
`keep_next_value(q)` fills gaps with the value of the next non-empty point in every time series returned by `q`. See also [keep_last_value](#keep_last_value) and [interpolate](#interpolate).
|
`keep_next_value(q)` fills gaps with the value of the next non-empty point in every time series returned by `q`. See also [keep_last_value](#keep_last_value) and [interpolate](#interpolate).
|
||||||
|
|
||||||
|
#### limit_offset
|
||||||
|
|
||||||
|
`limit_offset(limit, offset, q)` skips `offset` time series from series returned by `q` and then returns up to `limit` of the remaining time series per each group. This allows implementing simple paging for `q` time series. See also [limitk](#limitk).
|
||||||
|
|
||||||
#### ln
|
#### ln
|
||||||
|
|
||||||
`ln(q)` calculates `ln(v)` for every point `v` of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [exp](#exp) and [log2](#log2).
|
`ln(q)` calculates `ln(v)` for every point `v` of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [exp](#exp) and [log2](#log2).
|
||||||
|
@ -823,11 +827,6 @@ See also [implicit query conversions](#implicit-query-conversions).
|
||||||
|
|
||||||
`histogram(q)` calculates [VictoriaMetrics histogram](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) per each group of points with the same timestamp. Useful for visualizing big number of time series via a heatmap. See [this article](https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) for more details.
|
`histogram(q)` calculates [VictoriaMetrics histogram](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) per each group of points with the same timestamp. Useful for visualizing big number of time series via a heatmap. See [this article](https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) for more details.
|
||||||
|
|
||||||
#### limit_offset
|
|
||||||
|
|
||||||
`limit_offset(limit, offset, q)` skips `offset` time series from series returned by `q` and then returns up to `limit` of the remaining time series. This allows implementing simple paging for `q` time series. See also [limitk](#limitk).
|
|
||||||
|
|
||||||
|
|
||||||
#### limitk
|
#### limitk
|
||||||
|
|
||||||
`limitk(k, q) by (group_labels)` returns up to `k` time series per each `group_labels` out of time series returned by `q`. The returned set of time series remain the same across calls. See also [limit_offset](#limit_offset).
|
`limitk(k, q) by (group_labels)` returns up to `k` time series per each `group_labels` out of time series returned by `q`. The returned set of time series remain the same across calls. See also [limit_offset](#limit_offset).
|
||||||
|
|
114
docs/README.md
114
docs/README.md
|
@ -13,46 +13,13 @@
|
||||||
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
||||||
|
|
||||||
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||||
in [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), in [Snap packages](https://snapcraft.io/victoriametrics)
|
[Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
|
||||||
and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and follow [these instructions](#how-to-start-victoriametrics).
|
and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and follow [these instructions](#how-to-start-victoriametrics).
|
||||||
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
||||||
|
|
||||||
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||||
|
|
||||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
|
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/enterprise.html). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||||
See [features available in enterprise package](https://victoriametrics.com/enterprise.html).
|
|
||||||
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
|
||||||
|
|
||||||
|
|
||||||
## Case studies and talks
|
|
||||||
|
|
||||||
Case studies:
|
|
||||||
|
|
||||||
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
|
||||||
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
|
||||||
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
|
||||||
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
|
||||||
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
|
||||||
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
|
||||||
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
|
||||||
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
|
||||||
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
|
||||||
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
|
||||||
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
|
||||||
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
|
||||||
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
|
||||||
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
|
||||||
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
|
||||||
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
|
||||||
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
|
||||||
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
|
||||||
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
|
||||||
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
|
||||||
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
|
||||||
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
|
||||||
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
|
||||||
|
|
||||||
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
|
||||||
|
|
||||||
|
|
||||||
## Prominent features
|
## Prominent features
|
||||||
|
@ -95,6 +62,37 @@ VictoriaMetrics has the following prominent features:
|
||||||
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
||||||
|
|
||||||
|
|
||||||
|
## Case studies and talks
|
||||||
|
|
||||||
|
Case studies:
|
||||||
|
|
||||||
|
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
||||||
|
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
||||||
|
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
||||||
|
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
||||||
|
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
||||||
|
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
||||||
|
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
||||||
|
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
||||||
|
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
||||||
|
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
||||||
|
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
||||||
|
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
||||||
|
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
||||||
|
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
||||||
|
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
||||||
|
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
||||||
|
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
||||||
|
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
||||||
|
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
||||||
|
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
||||||
|
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
||||||
|
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
||||||
|
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
||||||
|
|
||||||
|
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
||||||
|
|
||||||
|
|
||||||
## Operation
|
## Operation
|
||||||
|
|
||||||
## How to start VictoriaMetrics
|
## How to start VictoriaMetrics
|
||||||
|
@ -418,9 +416,15 @@ The `/api/v1/export` endpoint should return the following response:
|
||||||
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
||||||
|
|
||||||
* [Graphite API](#graphite-api-usage)
|
* [Graphite API](#graphite-api-usage)
|
||||||
* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
* [Prometheus querying API](#prometheus-querying-api-usage). See also [selecting Graphite metrics](#selecting-graphite-metrics).
|
||||||
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
||||||
|
|
||||||
|
## Selecting Graphite metrics
|
||||||
|
|
||||||
|
VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See [docs for Graphite paths and wildcards](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
||||||
|
|
||||||
|
The `__graphite__` pseudo-label supports e.g. alternate regexp filters such as `(value1|...|valueN)`. They are transparently converted to `{value1,...,valueN}` syntax [used in Graphite](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). This allows using [multi-value template variables in Grafana](https://grafana.com/docs/grafana/latest/variables/formatting-multi-value-variables/) inside `__graphite__` pseudo-label. For example, Grafana expands `{__graphite__=~"foo.$bar.baz"}` into `{__graphite__=~"foo.(x|y).baz"}` if `$bar` template variable contains `x` and `y` values. In this case the query is automatically converted into `{__graphite__=~"foo.{x,y}.baz"}` before execution.
|
||||||
|
|
||||||
## How to send data from OpenTSDB-compatible agents
|
## How to send data from OpenTSDB-compatible agents
|
||||||
|
|
||||||
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
||||||
|
@ -517,9 +521,10 @@ All the Prometheus querying API handlers can be prepended with `/prometheus` pre
|
||||||
### Prometheus querying API enhancements
|
### Prometheus querying API enhancements
|
||||||
|
|
||||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
|
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
|
||||||
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
|
`/api/v1/query_range?extra_label=user_id=123&extra_label=group_id=456&query=<query>` would automatically add `{user_id="123",group_id="456"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||||
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
|
|
||||||
in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
VictoriaMetrics accepts optional `extra_filters[]=series_selector` query arg, which can be used for enforcing arbitrary label filters for queries. For example,
|
||||||
|
`/api/v1/query_range?extra_filters[]={env=~"prod|staging",user="xyz"}&query=<query>` would automatically add `{env=~"prod|staging",user="xyz"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_filters[]` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||||
|
|
||||||
VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
|
VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
|
||||||
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
|
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
|
||||||
|
@ -556,12 +561,11 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap
|
||||||
|
|
||||||
All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.
|
All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.
|
||||||
|
|
||||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg for all the Graphite APIs. This arg can be used for limiting the scope of time series
|
VictoriaMetrics accepts optional query args: `extra_label=<label_name>=<label_value>` and `extra_filters[]=series_selector` query args for all the Graphite APIs. These args can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||||
visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics.
|
|
||||||
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
||||||
|
|
||||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
|
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). See [these docs](#selecting-graphite-metrics).
|
||||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function.
|
|
||||||
|
|
||||||
|
|
||||||
### Graphite Render API usage
|
### Graphite Render API usage
|
||||||
|
@ -612,6 +616,10 @@ Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressin
|
||||||
|
|
||||||
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful to disable the response cache by clicking `Enable cache` checkbox.
|
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful to disable the response cache by clicking `Enable cache` checkbox.
|
||||||
|
|
||||||
|
VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by clicking `Override step value` checkbox.
|
||||||
|
|
||||||
|
VMUI allows investigating correlations between two queries on the same graph. Just click `+Query` button, enter the second query in the newly appeared input field and press `Ctrl+Enter`. Results for both queries should be displayed simultaneously on the same graph. Every query has its own vertical scale, which is displayed on the left and the right side of the graph. Lines for the second query are dashed.
|
||||||
|
|
||||||
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
|
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
|
||||||
|
|
||||||
|
|
||||||
|
@ -1025,6 +1033,7 @@ VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-sc
|
||||||
|
|
||||||
VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
|
VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
|
||||||
to a file containing a list of [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) entries.
|
to a file containing a list of [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) entries.
|
||||||
|
The `-relabelConfig` also can point to http or https url. For example, `-relabelConfig=https://config-server/relabel_config.yml`.
|
||||||
See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
|
See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
|
||||||
|
|
||||||
Example contents for `-relabelConfig` file:
|
Example contents for `-relabelConfig` file:
|
||||||
|
@ -1217,7 +1226,8 @@ Consider setting the following command-line flags:
|
||||||
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
||||||
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
||||||
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
||||||
* `-configAuthKey` for protecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
* `-configAuthKey` for protecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||||
|
- `-pprofAuthKey` for protecting `/debug/pprof/*` endpoints, which can be used for [profiling](#profiling).
|
||||||
|
|
||||||
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
||||||
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
||||||
|
@ -1372,9 +1382,7 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
|
||||||
This prevents ingesting metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total`
|
This prevents ingesting metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total`
|
||||||
metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload.
|
metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload.
|
||||||
|
|
||||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then use `{__graphite__="foo.*.baz"}` syntax for selecting such metrics.
|
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then `{__graphite__="foo.*.baz"}` filter can be used for selecting such metrics. See [these docs](#selecting-graphite-metrics) for details.
|
||||||
This expression is equivalent to `{__name__=~"foo[.][^.]*[.]baz"}`, but it works faster and it is easier to use when migrating from Graphite.
|
|
||||||
See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function, which allows extracting the given groups from Graphite metric names.
|
|
||||||
|
|
||||||
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
||||||
|
|
||||||
|
@ -1493,9 +1501,11 @@ Contact us with any questions regarding VictoriaMetrics at [info@victoriametrics
|
||||||
Feel free to ask any questions regarding VictoriaMetrics:
|
Feel free to ask any questions regarding VictoriaMetrics:
|
||||||
|
|
||||||
* [slack](https://slack.victoriametrics.com/)
|
* [slack](https://slack.victoriametrics.com/)
|
||||||
|
* [linkedin](https://www.linkedin.com/company/victoriametrics/)
|
||||||
* [reddit](https://www.reddit.com/r/VictoriaMetrics/)
|
* [reddit](https://www.reddit.com/r/VictoriaMetrics/)
|
||||||
* [telegram-en](https://t.me/VictoriaMetrics_en)
|
* [telegram-en](https://t.me/VictoriaMetrics_en)
|
||||||
* [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
* [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
||||||
|
* [articles and talks about VictoriaMetrics in Russian](https://github.com/denisgolius/victoriametrics-ru-links)
|
||||||
* [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
* [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
||||||
|
|
||||||
If you like VictoriaMetrics and want to contribute, then we need the following:
|
If you like VictoriaMetrics and want to contribute, then we need the following:
|
||||||
|
@ -1650,8 +1660,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-maxInsertRequestSize size
|
-maxInsertRequestSize size
|
||||||
The maximum size in bytes of a single Prometheus remote_write API request
|
The maximum size in bytes of a single Prometheus remote_write API request
|
||||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
||||||
|
-maxLabelValueLen int
|
||||||
|
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
|
||||||
-maxLabelsPerTimeseries int
|
-maxLabelsPerTimeseries int
|
||||||
The maximum number of labels accepted per time series. Superfluous labels are dropped (default 30)
|
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
|
||||||
-memory.allowedBytes size
|
-memory.allowedBytes size
|
||||||
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
||||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||||
|
@ -1681,7 +1693,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-promscrape.cluster.replicationFactor int
|
-promscrape.cluster.replicationFactor int
|
||||||
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
||||||
-promscrape.config string
|
-promscrape.config string
|
||||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. The path can point to local file and to http url. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||||
-promscrape.config.dryRun
|
-promscrape.config.dryRun
|
||||||
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
||||||
-promscrape.config.strictParse
|
-promscrape.config.strictParse
|
||||||
|
@ -1748,7 +1760,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-promscrape.suppressScrapeErrors
|
-promscrape.suppressScrapeErrors
|
||||||
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
|
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
|
||||||
-relabelConfig string
|
-relabelConfig string
|
||||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||||
-relabelDebug
|
-relabelDebug
|
||||||
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
||||||
-retentionPeriod value
|
-retentionPeriod value
|
||||||
|
|
|
@ -31,10 +31,12 @@ sort: 17
|
||||||
|
|
||||||
### Public Announcement
|
### Public Announcement
|
||||||
|
|
||||||
1. Publish message in slack (victoriametrics.slack.com, general channel)
|
- Publish message in Slack at https://victoriametrics.slack.com
|
||||||
2. Post a tweet with release notes URL
|
- Post at Twitter at https://twitter.com/MetricsVictoria
|
||||||
3. Post in subreddit https://www.reddit.com/r/VictoriaMetrics/
|
- Post in Reddit at https://www.reddit.com/r/VictoriaMetrics/
|
||||||
4. Post in linkedin
|
- Post in Linkedin at https://www.linkedin.com/company/victoriametrics/
|
||||||
|
- Publish message in Telegram at https://t.me/VictoriaMetrics_en and https://t.me/VictoriaMetrics_ru1
|
||||||
|
- Publish message in google groups at https://groups.google.com/forum/#!forum/victorametrics-users
|
||||||
|
|
||||||
## Helm Charts
|
## Helm Charts
|
||||||
|
|
||||||
|
|
|
@ -17,46 +17,13 @@ sort: 1
|
||||||
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
||||||
|
|
||||||
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||||
in [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), in [Snap packages](https://snapcraft.io/victoriametrics)
|
[Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
|
||||||
and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics follow [these instructions](#how-to-start-victoriametrics).
|
and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and follow [these instructions](#how-to-start-victoriametrics).
|
||||||
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
||||||
|
|
||||||
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||||
|
|
||||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
|
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/enterprise.html). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||||
See [features available in enterprise package](https://victoriametrics.com/enterprise.html).
|
|
||||||
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
|
||||||
|
|
||||||
|
|
||||||
## Case studies and talks
|
|
||||||
|
|
||||||
Case studies:
|
|
||||||
|
|
||||||
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
|
||||||
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
|
||||||
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
|
||||||
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
|
||||||
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
|
||||||
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
|
||||||
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
|
||||||
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
|
||||||
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
|
||||||
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
|
||||||
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
|
||||||
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
|
||||||
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
|
||||||
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
|
||||||
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
|
||||||
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
|
||||||
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
|
||||||
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
|
||||||
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
|
||||||
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
|
||||||
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
|
||||||
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
|
||||||
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
|
||||||
|
|
||||||
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
|
||||||
|
|
||||||
|
|
||||||
## Prominent features
|
## Prominent features
|
||||||
|
@ -99,6 +66,37 @@ VictoriaMetrics has the following prominent features:
|
||||||
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
||||||
|
|
||||||
|
|
||||||
|
## Case studies and talks
|
||||||
|
|
||||||
|
Case studies:
|
||||||
|
|
||||||
|
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
||||||
|
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
||||||
|
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
||||||
|
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
||||||
|
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
||||||
|
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
||||||
|
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
||||||
|
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
||||||
|
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
||||||
|
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
||||||
|
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
||||||
|
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
||||||
|
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
||||||
|
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
||||||
|
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
||||||
|
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
||||||
|
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
||||||
|
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
||||||
|
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
||||||
|
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
||||||
|
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
||||||
|
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
||||||
|
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
||||||
|
|
||||||
|
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
||||||
|
|
||||||
|
|
||||||
## Operation
|
## Operation
|
||||||
|
|
||||||
## How to start VictoriaMetrics
|
## How to start VictoriaMetrics
|
||||||
|
@ -422,9 +420,15 @@ The `/api/v1/export` endpoint should return the following response:
|
||||||
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
||||||
|
|
||||||
* [Graphite API](#graphite-api-usage)
|
* [Graphite API](#graphite-api-usage)
|
||||||
* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
* [Prometheus querying API](#prometheus-querying-api-usage). See also [selecting Graphite metrics](#selecting-graphite-metrics).
|
||||||
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
||||||
|
|
||||||
|
## Selecting Graphite metrics
|
||||||
|
|
||||||
|
VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See [docs for Graphite paths and wildcards](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
||||||
|
|
||||||
|
The `__graphite__` pseudo-label supports e.g. alternate regexp filters such as `(value1|...|valueN)`. They are transparently converted to `{value1,...,valueN}` syntax [used in Graphite](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). This allows using [multi-value template variables in Grafana](https://grafana.com/docs/grafana/latest/variables/formatting-multi-value-variables/) inside `__graphite__` pseudo-label. For example, Grafana expands `{__graphite__=~"foo.$bar.baz"}` into `{__graphite__=~"foo.(x|y).baz"}` if `$bar` template variable contains `x` and `y` values. In this case the query is automatically converted into `{__graphite__=~"foo.{x,y}.baz"}` before execution.
|
||||||
|
|
||||||
## How to send data from OpenTSDB-compatible agents
|
## How to send data from OpenTSDB-compatible agents
|
||||||
|
|
||||||
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
||||||
|
@ -521,9 +525,10 @@ All the Prometheus querying API handlers can be prepended with `/prometheus` pre
|
||||||
### Prometheus querying API enhancements
|
### Prometheus querying API enhancements
|
||||||
|
|
||||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
|
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
|
||||||
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
|
`/api/v1/query_range?extra_label=user_id=123&extra_label=group_id=456&query=<query>` would automatically add `{user_id="123",group_id="456"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||||
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
|
|
||||||
in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
VictoriaMetrics accepts optional `extra_filters[]=series_selector` query arg, which can be used for enforcing arbitrary label filters for queries. For example,
|
||||||
|
`/api/v1/query_range?extra_filters[]={env=~"prod|staging",user="xyz"}&query=<query>` would automatically add `{env=~"prod|staging",user="xyz"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_filters[]` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||||
|
|
||||||
VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
|
VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
|
||||||
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
|
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
|
||||||
|
@ -560,12 +565,11 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap
|
||||||
|
|
||||||
All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.
|
All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.
|
||||||
|
|
||||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg for all the Graphite APIs. This arg can be used for limiting the scope of time series
|
VictoriaMetrics accepts optional query args: `extra_label=<label_name>=<label_value>` and `extra_filters[]=series_selector` query args for all the Graphite APIs. These args can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||||
visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics.
|
|
||||||
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
||||||
|
|
||||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
|
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). See [these docs](#selecting-graphite-metrics).
|
||||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function.
|
|
||||||
|
|
||||||
|
|
||||||
### Graphite Render API usage
|
### Graphite Render API usage
|
||||||
|
@ -616,6 +620,10 @@ Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressin
|
||||||
|
|
||||||
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful disabling response cache by clicking `Enable cache` checkbox.
|
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful to disable the response cache by clicking the `Enable cache` checkbox.
|
||||||
|
|
||||||
|
VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by clicking `Override step value` checkbox.
|
||||||
|
|
||||||
|
VMUI allows investigating correlations between two queries on the same graph. Just click `+Query` button, enter the second query in the newly appeared input field and press `Ctrl+Enter`. Results for both queries should be displayed simultaneously on the same graph. Every query has its own vertical scale, which is displayed on the left and the right side of the graph. Lines for the second query are dashed.
|
||||||
|
|
||||||
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
|
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
|
||||||
|
|
||||||
|
|
||||||
|
@ -1029,6 +1037,7 @@ VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-sc
|
||||||
|
|
||||||
VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
|
VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
|
||||||
to a file containing a list of [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) entries.
|
to a file containing a list of [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) entries.
|
||||||
|
The `-relabelConfig` also can point to http or https url. For example, `-relabelConfig=https://config-server/relabel_config.yml`.
|
||||||
See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
|
See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
|
||||||
|
|
||||||
Example contents for `-relabelConfig` file:
|
Example contents for `-relabelConfig` file:
|
||||||
|
@ -1221,7 +1230,8 @@ Consider setting the following command-line flags:
|
||||||
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
||||||
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
||||||
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
||||||
* `-configAuthKey` for pretecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
* `-configAuthKey` for protecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||||
|
- `-pprofAuthKey` for protecting `/debug/pprof/*` endpoints, which can be used for [profiling](#profiling).
|
||||||
|
|
||||||
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
||||||
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
||||||
|
@ -1376,9 +1386,7 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
|
||||||
This prevents from ingesting metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total`
|
This prevents from ingesting metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total`
|
||||||
metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload.
|
metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload.
|
||||||
|
|
||||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then use `{__graphite__="foo.*.baz"}` syntax for selecting such metrics.
|
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then `{__graphite__="foo.*.baz"}` filter can be used for selecting such metrics. See [these docs](#selecting-graphite-metrics) for details.
|
||||||
This expression is equivalent to `{__name__=~"foo[.][^.]*[.]baz"}`, but it works faster and it is easier to use when migrating from Graphite.
|
|
||||||
See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function, which allows extracting the given groups from Graphite metric names.
|
|
||||||
|
|
||||||
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
||||||
|
|
||||||
|
@ -1497,9 +1505,11 @@ Contact us with any questions regarding VictoriaMetrics at [info@victoriametrics
|
||||||
Feel free asking any questions regarding VictoriaMetrics:
|
Feel free asking any questions regarding VictoriaMetrics:
|
||||||
|
|
||||||
* [slack](https://slack.victoriametrics.com/)
|
* [slack](https://slack.victoriametrics.com/)
|
||||||
|
* [linkedin](https://www.linkedin.com/company/victoriametrics/)
|
||||||
* [reddit](https://www.reddit.com/r/VictoriaMetrics/)
|
* [reddit](https://www.reddit.com/r/VictoriaMetrics/)
|
||||||
* [telegram-en](https://t.me/VictoriaMetrics_en)
|
* [telegram-en](https://t.me/VictoriaMetrics_en)
|
||||||
* [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
* [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
||||||
|
* [articles and talks about VictoriaMetrics in Russian](https://github.com/denisgolius/victoriametrics-ru-links)
|
||||||
* [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
* [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
||||||
|
|
||||||
If you like VictoriaMetrics and want to contribute, then we need the following:
|
If you like VictoriaMetrics and want to contribute, then we need the following:
|
||||||
|
@ -1654,8 +1664,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-maxInsertRequestSize size
|
-maxInsertRequestSize size
|
||||||
The maximum size in bytes of a single Prometheus remote_write API request
|
The maximum size in bytes of a single Prometheus remote_write API request
|
||||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
||||||
|
-maxLabelValueLen int
|
||||||
|
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
|
||||||
-maxLabelsPerTimeseries int
|
-maxLabelsPerTimeseries int
|
||||||
The maximum number of labels accepted per time series. Superfluous labels are dropped (default 30)
|
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
|
||||||
-memory.allowedBytes size
|
-memory.allowedBytes size
|
||||||
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
||||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||||
|
@ -1685,7 +1697,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-promscrape.cluster.replicationFactor int
|
-promscrape.cluster.replicationFactor int
|
||||||
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
||||||
-promscrape.config string
|
-promscrape.config string
|
||||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. The path can point to local file and to http url. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||||
-promscrape.config.dryRun
|
-promscrape.config.dryRun
|
||||||
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
||||||
-promscrape.config.strictParse
|
-promscrape.config.strictParse
|
||||||
|
@ -1752,7 +1764,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-promscrape.suppressScrapeErrors
|
-promscrape.suppressScrapeErrors
|
||||||
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
|
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
|
||||||
-relabelConfig string
|
-relabelConfig string
|
||||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||||
-relabelDebug
|
-relabelDebug
|
||||||
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
||||||
-retentionPeriod value
|
-retentionPeriod value
|
||||||
|
|
|
@ -50,7 +50,7 @@ to `vmagent` such as the ability to push metrics instead of pulling them. We did
|
||||||
Please download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
|
Please download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
|
||||||
and configure the following flags to the `vmagent` binary in order to start scraping Prometheus targets:
|
and configure the following flags to the `vmagent` binary in order to start scraping Prometheus targets:
|
||||||
|
|
||||||
* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`)
|
* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`). The path can point either to local file or to http url.
|
||||||
* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics, the `-remoteWrite.url` argument can be specified multiple times to replicate data concurrently to an arbitrary number of remote storage systems.
|
* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics, the `-remoteWrite.url` argument can be specified multiple times to replicate data concurrently to an arbitrary number of remote storage systems.
|
||||||
|
|
||||||
Example command line:
|
Example command line:
|
||||||
|
@ -218,15 +218,16 @@ The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders w
|
||||||
|
|
||||||
## Loading scrape configs from multiple files
|
## Loading scrape configs from multiple files
|
||||||
|
|
||||||
`vmagent` supports loading scrape configs from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory plus a `single_scrape_config.yml` file:
|
`vmagent` supports loading scrape configs from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file and from `https://config-server/scrape_config.yml` url:
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
scrape_config_files:
|
scrape_config_files:
|
||||||
- configs/*.yml
|
- configs/*.yml
|
||||||
- single_scrape_config.yml
|
- single_scrape_config.yml
|
||||||
|
- https://config-server/scrape_config.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
Every referred file can contain arbitrary number of any [supported scrape configs](#how-to-collect-metrics-in-prometheus-format). There is no need in specifying top-level `scrape_configs` section in these files. For example:
|
Every referred file can contain an arbitrary number of [supported scrape configs](#how-to-collect-metrics-in-prometheus-format). There is no need to specify a top-level `scrape_configs` section in these files. For example:
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
- job_name: foo
|
- job_name: foo
|
||||||
|
@ -283,7 +284,7 @@ The relabeling can be defined in the following places:
|
||||||
|
|
||||||
* At the `scrape_config -> relabel_configs` section in `-promscrape.config` file. This relabeling is applied to target labels. This relabeling can be debugged by passing `relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs target labels before and after the relabeling and then drops the logged target.
|
* At the `scrape_config -> relabel_configs` section in `-promscrape.config` file. This relabeling is applied to target labels. This relabeling can be debugged by passing `relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs target labels before and after the relabeling and then drops the logged target.
|
||||||
* At the `scrape_config -> metric_relabel_configs` section in `-promscrape.config` file. This relabeling is applied to all the scraped metrics in the given `scrape_config`. This relabeling can be debugged by passing `metric_relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs metrics before and after the relabeling and then drops the logged metrics.
|
* At the `scrape_config -> metric_relabel_configs` section in `-promscrape.config` file. This relabeling is applied to all the scraped metrics in the given `scrape_config`. This relabeling can be debugged by passing `metric_relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs metrics before and after the relabeling and then drops the logged metrics.
|
||||||
* At the `-remoteWrite.relabelConfig` file. This relabeling is aplied to all the collected metrics before sending them to remote storage. This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
|
* At the `-remoteWrite.relabelConfig` file. This relabeling is applied to all the collected metrics before sending them to remote storage. This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
|
||||||
* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is applied to metrics before sending them to the corresponding `-remoteWrite.url`. This relabeling can be debugged by passing `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.
|
* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is applied to metrics before sending them to the corresponding `-remoteWrite.url`. This relabeling can be debugged by passing `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.
|
||||||
|
|
||||||
You can read more about relabeling in the following articles:
|
You can read more about relabeling in the following articles:
|
||||||
|
@ -810,7 +811,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
||||||
-promscrape.cluster.replicationFactor int
|
-promscrape.cluster.replicationFactor int
|
||||||
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
||||||
-promscrape.config string
|
-promscrape.config string
|
||||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. The path can point to local file and to http url. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||||
-promscrape.config.dryRun
|
-promscrape.config.dryRun
|
||||||
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
||||||
-promscrape.config.strictParse
|
-promscrape.config.strictParse
|
||||||
|
@ -935,7 +936,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
||||||
Optional rate limit in bytes per second for data sent to -remoteWrite.url. By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data is sent after temporary unavailability of the remote storage
|
Optional rate limit in bytes per second for data sent to -remoteWrite.url. By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data is sent after temporary unavailability of the remote storage
|
||||||
Supports array of values separated by comma or specified via multiple flags.
|
Supports array of values separated by comma or specified via multiple flags.
|
||||||
-remoteWrite.relabelConfig string
|
-remoteWrite.relabelConfig string
|
||||||
Optional path to file with relabel_config entries. These entries are applied to all the metrics before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details
|
Optional path to file with relabel_config entries. The path can point either to local file or to http url. These entries are applied to all the metrics before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details
|
||||||
-remoteWrite.relabelDebug
|
-remoteWrite.relabelDebug
|
||||||
Whether to log metrics before and after relabeling with -remoteWrite.relabelConfig. If the -remoteWrite.relabelDebug is enabled, then the metrics aren't sent to remote storage. This is useful for debugging the relabeling configs
|
Whether to log metrics before and after relabeling with -remoteWrite.relabelConfig. If the -remoteWrite.relabelDebug is enabled, then the metrics aren't sent to remote storage. This is useful for debugging the relabeling configs
|
||||||
-remoteWrite.roundDigits array
|
-remoteWrite.roundDigits array
|
||||||
|
@ -970,7 +971,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
||||||
Remote storage URL to write data to. It must support Prometheus remote_write API. It is recommended using VictoriaMetrics as remote storage. Example url: http://<victoriametrics-host>:8428/api/v1/write . Pass multiple -remoteWrite.url flags in order to replicate data to multiple remote storage systems. See also -remoteWrite.multitenantURL
|
Remote storage URL to write data to. It must support Prometheus remote_write API. It is recommended using VictoriaMetrics as remote storage. Example url: http://<victoriametrics-host>:8428/api/v1/write . Pass multiple -remoteWrite.url flags in order to replicate data to multiple remote storage systems. See also -remoteWrite.multitenantURL
|
||||||
Supports an array of values separated by comma or specified via multiple flags.
|
Supports an array of values separated by comma or specified via multiple flags.
|
||||||
-remoteWrite.urlRelabelConfig array
|
-remoteWrite.urlRelabelConfig array
|
||||||
Optional path to relabel config for the corresponding -remoteWrite.url
|
Optional path to relabel config for the corresponding -remoteWrite.url. The path can point either to local file or to http url
|
||||||
Supports an array of values separated by comma or specified via multiple flags.
|
Supports an array of values separated by comma or specified via multiple flags.
|
||||||
-remoteWrite.urlRelabelDebug array
|
-remoteWrite.urlRelabelDebug array
|
||||||
Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. If the -remoteWrite.urlRelabelDebug is enabled, then the metrics aren't sent to the corresponding -remoteWrite.url. This is useful for debugging the relabeling configs
|
Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. If the -remoteWrite.urlRelabelDebug is enabled, then the metrics aren't sent to the corresponding -remoteWrite.url. This is useful for debugging the relabeling configs
|
||||||
|
|
|
@ -103,12 +103,24 @@ name: <string>
|
||||||
# By default "prometheus" type is used.
|
# By default "prometheus" type is used.
|
||||||
[ type: <string> ]
|
[ type: <string> ]
|
||||||
|
|
||||||
# Optional list of label filters applied to every rule's
|
# Warning: DEPRECATED
|
||||||
# request withing a group. Is compatible only with VM datasource.
|
# Please use `params` instead:
|
||||||
# See more details at https://docs.victoriametrics.com#prometheus-querying-api-enhancements
|
# params:
|
||||||
|
# extra_label: ["job=nodeexporter", "env=prod"]
|
||||||
extra_filter_labels:
|
extra_filter_labels:
|
||||||
[ <labelname>: <labelvalue> ... ]
|
[ <labelname>: <labelvalue> ... ]
|
||||||
|
|
||||||
|
# Optional list of HTTP URL parameters
|
||||||
|
# applied for all rules requests within a group
|
||||||
|
# For example:
|
||||||
|
# params:
|
||||||
|
# nocache: ["1"] # disable caching for vmselect
|
||||||
|
# denyPartialResponse: ["true"] # fail if one or more vmstorage nodes returned an error
|
||||||
|
# extra_label: ["env=dev"] # apply additional label filter "env=dev" for all requests
|
||||||
|
# see more details at https://docs.victoriametrics.com#prometheus-querying-api-enhancements
|
||||||
|
params:
|
||||||
|
[ <string>: [<string>, ...]]
|
||||||
|
|
||||||
# Optional list of labels added to every rule within a group.
|
# Optional list of labels added to every rule within a group.
|
||||||
# It has priority over the external labels.
|
# It has priority over the external labels.
|
||||||
# Labels are commonly used for adding environment
|
# Labels are commonly used for adding environment
|
||||||
|
@ -476,6 +488,8 @@ a review to the dashboard.
|
||||||
|
|
||||||
## Configuration
|
## Configuration
|
||||||
|
|
||||||
|
### Flags
|
||||||
|
|
||||||
Pass `-help` to `vmalert` in order to see the full list of supported
|
Pass `-help` to `vmalert` in order to see the full list of supported
|
||||||
command-line flags with their descriptions.
|
command-line flags with their descriptions.
|
||||||
|
|
||||||
|
@ -697,12 +711,32 @@ The shortlist of configuration flags is the following:
|
||||||
Show VictoriaMetrics version
|
Show VictoriaMetrics version
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Hot config reload
|
||||||
`vmalert` supports "hot" config reload via the following methods:
|
`vmalert` supports "hot" config reload via the following methods:
|
||||||
* send SIGHUP signal to `vmalert` process;
|
* send SIGHUP signal to `vmalert` process;
|
||||||
* send GET request to `/-/reload` endpoint;
|
* send GET request to `/-/reload` endpoint;
|
||||||
* configure `-rule.configCheckInterval` flag for periodic reload
|
* configure `-rule.configCheckInterval` flag for periodic reload
|
||||||
on config change.
|
on config change.
|
||||||
|
|
||||||
|
### URL params
|
||||||
|
|
||||||
|
To set additional URL params for `datasource.url`, `remoteWrite.url` or `remoteRead.url`
|
||||||
|
just add them in address: `-datasource.url=http://localhost:8428?nocache=1`.
|
||||||
|
|
||||||
|
To set additional URL params for specific [group of rules](#Groups) modify
|
||||||
|
the `params` group:
|
||||||
|
```yaml
|
||||||
|
groups:
|
||||||
|
- name: TestGroup
|
||||||
|
params:
|
||||||
|
denyPartialResponse: ["true"]
|
||||||
|
extra_label: ["env=dev"]
|
||||||
|
```
|
||||||
|
Please note, `params` are used only for executing rules expressions (requests to `datasource.url`).
|
||||||
|
If there would be a conflict between URL params set in `datasource.url` flag and params in group definition
|
||||||
|
the latter will have higher priority.
|
||||||
|
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
||||||
`vmalert` is mostly designed and built by VictoriaMetrics community.
|
`vmalert` is mostly designed and built by VictoriaMetrics community.
|
||||||
|
@ -718,7 +752,7 @@ It is recommended using
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
|
||||||
2. Run `make vmalert` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmalert` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmalert` binary and puts it into the `bin` folder.
|
It builds `vmalert` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -735,7 +769,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
### Development ARM build
|
### Development ARM build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
|
||||||
2. Run `make vmalert-arm` or `make vmalert-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmalert-arm` or `make vmalert-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmalert-arm` or `vmalert-arm64` binary respectively and puts it into the `bin` folder.
|
It builds `vmalert-arm` or `vmalert-arm64` binary respectively and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -7,7 +7,7 @@ sort: 5
|
||||||
`vmauth` is a simple auth proxy, router and [load balancer](#load-balancing) for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
`vmauth` is a simple auth proxy, router and [load balancer](#load-balancing) for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It reads auth credentials from `Authorization` http header ([Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication) and `Bearer token` is supported),
|
It reads auth credentials from `Authorization` http header ([Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication) and `Bearer token` is supported),
|
||||||
matches them against configs pointed by [-auth.config](#auth-config) command-line flag and proxies incoming HTTP requests to the configured per-user `url_prefix` on successful match.
|
matches them against configs pointed by [-auth.config](#auth-config) command-line flag and proxies incoming HTTP requests to the configured per-user `url_prefix` on successful match.
|
||||||
|
The `-auth.config` can point to either local file or to http url.
|
||||||
|
|
||||||
## Quick start
|
## Quick start
|
||||||
|
|
||||||
|
@ -30,12 +30,10 @@ Pass `-help` to `vmauth` in order to see all the supported command-line flags wi
|
||||||
Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML,
|
Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML,
|
||||||
accounting and rate limiting such as [vmgateway](https://docs.victoriametrics.com/vmgateway.html).
|
accounting and rate limiting such as [vmgateway](https://docs.victoriametrics.com/vmgateway.html).
|
||||||
|
|
||||||
|
|
||||||
## Load balancing
|
## Load balancing
|
||||||
|
|
||||||
Each `url_prefix` in the [-auth.config](#auth-config) may contain either a single url or a list of urls. In the latter case `vmauth` balances load among the configured urls in a round-robin manner. This feature is useful for balancing the load among multiple `vmselect` and/or `vminsert` nodes in [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
Each `url_prefix` in the [-auth.config](#auth-config) may contain either a single url or a list of urls. In the latter case `vmauth` balances load among the configured urls in a round-robin manner. This feature is useful for balancing the load among multiple `vmselect` and/or `vminsert` nodes in [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||||
|
|
||||||
|
|
||||||
## Auth config
|
## Auth config
|
||||||
|
|
||||||
`-auth.config` is represented in the following simple `yml` format:
|
`-auth.config` is represented in the following simple `yml` format:
|
||||||
|
@ -128,7 +126,6 @@ users:
|
||||||
The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
|
The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
|
||||||
This may be useful for passing secrets to the config.
|
This may be useful for passing secrets to the config.
|
||||||
|
|
||||||
|
|
||||||
## Security
|
## Security
|
||||||
|
|
||||||
Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable https. This can be done by passing the following `-tls*` command-line flags to `vmauth`:
|
Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable https. This can be done by passing the following `-tls*` command-line flags to `vmauth`:
|
||||||
|
@ -146,7 +143,6 @@ Alternatively, [https termination proxy](https://en.wikipedia.org/wiki/TLS_termi
|
||||||
|
|
||||||
It is recommended protecting `/-/reload` endpoint with `-reloadAuthKey` command-line flag, so external users couldn't trigger config reload.
|
It is recommended protecting `/-/reload` endpoint with `-reloadAuthKey` command-line flag, so external users couldn't trigger config reload.
|
||||||
|
|
||||||
|
|
||||||
## Monitoring
|
## Monitoring
|
||||||
|
|
||||||
`vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page
|
`vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page
|
||||||
|
@ -165,7 +161,6 @@ users:
|
||||||
|
|
||||||
It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there.
|
It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there.
|
||||||
|
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
|
||||||
|
@ -191,7 +186,6 @@ by setting it via `<ROOT_IMAGE>` environment variable. For example, the followin
|
||||||
ROOT_IMAGE=scratch make package-vmauth
|
ROOT_IMAGE=scratch make package-vmauth
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## Profiling
|
## Profiling
|
||||||
|
|
||||||
`vmauth` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
|
`vmauth` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
|
||||||
|
@ -212,7 +206,6 @@ The command for collecting CPU profile waits for 30 seconds before returning.
|
||||||
|
|
||||||
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
||||||
|
|
||||||
|
|
||||||
## Advanced usage
|
## Advanced usage
|
||||||
|
|
||||||
Pass `-help` command-line arg to `vmauth` in order to see all the configuration options:
|
Pass `-help` command-line arg to `vmauth` in order to see all the configuration options:
|
||||||
|
@ -225,7 +218,7 @@ vmauth authenticates and authorizes incoming requests and proxies them to Victor
|
||||||
See the docs at https://docs.victoriametrics.com/vmauth.html .
|
See the docs at https://docs.victoriametrics.com/vmauth.html .
|
||||||
|
|
||||||
-auth.config string
|
-auth.config string
|
||||||
Path to auth config. See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config
|
Path to auth config. It can point either to local file or to http url. See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config
|
||||||
-enableTCP6
|
-enableTCP6
|
||||||
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
|
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
|
||||||
-envflag.enable
|
-envflag.enable
|
||||||
|
@ -253,7 +246,7 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
||||||
-httpListenAddr string
|
-httpListenAddr string
|
||||||
TCP address to listen for http connections (default ":8427")
|
TCP address to listen for http connections (default ":8427")
|
||||||
-logInvalidAuthTokens
|
-logInvalidAuthTokens
|
||||||
Whether to log requests with invalid auth tokens. Such requests are always counted at vmagent_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
|
Whether to log requests with invalid auth tokens. Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
|
||||||
-loggerDisableTimestamps
|
-loggerDisableTimestamps
|
||||||
Whether to disable writing timestamps in logs
|
Whether to disable writing timestamps in logs
|
||||||
-loggerErrorsPerSecondLimit int
|
-loggerErrorsPerSecondLimit int
|
||||||
|
@ -276,9 +269,9 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
||||||
-memory.allowedPercent float
|
-memory.allowedPercent float
|
||||||
Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
|
Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
|
||||||
-metricsAuthKey string
|
-metricsAuthKey string
|
||||||
Auth key for /metrics. It overrides httpAuth settings
|
Auth key for /metrics. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||||
-pprofAuthKey string
|
-pprofAuthKey string
|
||||||
Auth key for /debug/pprof. It overrides httpAuth settings
|
Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||||
-reloadAuthKey string
|
-reloadAuthKey string
|
||||||
Auth key for /-/reload http endpoint. It must be passed as authKey=...
|
Auth key for /-/reload http endpoint. It must be passed as authKey=...
|
||||||
-tls
|
-tls
|
||||||
|
|
13
go.mod
13
go.mod
|
@ -1,6 +1,7 @@
|
||||||
module github.com/VictoriaMetrics/VictoriaMetrics
|
module github.com/VictoriaMetrics/VictoriaMetrics
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
cloud.google.com/go v0.99.0 // indirect
|
||||||
cloud.google.com/go/storage v1.18.2
|
cloud.google.com/go/storage v1.18.2
|
||||||
github.com/VictoriaMetrics/fastcache v1.8.0
|
github.com/VictoriaMetrics/fastcache v1.8.0
|
||||||
|
|
||||||
|
@ -8,9 +9,9 @@ require (
|
||||||
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
|
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
|
||||||
github.com/VictoriaMetrics/fasthttp v1.1.0
|
github.com/VictoriaMetrics/fasthttp v1.1.0
|
||||||
github.com/VictoriaMetrics/metrics v1.18.1
|
github.com/VictoriaMetrics/metrics v1.18.1
|
||||||
github.com/VictoriaMetrics/metricsql v0.31.0
|
github.com/VictoriaMetrics/metricsql v0.32.0
|
||||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||||
github.com/aws/aws-sdk-go v1.42.17
|
github.com/aws/aws-sdk-go v1.42.22
|
||||||
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
|
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.1.2
|
github.com/cespare/xxhash/v2 v2.1.2
|
||||||
github.com/cheggaaa/pb/v3 v3.0.8
|
github.com/cheggaaa/pb/v3 v3.0.8
|
||||||
|
@ -34,11 +35,11 @@ require (
|
||||||
github.com/valyala/fasttemplate v1.2.1
|
github.com/valyala/fasttemplate v1.2.1
|
||||||
github.com/valyala/gozstd v1.14.2
|
github.com/valyala/gozstd v1.14.2
|
||||||
github.com/valyala/quicktemplate v1.7.0
|
github.com/valyala/quicktemplate v1.7.0
|
||||||
golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c
|
golang.org/x/net v0.0.0-20211209124913-491a49abca63
|
||||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
|
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
|
||||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881
|
golang.org/x/sys v0.0.0-20211210111614-af8b64212486
|
||||||
google.golang.org/api v0.60.0
|
google.golang.org/api v0.62.0
|
||||||
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12 // indirect
|
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
|
||||||
google.golang.org/grpc v1.42.0 // indirect
|
google.golang.org/grpc v1.42.0 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
)
|
)
|
||||||
|
|
34
go.sum
34
go.sum
|
@ -26,8 +26,10 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc
|
||||||
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
|
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
|
||||||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
||||||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
||||||
cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
|
|
||||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||||
|
cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
|
||||||
|
cloud.google.com/go v0.99.0 h1:y/cM2iqGgGi5D5DQZl6D9STN/3dR/Vx5Mp8s752oJTY=
|
||||||
|
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||||
|
@ -108,8 +110,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR
|
||||||
github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
|
github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
|
||||||
github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
|
github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
|
||||||
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
|
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
|
||||||
github.com/VictoriaMetrics/metricsql v0.31.0 h1:7cpjby64WVcRNBiMieEytuvAcU/jOOz+39RLigENz4E=
|
github.com/VictoriaMetrics/metricsql v0.32.0 h1:yTZFB1FvbOsD5ahl6mxKYprHpZ248nVk3s8Kl7UBg5c=
|
||||||
github.com/VictoriaMetrics/metricsql v0.31.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
|
github.com/VictoriaMetrics/metricsql v0.32.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
|
||||||
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
|
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
|
||||||
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
|
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
|
||||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||||
|
@ -154,8 +156,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
|
||||||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||||
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||||
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||||
github.com/aws/aws-sdk-go v1.42.17 h1:NEMRZcLd+YhXhUqdjwqNGtEYthiUZ+3BudGmK4/0yaA=
|
github.com/aws/aws-sdk-go v1.42.22 h1:EwcM7/+Ytg6xK+jbeM2+f9OELHqPiEiEKetT/GgAr7I=
|
||||||
github.com/aws/aws-sdk-go v1.42.17/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
github.com/aws/aws-sdk-go v1.42.22/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||||
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
|
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
|
||||||
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
|
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
|
||||||
|
@ -1179,8 +1181,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
|
||||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c h1:WtYZ93XtWSO5KlOMgPZu7hXY9WhMZpprvlm5VwvAl8c=
|
golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
|
||||||
golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
@ -1306,9 +1308,10 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||||
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 h1:TyHqChC80pFkXWraUUf6RuB5IqFdQieMLwwCJokV2pc=
|
|
||||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211210111614-af8b64212486 h1:5hpz5aRr+W1erYCL5JRhSUBJRph7l9XkNveoExlrKYk=
|
||||||
|
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
@ -1456,8 +1459,9 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv
|
||||||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||||
google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
|
google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
|
||||||
google.golang.org/api v0.60.0 h1:eq/zs5WPH4J9undYM9IP1O7dSr7Yh8Y0GtSCpzGzIUk=
|
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
|
||||||
google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4=
|
google.golang.org/api v0.62.0 h1:PhGymJMXfGBzc4lBRmrx9+1w4w2wEzURHNGF/sD/xGc=
|
||||||
|
google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
@ -1530,9 +1534,12 @@ google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEc
|
||||||
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||||
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12 h1:DN5b3HU13J4sMd/QjDx34U6afpaexKTDdop+26pdjdk=
|
|
||||||
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
|
google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
|
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
|
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
|
||||||
|
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||||
|
@ -1565,6 +1572,7 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
|
||||||
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||||
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||||
|
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||||
google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
|
google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
|
||||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||||
|
|
39
lib/fs/fs.go
39
lib/fs/fs.go
|
@ -4,6 +4,8 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
@ -372,3 +374,40 @@ type freeSpaceEntry struct {
|
||||||
updateTime uint64
|
updateTime uint64
|
||||||
freeSpace uint64
|
freeSpace uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ReadFileOrHTTP reads path either from local filesystem or from http if path starts with http or https.
|
||||||
|
func ReadFileOrHTTP(path string) ([]byte, error) {
|
||||||
|
if isHTTPURL(path) {
|
||||||
|
// reads remote file via http or https, if url is given
|
||||||
|
resp, err := http.Get(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot fetch %q: %w", path, err)
|
||||||
|
}
|
||||||
|
data, err := ioutil.ReadAll(resp.Body)
|
||||||
|
_ = resp.Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot read %q: %s", path, err)
|
||||||
|
}
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
data, err := ioutil.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot read %q: %w", path, err)
|
||||||
|
}
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFilepath returns full path to file for the given baseDir and path.
|
||||||
|
func GetFilepath(baseDir, path string) string {
|
||||||
|
if filepath.IsAbs(path) || isHTTPURL(path) {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
return filepath.Join(baseDir, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isHTTPURL checks if a given targetURL is valid and contains a valid http scheme
|
||||||
|
func isHTTPURL(targetURL string) bool {
|
||||||
|
parsed, err := url.Parse(targetURL)
|
||||||
|
return err == nil && (parsed.Scheme == "http" || parsed.Scheme == "https") && parsed.Host != ""
|
||||||
|
|
||||||
|
}
|
||||||
|
|
|
@ -22,3 +22,18 @@ func TestIsTemporaryFileName(t *testing.T) {
|
||||||
f("asdf.sdfds.tmp.dfd", false)
|
f("asdf.sdfds.tmp.dfd", false)
|
||||||
f("dfd.sdfds.dfds.1232", false)
|
f("dfd.sdfds.dfds.1232", false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestIsHTTPURLSuccess(t *testing.T) {
|
||||||
|
f := func(s string, expected bool) {
|
||||||
|
t.Helper()
|
||||||
|
res := isHTTPURL(s)
|
||||||
|
if res != expected {
|
||||||
|
t.Fatalf("expecting %t, got %t", expected, res)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f("http://isvalid:8000/filepath", true) // test http
|
||||||
|
f("https://isvalid:8000/filepath", true) // test https
|
||||||
|
f("tcp://notvalid:8000/filepath", false) // test tcp
|
||||||
|
f("0/filepath", false) // something invalid
|
||||||
|
f("filepath.extension", false) // something invalid
|
||||||
|
}
|
||||||
|
|
|
@ -84,8 +84,12 @@ func Serve(addr string, rh RequestHandler) {
|
||||||
if *tlsEnable {
|
if *tlsEnable {
|
||||||
scheme = "https"
|
scheme = "https"
|
||||||
}
|
}
|
||||||
logger.Infof("starting http server at %s://%s/", scheme, addr)
|
hostAddr := addr
|
||||||
logger.Infof("pprof handlers are exposed at %s://%s/debug/pprof/", scheme, addr)
|
if strings.HasPrefix(hostAddr, ":") {
|
||||||
|
hostAddr = "127.0.0.1" + hostAddr
|
||||||
|
}
|
||||||
|
logger.Infof("starting http server at %s://%s/", scheme, hostAddr)
|
||||||
|
logger.Infof("pprof handlers are exposed at %s://%s/debug/pprof/", scheme, hostAddr)
|
||||||
lnTmp, err := netutil.NewTCPListener(scheme, addr)
|
lnTmp, err := netutil.NewTCPListener(scheme, addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Fatalf("cannot start http server at %s: %s", addr, err)
|
logger.Fatalf("cannot start http server at %s: %s", addr, err)
|
||||||
|
|
|
@ -7,11 +7,11 @@ import (
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"net/url"
|
"net/url"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
"golang.org/x/oauth2/clientcredentials"
|
"golang.org/x/oauth2/clientcredentials"
|
||||||
|
@ -161,7 +161,7 @@ func newOAuth2ConfigInternal(baseDir string, o *OAuth2Config) (*oauth2ConfigInte
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if o.ClientSecretFile != "" {
|
if o.ClientSecretFile != "" {
|
||||||
oi.clientSecretFile = getFilepath(baseDir, o.ClientSecretFile)
|
oi.clientSecretFile = fs.GetFilepath(baseDir, o.ClientSecretFile)
|
||||||
secret, err := readPasswordFromFile(oi.clientSecretFile)
|
secret, err := readPasswordFromFile(oi.clientSecretFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read OAuth2 secret from %q: %w", oi.clientSecretFile, err)
|
return nil, fmt.Errorf("cannot read OAuth2 secret from %q: %w", oi.clientSecretFile, err)
|
||||||
|
@ -304,7 +304,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
|
||||||
if az.Credentials != nil {
|
if az.Credentials != nil {
|
||||||
return nil, fmt.Errorf("both `credentials`=%q and `credentials_file`=%q are set", az.Credentials, az.CredentialsFile)
|
return nil, fmt.Errorf("both `credentials`=%q and `credentials_file`=%q are set", az.Credentials, az.CredentialsFile)
|
||||||
}
|
}
|
||||||
filePath := getFilepath(baseDir, az.CredentialsFile)
|
filePath := fs.GetFilepath(baseDir, az.CredentialsFile)
|
||||||
getAuthHeader = func() string {
|
getAuthHeader = func() string {
|
||||||
token, err := readPasswordFromFile(filePath)
|
token, err := readPasswordFromFile(filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -332,7 +332,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
|
||||||
if basicAuth.Password != nil {
|
if basicAuth.Password != nil {
|
||||||
return nil, fmt.Errorf("both `password`=%q and `password_file`=%q are set in `basic_auth` section", basicAuth.Password, basicAuth.PasswordFile)
|
return nil, fmt.Errorf("both `password`=%q and `password_file`=%q are set in `basic_auth` section", basicAuth.Password, basicAuth.PasswordFile)
|
||||||
}
|
}
|
||||||
filePath := getFilepath(baseDir, basicAuth.PasswordFile)
|
filePath := fs.GetFilepath(baseDir, basicAuth.PasswordFile)
|
||||||
getAuthHeader = func() string {
|
getAuthHeader = func() string {
|
||||||
password, err := readPasswordFromFile(filePath)
|
password, err := readPasswordFromFile(filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -362,7 +362,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
|
||||||
if bearerToken != "" {
|
if bearerToken != "" {
|
||||||
return nil, fmt.Errorf("both `bearer_token`=%q and `bearer_token_file`=%q are set", bearerToken, bearerTokenFile)
|
return nil, fmt.Errorf("both `bearer_token`=%q and `bearer_token_file`=%q are set", bearerToken, bearerTokenFile)
|
||||||
}
|
}
|
||||||
filePath := getFilepath(baseDir, bearerTokenFile)
|
filePath := fs.GetFilepath(baseDir, bearerTokenFile)
|
||||||
getAuthHeader = func() string {
|
getAuthHeader = func() string {
|
||||||
token, err := readPasswordFromFile(filePath)
|
token, err := readPasswordFromFile(filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -416,8 +416,8 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
|
||||||
if tlsConfig.CertFile != "" || tlsConfig.KeyFile != "" {
|
if tlsConfig.CertFile != "" || tlsConfig.KeyFile != "" {
|
||||||
getTLSCert = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
|
getTLSCert = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
|
||||||
// Re-read TLS certificate from disk. This is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1420
|
// Re-read TLS certificate from disk. This is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1420
|
||||||
certPath := getFilepath(baseDir, tlsConfig.CertFile)
|
certPath := fs.GetFilepath(baseDir, tlsConfig.CertFile)
|
||||||
keyPath := getFilepath(baseDir, tlsConfig.KeyFile)
|
keyPath := fs.GetFilepath(baseDir, tlsConfig.KeyFile)
|
||||||
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", tlsConfig.CertFile, tlsConfig.KeyFile, err)
|
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", tlsConfig.CertFile, tlsConfig.KeyFile, err)
|
||||||
|
@ -431,8 +431,8 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
|
||||||
tlsCertDigest = fmt.Sprintf("certFile=%q, keyFile=%q", tlsConfig.CertFile, tlsConfig.KeyFile)
|
tlsCertDigest = fmt.Sprintf("certFile=%q, keyFile=%q", tlsConfig.CertFile, tlsConfig.KeyFile)
|
||||||
}
|
}
|
||||||
if tlsConfig.CAFile != "" {
|
if tlsConfig.CAFile != "" {
|
||||||
path := getFilepath(baseDir, tlsConfig.CAFile)
|
path := fs.GetFilepath(baseDir, tlsConfig.CAFile)
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := fs.ReadFileOrHTTP(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read `ca_file` %q: %w", tlsConfig.CAFile, err)
|
return nil, fmt.Errorf("cannot read `ca_file` %q: %w", tlsConfig.CAFile, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,21 +1,14 @@
|
||||||
package promauth
|
package promauth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io/ioutil"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
"strings"
|
||||||
"unicode"
|
"unicode"
|
||||||
|
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||||
)
|
)
|
||||||
|
|
||||||
func getFilepath(baseDir, path string) string {
|
|
||||||
if filepath.IsAbs(path) {
|
|
||||||
return path
|
|
||||||
}
|
|
||||||
return filepath.Join(baseDir, path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func readPasswordFromFile(path string) (string, error) {
|
func readPasswordFromFile(path string) (string, error) {
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := fs.ReadFileOrHTTP(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,12 +2,12 @@ package promrelabel
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -123,7 +123,7 @@ func (pcs *ParsedConfigs) String() string {
|
||||||
|
|
||||||
// LoadRelabelConfigs loads relabel configs from the given path.
|
// LoadRelabelConfigs loads relabel configs from the given path.
|
||||||
func LoadRelabelConfigs(path string, relabelDebug bool) (*ParsedConfigs, error) {
|
func LoadRelabelConfigs(path string, relabelDebug bool) (*ParsedConfigs, error) {
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := fs.ReadFileOrHTTP(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %w", path, err)
|
return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,7 +3,6 @@ package promscrape
|
||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"net/url"
|
"net/url"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"sort"
|
||||||
|
@ -15,6 +14,7 @@ import (
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||||
|
@ -227,7 +227,7 @@ type StaticConfig struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadStaticConfigs(path string) ([]StaticConfig, error) {
|
func loadStaticConfigs(path string) ([]StaticConfig, error) {
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := fs.ReadFileOrHTTP(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read `static_configs` from %q: %w", path, err)
|
return nil, fmt.Errorf("cannot read `static_configs` from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
|
@ -241,7 +241,7 @@ func loadStaticConfigs(path string) ([]StaticConfig, error) {
|
||||||
|
|
||||||
// loadConfig loads Prometheus config from the given path.
|
// loadConfig loads Prometheus config from the given path.
|
||||||
func loadConfig(path string) (*Config, []byte, error) {
|
func loadConfig(path string) (*Config, []byte, error) {
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := fs.ReadFileOrHTTP(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %w", path, err)
|
return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
|
@ -257,7 +257,7 @@ func loadScrapeConfigFiles(baseDir string, scrapeConfigFiles []string) ([]Scrape
|
||||||
var scrapeConfigs []ScrapeConfig
|
var scrapeConfigs []ScrapeConfig
|
||||||
var scsData []byte
|
var scsData []byte
|
||||||
for _, filePath := range scrapeConfigFiles {
|
for _, filePath := range scrapeConfigFiles {
|
||||||
filePath := getFilepath(baseDir, filePath)
|
filePath := fs.GetFilepath(baseDir, filePath)
|
||||||
paths := []string{filePath}
|
paths := []string{filePath}
|
||||||
if strings.Contains(filePath, "*") {
|
if strings.Contains(filePath, "*") {
|
||||||
ps, err := filepath.Glob(filePath)
|
ps, err := filepath.Glob(filePath)
|
||||||
|
@ -268,7 +268,7 @@ func loadScrapeConfigFiles(baseDir string, scrapeConfigFiles []string) ([]Scrape
|
||||||
paths = ps
|
paths = ps
|
||||||
}
|
}
|
||||||
for _, path := range paths {
|
for _, path := range paths {
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := fs.ReadFileOrHTTP(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot load %q: %w", path, err)
|
return nil, nil, fmt.Errorf("cannot load %q: %w", path, err)
|
||||||
}
|
}
|
||||||
|
@ -877,7 +877,7 @@ func appendScrapeWorkForTargetLabels(dst []*ScrapeWork, swc *scrapeWorkConfig, t
|
||||||
|
|
||||||
func (sdc *FileSDConfig) appendScrapeWork(dst []*ScrapeWork, swsMapPrev map[string][]*ScrapeWork, baseDir string, swc *scrapeWorkConfig) []*ScrapeWork {
|
func (sdc *FileSDConfig) appendScrapeWork(dst []*ScrapeWork, swsMapPrev map[string][]*ScrapeWork, baseDir string, swc *scrapeWorkConfig) []*ScrapeWork {
|
||||||
for _, file := range sdc.Files {
|
for _, file := range sdc.Files {
|
||||||
pathPattern := getFilepath(baseDir, file)
|
pathPattern := fs.GetFilepath(baseDir, file)
|
||||||
paths := []string{pathPattern}
|
paths := []string{pathPattern}
|
||||||
if strings.Contains(pathPattern, "*") {
|
if strings.Contains(pathPattern, "*") {
|
||||||
var err error
|
var err error
|
||||||
|
@ -1201,13 +1201,6 @@ func mergeLabels(swc *scrapeWorkConfig, target string, extraLabels, metaLabels m
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func getFilepath(baseDir, path string) string {
|
|
||||||
if filepath.IsAbs(path) {
|
|
||||||
return path
|
|
||||||
}
|
|
||||||
return filepath.Join(baseDir, path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func addMissingPort(scheme, target string) string {
|
func addMissingPort(scheme, target string) string {
|
||||||
if strings.Contains(target, ":") {
|
if strings.Contains(target, ":") {
|
||||||
return target
|
return target
|
||||||
|
|
|
@ -32,6 +32,7 @@ var (
|
||||||
suppressDuplicateScrapeTargetErrors = flag.Bool("promscrape.suppressDuplicateScrapeTargetErrors", false, "Whether to suppress 'duplicate scrape target' errors; "+
|
suppressDuplicateScrapeTargetErrors = flag.Bool("promscrape.suppressDuplicateScrapeTargetErrors", false, "Whether to suppress 'duplicate scrape target' errors; "+
|
||||||
"see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details")
|
"see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details")
|
||||||
promscrapeConfigFile = flag.String("promscrape.config", "", "Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. "+
|
promscrapeConfigFile = flag.String("promscrape.config", "", "Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. "+
|
||||||
|
"The path can point to local file and to http url. "+
|
||||||
"See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details")
|
"See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details")
|
||||||
|
|
||||||
fileSDCheckInterval = flag.Duration("promscrape.fileSDCheckInterval", 30*time.Second, "Interval for checking for changes in 'file_sd_config'. "+
|
fileSDCheckInterval = flag.Duration("promscrape.fileSDCheckInterval", 30*time.Second, "Interval for checking for changes in 'file_sd_config'. "+
|
||||||
|
|
|
@ -424,7 +424,18 @@ const maxLabelNameLen = 256
|
||||||
// The maximum length of label value.
|
// The maximum length of label value.
|
||||||
//
|
//
|
||||||
// Longer values are truncated.
|
// Longer values are truncated.
|
||||||
const maxLabelValueLen = 16 * 1024
|
var maxLabelValueLen = 16 * 1024
|
||||||
|
|
||||||
|
// SetMaxLabelValueLen sets the limit on the label value length.
|
||||||
|
//
|
||||||
|
// This function can be called before using the storage package.
|
||||||
|
//
|
||||||
|
// Label values with longer length are truncated.
|
||||||
|
func SetMaxLabelValueLen(n int) {
|
||||||
|
if n > 0 {
|
||||||
|
maxLabelValueLen = n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// The maximum number of labels per each timeseries.
|
// The maximum number of labels per each timeseries.
|
||||||
var maxLabelsPerTimeseries = 30
|
var maxLabelsPerTimeseries = 30
|
||||||
|
@ -432,12 +443,13 @@ var maxLabelsPerTimeseries = 30
|
||||||
// SetMaxLabelsPerTimeseries sets the limit on the number of labels
|
// SetMaxLabelsPerTimeseries sets the limit on the number of labels
|
||||||
// per each time series.
|
// per each time series.
|
||||||
//
|
//
|
||||||
|
// This function can be called before using the storage package.
|
||||||
|
//
|
||||||
// Superfluous labels are dropped.
|
// Superfluous labels are dropped.
|
||||||
func SetMaxLabelsPerTimeseries(maxLabels int) {
|
func SetMaxLabelsPerTimeseries(maxLabels int) {
|
||||||
if maxLabels <= 0 {
|
if maxLabels > 0 {
|
||||||
logger.Panicf("BUG: maxLabels must be positive; got %d", maxLabels)
|
|
||||||
}
|
|
||||||
maxLabelsPerTimeseries = maxLabels
|
maxLabelsPerTimeseries = maxLabels
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalMetricNameRaw marshals labels to dst and returns the result.
|
// MarshalMetricNameRaw marshals labels to dst and returns the result.
|
||||||
|
|
|
@ -1228,9 +1228,52 @@ func (s *Storage) SearchTagValueSuffixes(tr TimeRange, tagKey, tagValuePrefix []
|
||||||
|
|
||||||
// SearchGraphitePaths returns all the matching paths for the given graphite query on the given tr.
|
// SearchGraphitePaths returns all the matching paths for the given graphite query on the given tr.
|
||||||
func (s *Storage) SearchGraphitePaths(tr TimeRange, query []byte, maxPaths int, deadline uint64) ([]string, error) {
|
func (s *Storage) SearchGraphitePaths(tr TimeRange, query []byte, maxPaths int, deadline uint64) ([]string, error) {
|
||||||
|
query = replaceAlternateRegexpsWithGraphiteWildcards(query)
|
||||||
return s.searchGraphitePaths(tr, nil, query, maxPaths, deadline)
|
return s.searchGraphitePaths(tr, nil, query, maxPaths, deadline)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// replaceAlternateRegexpsWithGraphiteWildcards replaces (foo|..|bar) with {foo,...,bar} in b and returns the new value.
|
||||||
|
func replaceAlternateRegexpsWithGraphiteWildcards(b []byte) []byte {
|
||||||
|
var dst []byte
|
||||||
|
for {
|
||||||
|
n := bytes.IndexByte(b, '(')
|
||||||
|
if n < 0 {
|
||||||
|
if len(dst) == 0 {
|
||||||
|
// Fast path - b doesn't contain the openining brace.
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
dst = append(dst, b...)
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
dst = append(dst, b[:n]...)
|
||||||
|
b = b[n+1:]
|
||||||
|
n = bytes.IndexByte(b, ')')
|
||||||
|
if n < 0 {
|
||||||
|
dst = append(dst, '(')
|
||||||
|
dst = append(dst, b...)
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
x := b[:n]
|
||||||
|
b = b[n+1:]
|
||||||
|
if string(x) == ".*" {
|
||||||
|
dst = append(dst, '*')
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dst = append(dst, '{')
|
||||||
|
for len(x) > 0 {
|
||||||
|
n = bytes.IndexByte(x, '|')
|
||||||
|
if n < 0 {
|
||||||
|
dst = append(dst, x...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
dst = append(dst, x[:n]...)
|
||||||
|
x = x[n+1:]
|
||||||
|
dst = append(dst, ',')
|
||||||
|
}
|
||||||
|
dst = append(dst, '}')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (s *Storage) searchGraphitePaths(tr TimeRange, qHead, qTail []byte, maxPaths int, deadline uint64) ([]string, error) {
|
func (s *Storage) searchGraphitePaths(tr TimeRange, qHead, qTail []byte, maxPaths int, deadline uint64) ([]string, error) {
|
||||||
n := bytes.IndexAny(qTail, "*[{")
|
n := bytes.IndexAny(qTail, "*[{")
|
||||||
if n < 0 {
|
if n < 0 {
|
||||||
|
|
|
@ -14,6 +14,24 @@ import (
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestReplaceAlternateRegexpsWithGraphiteWildcards(t *testing.T) {
|
||||||
|
f := func(q, resultExpected string) {
|
||||||
|
t.Helper()
|
||||||
|
result := replaceAlternateRegexpsWithGraphiteWildcards([]byte(q))
|
||||||
|
if string(result) != resultExpected {
|
||||||
|
t.Fatalf("unexpected result for %s\ngot\n%s\nwant\n%s", q, result, resultExpected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f("", "")
|
||||||
|
f("foo", "foo")
|
||||||
|
f("foo(bar", "foo(bar")
|
||||||
|
f("foo.(bar|baz", "foo.(bar|baz")
|
||||||
|
f("foo.(bar).x", "foo.{bar}.x")
|
||||||
|
f("foo.(bar|baz).*.{x,y}", "foo.{bar,baz}.*.{x,y}")
|
||||||
|
f("foo.(bar|baz).*.{x,y}(z|aa)", "foo.{bar,baz}.*.{x,y}{z,aa}")
|
||||||
|
f("foo(.*)", "foo*")
|
||||||
|
}
|
||||||
|
|
||||||
func TestGetRegexpForGraphiteNodeQuery(t *testing.T) {
|
func TestGetRegexpForGraphiteNodeQuery(t *testing.T) {
|
||||||
f := func(q, expectedRegexp string) {
|
f := func(q, expectedRegexp string) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
|
@ -25,6 +25,9 @@ type Cache struct {
|
||||||
curr atomic.Value
|
curr atomic.Value
|
||||||
prev atomic.Value
|
prev atomic.Value
|
||||||
|
|
||||||
|
// cs holds cache stats
|
||||||
|
cs fastcache.Stats
|
||||||
|
|
||||||
// mode indicates whether to use only curr and skip prev.
|
// mode indicates whether to use only curr and skip prev.
|
||||||
//
|
//
|
||||||
// This flag is set to switching if curr is filled for more than 50% space.
|
// This flag is set to switching if curr is filled for more than 50% space.
|
||||||
|
@ -39,9 +42,6 @@ type Cache struct {
|
||||||
|
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
stopCh chan struct{}
|
stopCh chan struct{}
|
||||||
|
|
||||||
// cs holds cache stats
|
|
||||||
cs fastcache.Stats
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load loads the cache from filePath and limits its size to maxBytes
|
// Load loads the cache from filePath and limits its size to maxBytes
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue