mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
This commit is contained in:
commit
6a64823581
141 changed files with 2957 additions and 2000 deletions
6
Makefile
6
Makefile
|
@ -24,6 +24,8 @@ all: \
|
|||
|
||||
include app/*/Makefile
|
||||
include deployment/*/Makefile
|
||||
include snap/local/Makefile
|
||||
|
||||
|
||||
clean:
|
||||
rm -rf bin/*
|
||||
|
@ -84,9 +86,6 @@ vmutils-windows-amd64: \
|
|||
vmauth-windows-amd64 \
|
||||
vmctl-windows-amd64
|
||||
|
||||
release-snap:
|
||||
snapcraft
|
||||
snapcraft upload "victoriametrics_$(PKG_TAG)_multi.snap" --release beta,edge,candidate
|
||||
|
||||
publish-release:
|
||||
git checkout $(TAG) && $(MAKE) release publish && \
|
||||
|
@ -180,6 +179,7 @@ release-vmutils-windows-generic: \
|
|||
vmctl-windows-$(GOARCH)-prod.exe \
|
||||
> vmutils-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
|
||||
|
||||
|
||||
pprof-cpu:
|
||||
go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics@ $(PPROF_FILE)
|
||||
|
||||
|
|
114
README.md
114
README.md
|
@ -13,46 +13,13 @@
|
|||
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
||||
|
||||
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
in [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), in [Snap packages](https://snapcraft.io/victoriametrics)
|
||||
and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics follow [these instructions](#how-to-start-victoriametrics).
|
||||
[Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
|
||||
and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and follow [these instructions](#how-to-start-victoriametrics).
|
||||
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
||||
|
||||
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
|
||||
See [features available in enterprise package](https://victoriametrics.com/enterprise.html).
|
||||
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||
|
||||
|
||||
## Case studies and talks
|
||||
|
||||
Case studies:
|
||||
|
||||
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
||||
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
||||
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
||||
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
||||
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
||||
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
||||
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
||||
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
||||
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
||||
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
||||
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
||||
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
||||
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
||||
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
||||
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
||||
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
||||
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
||||
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
||||
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
||||
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
||||
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
||||
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
||||
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
||||
|
||||
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/enterprise.html). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||
|
||||
|
||||
## Prominent features
|
||||
|
@ -95,6 +62,37 @@ VictoriaMetrics has the following prominent features:
|
|||
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
||||
|
||||
|
||||
## Case studies and talks
|
||||
|
||||
Case studies:
|
||||
|
||||
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
||||
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
||||
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
||||
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
||||
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
||||
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
||||
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
||||
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
||||
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
||||
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
||||
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
||||
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
||||
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
||||
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
||||
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
||||
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
||||
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
||||
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
||||
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
||||
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
||||
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
||||
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
||||
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
||||
|
||||
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
||||
|
||||
|
||||
## Operation
|
||||
|
||||
## How to start VictoriaMetrics
|
||||
|
@ -418,9 +416,15 @@ The `/api/v1/export` endpoint should return the following response:
|
|||
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
||||
|
||||
* [Graphite API](#graphite-api-usage)
|
||||
* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
||||
* [Prometheus querying API](#prometheus-querying-api-usage). See also [selecting Graphite metrics](#selecting-graphite-metrics).
|
||||
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
||||
|
||||
## Selecting Graphite metrics
|
||||
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See [docs for Graphite paths and wildcards](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
||||
|
||||
The `__graphite__` pseudo-label supports e.g. alternate regexp filters such as `(value1|...|valueN)`. They are transparently converted to `{value1,...,valueN}` syntax [used in Graphite](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). This allows using [multi-value template variables in Grafana](https://grafana.com/docs/grafana/latest/variables/formatting-multi-value-variables/) inside `__graphite__` pseudo-label. For example, Grafana expands `{__graphite__=~"foo.($bar).baz"}` into `{__graphite__=~"foo.(x|y).baz"}` if `$bar` template variable contains `x` and `y` values. In this case the query is automatically converted into `{__graphite__=~"foo.{x,y}.baz"}` before execution.
|
||||
|
||||
## How to send data from OpenTSDB-compatible agents
|
||||
|
||||
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
||||
|
@ -517,9 +521,10 @@ All the Prometheus querying API handlers can be prepended with `/prometheus` pre
|
|||
### Prometheus querying API enhancements
|
||||
|
||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
|
||||
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
|
||||
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
|
||||
in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
`/api/v1/query_range?extra_label=user_id=123&extra_label=group_id=456&query=<query>` would automatically add `{user_id="123",group_id="456"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
|
||||
VictoriaMetrics accepts optional `extra_filters[]=series_selector` query arg, which can be used for enforcing arbitrary label filters for queries. For example,
|
||||
`/api/v1/query_range?extra_filters[]={env=~"prod|staging",user="xyz"}&query=<query>` would automatically add `{env=~"prod|staging",user="xyz"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_filters[]` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
|
||||
VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
|
||||
|
@ -556,12 +561,11 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap
|
|||
|
||||
All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.
|
||||
|
||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg for all the Graphite APIs. This arg can be used for limiting the scope of time series
|
||||
visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics.
|
||||
VictoriaMetrics accepts optional query args: `extra_label=<label_name>=<label_value>` and `extra_filters[]=series_selector` query args for all the Graphite APIs. These args can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
|
||||
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
||||
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
|
||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function.
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). See [these docs](#selecting-graphite-metrics).
|
||||
|
||||
|
||||
### Graphite Render API usage
|
||||
|
@ -612,6 +616,10 @@ Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressin
|
|||
|
||||
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful to disable the response cache by clicking `Enable cache` checkbox.
|
||||
|
||||
VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by clicking `Override step value` checkbox.
|
||||
|
||||
VMUI allows investigating correlations between two queries on the same graph. Just click `+Query` button, enter the second query in the newly appeared input field and press `Ctrl+Enter`. Results for both queries should be displayed simultaneously on the same graph. Every query has its own vertical scale, which is displayed on the left and the right side of the graph. Lines for the second query are dashed.
|
||||
|
||||
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
|
||||
|
||||
|
||||
|
@ -1025,6 +1033,7 @@ VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-sc
|
|||
|
||||
VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
|
||||
to a file containing a list of [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) entries.
|
||||
The `-relabelConfig` also can point to http or https url. For example, `-relabelConfig=https://config-server/relabel_config.yml`.
|
||||
See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
|
||||
|
||||
Example contents for `-relabelConfig` file:
|
||||
|
@ -1217,7 +1226,8 @@ Consider setting the following command-line flags:
|
|||
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
||||
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
||||
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
||||
* `-configAuthKey` for pretecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||
* `-configAuthKey` for protecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||
- `-pprofAuthKey` for protecting `/debug/pprof/*` endpoints, which can be used for [profiling](#profiling).
|
||||
|
||||
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
||||
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
||||
|
@ -1372,9 +1382,7 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
|
|||
This prevents from ingesting metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total`
|
||||
metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload.
|
||||
|
||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then use `{__graphite__="foo.*.baz"}` syntax for selecting such metrics.
|
||||
This expression is equivalent to `{__name__=~"foo[.][^.]*[.]baz"}`, but it works faster and it is easier to use when migrating from Graphite.
|
||||
See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function, which allows extracting the given groups from Graphite metric names.
|
||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then `{__graphite__="foo.*.baz"}` filter can be used for selecting such metrics. See [these docs](#selecting-graphite-metrics) for details.
|
||||
|
||||
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
||||
|
||||
|
@ -1493,9 +1501,11 @@ Contact us with any questions regarding VictoriaMetrics at [info@victoriametrics
|
|||
Feel free asking any questions regarding VictoriaMetrics:
|
||||
|
||||
* [slack](https://slack.victoriametrics.com/)
|
||||
* [linkedin](https://www.linkedin.com/company/victoriametrics/)
|
||||
* [reddit](https://www.reddit.com/r/VictoriaMetrics/)
|
||||
* [telegram-en](https://t.me/VictoriaMetrics_en)
|
||||
* [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
||||
* [articles and talks about VictoriaMetrics in Russian](https://github.com/denisgolius/victoriametrics-ru-links)
|
||||
* [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
||||
|
||||
If you like VictoriaMetrics and want to contribute, then we need the following:
|
||||
|
@ -1650,8 +1660,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-maxInsertRequestSize size
|
||||
The maximum size in bytes of a single Prometheus remote_write API request
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
||||
-maxLabelValueLen int
|
||||
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
|
||||
-maxLabelsPerTimeseries int
|
||||
The maximum number of labels accepted per time series. Superfluous labels are dropped (default 30)
|
||||
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
|
||||
-memory.allowedBytes size
|
||||
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||
|
@ -1681,7 +1693,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-promscrape.cluster.replicationFactor int
|
||||
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
||||
-promscrape.config string
|
||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. The path can point to local file and to http url. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||
-promscrape.config.dryRun
|
||||
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
||||
-promscrape.config.strictParse
|
||||
|
@ -1748,7 +1760,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-promscrape.suppressScrapeErrors
|
||||
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
|
||||
-relabelConfig string
|
||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||
-relabelDebug
|
||||
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
||||
-retentionPeriod value
|
||||
|
|
|
@ -46,7 +46,7 @@ to `vmagent` such as the ability to push metrics instead of pulling them. We did
|
|||
Please download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
|
||||
and configure the following flags to the `vmagent` binary in order to start scraping Prometheus targets:
|
||||
|
||||
* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`)
|
||||
* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`). The path can point either to local file or to http url.
|
||||
* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics, the `-remoteWrite.url` argument can be specified multiple times to replicate data concurrently to an arbitrary number of remote storage systems.
|
||||
|
||||
Example command line:
|
||||
|
@ -214,15 +214,16 @@ The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders w
|
|||
|
||||
## Loading scrape configs from multiple files
|
||||
|
||||
`vmagent` supports loading scrape configs from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory plus a `single_scrape_config.yml` file:
|
||||
`vmagent` supports loading scrape configs from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file and from `https://config-server/scrape_config.yml` url:
|
||||
|
||||
```yml
|
||||
scrape_config_files:
|
||||
- configs/*.yml
|
||||
- single_scrape_config.yml
|
||||
- https://config-server/scrape_config.yml
|
||||
```
|
||||
|
||||
Every referred file can contain arbitrary number of any [supported scrape configs](#how-to-collect-metrics-in-prometheus-format). There is no need in specifying top-level `scrape_configs` section in these files. For example:
|
||||
Every referred file can contain arbitrary number of [supported scrape configs](#how-to-collect-metrics-in-prometheus-format). There is no need in specifying top-level `scrape_configs` section in these files. For example:
|
||||
|
||||
```yml
|
||||
- job_name: foo
|
||||
|
@ -279,7 +280,7 @@ The relabeling can be defined in the following places:
|
|||
|
||||
* At the `scrape_config -> relabel_configs` section in `-promscrape.config` file. This relabeling is applied to target labels. This relabeling can be debugged by passing `relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs target labels before and after the relabeling and then drops the logged target.
|
||||
* At the `scrape_config -> metric_relabel_configs` section in `-promscrape.config` file. This relabeling is applied to all the scraped metrics in the given `scrape_config`. This relabeling can be debugged by passing `metric_relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs metrics before and after the relabeling and then drops the logged metrics.
|
||||
* At the `-remoteWrite.relabelConfig` file. This relabeling is aplied to all the collected metrics before sending them to remote storage. This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
|
||||
* At the `-remoteWrite.relabelConfig` file. This relabeling is applied to all the collected metrics before sending them to remote storage. This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
|
||||
* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is applied to metrics before sending them to the corresponding `-remoteWrite.url`. This relabeling can be debugged by passing `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.
|
||||
|
||||
You can read more about relabeling in the following articles:
|
||||
|
@ -806,7 +807,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
-promscrape.cluster.replicationFactor int
|
||||
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
||||
-promscrape.config string
|
||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. The path can point to local file and to http url. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||
-promscrape.config.dryRun
|
||||
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
||||
-promscrape.config.strictParse
|
||||
|
@ -931,7 +932,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
Optional rate limit in bytes per second for data sent to -remoteWrite.url. By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data is sent after temporary unavailability of the remote storage
|
||||
Supports array of values separated by comma or specified via multiple flags.
|
||||
-remoteWrite.relabelConfig string
|
||||
Optional path to file with relabel_config entries. These entries are applied to all the metrics before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details
|
||||
Optional path to file with relabel_config entries. The path can point either to local file or to http url. These entries are applied to all the metrics before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details
|
||||
-remoteWrite.relabelDebug
|
||||
Whether to log metrics before and after relabeling with -remoteWrite.relabelConfig. If the -remoteWrite.relabelDebug is enabled, then the metrics aren't sent to remote storage. This is useful for debugging the relabeling configs
|
||||
-remoteWrite.roundDigits array
|
||||
|
@ -966,7 +967,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
Remote storage URL to write data to. It must support Prometheus remote_write API. It is recommended using VictoriaMetrics as remote storage. Example url: http://<victoriametrics-host>:8428/api/v1/write . Pass multiple -remoteWrite.url flags in order to replicate data to multiple remote storage systems. See also -remoteWrite.multitenantURL
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-remoteWrite.urlRelabelConfig array
|
||||
Optional path to relabel config for the corresponding -remoteWrite.url
|
||||
Optional path to relabel config for the corresponding -remoteWrite.url. The path can point either to local file or to http url
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-remoteWrite.urlRelabelDebug array
|
||||
Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. If the -remoteWrite.urlRelabelDebug is enabled, then the metrics aren't sent to the corresponding -remoteWrite.url. This is useful for debugging the relabeling configs
|
||||
|
|
|
@ -15,12 +15,14 @@ import (
|
|||
var (
|
||||
unparsedLabelsGlobal = flagutil.NewArray("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. "+
|
||||
"Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage")
|
||||
relabelConfigPathGlobal = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabel_config entries. These entries are applied to all the metrics "+
|
||||
relabelConfigPathGlobal = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabel_config entries. "+
|
||||
"The path can point either to local file or to http url. These entries are applied to all the metrics "+
|
||||
"before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details")
|
||||
relabelDebugGlobal = flag.Bool("remoteWrite.relabelDebug", false, "Whether to log metrics before and after relabeling with -remoteWrite.relabelConfig. "+
|
||||
"If the -remoteWrite.relabelDebug is enabled, then the metrics aren't sent to remote storage. This is useful for debugging the relabeling configs")
|
||||
relabelConfigPaths = flagutil.NewArray("remoteWrite.urlRelabelConfig", "Optional path to relabel config for the corresponding -remoteWrite.url")
|
||||
relabelDebug = flagutil.NewArrayBool("remoteWrite.urlRelabelDebug", "Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. "+
|
||||
relabelConfigPaths = flagutil.NewArray("remoteWrite.urlRelabelConfig", "Optional path to relabel config for the corresponding -remoteWrite.url. "+
|
||||
"The path can point either to local file or to http url")
|
||||
relabelDebug = flagutil.NewArrayBool("remoteWrite.urlRelabelDebug", "Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. "+
|
||||
"If the -remoteWrite.urlRelabelDebug is enabled, then the metrics aren't sent to the corresponding -remoteWrite.url. "+
|
||||
"This is useful for debugging the relabeling configs")
|
||||
)
|
||||
|
|
|
@ -153,6 +153,13 @@ func (ar *AlertingRule) ExecRange(ctx context.Context, start, end time.Time) ([]
|
|||
return nil, fmt.Errorf("`query` template isn't supported in replay mode")
|
||||
}
|
||||
for _, s := range series {
|
||||
// set additional labels to identify group and rule name
|
||||
if ar.Name != "" {
|
||||
s.SetLabel(alertNameLabel, ar.Name)
|
||||
}
|
||||
if !*disableAlertGroupLabel && ar.GroupName != "" {
|
||||
s.SetLabel(alertGroupNameLabel, ar.GroupName)
|
||||
}
|
||||
// extra labels could contain templates, so we expand them first
|
||||
labels, err := expandLabels(s, qFn, ar)
|
||||
if err != nil {
|
||||
|
@ -163,13 +170,6 @@ func (ar *AlertingRule) ExecRange(ctx context.Context, start, end time.Time) ([]
|
|||
// so the hash key will be consistent on restore
|
||||
s.SetLabel(k, v)
|
||||
}
|
||||
// set additional labels to identify group and rule name
|
||||
if ar.Name != "" {
|
||||
s.SetLabel(alertNameLabel, ar.Name)
|
||||
}
|
||||
if !*disableAlertGroupLabel && ar.GroupName != "" {
|
||||
s.SetLabel(alertGroupNameLabel, ar.GroupName)
|
||||
}
|
||||
a, err := ar.newAlert(s, time.Time{}, qFn) // initial alert
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create alert: %s", err)
|
||||
|
@ -225,6 +225,13 @@ func (ar *AlertingRule) Exec(ctx context.Context) ([]prompbmarshal.TimeSeries, e
|
|||
updated := make(map[uint64]struct{})
|
||||
// update list of active alerts
|
||||
for _, m := range qMetrics {
|
||||
// set additional labels to identify group and rule name
|
||||
if ar.Name != "" {
|
||||
m.SetLabel(alertNameLabel, ar.Name)
|
||||
}
|
||||
if !*disableAlertGroupLabel && ar.GroupName != "" {
|
||||
m.SetLabel(alertGroupNameLabel, ar.GroupName)
|
||||
}
|
||||
// extra labels could contain templates, so we expand them first
|
||||
labels, err := expandLabels(m, qFn, ar)
|
||||
if err != nil {
|
||||
|
@ -235,14 +242,6 @@ func (ar *AlertingRule) Exec(ctx context.Context) ([]prompbmarshal.TimeSeries, e
|
|||
// so the hash key will be consistent on restore
|
||||
m.SetLabel(k, v)
|
||||
}
|
||||
// set additional labels to identify group and rule name
|
||||
// set additional labels to identify group and rule name
|
||||
if ar.Name != "" {
|
||||
m.SetLabel(alertNameLabel, ar.Name)
|
||||
}
|
||||
if !*disableAlertGroupLabel && ar.GroupName != "" {
|
||||
m.SetLabel(alertGroupNameLabel, ar.GroupName)
|
||||
}
|
||||
h := hash(m)
|
||||
if _, ok := updated[h]; ok {
|
||||
// duplicate may be caused by extra labels
|
||||
|
|
|
@ -715,6 +715,44 @@ func TestAlertingRule_Template(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
&AlertingRule{
|
||||
Name: "ExtraTemplating",
|
||||
GroupName: "Testing",
|
||||
Labels: map[string]string{
|
||||
"name": "alert_{{ $labels.alertname }}",
|
||||
"group": "group_{{ $labels.alertgroup }}",
|
||||
"instance": "{{ $labels.instance }}",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"summary": `Alert "{{ $labels.alertname }}({{ $labels.alertgroup }})" for instance {{ $labels.instance }}`,
|
||||
"description": `Alert "{{ $labels.name }}({{ $labels.group }})" for instance {{ $labels.instance }}`,
|
||||
},
|
||||
alerts: make(map[uint64]*notifier.Alert),
|
||||
},
|
||||
[]datasource.Metric{
|
||||
metricWithValueAndLabels(t, 1, "instance", "foo"),
|
||||
},
|
||||
map[uint64]*notifier.Alert{
|
||||
hash(metricWithLabels(t, alertNameLabel, "ExtraTemplating",
|
||||
"name", "alert_ExtraTemplating",
|
||||
alertGroupNameLabel, "Testing",
|
||||
"group", "group_Testing",
|
||||
"instance", "foo")): {
|
||||
Labels: map[string]string{
|
||||
alertNameLabel: "ExtraTemplating",
|
||||
"name": "alert_ExtraTemplating",
|
||||
alertGroupNameLabel: "Testing",
|
||||
"group": "group_Testing",
|
||||
"instance": "foo",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"summary": `Alert "ExtraTemplating(Testing)" for instance foo`,
|
||||
"description": `Alert "alert_ExtraTemplating(group_Testing)" for instance foo`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeGroup := Group{Name: "TestRule_Exec"}
|
||||
for _, tc := range testCases {
|
||||
|
|
|
@ -46,8 +46,6 @@ type Group struct {
|
|||
XXX map[string]interface{} `yaml:",inline"`
|
||||
}
|
||||
|
||||
const extraLabelParam = "extra_label"
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
type group Group
|
||||
|
@ -68,8 +66,14 @@ func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
if g.Params == nil {
|
||||
g.Params = url.Values{}
|
||||
}
|
||||
// Sort extraFilters for consistent order for query args across runs.
|
||||
extraFilters := make([]string, 0, len(g.ExtraFilterLabels))
|
||||
for k, v := range g.ExtraFilterLabels {
|
||||
g.Params.Add(extraLabelParam, fmt.Sprintf("%s=%s", k, v))
|
||||
extraFilters = append(extraFilters, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
sort.Strings(extraFilters)
|
||||
for _, extraFilter := range extraFilters {
|
||||
g.Params.Add("extra_label", extraFilter)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -538,7 +538,7 @@ extra_filter_labels:
|
|||
rules:
|
||||
- alert: ExampleAlertAlwaysFiring
|
||||
expr: sum by(job) (up == 1)
|
||||
`, url.Values{extraLabelParam: {"job=victoriametrics", "env=prod"}})
|
||||
`, url.Values{"extra_label": {"env=prod", "job=victoriametrics"}})
|
||||
})
|
||||
|
||||
t.Run("extra labels and params", func(t *testing.T) {
|
||||
|
@ -552,6 +552,6 @@ params:
|
|||
rules:
|
||||
- alert: ExampleAlertAlwaysFiring
|
||||
expr: sum by(job) (up == 1)
|
||||
`, url.Values{"nocache": {"1"}, extraLabelParam: {"env=prod", "job=victoriametrics"}})
|
||||
`, url.Values{"nocache": {"1"}, "extra_label": {"env=prod", "job=victoriametrics"}})
|
||||
})
|
||||
}
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
`vmauth` is a simple auth proxy, router and [load balancer](#load-balancing) for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It reads auth credentials from `Authorization` http header ([Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication) and `Bearer token` is supported),
|
||||
matches them against configs pointed by [-auth.config](#auth-config) command-line flag and proxies incoming HTTP requests to the configured per-user `url_prefix` on successful match.
|
||||
|
||||
The `-auth.config` can point to either local file or to http url.
|
||||
|
||||
## Quick start
|
||||
|
||||
|
@ -26,12 +26,10 @@ Pass `-help` to `vmauth` in order to see all the supported command-line flags wi
|
|||
Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML,
|
||||
accounting and rate limiting such as [vmgateway](https://docs.victoriametrics.com/vmgateway.html).
|
||||
|
||||
|
||||
## Load balancing
|
||||
|
||||
Each `url_prefix` in the [-auth.config](#auth-config) may contain either a single url or a list of urls. In the latter case `vmauth` balances load among the configured urls in a round-robin manner. This feature is useful for balancing the load among multiple `vmselect` and/or `vminsert` nodes in [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||
|
||||
|
||||
## Auth config
|
||||
|
||||
`-auth.config` is represented in the following simple `yml` format:
|
||||
|
@ -124,7 +122,6 @@ users:
|
|||
The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
|
||||
This may be useful for passing secrets to the config.
|
||||
|
||||
|
||||
## Security
|
||||
|
||||
Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable https. This can be done by passing the following `-tls*` command-line flags to `vmauth`:
|
||||
|
@ -142,7 +139,6 @@ Alternatively, [https termination proxy](https://en.wikipedia.org/wiki/TLS_termi
|
|||
|
||||
It is recommended protecting `/-/reload` endpoint with `-reloadAuthKey` command-line flag, so external users couldn't trigger config reload.
|
||||
|
||||
|
||||
## Monitoring
|
||||
|
||||
`vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page
|
||||
|
@ -161,7 +157,6 @@ users:
|
|||
|
||||
It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there.
|
||||
|
||||
|
||||
### Development build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
|
||||
|
@ -187,7 +182,6 @@ by setting it via `<ROOT_IMAGE>` environment variable. For example, the followin
|
|||
ROOT_IMAGE=scratch make package-vmauth
|
||||
```
|
||||
|
||||
|
||||
## Profiling
|
||||
|
||||
`vmauth` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
|
||||
|
@ -208,7 +202,6 @@ The command for collecting CPU profile waits for 30 seconds before returning.
|
|||
|
||||
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
||||
|
||||
|
||||
## Advanced usage
|
||||
|
||||
Pass `-help` command-line arg to `vmauth` in order to see all the configuration options:
|
||||
|
@ -221,7 +214,7 @@ vmauth authenticates and authorizes incoming requests and proxies them to Victor
|
|||
See the docs at https://docs.victoriametrics.com/vmauth.html .
|
||||
|
||||
-auth.config string
|
||||
Path to auth config. See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config
|
||||
Path to auth config. It can point either to local file or to http url. See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config
|
||||
-enableTCP6
|
||||
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
|
||||
-envflag.enable
|
||||
|
@ -249,7 +242,7 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
|||
-httpListenAddr string
|
||||
TCP address to listen for http connections (default ":8427")
|
||||
-logInvalidAuthTokens
|
||||
Whether to log requests with invalid auth tokens. Such requests are always counted at vmagent_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
|
||||
Whether to log requests with invalid auth tokens. Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
|
||||
-loggerDisableTimestamps
|
||||
Whether to disable writing timestamps in logs
|
||||
-loggerErrorsPerSecondLimit int
|
||||
|
@ -272,9 +265,9 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
|||
-memory.allowedPercent float
|
||||
Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
|
||||
-metricsAuthKey string
|
||||
Auth key for /metrics. It overrides httpAuth settings
|
||||
Auth key for /metrics. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||
-pprofAuthKey string
|
||||
Auth key for /debug/pprof. It overrides httpAuth settings
|
||||
Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||
-reloadAuthKey string
|
||||
Auth key for /-/reload http endpoint. It must be passed as authKey=...
|
||||
-tls
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"encoding/base64"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
|
@ -14,6 +13,7 @@ import (
|
|||
"sync/atomic"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
@ -21,8 +21,8 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
authConfigPath = flag.String("auth.config", "", "Path to auth config. See https://docs.victoriametrics.com/vmauth.html "+
|
||||
"for details on the format of this auth config")
|
||||
authConfigPath = flag.String("auth.config", "", "Path to auth config. It can point either to local file or to http url. "+
|
||||
"See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config")
|
||||
)
|
||||
|
||||
// AuthConfig represents auth config.
|
||||
|
@ -237,9 +237,9 @@ var authConfigWG sync.WaitGroup
|
|||
var stopCh chan struct{}
|
||||
|
||||
func readAuthConfig(path string) (map[string]*UserInfo, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
data, err := fs.ReadFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read %q: %w", path, err)
|
||||
return nil, err
|
||||
}
|
||||
m, err := parseAuthConfig(data)
|
||||
if err != nil {
|
||||
|
|
|
@ -73,7 +73,6 @@ func main() {
|
|||
}()
|
||||
}
|
||||
|
||||
logger.Infof("starting http server for exporting metrics at http://%q/metrics", *httpListenAddr)
|
||||
go httpserver.Serve(*httpListenAddr, nil)
|
||||
|
||||
srcFS, err := newSrcFS()
|
||||
|
|
|
@ -43,7 +43,8 @@ var (
|
|||
"Usually :4242 must be set. Doesn't work if empty")
|
||||
opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpentTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty")
|
||||
configAuthKey = flag.String("configAuthKey", "", "Authorization key for accessing /config page. It must be passed via authKey query arg")
|
||||
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped")
|
||||
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented")
|
||||
maxLabelValueLen = flag.Int("maxLabelValueLen", 16*1024, "The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented")
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -57,6 +58,7 @@ var (
|
|||
func Init() {
|
||||
relabel.Init()
|
||||
storage.SetMaxLabelsPerTimeseries(*maxLabelsPerTimeseries)
|
||||
storage.SetMaxLabelValueLen(*maxLabelValueLen)
|
||||
common.StartUnmarshalWorkers()
|
||||
writeconcurrencylimiter.Init()
|
||||
if len(*graphiteListenAddr) > 0 {
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
|
||||
var (
|
||||
relabelConfig = flag.String("relabelConfig", "", "Optional path to a file with relabeling rules, which are applied to all the ingested metrics. "+
|
||||
"The path can point either to local file or to http url. "+
|
||||
"See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal")
|
||||
relabelDebug = flag.Bool("relabelDebug", false, "Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, "+
|
||||
"then the metrics aren't sent to storage. This is useful for debugging the relabeling configs")
|
||||
|
|
|
@ -36,7 +36,6 @@ func main() {
|
|||
buildinfo.Init()
|
||||
logger.Init()
|
||||
|
||||
logger.Infof("starting http server for exporting metrics at http://%q/metrics", *httpListenAddr)
|
||||
go httpserver.Serve(*httpListenAddr, nil)
|
||||
|
||||
srcFS, err := newSrcFS()
|
||||
|
|
|
@ -32,7 +32,7 @@ func TagsDelSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Re
|
|||
var row graphiteparser.Row
|
||||
var tagsPool []graphiteparser.Tag
|
||||
ct := startTime.UnixNano() / 1e6
|
||||
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot setup tag filters: %w", err)
|
||||
}
|
||||
|
@ -53,8 +53,8 @@ func TagsDelSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Re
|
|||
Value: []byte(tag.Value),
|
||||
})
|
||||
}
|
||||
tfs = append(tfs, etfs...)
|
||||
sq := storage.NewSearchQuery(0, ct, [][]storage.TagFilter{tfs})
|
||||
tfss := joinTagFilterss(tfs, etfs)
|
||||
sq := storage.NewSearchQuery(0, ct, tfss)
|
||||
n, err := netstorage.DeleteSeries(sq, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot delete series for %q: %w", sq, err)
|
||||
|
@ -181,7 +181,7 @@ func TagsAutoCompleteValuesHandler(startTime time.Time, w http.ResponseWriter, r
|
|||
valuePrefix := r.FormValue("valuePrefix")
|
||||
exprs := r.Form["expr"]
|
||||
var tagValues []string
|
||||
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot setup tag filters: %w", err)
|
||||
}
|
||||
|
@ -266,7 +266,7 @@ func TagsAutoCompleteTagsHandler(startTime time.Time, w http.ResponseWriter, r *
|
|||
tagPrefix := r.FormValue("tagPrefix")
|
||||
exprs := r.Form["expr"]
|
||||
var labels []string
|
||||
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot setup tag filters: %w", err)
|
||||
}
|
||||
|
@ -345,7 +345,7 @@ func TagsFindSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.R
|
|||
if len(exprs) == 0 {
|
||||
return fmt.Errorf("expecting at least one `expr` query arg")
|
||||
}
|
||||
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot setup tag filters: %w", err)
|
||||
}
|
||||
|
@ -474,14 +474,14 @@ func getInt(r *http.Request, argName string) (int, error) {
|
|||
return n, nil
|
||||
}
|
||||
|
||||
func getSearchQueryForExprs(startTime time.Time, etfs []storage.TagFilter, exprs []string) (*storage.SearchQuery, error) {
|
||||
func getSearchQueryForExprs(startTime time.Time, etfs [][]storage.TagFilter, exprs []string) (*storage.SearchQuery, error) {
|
||||
tfs, err := exprsToTagFilters(exprs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ct := startTime.UnixNano() / 1e6
|
||||
tfs = append(tfs, etfs...)
|
||||
sq := storage.NewSearchQuery(0, ct, [][]storage.TagFilter{tfs})
|
||||
tfss := joinTagFilterss(tfs, etfs)
|
||||
sq := storage.NewSearchQuery(0, ct, tfss)
|
||||
return sq, nil
|
||||
}
|
||||
|
||||
|
@ -524,3 +524,7 @@ func parseFilterExpr(s string) (*storage.TagFilter, error) {
|
|||
IsRegexp: isRegexp,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func joinTagFilterss(tfs []storage.TagFilter, extraFilters [][]storage.TagFilter) [][]storage.TagFilter {
|
||||
return searchutils.JoinTagFilterss([][]storage.TagFilter{tfs}, extraFilters)
|
||||
}
|
||||
|
|
|
@ -283,11 +283,11 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
|||
if start >= end {
|
||||
end = start + defaultStep
|
||||
}
|
||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := exportHandler(w, matches, etf, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil {
|
||||
if err := exportHandler(w, matches, etfs, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil {
|
||||
return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err)
|
||||
}
|
||||
return nil
|
||||
|
@ -295,7 +295,7 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
|||
|
||||
var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)
|
||||
|
||||
func exportHandler(w http.ResponseWriter, matches []string, etf []storage.TagFilter, start, end int64, format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error {
|
||||
func exportHandler(w http.ResponseWriter, matches []string, etfs [][]storage.TagFilter, start, end int64, format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error {
|
||||
writeResponseFunc := WriteExportStdResponse
|
||||
writeLineFunc := func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
|
@ -352,7 +352,7 @@ func exportHandler(w http.ResponseWriter, matches []string, etf []storage.TagFil
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
||||
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
|
||||
|
||||
sq := storage.NewSearchQuery(start, end, tagFilterss)
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
|
@ -478,13 +478,13 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
|
|||
if err := r.ParseForm(); err != nil {
|
||||
return fmt.Errorf("cannot parse form values: %w", err)
|
||||
}
|
||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
matches := getMatchesFromRequest(r)
|
||||
var labelValues []string
|
||||
if len(matches) == 0 && len(etf) == 0 {
|
||||
if len(matches) == 0 && len(etfs) == 0 {
|
||||
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
||||
var err error
|
||||
labelValues, err = netstorage.GetLabelValues(labelName, deadline)
|
||||
|
@ -527,7 +527,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
labelValues, err = labelValuesWithMatches(labelName, matches, etf, start, end, deadline)
|
||||
labelValues, err = labelValuesWithMatches(labelName, matches, etfs, start, end, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
|
||||
}
|
||||
|
@ -543,7 +543,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
|
|||
return nil
|
||||
}
|
||||
|
||||
func labelValuesWithMatches(labelName string, matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
|
||||
func labelValuesWithMatches(labelName string, matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -564,7 +564,7 @@ func labelValuesWithMatches(labelName string, matches []string, etf []storage.Ta
|
|||
if start >= end {
|
||||
end = start + defaultStep
|
||||
}
|
||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
||||
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
|
||||
if len(tagFilterss) == 0 {
|
||||
logger.Panicf("BUG: tagFilterss must be non-empty")
|
||||
}
|
||||
|
@ -648,7 +648,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
|||
if err := r.ParseForm(); err != nil {
|
||||
return fmt.Errorf("cannot parse form values: %w", err)
|
||||
}
|
||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -679,13 +679,13 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
|||
topN = n
|
||||
}
|
||||
var status *storage.TSDBStatus
|
||||
if len(matches) == 0 && len(etf) == 0 {
|
||||
if len(matches) == 0 && len(etfs) == 0 {
|
||||
status, err = netstorage.GetTSDBStatusForDate(deadline, date, topN)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
|
||||
}
|
||||
} else {
|
||||
status, err = tsdbStatusWithMatches(matches, etf, date, topN, deadline)
|
||||
status, err = tsdbStatusWithMatches(matches, etfs, date, topN, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot obtain tsdb status with matches for date=%d, topN=%d: %w", date, topN, err)
|
||||
}
|
||||
|
@ -700,12 +700,12 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
|||
return nil
|
||||
}
|
||||
|
||||
func tsdbStatusWithMatches(matches []string, etf []storage.TagFilter, date uint64, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
|
||||
func tsdbStatusWithMatches(matches []string, etfs [][]storage.TagFilter, date uint64, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
||||
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
|
||||
if len(tagFilterss) == 0 {
|
||||
logger.Panicf("BUG: tagFilterss must be non-empty")
|
||||
}
|
||||
|
@ -731,13 +731,13 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
|||
if err := r.ParseForm(); err != nil {
|
||||
return fmt.Errorf("cannot parse form values: %w", err)
|
||||
}
|
||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
matches := getMatchesFromRequest(r)
|
||||
var labels []string
|
||||
if len(matches) == 0 && len(etf) == 0 {
|
||||
if len(matches) == 0 && len(etfs) == 0 {
|
||||
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
||||
var err error
|
||||
labels, err = netstorage.GetLabels(deadline)
|
||||
|
@ -778,7 +778,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
labels, err = labelsWithMatches(matches, etf, start, end, deadline)
|
||||
labels, err = labelsWithMatches(matches, etfs, start, end, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
|
||||
}
|
||||
|
@ -794,7 +794,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
|||
return nil
|
||||
}
|
||||
|
||||
func labelsWithMatches(matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
|
||||
func labelsWithMatches(matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -802,7 +802,7 @@ func labelsWithMatches(matches []string, etf []storage.TagFilter, start, end int
|
|||
if start >= end {
|
||||
end = start + defaultStep
|
||||
}
|
||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
||||
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
|
||||
if len(tagFilterss) == 0 {
|
||||
logger.Panicf("BUG: tagFilterss must be non-empty")
|
||||
}
|
||||
|
@ -999,7 +999,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
|||
if len(query) > maxQueryLen.N {
|
||||
return fmt.Errorf("too long query; got %d bytes; mustn't exceed `-search.maxQueryLen=%d` bytes", len(query), maxQueryLen.N)
|
||||
}
|
||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1014,7 +1014,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
|||
if end < start {
|
||||
end = start
|
||||
}
|
||||
if err := exportHandler(w, []string{childQuery}, etf, start, end, "promapi", 0, false, deadline); err != nil {
|
||||
if err := exportHandler(w, []string{childQuery}, etfs, start, end, "promapi", 0, false, deadline); err != nil {
|
||||
return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
|
||||
}
|
||||
queryDuration.UpdateDuration(startTime)
|
||||
|
@ -1030,7 +1030,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
|||
start -= offset
|
||||
end := start
|
||||
start = end - window
|
||||
if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct, etf); err != nil {
|
||||
if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct, etfs); err != nil {
|
||||
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
|
||||
}
|
||||
queryDuration.UpdateDuration(startTime)
|
||||
|
@ -1048,14 +1048,14 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
|||
queryOffset = 0
|
||||
}
|
||||
ec := promql.EvalConfig{
|
||||
Start: start,
|
||||
End: start,
|
||||
Step: step,
|
||||
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
|
||||
Deadline: deadline,
|
||||
LookbackDelta: lookbackDelta,
|
||||
RoundDigits: getRoundDigits(r),
|
||||
EnforcedTagFilters: etf,
|
||||
Start: start,
|
||||
End: start,
|
||||
Step: step,
|
||||
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
|
||||
Deadline: deadline,
|
||||
LookbackDelta: lookbackDelta,
|
||||
RoundDigits: getRoundDigits(r),
|
||||
EnforcedTagFilterss: etfs,
|
||||
}
|
||||
result, err := promql.Exec(&ec, query, true)
|
||||
if err != nil {
|
||||
|
@ -1105,17 +1105,17 @@ func QueryRangeHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct, etf); err != nil {
|
||||
if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct, etfs); err != nil {
|
||||
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etf []storage.TagFilter) error {
|
||||
func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etfs [][]storage.TagFilter) error {
|
||||
deadline := searchutils.GetDeadlineForQuery(r, startTime)
|
||||
mayCache := !searchutils.GetBool(r, "nocache")
|
||||
lookbackDelta, err := getMaxLookback(r)
|
||||
|
@ -1138,15 +1138,15 @@ func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string,
|
|||
}
|
||||
|
||||
ec := promql.EvalConfig{
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
|
||||
Deadline: deadline,
|
||||
MayCache: mayCache,
|
||||
LookbackDelta: lookbackDelta,
|
||||
RoundDigits: getRoundDigits(r),
|
||||
EnforcedTagFilters: etf,
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
|
||||
Deadline: deadline,
|
||||
MayCache: mayCache,
|
||||
LookbackDelta: lookbackDelta,
|
||||
RoundDigits: getRoundDigits(r),
|
||||
EnforcedTagFilterss: etfs,
|
||||
}
|
||||
result, err := promql.Exec(&ec, query, false)
|
||||
if err != nil {
|
||||
|
@ -1254,24 +1254,12 @@ func getMaxLookback(r *http.Request) (int64, error) {
|
|||
return searchutils.GetDuration(r, "max_lookback", d)
|
||||
}
|
||||
|
||||
func addEnforcedFiltersToTagFilterss(dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter) [][]storage.TagFilter {
|
||||
if len(dstTfss) == 0 {
|
||||
return [][]storage.TagFilter{
|
||||
enforcedFilters,
|
||||
}
|
||||
}
|
||||
for i := range dstTfss {
|
||||
dstTfss[i] = append(dstTfss[i], enforcedFilters...)
|
||||
}
|
||||
return dstTfss
|
||||
}
|
||||
|
||||
func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error) {
|
||||
tagFilterss := make([][]storage.TagFilter, 0, len(matches))
|
||||
for _, match := range matches {
|
||||
tagFilters, err := promql.ParseMetricSelector(match)
|
||||
tagFilters, err := searchutils.ParseMetricSelector(match)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse %q: %w", match, err)
|
||||
return nil, fmt.Errorf("cannot parse matches[]=%s: %w", match, err)
|
||||
}
|
||||
tagFilterss = append(tagFilterss, tagFilters)
|
||||
}
|
||||
|
@ -1287,11 +1275,11 @@ func getTagFilterssFromRequest(r *http.Request) ([][]storage.TagFilter, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
|
||||
etfs, err := searchutils.GetExtraTagFilters(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
||||
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
|
||||
return tagFilterss, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
func TestRemoveEmptyValuesAndTimeseries(t *testing.T) {
|
||||
|
@ -196,38 +195,3 @@ func TestAdjustLastPoints(t *testing.T) {
|
|||
},
|
||||
})
|
||||
}
|
||||
|
||||
// helper for tests
|
||||
func tfFromKV(k, v string) storage.TagFilter {
|
||||
return storage.TagFilter{
|
||||
Key: []byte(k),
|
||||
Value: []byte(v),
|
||||
}
|
||||
}
|
||||
|
||||
func Test_addEnforcedFiltersToTagFilterss(t *testing.T) {
|
||||
f := func(t *testing.T, dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter, want [][]storage.TagFilter) {
|
||||
t.Helper()
|
||||
got := addEnforcedFiltersToTagFilterss(dstTfss, enforcedFilters)
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("unxpected result for addEnforcedFiltersToTagFilterss, \ngot: %v,\n want: %v", want, got)
|
||||
}
|
||||
}
|
||||
f(t, [][]storage.TagFilter{{tfFromKV("label", "value")}},
|
||||
nil,
|
||||
[][]storage.TagFilter{{tfFromKV("label", "value")}})
|
||||
|
||||
f(t, nil,
|
||||
[]storage.TagFilter{tfFromKV("ext-label", "ext-value")},
|
||||
[][]storage.TagFilter{{tfFromKV("ext-label", "ext-value")}})
|
||||
|
||||
f(t, [][]storage.TagFilter{
|
||||
{tfFromKV("l1", "v1")},
|
||||
{tfFromKV("l2", "v2")},
|
||||
},
|
||||
[]storage.TagFilter{tfFromKV("ext-l1", "v2")},
|
||||
[][]storage.TagFilter{
|
||||
{tfFromKV("l1", "v1"), tfFromKV("ext-l1", "v2")},
|
||||
{tfFromKV("l2", "v2"), tfFromKV("ext-l1", "v2")},
|
||||
})
|
||||
}
|
||||
|
|
|
@ -29,7 +29,6 @@ var aggrFuncs = map[string]aggrFunc{
|
|||
"geomean": newAggrFunc(aggrFuncGeomean),
|
||||
"group": newAggrFunc(aggrFuncGroup),
|
||||
"histogram": newAggrFunc(aggrFuncHistogram),
|
||||
"limit_offset": aggrFuncLimitOffset,
|
||||
"limitk": aggrFuncLimitK,
|
||||
"mad": newAggrFunc(aggrFuncMAD),
|
||||
"max": newAggrFunc(aggrFuncMax),
|
||||
|
@ -1005,37 +1004,12 @@ func aggrFuncLimitK(afa *aggrFuncArg) ([]*timeseries, error) {
|
|||
if len(limits) > 0 {
|
||||
limit = int(limits[0])
|
||||
}
|
||||
afe := newLimitOffsetAggrFunc(limit, 0)
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, afa.ae.Limit, true)
|
||||
}
|
||||
|
||||
func aggrFuncLimitOffset(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 3); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
limit, err := getIntNumber(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain limit arg: %w", err)
|
||||
}
|
||||
offset, err := getIntNumber(args[1], 1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain offset arg: %w", err)
|
||||
}
|
||||
afe := newLimitOffsetAggrFunc(limit, offset)
|
||||
return aggrFuncExt(afe, args[2], &afa.ae.Modifier, afa.ae.Limit, true)
|
||||
}
|
||||
|
||||
func newLimitOffsetAggrFunc(limit, offset int) func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
|
||||
if offset < 0 {
|
||||
offset = 0
|
||||
}
|
||||
if limit < 0 {
|
||||
limit = 0
|
||||
}
|
||||
return func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
|
||||
afe := func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
|
||||
// Sort series by metricName hash in order to get consistent set of output series
|
||||
// across multiple calls to limitk() and limit_offset() functions.
|
||||
// across multiple calls to limitk() function.
|
||||
// Sort series by hash in order to guarantee uniform selection across series.
|
||||
type hashSeries struct {
|
||||
h uint64
|
||||
|
@ -1056,15 +1030,12 @@ func newLimitOffsetAggrFunc(limit, offset int) func(tss []*timeseries, modifier
|
|||
for i, hs := range hss {
|
||||
tss[i] = hs.ts
|
||||
}
|
||||
if offset > len(tss) {
|
||||
return nil
|
||||
}
|
||||
tss = tss[offset:]
|
||||
if limit < len(tss) {
|
||||
tss = tss[:limit]
|
||||
}
|
||||
return tss
|
||||
}
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, afa.ae.Limit, true)
|
||||
}
|
||||
|
||||
func getHash(d *xxhash.Digest, mn *storage.MetricName) uint64 {
|
||||
|
|
|
@ -104,8 +104,8 @@ type EvalConfig struct {
|
|||
// How many decimal digits after the point to leave in response.
|
||||
RoundDigits int
|
||||
|
||||
// EnforcedTagFilters used for apply additional label filters to query.
|
||||
EnforcedTagFilters []storage.TagFilter
|
||||
// EnforcedTagFilterss may contain additional label filters to use in the query.
|
||||
EnforcedTagFilterss [][]storage.TagFilter
|
||||
|
||||
timestamps []int64
|
||||
timestampsOnce sync.Once
|
||||
|
@ -121,7 +121,7 @@ func newEvalConfig(src *EvalConfig) *EvalConfig {
|
|||
ec.MayCache = src.MayCache
|
||||
ec.LookbackDelta = src.LookbackDelta
|
||||
ec.RoundDigits = src.RoundDigits
|
||||
ec.EnforcedTagFilters = src.EnforcedTagFilters
|
||||
ec.EnforcedTagFilterss = src.EnforcedTagFilterss
|
||||
|
||||
// do not copy src.timestamps - they must be generated again.
|
||||
return &ec
|
||||
|
@ -672,16 +672,15 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
|
|||
}
|
||||
|
||||
// Fetch the remaining part of the result.
|
||||
tfs := toTagFilters(me.LabelFilters)
|
||||
// append external filters.
|
||||
tfs = append(tfs, ec.EnforcedTagFilters...)
|
||||
tfs := searchutils.ToTagFilters(me.LabelFilters)
|
||||
tfss := searchutils.JoinTagFilterss([][]storage.TagFilter{tfs}, ec.EnforcedTagFilterss)
|
||||
minTimestamp := start - maxSilenceInterval
|
||||
if window > ec.Step {
|
||||
minTimestamp -= window
|
||||
} else {
|
||||
minTimestamp -= ec.Step
|
||||
}
|
||||
sq := storage.NewSearchQuery(minTimestamp, ec.End, [][]storage.TagFilter{tfs})
|
||||
sq := storage.NewSearchQuery(minTimestamp, ec.End, tfss)
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, true, ec.Deadline)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -877,26 +876,6 @@ func mulNoOverflow(a, b int64) int64 {
|
|||
return a * b
|
||||
}
|
||||
|
||||
func toTagFilters(lfs []metricsql.LabelFilter) []storage.TagFilter {
|
||||
tfs := make([]storage.TagFilter, len(lfs))
|
||||
for i := range lfs {
|
||||
toTagFilter(&tfs[i], &lfs[i])
|
||||
}
|
||||
return tfs
|
||||
}
|
||||
|
||||
func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
|
||||
if src.Label != "__name__" {
|
||||
dst.Key = []byte(src.Label)
|
||||
} else {
|
||||
// This is required for storage.Search.
|
||||
dst.Key = nil
|
||||
}
|
||||
dst.Value = []byte(src.Value)
|
||||
dst.IsRegexp = src.IsRegexp
|
||||
dst.IsNegative = src.IsNegative
|
||||
}
|
||||
|
||||
func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]float64, []int64) {
|
||||
if *noStaleMarkers || funcName == "default_rollup" {
|
||||
// Do not drop Prometheus staleness marks (aka stale NaNs) for default_rollup() function,
|
||||
|
|
|
@ -2055,6 +2055,24 @@ func TestExecSuccess(t *testing.T) {
|
|||
resultExpected := []netstorage.Result{r1, r2, r3}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`limit_offset`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `limit_offset(1, 1, sort_by_label((
|
||||
label_set(time()*1, "foo", "y"),
|
||||
label_set(time()*2, "foo", "a"),
|
||||
label_set(time()*3, "foo", "x"),
|
||||
), "foo"))`
|
||||
r := netstorage.Result{
|
||||
Values: []float64{3000, 3600, 4200, 4800, 5400, 6000},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r.MetricName.Tags = []storage.Tag{{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("x"),
|
||||
}}
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`sum(label_graphite_group)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort(sum by (__name__) (
|
||||
|
@ -5161,21 +5179,6 @@ func TestExecSuccess(t *testing.T) {
|
|||
resultExpected := []netstorage.Result{r1}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`limit_offset()`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `limit_offset(1, 0, (label_set(10, "foo", "bar"), label_set(time()/150, "xbaz", "sss")))`
|
||||
r1 := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{10, 10, 10, 10, 10, 10},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r1.MetricName.Tags = []storage.Tag{{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
}}
|
||||
resultExpected := []netstorage.Result{r1}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`limitk(10)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort(limitk(10, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss")))`
|
||||
|
|
|
@ -1,9 +1,6 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metricsql"
|
||||
)
|
||||
|
||||
|
@ -43,21 +40,3 @@ func IsMetricSelectorWithRollup(s string) (childQuery string, window, offset *me
|
|||
wrappedQuery := me.AppendString(nil)
|
||||
return string(wrappedQuery), re.Window, re.Offset
|
||||
}
|
||||
|
||||
// ParseMetricSelector parses s containing PromQL metric selector
|
||||
// and returns the corresponding LabelFilters.
|
||||
func ParseMetricSelector(s string) ([]storage.TagFilter, error) {
|
||||
expr, err := parsePromQLWithCache(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
me, ok := expr.(*metricsql.MetricExpr)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expecting metricSelector; got %q", expr.AppendString(nil))
|
||||
}
|
||||
if len(me.LabelFilters) == 0 {
|
||||
return nil, fmt.Errorf("labelFilters cannot be empty")
|
||||
}
|
||||
tfs := toTagFilters(me.LabelFilters)
|
||||
return tfs, nil
|
||||
}
|
||||
|
|
|
@ -1,50 +0,0 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseMetricSelectorSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
tfs, err := ParseMetricSelector(s)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when parsing %q: %s", s, err)
|
||||
}
|
||||
if tfs == nil {
|
||||
t.Fatalf("expecting non-nil tfs when parsing %q", s)
|
||||
}
|
||||
}
|
||||
f("foo")
|
||||
f(":foo")
|
||||
f(" :fo:bar.baz")
|
||||
f(`a{}`)
|
||||
f(`{foo="bar"}`)
|
||||
f(`{:f:oo=~"bar.+"}`)
|
||||
f(`foo {bar != "baz"}`)
|
||||
f(` foo { bar !~ "^ddd(x+)$", a="ss", __name__="sffd"} `)
|
||||
f(`(foo)`)
|
||||
f(`\п\р\и\в\е\т{\ы="111"}`)
|
||||
}
|
||||
|
||||
func TestParseMetricSelectorError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
tfs, err := ParseMetricSelector(s)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
}
|
||||
if tfs != nil {
|
||||
t.Fatalf("expecting nil tfs when parsing %q", s)
|
||||
}
|
||||
}
|
||||
f("")
|
||||
f(`{}`)
|
||||
f(`foo bar`)
|
||||
f(`foo+bar`)
|
||||
f(`sum(bar)`)
|
||||
f(`x{y}`)
|
||||
f(`x{y+z}`)
|
||||
f(`foo[5m]`)
|
||||
f(`foo offset 5m`)
|
||||
}
|
|
@ -194,7 +194,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
|
|||
bb := bbPool.Get()
|
||||
defer bbPool.Put(bb)
|
||||
|
||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilters)
|
||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
|
||||
metainfoBuf := rrc.c.Get(nil, bb.B)
|
||||
if len(metainfoBuf) == 0 {
|
||||
return nil, ec.Start
|
||||
|
@ -214,7 +214,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
|
|||
if len(compressedResultBuf.B) == 0 {
|
||||
mi.RemoveKey(key)
|
||||
metainfoBuf = mi.Marshal(metainfoBuf[:0])
|
||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilters)
|
||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
|
||||
rrc.c.Set(bb.B, metainfoBuf)
|
||||
return nil, ec.Start
|
||||
}
|
||||
|
@ -317,7 +317,7 @@ func (rrc *rollupResultCache) Put(ec *EvalConfig, expr metricsql.Expr, window in
|
|||
bb.B = key.Marshal(bb.B[:0])
|
||||
rrc.c.SetBig(bb.B, compressedResultBuf.B)
|
||||
|
||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilters)
|
||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
|
||||
metainfoBuf := rrc.c.Get(nil, bb.B)
|
||||
var mi rollupResultCacheMetainfo
|
||||
if len(metainfoBuf) > 0 {
|
||||
|
@ -347,14 +347,19 @@ var tooBigRollupResults = metrics.NewCounter("vm_too_big_rollup_results_total")
|
|||
// Increment this value every time the format of the cache changes.
|
||||
const rollupResultCacheVersion = 8
|
||||
|
||||
func marshalRollupResultCacheKey(dst []byte, expr metricsql.Expr, window, step int64, filters []storage.TagFilter) []byte {
|
||||
func marshalRollupResultCacheKey(dst []byte, expr metricsql.Expr, window, step int64, etfs [][]storage.TagFilter) []byte {
|
||||
dst = append(dst, rollupResultCacheVersion)
|
||||
dst = encoding.MarshalUint64(dst, rollupResultCacheKeyPrefix)
|
||||
dst = encoding.MarshalInt64(dst, window)
|
||||
dst = encoding.MarshalInt64(dst, step)
|
||||
dst = expr.AppendString(dst)
|
||||
for _, f := range filters {
|
||||
dst = f.Marshal(dst)
|
||||
for i, etf := range etfs {
|
||||
for _, f := range etf {
|
||||
dst = f.Marshal(dst)
|
||||
}
|
||||
if i+1 < len(etfs) {
|
||||
dst = append(dst, '|')
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
|
@ -69,6 +70,7 @@ var transformFuncs = map[string]transformFunc{
|
|||
"label_transform": transformLabelTransform,
|
||||
"label_uppercase": transformLabelUppercase,
|
||||
"label_value": transformLabelValue,
|
||||
"limit_offset": transformLimitOffset,
|
||||
"ln": newTransformFuncOneArg(transformLn),
|
||||
"log2": newTransformFuncOneArg(transformLog2),
|
||||
"log10": newTransformFuncOneArg(transformLog10),
|
||||
|
@ -218,7 +220,7 @@ func getAbsentTimeseries(ec *EvalConfig, arg metricsql.Expr) []*timeseries {
|
|||
if !ok {
|
||||
return rvs
|
||||
}
|
||||
tfs := toTagFilters(me.LabelFilters)
|
||||
tfs := searchutils.ToTagFilters(me.LabelFilters)
|
||||
for i := range tfs {
|
||||
tf := &tfs[i]
|
||||
if len(tf.Key) == 0 {
|
||||
|
@ -1770,6 +1772,29 @@ func transformLabelGraphiteGroup(tfa *transformFuncArg) ([]*timeseries, error) {
|
|||
|
||||
var dotSeparator = []byte(".")
|
||||
|
||||
func transformLimitOffset(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
args := tfa.args
|
||||
if err := expectTransformArgsNum(args, 3); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
limit, err := getIntNumber(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain limit arg: %w", err)
|
||||
}
|
||||
offset, err := getIntNumber(args[1], 1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain offset arg: %w", err)
|
||||
}
|
||||
rvs := args[2]
|
||||
if len(rvs) >= offset {
|
||||
rvs = rvs[offset:]
|
||||
}
|
||||
if len(rvs) > limit {
|
||||
rvs = rvs[:limit]
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func transformLn(v float64) float64 {
|
||||
return math.Log(v)
|
||||
}
|
||||
|
|
|
@ -9,9 +9,8 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metricsql"
|
||||
)
|
||||
|
||||
|
@ -198,15 +197,17 @@ func (d *Deadline) String() string {
|
|||
return fmt.Sprintf("%.3f seconds (elapsed %.3f seconds); the timeout can be adjusted with `%s` command-line flag", d.timeout.Seconds(), elapsed.Seconds(), d.flagHint)
|
||||
}
|
||||
|
||||
// GetEnforcedTagFiltersFromRequest returns additional filters from request.
|
||||
func GetEnforcedTagFiltersFromRequest(r *http.Request) ([]storage.TagFilter, error) {
|
||||
// fast path.
|
||||
extraLabels := r.Form["extra_label"]
|
||||
if len(extraLabels) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
tagFilters := make([]storage.TagFilter, 0, len(extraLabels))
|
||||
for _, match := range extraLabels {
|
||||
// GetExtraTagFilters returns additional label filters from request.
|
||||
//
|
||||
// Label filters can be present in extra_label and extra_filters[] query args.
|
||||
// They are combined. For example, the following query args:
|
||||
// extra_label=t1=v1&extra_label=t2=v2&extra_filters[]={env="prod",team="devops"}&extra_filters={env=~"dev|staging",team!="devops"}
|
||||
// should be translated to the following filters joined with "or":
|
||||
// {env="prod",team="devops",t1="v1",t2="v2"}
|
||||
// {env=~"dev|staging",team!="devops",t1="v1",t2="v2"}
|
||||
func GetExtraTagFilters(r *http.Request) ([][]storage.TagFilter, error) {
|
||||
var tagFilters []storage.TagFilter
|
||||
for _, match := range r.Form["extra_label"] {
|
||||
tmp := strings.SplitN(match, "=", 2)
|
||||
if len(tmp) != 2 {
|
||||
return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match)
|
||||
|
@ -216,5 +217,79 @@ func GetEnforcedTagFiltersFromRequest(r *http.Request) ([]storage.TagFilter, err
|
|||
Value: []byte(tmp[1]),
|
||||
})
|
||||
}
|
||||
return tagFilters, nil
|
||||
extraFilters := r.Form["extra_filters"]
|
||||
extraFilters = append(extraFilters, r.Form["extra_filters[]"]...)
|
||||
if len(extraFilters) == 0 {
|
||||
if len(tagFilters) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return [][]storage.TagFilter{tagFilters}, nil
|
||||
}
|
||||
var etfs [][]storage.TagFilter
|
||||
for _, extraFilter := range extraFilters {
|
||||
tfs, err := ParseMetricSelector(extraFilter)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse extra_filters=%s: %w", extraFilter, err)
|
||||
}
|
||||
tfs = append(tfs, tagFilters...)
|
||||
etfs = append(etfs, tfs)
|
||||
}
|
||||
return etfs, nil
|
||||
}
|
||||
|
||||
// JoinTagFilterss adds etfs to every src filter and returns the result.
|
||||
func JoinTagFilterss(src, etfs [][]storage.TagFilter) [][]storage.TagFilter {
|
||||
if len(src) == 0 {
|
||||
return etfs
|
||||
}
|
||||
if len(etfs) == 0 {
|
||||
return src
|
||||
}
|
||||
var dst [][]storage.TagFilter
|
||||
for _, tf := range src {
|
||||
for _, etf := range etfs {
|
||||
tfs := append([]storage.TagFilter{}, tf...)
|
||||
tfs = append(tfs, etf...)
|
||||
dst = append(dst, tfs)
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// ParseMetricSelector parses s containing PromQL metric selector and returns the corresponding LabelFilters.
|
||||
func ParseMetricSelector(s string) ([]storage.TagFilter, error) {
|
||||
expr, err := metricsql.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
me, ok := expr.(*metricsql.MetricExpr)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expecting metricSelector; got %q", expr.AppendString(nil))
|
||||
}
|
||||
if len(me.LabelFilters) == 0 {
|
||||
return nil, fmt.Errorf("labelFilters cannot be empty")
|
||||
}
|
||||
tfs := ToTagFilters(me.LabelFilters)
|
||||
return tfs, nil
|
||||
}
|
||||
|
||||
// ToTagFilters converts lfs to a slice of storage.TagFilter
|
||||
func ToTagFilters(lfs []metricsql.LabelFilter) []storage.TagFilter {
|
||||
tfs := make([]storage.TagFilter, len(lfs))
|
||||
for i := range lfs {
|
||||
toTagFilter(&tfs[i], &lfs[i])
|
||||
}
|
||||
return tfs
|
||||
}
|
||||
|
||||
func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
|
||||
if src.Label != "__name__" {
|
||||
dst.Key = []byte(src.Label)
|
||||
} else {
|
||||
// This is required for storage.Search.
|
||||
dst.Key = nil
|
||||
}
|
||||
dst.Value = []byte(src.Value)
|
||||
dst.IsRegexp = src.IsRegexp
|
||||
dst.IsNegative = src.IsNegative
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
|
@ -80,47 +81,238 @@ func TestGetTimeError(t *testing.T) {
|
|||
f("292277025-08-18T07:12:54.999999998Z")
|
||||
}
|
||||
|
||||
// helper for tests
|
||||
func tfFromKV(k, v string) storage.TagFilter {
|
||||
return storage.TagFilter{
|
||||
Key: []byte(k),
|
||||
Value: []byte(v),
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetEnforcedTagFiltersFromRequest(t *testing.T) {
|
||||
httpReqWithForm := func(tfs []string) *http.Request {
|
||||
func TestGetExtraTagFilters(t *testing.T) {
|
||||
httpReqWithForm := func(qs string) *http.Request {
|
||||
q, err := url.ParseQuery(qs)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
return &http.Request{
|
||||
Form: map[string][]string{
|
||||
"extra_label": tfs,
|
||||
},
|
||||
Form: q,
|
||||
}
|
||||
}
|
||||
f := func(t *testing.T, r *http.Request, want []storage.TagFilter, wantErr bool) {
|
||||
f := func(t *testing.T, r *http.Request, want []string, wantErr bool) {
|
||||
t.Helper()
|
||||
got, err := GetEnforcedTagFiltersFromRequest(r)
|
||||
result, err := GetExtraTagFilters(r)
|
||||
if (err != nil) != wantErr {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
got := tagFilterssToStrings(result)
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("unxpected result for getEnforcedTagFiltersFromRequest, \ngot: %v,\n want: %v", want, got)
|
||||
t.Fatalf("unxpected result for GetExtraTagFilters\ngot: %s\nwant: %s", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
f(t, httpReqWithForm([]string{"label=value"}),
|
||||
[]storage.TagFilter{
|
||||
tfFromKV("label", "value"),
|
||||
},
|
||||
false)
|
||||
|
||||
f(t, httpReqWithForm([]string{"job=vmagent", "dc=gce"}),
|
||||
[]storage.TagFilter{tfFromKV("job", "vmagent"), tfFromKV("dc", "gce")},
|
||||
f(t, httpReqWithForm("extra_label=label=value"),
|
||||
[]string{`{label="value"}`},
|
||||
false,
|
||||
)
|
||||
f(t, httpReqWithForm([]string{"bad_filter"}),
|
||||
f(t, httpReqWithForm("extra_label=job=vmagent&extra_label=dc=gce"),
|
||||
[]string{`{job="vmagent",dc="gce"}`},
|
||||
false,
|
||||
)
|
||||
f(t, httpReqWithForm(`extra_filters={foo="bar"}`),
|
||||
[]string{`{foo="bar"}`},
|
||||
false,
|
||||
)
|
||||
f(t, httpReqWithForm(`extra_filters={foo="bar"}&extra_filters[]={baz!~"aa",x=~"y"}`),
|
||||
[]string{
|
||||
`{foo="bar"}`,
|
||||
`{baz!~"aa",x=~"y"}`,
|
||||
},
|
||||
false,
|
||||
)
|
||||
f(t, httpReqWithForm(`extra_label=job=vmagent&extra_label=dc=gce&extra_filters={foo="bar"}`),
|
||||
[]string{`{foo="bar",job="vmagent",dc="gce"}`},
|
||||
false,
|
||||
)
|
||||
f(t, httpReqWithForm(`extra_label=job=vmagent&extra_label=dc=gce&extra_filters[]={foo="bar"}&extra_filters[]={x=~"y|z",a="b"}`),
|
||||
[]string{
|
||||
`{foo="bar",job="vmagent",dc="gce"}`,
|
||||
`{x=~"y|z",a="b",job="vmagent",dc="gce"}`,
|
||||
},
|
||||
false,
|
||||
)
|
||||
f(t, httpReqWithForm("extra_label=bad_filter"),
|
||||
nil,
|
||||
true,
|
||||
)
|
||||
f(t, &http.Request{},
|
||||
nil, false)
|
||||
f(t, httpReqWithForm(`extra_filters={bad_filter}`),
|
||||
nil,
|
||||
true,
|
||||
)
|
||||
f(t, httpReqWithForm(`extra_filters[]={bad_filter}`),
|
||||
nil,
|
||||
true,
|
||||
)
|
||||
f(t, httpReqWithForm(""),
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
func TestParseMetricSelectorSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
tfs, err := ParseMetricSelector(s)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when parsing %q: %s", s, err)
|
||||
}
|
||||
if tfs == nil {
|
||||
t.Fatalf("expecting non-nil tfs when parsing %q", s)
|
||||
}
|
||||
}
|
||||
f("foo")
|
||||
f(":foo")
|
||||
f(" :fo:bar.baz")
|
||||
f(`a{}`)
|
||||
f(`{foo="bar"}`)
|
||||
f(`{:f:oo=~"bar.+"}`)
|
||||
f(`foo {bar != "baz"}`)
|
||||
f(` foo { bar !~ "^ddd(x+)$", a="ss", __name__="sffd"} `)
|
||||
f(`(foo)`)
|
||||
f(`\п\р\и\в\е\т{\ы="111"}`)
|
||||
}
|
||||
|
||||
func TestParseMetricSelectorError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
tfs, err := ParseMetricSelector(s)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
}
|
||||
if tfs != nil {
|
||||
t.Fatalf("expecting nil tfs when parsing %q", s)
|
||||
}
|
||||
}
|
||||
f("")
|
||||
f(`{}`)
|
||||
f(`foo bar`)
|
||||
f(`foo+bar`)
|
||||
f(`sum(bar)`)
|
||||
f(`x{y}`)
|
||||
f(`x{y+z}`)
|
||||
f(`foo[5m]`)
|
||||
f(`foo offset 5m`)
|
||||
}
|
||||
|
||||
func TestJoinTagFilterss(t *testing.T) {
|
||||
f := func(t *testing.T, src, etfs [][]storage.TagFilter, want []string) {
|
||||
t.Helper()
|
||||
result := JoinTagFilterss(src, etfs)
|
||||
got := tagFilterssToStrings(result)
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("unxpected result for JoinTagFilterss\ngot: %s\nwant: %v", got, want)
|
||||
}
|
||||
}
|
||||
// Single tag filter
|
||||
f(t, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||
}, nil, []string{
|
||||
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
|
||||
})
|
||||
// Miltiple tag filters
|
||||
f(t, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||
}, nil, []string{
|
||||
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
|
||||
`{k5=~"v5"}`,
|
||||
})
|
||||
// Single extra filter
|
||||
f(t, nil, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||
}, []string{
|
||||
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
|
||||
})
|
||||
// Multiple extra filters
|
||||
f(t, nil, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||
}, []string{
|
||||
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
|
||||
`{k5=~"v5"}`,
|
||||
})
|
||||
// Single tag filter and a single extra filter
|
||||
f(t, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||
}, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||
}, []string{
|
||||
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k5=~"v5"}`,
|
||||
})
|
||||
// Multiple tag filters and a single extra filter
|
||||
f(t, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||
}, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k6=~"v6"}`),
|
||||
}, []string{
|
||||
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
|
||||
`{k5=~"v5",k6=~"v6"}`,
|
||||
})
|
||||
// Single tag filter and multiple extra filters
|
||||
f(t, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||
}, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||
mustParseMetricSelector(`{k6=~"v6"}`),
|
||||
}, []string{
|
||||
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k5=~"v5"}`,
|
||||
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
|
||||
})
|
||||
// Multiple tag filters and multiple extra filters
|
||||
f(t, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
|
||||
mustParseMetricSelector(`{k5=~"v5"}`),
|
||||
}, [][]storage.TagFilter{
|
||||
mustParseMetricSelector(`{k6=~"v6"}`),
|
||||
mustParseMetricSelector(`{k7=~"v7"}`),
|
||||
}, []string{
|
||||
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
|
||||
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k7=~"v7"}`,
|
||||
`{k5=~"v5",k6=~"v6"}`,
|
||||
`{k5=~"v5",k7=~"v7"}`,
|
||||
})
|
||||
}
|
||||
|
||||
func mustParseMetricSelector(s string) []storage.TagFilter {
|
||||
tf, err := ParseMetricSelector(s)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot parse %q: %w", s, err))
|
||||
}
|
||||
return tf
|
||||
}
|
||||
|
||||
func tagFilterssToStrings(tfss [][]storage.TagFilter) []string {
|
||||
var a []string
|
||||
for _, tfs := range tfss {
|
||||
a = append(a, tagFiltersToString(tfs))
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
func tagFiltersToString(tfs []storage.TagFilter) string {
|
||||
b := []byte("{")
|
||||
for i, tf := range tfs {
|
||||
b = append(b, tf.Key...)
|
||||
if tf.IsNegative {
|
||||
if tf.IsRegexp {
|
||||
b = append(b, "!~"...)
|
||||
} else {
|
||||
b = append(b, "!="...)
|
||||
}
|
||||
} else {
|
||||
if tf.IsRegexp {
|
||||
b = append(b, "=~"...)
|
||||
} else {
|
||||
b = append(b, "="...)
|
||||
}
|
||||
}
|
||||
b = strconv.AppendQuote(b, string(tf.Value))
|
||||
if i+1 < len(tfs) {
|
||||
b = append(b, ',')
|
||||
}
|
||||
}
|
||||
b = append(b, '}')
|
||||
return string(b)
|
||||
}
|
||||
|
|
|
@ -1,19 +1,19 @@
|
|||
{
|
||||
"files": {
|
||||
"main.css": "./static/css/main.674f8c98.chunk.css",
|
||||
"main.js": "./static/js/main.f4cab8bc.chunk.js",
|
||||
"runtime-main.js": "./static/js/runtime-main.f698388d.js",
|
||||
"main.css": "./static/css/main.83d9ae2d.chunk.css",
|
||||
"main.js": "./static/js/main.6651c49c.chunk.js",
|
||||
"runtime-main.js": "./static/js/runtime-main.c4b656b8.js",
|
||||
"static/css/2.77671664.chunk.css": "./static/css/2.77671664.chunk.css",
|
||||
"static/js/2.bfcf9c30.chunk.js": "./static/js/2.bfcf9c30.chunk.js",
|
||||
"static/js/3.e51afffb.chunk.js": "./static/js/3.e51afffb.chunk.js",
|
||||
"static/js/2.ef1db8c8.chunk.js": "./static/js/2.ef1db8c8.chunk.js",
|
||||
"static/js/3.65648506.chunk.js": "./static/js/3.65648506.chunk.js",
|
||||
"index.html": "./index.html",
|
||||
"static/js/2.bfcf9c30.chunk.js.LICENSE.txt": "./static/js/2.bfcf9c30.chunk.js.LICENSE.txt"
|
||||
"static/js/2.ef1db8c8.chunk.js.LICENSE.txt": "./static/js/2.ef1db8c8.chunk.js.LICENSE.txt"
|
||||
},
|
||||
"entrypoints": [
|
||||
"static/js/runtime-main.f698388d.js",
|
||||
"static/js/runtime-main.c4b656b8.js",
|
||||
"static/css/2.77671664.chunk.css",
|
||||
"static/js/2.bfcf9c30.chunk.js",
|
||||
"static/css/main.674f8c98.chunk.css",
|
||||
"static/js/main.f4cab8bc.chunk.js"
|
||||
"static/js/2.ef1db8c8.chunk.js",
|
||||
"static/css/main.83d9ae2d.chunk.css",
|
||||
"static/js/main.6651c49c.chunk.js"
|
||||
]
|
||||
}
|
|
@ -1 +1 @@
|
|||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/2.77671664.chunk.css" rel="stylesheet"><link href="./static/css/main.674f8c98.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],f=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(l&&l(r);p.length;)p.shift()();return u.push.apply(u,f||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"e51afffb"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(f);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": 
"+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var f=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var f=0;f<a.length;f++)r(a[f]);var l=c;t()}([])</script><script src="./static/js/2.bfcf9c30.chunk.js"></script><script src="./static/js/main.f4cab8bc.chunk.js"></script></body></html>
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/2.77671664.chunk.css" rel="stylesheet"><link href="./static/css/main.83d9ae2d.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"65648506"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": 
"+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([])</script><script src="./static/js/2.ef1db8c8.chunk.js"></script><script src="./static/js/main.6651c49c.chunk.js"></script></body></html>
|
|
@ -1 +0,0 @@
|
|||
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,"Courier New",monospace}.MuiAccordionSummary-content{margin:10px 0!important}.cm-activeLine{background-color:inherit!important}.cm-editor{border-radius:4px;border:1px solid #b9b9b9;font-size:10px}.one-line-scroll .cm-editor{height:24px}.cm-gutters{border-radius:4px 0 0 4px;height:100%}.multi-line-scroll .cm-content,.multi-line-scroll .cm-gutters{min-height:64px!important}.one-line-scroll .cm-content,.one-line-scroll .cm-gutters{min-height:auto}.u-tooltip{position:absolute;display:none;grid-gap:12px;max-width:300px;padding:8px;border-radius:4px;background:rgba(57,57,57,.9);color:#fff;font-size:10px;line-height:1.4em;font-weight:500;word-wrap:break-word;font-family:monospace;pointer-events:none;z-index:100}.u-tooltip-data{display:flex;flex-wrap:wrap;align-items:center;font-size:11px;line-height:150%}.u-tooltip-data__value{padding:4px;font-weight:700}.u-tooltip__info{display:grid;grid-gap:4px}.u-tooltip__marker{width:12px;height:12px;margin-right:4px}.legendWrapper{margin-top:20px}.legendItem{display:inline-grid;grid-template-columns:auto auto;grid-gap:4px;align-items:center;justify-content:start;padding:5px 10px;background-color:#fff;cursor:pointer;transition:.2s ease}.legendItemHide{text-decoration:line-through;opacity:.5}.legendItem:hover{background-color:rgba(0,0,0,.1)}.legendMarker{width:12px;height:12px;border-width:2px;border-style:solid;box-sizing:border-box;transition:.2s ease}.legendLabel{font-size:12px;font-weight:600}
|
1
app/vmselect/vmui/static/css/main.83d9ae2d.chunk.css
Normal file
1
app/vmselect/vmui/static/css/main.83d9ae2d.chunk.css
Normal file
|
@ -0,0 +1 @@
|
|||
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,"Courier New",monospace}.MuiAccordionSummary-content{margin:0!important}.cm-activeLine{background-color:inherit!important}.cm-editor{border:none;border-radius:4px;font-size:10px}.cm-gutters{border-radius:4px 0 0 4px;height:100%;overflow:hidden;border:none!important}.cm-activeLineGutter,.cm-gutters{background-color:#fff!important}.query-editor .cm-scroller{align-items:center!important}.query-editor .cm-editor.cm-focused{outline:none}.query-editor-container{position:relative;padding:12px;border:1px solid #b9b9b9;border-radius:4px}.query-editor-container_focus{border:1px solid #3f51b5}.query-editor-container_error{border-color:#ff4141}.query-editor-container-one-line .query-editor .cm-editor{height:22px}.query-editor-container-one-line{padding:6px}.query-editor-label{font-weight:400;font-size:12px;line-height:1;letter-spacing:normal;color:rgba(0,0,0,.6);padding:0 5px;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;max-width:calc(133% - 24px);position:absolute;left:4px;top:-.71875em;z-index:1;background-color:#fff;-webkit-transform:scale(.75);transform:scale(.75)}.query-editor-container_error 
.query-editor-label{color:#ff4141}.u-tooltip{position:absolute;display:none;grid-gap:12px;max-width:300px;padding:8px;border-radius:4px;background:rgba(57,57,57,.9);color:#fff;font-size:10px;line-height:1.4em;font-weight:500;word-wrap:break-word;font-family:monospace;pointer-events:none;z-index:100}.u-tooltip-data{display:flex;flex-wrap:wrap;align-items:center;font-size:11px;line-height:150%}.u-tooltip-data__value{padding:4px;font-weight:700}.u-tooltip__info{display:grid;grid-gap:4px}.u-tooltip__marker{width:12px;height:12px;margin-right:4px}.legendWrapper{display:grid;grid-template-columns:repeat(auto-fit,minmax(400px,1fr));grid-gap:20px;margin-top:20px;cursor:default}.legendGroup{margin-bottom:24px}.legendGroupTitle{display:flex;align-items:center;padding:10px 0 5px;font-size:11px}.legendGroupLine{margin:0 10px}.legendItem{display:inline-grid;grid-template-columns:auto auto;grid-gap:6px;align-items:start;justify-content:start;padding:5px 10px;background-color:#fff;cursor:pointer;transition:.2s ease}.legendItemHide{text-decoration:line-through;opacity:.5}.legendItem:hover{background-color:rgba(0,0,0,.1)}.legendMarker{width:12px;height:12px;border-width:2px;border-style:solid;box-sizing:border-box;transition:.2s ease;margin:3px 0}.legendLabel{font-size:11px;font-weight:400}
|
File diff suppressed because one or more lines are too long
2
app/vmselect/vmui/static/js/2.ef1db8c8.chunk.js
Normal file
2
app/vmselect/vmui/static/js/2.ef1db8c8.chunk.js
Normal file
File diff suppressed because one or more lines are too long
|
@ -1 +1 @@
|
|||
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{351:function(e,t,n){"use strict";n.r(t),n.d(t,"getCLS",(function(){return y})),n.d(t,"getFCP",(function(){return g})),n.d(t,"getFID",(function(){return C})),n.d(t,"getLCP",(function(){return k})),n.d(t,"getTTFB",(function(){return D}));var i,r,a,o,u=function(e,t){return{name:e,value:void 0===t?-1:t,delta:0,entries:[],id:"v2-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)}},c=function(e,t){try{if(PerformanceObserver.supportedEntryTypes.includes(e)){if("first-input"===e&&!("PerformanceEventTiming"in self))return;var n=new PerformanceObserver((function(e){return e.getEntries().map(t)}));return n.observe({type:e,buffered:!0}),n}}catch(e){}},f=function(e,t){var n=function n(i){"pagehide"!==i.type&&"hidden"!==document.visibilityState||(e(i),t&&(removeEventListener("visibilitychange",n,!0),removeEventListener("pagehide",n,!0)))};addEventListener("visibilitychange",n,!0),addEventListener("pagehide",n,!0)},s=function(e){addEventListener("pageshow",(function(t){t.persisted&&e(t)}),!0)},m=function(e,t,n){var i;return function(r){t.value>=0&&(r||n)&&(t.delta=t.value-(i||0),(t.delta||void 0===i)&&(i=t.value,e(t)))}},v=-1,p=function(){return"hidden"===document.visibilityState?0:1/0},d=function(){f((function(e){var t=e.timeStamp;v=t}),!0)},l=function(){return v<0&&(v=p(),d(),s((function(){setTimeout((function(){v=p(),d()}),0)}))),{get firstHiddenTime(){return v}}},g=function(e,t){var 
n,i=l(),r=u("FCP"),a=function(e){"first-contentful-paint"===e.name&&(f&&f.disconnect(),e.startTime<i.firstHiddenTime&&(r.value=e.startTime,r.entries.push(e),n(!0)))},o=window.performance&&performance.getEntriesByName&&performance.getEntriesByName("first-contentful-paint")[0],f=o?null:c("paint",a);(o||f)&&(n=m(e,r,t),o&&a(o),s((function(i){r=u("FCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,n(!0)}))}))})))},h=!1,T=-1,y=function(e,t){h||(g((function(e){T=e.value})),h=!0);var n,i=function(t){T>-1&&e(t)},r=u("CLS",0),a=0,o=[],v=function(e){if(!e.hadRecentInput){var t=o[0],i=o[o.length-1];a&&e.startTime-i.startTime<1e3&&e.startTime-t.startTime<5e3?(a+=e.value,o.push(e)):(a=e.value,o=[e]),a>r.value&&(r.value=a,r.entries=o,n())}},p=c("layout-shift",v);p&&(n=m(i,r,t),f((function(){p.takeRecords().map(v),n(!0)})),s((function(){a=0,T=-1,r=u("CLS",0),n=m(i,r,t)})))},E={passive:!0,capture:!0},w=new Date,L=function(e,t){i||(i=t,r=e,a=new Date,F(removeEventListener),S())},S=function(){if(r>=0&&r<a-w){var e={entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+r};o.forEach((function(t){t(e)})),o=[]}},b=function(e){if(e.cancelable){var t=(e.timeStamp>1e12?new Date:performance.now())-e.timeStamp;"pointerdown"==e.type?function(e,t){var n=function(){L(e,t),r()},i=function(){r()},r=function(){removeEventListener("pointerup",n,E),removeEventListener("pointercancel",i,E)};addEventListener("pointerup",n,E),addEventListener("pointercancel",i,E)}(t,e):L(t,e)}},F=function(e){["mousedown","keydown","touchstart","pointerdown"].forEach((function(t){return e(t,b,E)}))},C=function(e,t){var n,a=l(),v=u("FID"),p=function(e){e.startTime<a.firstHiddenTime&&(v.value=e.processingStart-e.startTime,v.entries.push(e),n(!0))},d=c("first-input",p);n=m(e,v,t),d&&f((function(){d.takeRecords().map(p),d.disconnect()}),!0),d&&s((function(){var 
a;v=u("FID"),n=m(e,v,t),o=[],r=-1,i=null,F(addEventListener),a=p,o.push(a),S()}))},P={},k=function(e,t){var n,i=l(),r=u("LCP"),a=function(e){var t=e.startTime;t<i.firstHiddenTime&&(r.value=t,r.entries.push(e)),n()},o=c("largest-contentful-paint",a);if(o){n=m(e,r,t);var v=function(){P[r.id]||(o.takeRecords().map(a),o.disconnect(),P[r.id]=!0,n(!0))};["keydown","click"].forEach((function(e){addEventListener(e,v,{once:!0,capture:!0})})),f(v,!0),s((function(i){r=u("LCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,P[r.id]=!0,n(!0)}))}))}))}},D=function(e){var t,n=u("TTFB");t=function(){try{var t=performance.getEntriesByType("navigation")[0]||function(){var e=performance.timing,t={entryType:"navigation",startTime:0};for(var n in e)"navigationStart"!==n&&"toJSON"!==n&&(t[n]=Math.max(e[n]-e.navigationStart,0));return t}();if(n.value=n.delta=t.responseStart,n.value<0||n.value>performance.now())return;n.entries=[t],e(n)}catch(e){}},"complete"===document.readyState?setTimeout(t,0):addEventListener("pageshow",t)}}}]);
|
||||
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{356:function(e,t,n){"use strict";n.r(t),n.d(t,"getCLS",(function(){return y})),n.d(t,"getFCP",(function(){return g})),n.d(t,"getFID",(function(){return C})),n.d(t,"getLCP",(function(){return k})),n.d(t,"getTTFB",(function(){return D}));var i,r,a,o,u=function(e,t){return{name:e,value:void 0===t?-1:t,delta:0,entries:[],id:"v2-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)}},c=function(e,t){try{if(PerformanceObserver.supportedEntryTypes.includes(e)){if("first-input"===e&&!("PerformanceEventTiming"in self))return;var n=new PerformanceObserver((function(e){return e.getEntries().map(t)}));return n.observe({type:e,buffered:!0}),n}}catch(e){}},f=function(e,t){var n=function n(i){"pagehide"!==i.type&&"hidden"!==document.visibilityState||(e(i),t&&(removeEventListener("visibilitychange",n,!0),removeEventListener("pagehide",n,!0)))};addEventListener("visibilitychange",n,!0),addEventListener("pagehide",n,!0)},s=function(e){addEventListener("pageshow",(function(t){t.persisted&&e(t)}),!0)},m=function(e,t,n){var i;return function(r){t.value>=0&&(r||n)&&(t.delta=t.value-(i||0),(t.delta||void 0===i)&&(i=t.value,e(t)))}},v=-1,p=function(){return"hidden"===document.visibilityState?0:1/0},d=function(){f((function(e){var t=e.timeStamp;v=t}),!0)},l=function(){return v<0&&(v=p(),d(),s((function(){setTimeout((function(){v=p(),d()}),0)}))),{get firstHiddenTime(){return v}}},g=function(e,t){var 
n,i=l(),r=u("FCP"),a=function(e){"first-contentful-paint"===e.name&&(f&&f.disconnect(),e.startTime<i.firstHiddenTime&&(r.value=e.startTime,r.entries.push(e),n(!0)))},o=window.performance&&performance.getEntriesByName&&performance.getEntriesByName("first-contentful-paint")[0],f=o?null:c("paint",a);(o||f)&&(n=m(e,r,t),o&&a(o),s((function(i){r=u("FCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,n(!0)}))}))})))},h=!1,T=-1,y=function(e,t){h||(g((function(e){T=e.value})),h=!0);var n,i=function(t){T>-1&&e(t)},r=u("CLS",0),a=0,o=[],v=function(e){if(!e.hadRecentInput){var t=o[0],i=o[o.length-1];a&&e.startTime-i.startTime<1e3&&e.startTime-t.startTime<5e3?(a+=e.value,o.push(e)):(a=e.value,o=[e]),a>r.value&&(r.value=a,r.entries=o,n())}},p=c("layout-shift",v);p&&(n=m(i,r,t),f((function(){p.takeRecords().map(v),n(!0)})),s((function(){a=0,T=-1,r=u("CLS",0),n=m(i,r,t)})))},E={passive:!0,capture:!0},w=new Date,L=function(e,t){i||(i=t,r=e,a=new Date,F(removeEventListener),S())},S=function(){if(r>=0&&r<a-w){var e={entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+r};o.forEach((function(t){t(e)})),o=[]}},b=function(e){if(e.cancelable){var t=(e.timeStamp>1e12?new Date:performance.now())-e.timeStamp;"pointerdown"==e.type?function(e,t){var n=function(){L(e,t),r()},i=function(){r()},r=function(){removeEventListener("pointerup",n,E),removeEventListener("pointercancel",i,E)};addEventListener("pointerup",n,E),addEventListener("pointercancel",i,E)}(t,e):L(t,e)}},F=function(e){["mousedown","keydown","touchstart","pointerdown"].forEach((function(t){return e(t,b,E)}))},C=function(e,t){var n,a=l(),v=u("FID"),p=function(e){e.startTime<a.firstHiddenTime&&(v.value=e.processingStart-e.startTime,v.entries.push(e),n(!0))},d=c("first-input",p);n=m(e,v,t),d&&f((function(){d.takeRecords().map(p),d.disconnect()}),!0),d&&s((function(){var 
a;v=u("FID"),n=m(e,v,t),o=[],r=-1,i=null,F(addEventListener),a=p,o.push(a),S()}))},P={},k=function(e,t){var n,i=l(),r=u("LCP"),a=function(e){var t=e.startTime;t<i.firstHiddenTime&&(r.value=t,r.entries.push(e)),n()},o=c("largest-contentful-paint",a);if(o){n=m(e,r,t);var v=function(){P[r.id]||(o.takeRecords().map(a),o.disconnect(),P[r.id]=!0,n(!0))};["keydown","click"].forEach((function(e){addEventListener(e,v,{once:!0,capture:!0})})),f(v,!0),s((function(i){r=u("LCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,P[r.id]=!0,n(!0)}))}))}))}},D=function(e){var t,n=u("TTFB");t=function(){try{var t=performance.getEntriesByType("navigation")[0]||function(){var e=performance.timing,t={entryType:"navigation",startTime:0};for(var n in e)"navigationStart"!==n&&"toJSON"!==n&&(t[n]=Math.max(e[n]-e.navigationStart,0));return t}();if(n.value=n.delta=t.responseStart,n.value<0||n.value>performance.now())return;n.entries=[t],e(n)}catch(e){}},"complete"===document.readyState?setTimeout(t,0):addEventListener("pageshow",t)}}}]);
|
1
app/vmselect/vmui/static/js/main.6651c49c.chunk.js
Normal file
1
app/vmselect/vmui/static/js/main.6651c49c.chunk.js
Normal file
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -1 +1 @@
|
|||
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],f=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(l&&l(r);p.length;)p.shift()();return u.push.apply(u,f||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"e51afffb"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(f);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var f=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return 
e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var f=0;f<a.length;f++)r(a[f]);var l=c;t()}([]);
|
||||
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"65648506"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return 
e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([]);
|
|
@ -2,19 +2,6 @@
|
|||
|
||||
Web UI for VictoriaMetrics
|
||||
|
||||
Features:
|
||||
|
||||
- configurable Server URL
|
||||
- configurable time range - every variant have own resolution to show around 30 data points
|
||||
- query editor has basic highlighting and can be multi-line
|
||||
- chart is responsive by width
|
||||
- color assignment for series is automatic
|
||||
- legend with reduced naming
|
||||
- tooltips for closest data point
|
||||
- auto-refresh mode with several time interval presets
|
||||
- table and raw JSON Query viewer
|
||||
|
||||
|
||||
## Docker image build
|
||||
|
||||
Run the following command from the root of VictoriaMetrics repository in order to build `victoriametrics/vmui` Docker image:
|
||||
|
@ -65,4 +52,4 @@ Then run the built binary with the following command:
|
|||
bin/victoria-metrics -selfScrapeInterval=5s
|
||||
```
|
||||
|
||||
Then navigate to `http://localhost:8428/vmui/`
|
||||
Then navigate to `http://localhost:8428/vmui/`. See [these docs](https://docs.victoriametrics.com/#vmui) for more details.
|
||||
|
|
1053
app/vmui/packages/vmui/package-lock.json
generated
1053
app/vmui/packages/vmui/package-lock.json
generated
File diff suppressed because it is too large
Load diff
|
@ -6,25 +6,25 @@
|
|||
"dependencies": {
|
||||
"@codemirror/autocomplete": "^0.19.9",
|
||||
"@codemirror/basic-setup": "^0.19.0",
|
||||
"@codemirror/commands": "^0.19.5",
|
||||
"@codemirror/commands": "^0.19.6",
|
||||
"@codemirror/highlight": "^0.19.6",
|
||||
"@codemirror/state": "^0.19.6",
|
||||
"@codemirror/view": "^0.19.21",
|
||||
"@codemirror/view": "^0.19.29",
|
||||
"@date-io/dayjs": "^2.11.0",
|
||||
"@emotion/react": "^11.7.0",
|
||||
"@emotion/react": "^11.7.1",
|
||||
"@emotion/styled": "^11.6.0",
|
||||
"@mui/icons-material": "^5.2.0",
|
||||
"@mui/lab": "^5.0.0-alpha.58",
|
||||
"@mui/material": "^5.2.2",
|
||||
"@mui/styles": "^5.2.2",
|
||||
"@testing-library/jest-dom": "^5.15.1",
|
||||
"@mui/icons-material": "^5.2.1",
|
||||
"@mui/lab": "^5.0.0-alpha.59",
|
||||
"@mui/material": "^5.2.3",
|
||||
"@mui/styles": "^5.2.3",
|
||||
"@testing-library/jest-dom": "^5.16.1",
|
||||
"@testing-library/react": "^12.1.2",
|
||||
"@testing-library/user-event": "^13.5.0",
|
||||
"@types/jest": "^27.0.3",
|
||||
"@types/lodash.debounce": "^4.0.6",
|
||||
"@types/lodash.get": "^4.4.6",
|
||||
"@types/lodash.throttle": "^4.1.6",
|
||||
"@types/node": "^16.11.10",
|
||||
"@types/node": "^16.11.12",
|
||||
"@types/numeral": "^2.0.2",
|
||||
"@types/qs": "^6.9.7",
|
||||
"@types/react": "^17.0.37",
|
||||
|
@ -36,12 +36,13 @@
|
|||
"lodash.get": "^4.4.2",
|
||||
"lodash.throttle": "^4.1.1",
|
||||
"numeral": "^2.0.6",
|
||||
"qs": "^6.10.1",
|
||||
"qs": "^6.10.2",
|
||||
"react": "^17.0.2",
|
||||
"react-dom": "^17.0.2",
|
||||
"react-draggable": "^4.4.4",
|
||||
"react-measure": "^2.5.2",
|
||||
"react-scripts": "4.0.3",
|
||||
"typescript": "~4.5.2",
|
||||
"typescript": "~4.5.3",
|
||||
"uplot": "^1.6.17",
|
||||
"web-vitals": "^2.1.2"
|
||||
},
|
||||
|
@ -73,8 +74,8 @@
|
|||
},
|
||||
"devDependencies": {
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.4.0",
|
||||
"@typescript-eslint/parser": "^5.4.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.6.0",
|
||||
"@typescript-eslint/parser": "^5.6.0",
|
||||
"customize-cra": "^1.0.0",
|
||||
"eslint-plugin-react": "^7.27.1",
|
||||
"react-app-rewired": "^2.1.8"
|
||||
|
|
|
@ -4,44 +4,15 @@ import HomeLayout from "./components/Home/HomeLayout";
|
|||
import {StateProvider} from "./state/common/StateContext";
|
||||
import {AuthStateProvider} from "./state/auth/AuthStateContext";
|
||||
import {GraphStateProvider} from "./state/graph/GraphStateContext";
|
||||
import { ThemeProvider, Theme, StyledEngineProvider, createTheme } from "@mui/material/styles";
|
||||
|
||||
import { ThemeProvider, StyledEngineProvider } from "@mui/material/styles";
|
||||
import THEME from "./theme/theme";
|
||||
import CssBaseline from "@mui/material/CssBaseline";
|
||||
|
||||
import LocalizationProvider from "@mui/lab/LocalizationProvider";
|
||||
// pick a date util library
|
||||
import DayjsUtils from "@date-io/dayjs";
|
||||
|
||||
|
||||
declare module "@mui/styles/defaultTheme" {
|
||||
// eslint-disable-next-line @typescript-eslint/no-empty-interface
|
||||
interface DefaultTheme extends Theme {}
|
||||
}
|
||||
|
||||
|
||||
const App: FC = () => {
|
||||
|
||||
const THEME = createTheme({
|
||||
palette: {
|
||||
primary: {
|
||||
main: "#3F51B5"
|
||||
},
|
||||
secondary: {
|
||||
main: "#F50057"
|
||||
}
|
||||
},
|
||||
components: {
|
||||
MuiSwitch: {
|
||||
defaultProps: {
|
||||
color: "secondary"
|
||||
}
|
||||
}
|
||||
},
|
||||
typography: {
|
||||
"fontSize": 10
|
||||
}
|
||||
});
|
||||
|
||||
return <>
|
||||
<CssBaseline /> {/* CSS Baseline: kind of normalize.css made by materialUI team - can be scoped */}
|
||||
<LocalizationProvider dateAdapter={DayjsUtils}> {/* Allows datepicker to work with DayJS */}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
export interface MetricBase {
|
||||
group: number;
|
||||
metric: {
|
||||
[key: string]: string;
|
||||
};
|
||||
|
|
|
@ -26,8 +26,8 @@ import TabPanel from "./AuthTabPanel";
|
|||
import PersonIcon from "@mui/icons-material/Person";
|
||||
import LockIcon from "@mui/icons-material/Lock";
|
||||
import makeStyles from "@mui/styles/makeStyles";
|
||||
import {useAuthDispatch, useAuthState} from "../../../state/auth/AuthStateContext";
|
||||
import {AUTH_METHOD, WithCheckbox} from "../../../state/auth/reducer";
|
||||
import {useAuthDispatch, useAuthState} from "../../../../state/auth/AuthStateContext";
|
||||
import {AUTH_METHOD, WithCheckbox} from "../../../../state/auth/reducer";
|
||||
|
||||
// TODO: make generic when creating second dialog
|
||||
export interface DialogProps {
|
|
@ -0,0 +1,42 @@
|
|||
import React, {FC, useCallback, useMemo} from "react";
|
||||
import {Box, FormControlLabel, TextField} from "@mui/material";
|
||||
import {useGraphDispatch, useGraphState} from "../../../../state/graph/GraphStateContext";
|
||||
import debounce from "lodash.debounce";
|
||||
import BasicSwitch from "../../../../theme/switch";
|
||||
|
||||
const AxesLimitsConfigurator: FC = () => {
|
||||
|
||||
const { yaxis } = useGraphState();
|
||||
const graphDispatch = useGraphDispatch();
|
||||
const axes = useMemo(() => Object.keys(yaxis.limits.range), [yaxis.limits.range]);
|
||||
|
||||
const onChangeYaxisLimits = () => { graphDispatch({type: "TOGGLE_ENABLE_YAXIS_LIMITS"}); };
|
||||
|
||||
const onChangeLimit = (e: React.ChangeEvent<HTMLInputElement | HTMLTextAreaElement>, axis: string, index: number) => {
|
||||
const newLimits = yaxis.limits.range;
|
||||
newLimits[axis][index] = +e.target.value;
|
||||
graphDispatch({type: "SET_YAXIS_LIMITS", payload: newLimits});
|
||||
};
|
||||
const debouncedOnChangeLimit = useCallback(debounce(onChangeLimit, 500), [yaxis.limits.range]);
|
||||
|
||||
return <Box display="grid" alignItems="center" gap={2}>
|
||||
<FormControlLabel
|
||||
control={<BasicSwitch checked={yaxis.limits.enable} onChange={onChangeYaxisLimits}/>}
|
||||
label="Fix the limits for y-axis"
|
||||
/>
|
||||
<Box display="grid" alignItems="center" gap={2}>
|
||||
{axes.map(axis => <Box display="grid" gridTemplateColumns="120px 120px" gap={1} key={axis}>
|
||||
<TextField label={`Min ${axis}`} type="number" size="small" variant="outlined"
|
||||
disabled={!yaxis.limits.enable}
|
||||
defaultValue={yaxis.limits.range[axis][0]}
|
||||
onChange={(e) => debouncedOnChangeLimit(e, axis, 0)}/>
|
||||
<TextField label={`Max ${axis}`} type="number" size="small" variant="outlined"
|
||||
disabled={!yaxis.limits.enable}
|
||||
defaultValue={yaxis.limits.range[axis][1]}
|
||||
onChange={(e) => debouncedOnChangeLimit(e, axis, 1)} />
|
||||
</Box>)}
|
||||
</Box>
|
||||
</Box>;
|
||||
};
|
||||
|
||||
export default AxesLimitsConfigurator;
|
|
@ -0,0 +1,64 @@
|
|||
import SettingsIcon from "@mui/icons-material/Settings";
|
||||
import React, {FC, useState, useRef} from "react";
|
||||
import AxesLimitsConfigurator from "./AxesLimitsConfigurator";
|
||||
import {Box, Button, IconButton, Paper, Typography} from "@mui/material";
|
||||
import Draggable from "react-draggable";
|
||||
import makeStyles from "@mui/styles/makeStyles";
|
||||
import CloseIcon from "@mui/icons-material/Close";
|
||||
|
||||
const useStyles = makeStyles({
|
||||
popover: {
|
||||
position: "absolute",
|
||||
display: "grid",
|
||||
gridGap: "16px",
|
||||
padding: "0 0 25px",
|
||||
zIndex: 2,
|
||||
},
|
||||
popoverHeader: {
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
justifyContent: "space-between",
|
||||
background: "#3F51B5",
|
||||
padding: "6px 6px 6px 12px",
|
||||
borderRadius: "4px 4px 0 0",
|
||||
color: "#FFF",
|
||||
cursor: "move",
|
||||
},
|
||||
popoverBody: {
|
||||
display: "grid",
|
||||
gridGap: "6px",
|
||||
padding: "0 14px",
|
||||
}
|
||||
});
|
||||
|
||||
const GraphSettings: FC = () => {
|
||||
const [open, setOpen] = useState(false);
|
||||
const draggableRef = useRef<HTMLDivElement>(null);
|
||||
const position = { x: 173, y: 0 };
|
||||
|
||||
const classes = useStyles();
|
||||
|
||||
return <Box display="flex" px={2}>
|
||||
<Button onClick={() => setOpen((old) => !old)} variant="outlined">
|
||||
<SettingsIcon sx={{fontSize: 16, marginRight: "4px"}}/>
|
||||
<span style={{lineHeight: 1, paddingTop: "1px"}}>{open ? "Hide" : "Show"} graph settings</span>
|
||||
</Button>
|
||||
{open && (
|
||||
<Draggable nodeRef={draggableRef} defaultPosition={position} handle="#handle">
|
||||
<Paper elevation={3} className={classes.popover} ref={draggableRef}>
|
||||
<div id="handle" className={classes.popoverHeader}>
|
||||
<Typography variant="body1"><b>Graph Settings</b></Typography>
|
||||
<IconButton size="small" onClick={() => setOpen(false)}>
|
||||
<CloseIcon style={{color: "white"}}/>
|
||||
</IconButton>
|
||||
</div>
|
||||
<Box className={classes.popoverBody}>
|
||||
<AxesLimitsConfigurator/>
|
||||
</Box>
|
||||
</Paper>
|
||||
</Draggable>
|
||||
)}
|
||||
</Box>;
|
||||
};
|
||||
|
||||
export default GraphSettings;
|
|
@ -0,0 +1,40 @@
|
|||
import React, {FC} from "react";
|
||||
import {Box, FormControlLabel} from "@mui/material";
|
||||
import {saveToStorage} from "../../../../utils/storage";
|
||||
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
|
||||
import BasicSwitch from "../../../../theme/switch";
|
||||
import StepConfigurator from "./StepConfigurator";
|
||||
|
||||
const AdditionalSettings: FC = () => {
|
||||
|
||||
const {queryControls: {autocomplete, nocache}} = useAppState();
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const onChangeAutocomplete = () => {
|
||||
dispatch({type: "TOGGLE_AUTOCOMPLETE"});
|
||||
saveToStorage("AUTOCOMPLETE", !autocomplete);
|
||||
};
|
||||
|
||||
const onChangeCache = () => {
|
||||
dispatch({type: "NO_CACHE"});
|
||||
saveToStorage("NO_CACHE", !nocache);
|
||||
};
|
||||
|
||||
return <Box display="flex" alignItems="center">
|
||||
<Box>
|
||||
<FormControlLabel label="Enable autocomplete"
|
||||
control={<BasicSwitch checked={autocomplete} onChange={onChangeAutocomplete}/>}
|
||||
/>
|
||||
</Box>
|
||||
<Box ml={2}>
|
||||
<FormControlLabel label="Enable cache"
|
||||
control={<BasicSwitch checked={!nocache} onChange={onChangeCache}/>}
|
||||
/>
|
||||
</Box>
|
||||
<Box ml={2}>
|
||||
<StepConfigurator/>
|
||||
</Box>
|
||||
</Box>;
|
||||
};
|
||||
|
||||
export default AdditionalSettings;
|
|
@ -0,0 +1,137 @@
|
|||
import React, {FC, useEffect, useRef, useState} from "react";
|
||||
import {
|
||||
Accordion, AccordionDetails, AccordionSummary, Box, Grid, IconButton, Typography, Tooltip, Button
|
||||
} from "@mui/material";
|
||||
import QueryEditor from "./QueryEditor";
|
||||
import {TimeSelector} from "../Time/TimeSelector";
|
||||
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
|
||||
import ExpandMoreIcon from "@mui/icons-material/ExpandMore";
|
||||
import HighlightOffIcon from "@mui/icons-material/HighlightOff";
|
||||
import AddIcon from "@mui/icons-material/Add";
|
||||
import PlayCircleOutlineIcon from "@mui/icons-material/PlayCircleOutline";
|
||||
import Portal from "@mui/material/Portal";
|
||||
import ServerConfigurator from "./ServerConfigurator";
|
||||
import AdditionalSettings from "./AdditionalSettings";
|
||||
import {ErrorTypes} from "../../../../types";
|
||||
|
||||
export interface QueryConfiguratorProps {
|
||||
error?: ErrorTypes | string;
|
||||
}
|
||||
|
||||
const QueryConfigurator: FC<QueryConfiguratorProps> = ({error}) => {
|
||||
|
||||
const {serverUrl, query, queryHistory, time: {duration}, queryControls: {autocomplete}} = useAppState();
|
||||
const dispatch = useAppDispatch();
|
||||
const [expanded, setExpanded] = useState(true);
|
||||
const queryContainer = useRef<HTMLDivElement>(null);
|
||||
const queryRef = useRef(query);
|
||||
useEffect(() => {
|
||||
queryRef.current = query;
|
||||
}, [query]);
|
||||
|
||||
const onSetDuration = (dur: string) => dispatch({type: "SET_DURATION", payload: dur});
|
||||
|
||||
const updateHistory = () => {
|
||||
dispatch({
|
||||
type: "SET_QUERY_HISTORY", payload: query.map((q, i) => {
|
||||
const h = queryHistory[i] || {values: []};
|
||||
const queryEqual = q === h.values[h.values.length - 1];
|
||||
return {
|
||||
index: h.values.length - Number(queryEqual),
|
||||
values: !queryEqual && q ? [...h.values, q] : h.values
|
||||
};
|
||||
})
|
||||
});
|
||||
};
|
||||
|
||||
const onRunQuery = () => {
|
||||
updateHistory();
|
||||
dispatch({type: "SET_QUERY", payload: query});
|
||||
dispatch({type: "RUN_QUERY"});
|
||||
};
|
||||
|
||||
const onAddQuery = () => dispatch({type: "SET_QUERY", payload: [...queryRef.current, ""]});
|
||||
|
||||
const onRemoveQuery = (index: number) => {
|
||||
const newQuery = [...queryRef.current];
|
||||
newQuery.splice(index, 1);
|
||||
dispatch({type: "SET_QUERY", payload: newQuery});
|
||||
};
|
||||
|
||||
const onSetQuery = (value: string, index: number) => {
|
||||
const newQuery = [...queryRef.current];
|
||||
newQuery[index] = value;
|
||||
dispatch({type: "SET_QUERY", payload: newQuery});
|
||||
};
|
||||
|
||||
const setHistoryIndex = (step: number, indexQuery: number) => {
|
||||
const {index, values} = queryHistory[indexQuery];
|
||||
const newIndexHistory = index + step;
|
||||
if (newIndexHistory < 0 || newIndexHistory >= values.length) return;
|
||||
onSetQuery(values[newIndexHistory] || "", indexQuery);
|
||||
dispatch({
|
||||
type: "SET_QUERY_HISTORY_BY_INDEX",
|
||||
payload: {value: {values, index: newIndexHistory}, queryNumber: indexQuery}
|
||||
});
|
||||
};
|
||||
|
||||
return <>
|
||||
<Accordion expanded={expanded} onChange={() => setExpanded(prev => !prev)}>
|
||||
<AccordionSummary
|
||||
expandIcon={<IconButton><ExpandMoreIcon/></IconButton>}
|
||||
aria-controls="panel1a-content"
|
||||
id="panel1a-header"
|
||||
sx={{alignItems: "flex-start", padding: "15px"}}
|
||||
>
|
||||
<Box mr={2}>
|
||||
<Typography variant="h6" component="h2">Query Configuration</Typography>
|
||||
</Box>
|
||||
<Box flexGrow={1} onClick={e => e.stopPropagation()} onFocusCapture={e => e.stopPropagation()}>
|
||||
<Portal disablePortal={!expanded} container={queryContainer.current}>
|
||||
{query.map((q, i) =>
|
||||
<Box key={i} display="grid" gridTemplateColumns="1fr auto" gap="4px" width="100%"
|
||||
mb={i === query.length - 1 ? 0 : 2}>
|
||||
<QueryEditor server={serverUrl} query={query[i]} index={i} oneLiner={!expanded}
|
||||
autocomplete={autocomplete} queryHistory={queryHistory[i]} error={error}
|
||||
setHistoryIndex={setHistoryIndex} runQuery={onRunQuery}
|
||||
setQuery={onSetQuery}/>
|
||||
{i === 0 && <Tooltip title="Execute Query">
|
||||
<IconButton onClick={onRunQuery}>
|
||||
<PlayCircleOutlineIcon/>
|
||||
</IconButton>
|
||||
</Tooltip>}
|
||||
{i > 0 && <Tooltip title="Remove Query">
|
||||
<IconButton onClick={() => onRemoveQuery(i)}>
|
||||
<HighlightOffIcon/>
|
||||
</IconButton>
|
||||
</Tooltip>}
|
||||
</Box>)}
|
||||
</Portal>
|
||||
</Box>
|
||||
</AccordionSummary>
|
||||
<AccordionDetails>
|
||||
<Grid container columnSpacing={2}>
|
||||
<Grid item xs={6} minWidth={400}>
|
||||
<ServerConfigurator error={error}/>
|
||||
{/* for portal QueryEditor */}
|
||||
<div ref={queryContainer}/>
|
||||
{query.length < 2 && <Box display="inline-block" minHeight="40px" mt={2}>
|
||||
<Button onClick={onAddQuery} variant="outlined">
|
||||
<AddIcon sx={{fontSize: 16, marginRight: "4px"}}/>
|
||||
<span style={{lineHeight: 1, paddingTop: "1px"}}>Query</span>
|
||||
</Button>
|
||||
</Box>}
|
||||
</Grid>
|
||||
<Grid item xs>
|
||||
<TimeSelector setDuration={onSetDuration} duration={duration}/>
|
||||
</Grid>
|
||||
<Grid item xs={12} pt={1}>
|
||||
<AdditionalSettings/>
|
||||
</Grid>
|
||||
</Grid>
|
||||
</AccordionDetails>
|
||||
</Accordion>
|
||||
</>;
|
||||
};
|
||||
|
||||
export default QueryConfigurator;
|
|
@ -2,28 +2,41 @@ import {EditorState} from "@codemirror/state";
|
|||
import {EditorView, keymap} from "@codemirror/view";
|
||||
import {defaultKeymap} from "@codemirror/commands";
|
||||
import React, {FC, useEffect, useRef, useState} from "react";
|
||||
import { PromQLExtension } from "codemirror-promql";
|
||||
import { basicSetup } from "@codemirror/basic-setup";
|
||||
import {QueryHistory} from "../../../state/common/reducer";
|
||||
import {PromQLExtension} from "codemirror-promql";
|
||||
import {basicSetup} from "@codemirror/basic-setup";
|
||||
import {QueryHistory} from "../../../../state/common/reducer";
|
||||
import {ErrorTypes} from "../../../../types";
|
||||
|
||||
export interface QueryEditorProps {
|
||||
setHistoryIndex: (step: number) => void;
|
||||
setQuery: (query: string) => void;
|
||||
runQuery: () => void;
|
||||
query: string;
|
||||
queryHistory: QueryHistory;
|
||||
server: string;
|
||||
oneLiner?: boolean;
|
||||
autocomplete: boolean
|
||||
setHistoryIndex: (step: number, index: number) => void;
|
||||
setQuery: (query: string, index: number) => void;
|
||||
runQuery: () => void;
|
||||
query: string;
|
||||
index: number;
|
||||
queryHistory: QueryHistory;
|
||||
server: string;
|
||||
oneLiner?: boolean;
|
||||
autocomplete: boolean;
|
||||
error?: ErrorTypes | string;
|
||||
}
|
||||
|
||||
const QueryEditor: FC<QueryEditorProps> = ({
|
||||
query, queryHistory, setHistoryIndex, setQuery, runQuery, server, oneLiner = false, autocomplete
|
||||
index,
|
||||
query,
|
||||
queryHistory,
|
||||
setHistoryIndex,
|
||||
setQuery,
|
||||
runQuery,
|
||||
server,
|
||||
oneLiner = false,
|
||||
autocomplete,
|
||||
error
|
||||
}) => {
|
||||
|
||||
const ref = useRef<HTMLDivElement>(null);
|
||||
|
||||
const [editorView, setEditorView] = useState<EditorView>();
|
||||
const [focusEditor, setFocusEditor] = useState(false);
|
||||
|
||||
// init editor view on load
|
||||
useEffect(() => {
|
||||
|
@ -41,11 +54,14 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
|||
useEffect(() => {
|
||||
const promQL = new PromQLExtension();
|
||||
promQL.activateCompletion(autocomplete);
|
||||
promQL.setComplete({ remote: { url: server } });
|
||||
promQL.setComplete({remote: {url: server}});
|
||||
|
||||
const listenerExtension = EditorView.updateListener.of(editorUpdate => {
|
||||
if (editorUpdate.focusChanged) {
|
||||
setFocusEditor(editorView?.hasFocus || false);
|
||||
}
|
||||
if (editorUpdate.docChanged) {
|
||||
setQuery(editorUpdate.state.doc.toJSON().map(el => el.trim()).join(""));
|
||||
setQuery(editorUpdate.state.doc.toJSON().map(el => el.trim()).join(""), index);
|
||||
}
|
||||
});
|
||||
|
||||
|
@ -66,18 +82,20 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
|||
if (key === "Enter" && ctrlMetaKey) {
|
||||
runQuery();
|
||||
} else if (key === "ArrowUp" && ctrlMetaKey) {
|
||||
setHistoryIndex(-1);
|
||||
setHistoryIndex(-1, index);
|
||||
} else if (key === "ArrowDown" && ctrlMetaKey) {
|
||||
setHistoryIndex(1);
|
||||
setHistoryIndex(1, index);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
{/*Class one-line-scroll and other codemirror styles are declared in index.css*/}
|
||||
<div ref={ref} className={oneLiner ? "one-line-scroll" : "multi-line-scroll"} onKeyUp={onKeyUp}/>
|
||||
</>
|
||||
);
|
||||
return <div className={`query-editor-container
|
||||
${focusEditor ? "query-editor-container_focus" : ""}
|
||||
query-editor-container-${oneLiner ? "one-line" : "multi-line"}
|
||||
${error === ErrorTypes.validQuery ? "query-editor-container_error" : ""}`}>
|
||||
{/*Class one-line-scroll and other codemirror styles are declared in index.css*/}
|
||||
<label className="query-editor-label">Query</label>
|
||||
<div className="query-editor" ref={ref} onKeyUp={onKeyUp}/>
|
||||
</div>;
|
||||
};
|
||||
|
||||
export default QueryEditor;
|
|
@ -0,0 +1,40 @@
|
|||
import React, {FC, useState} from "react";
|
||||
import {Box, TextField, Tooltip, IconButton} from "@mui/material";
|
||||
import SecurityIcon from "@mui/icons-material/Security";
|
||||
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
|
||||
import {AuthDialog} from "../Auth/AuthDialog";
|
||||
import {ErrorTypes} from "../../../../types";
|
||||
|
||||
export interface ServerConfiguratorProps {
|
||||
error?: ErrorTypes | string;
|
||||
}
|
||||
|
||||
const ServerConfigurator: FC<ServerConfiguratorProps> = ({error}) => {
|
||||
|
||||
const {serverUrl} = useAppState();
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const onSetServer = ({target: {value}}: {target: {value: string}}) => {
|
||||
dispatch({type: "SET_SERVER", payload: value});
|
||||
};
|
||||
const [dialogOpen, setDialogOpen] = useState(false);
|
||||
|
||||
return <>
|
||||
<Box display="grid" gridTemplateColumns="1fr auto" gap="4px" alignItems="center" width="100%" mb={2} minHeight={50}>
|
||||
<TextField variant="outlined" fullWidth label="Server URL" value={serverUrl}
|
||||
error={error === ErrorTypes.validServer || error === ErrorTypes.emptyServer}
|
||||
inputProps={{style: {fontFamily: "Monospace"}}}
|
||||
onChange={onSetServer}/>
|
||||
<Box>
|
||||
<Tooltip title="Request Auth Settings">
|
||||
<IconButton onClick={() => setDialogOpen(true)}>
|
||||
<SecurityIcon/>
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
</Box>
|
||||
</Box>
|
||||
<AuthDialog open={dialogOpen} onClose={() => setDialogOpen(false)}/>
|
||||
</>;
|
||||
};
|
||||
|
||||
export default ServerConfigurator;
|
|
@ -0,0 +1,54 @@
|
|||
import React, {FC, useCallback, useEffect, useState} from "react";
|
||||
import {Box, FormControlLabel, TextField} from "@mui/material";
|
||||
import BasicSwitch from "../../../../theme/switch";
|
||||
import {useGraphDispatch, useGraphState} from "../../../../state/graph/GraphStateContext";
|
||||
import {useAppState} from "../../../../state/common/StateContext";
|
||||
import debounce from "lodash.debounce";
|
||||
|
||||
const StepConfigurator: FC = () => {
|
||||
const {customStep} = useGraphState();
|
||||
const graphDispatch = useGraphDispatch();
|
||||
const [error, setError] = useState(false);
|
||||
const {time: {period: {step}, duration}} = useAppState();
|
||||
|
||||
const onChangeStep = (e: React.ChangeEvent<HTMLInputElement | HTMLTextAreaElement>) => {
|
||||
const value = +e.target.value;
|
||||
if (value > 0) {
|
||||
graphDispatch({type: "SET_CUSTOM_STEP", payload: value});
|
||||
setError(false);
|
||||
} else {
|
||||
setError(true);
|
||||
}
|
||||
};
|
||||
|
||||
const debouncedOnChangeStep = useCallback(debounce(onChangeStep, 500), [customStep.value]);
|
||||
|
||||
const onChangeEnableStep = () => {
|
||||
setError(false);
|
||||
graphDispatch({type: "TOGGLE_CUSTOM_STEP"});
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
if (customStep.enable) onChangeEnableStep();
|
||||
}, [duration]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!customStep.enable) graphDispatch({type: "SET_CUSTOM_STEP", payload: step || 1});
|
||||
}, [step]);
|
||||
|
||||
return <Box display="grid" gridTemplateColumns="auto 120px" alignItems="center">
|
||||
<FormControlLabel
|
||||
control={<BasicSwitch checked={customStep.enable} onChange={onChangeEnableStep}/>}
|
||||
label="Override step value"
|
||||
/>
|
||||
{customStep.enable &&
|
||||
<TextField label="Step value" type="number" size="small" variant="outlined"
|
||||
defaultValue={customStep.value}
|
||||
error={error}
|
||||
helperText={error ? "step is out of allowed range" : " "}
|
||||
onChange={debouncedOnChangeStep}/>
|
||||
}
|
||||
</Box>;
|
||||
};
|
||||
|
||||
export default StepConfigurator;
|
|
@ -1,26 +1,28 @@
|
|||
import {useEffect, useMemo, useState} from "react";
|
||||
import {getQueryRangeUrl, getQueryUrl} from "../../../api/query-range";
|
||||
import {useAppState} from "../../../state/common/StateContext";
|
||||
import {InstantMetricResult, MetricResult} from "../../../api/types";
|
||||
import {isValidHttpUrl} from "../../../utils/url";
|
||||
import {useAuthState} from "../../../state/auth/AuthStateContext";
|
||||
import {TimeParams} from "../../../types";
|
||||
import {getQueryRangeUrl, getQueryUrl} from "../../../../api/query-range";
|
||||
import {useAppState} from "../../../../state/common/StateContext";
|
||||
import {InstantMetricResult, MetricBase, MetricResult} from "../../../../api/types";
|
||||
import {isValidHttpUrl} from "../../../../utils/url";
|
||||
import {useAuthState} from "../../../../state/auth/AuthStateContext";
|
||||
import {ErrorTypes, TimeParams} from "../../../../types";
|
||||
import {useGraphState} from "../../../../state/graph/GraphStateContext";
|
||||
|
||||
export const useFetchQuery = (): {
|
||||
fetchUrl?: string,
|
||||
fetchUrl?: string[],
|
||||
isLoading: boolean,
|
||||
graphData?: MetricResult[],
|
||||
liveData?: InstantMetricResult[],
|
||||
error?: string,
|
||||
error?: ErrorTypes | string,
|
||||
} => {
|
||||
const {query, displayType, serverUrl, time: {period}, queryControls: {nocache}} = useAppState();
|
||||
|
||||
const {basicData, bearerData, authMethod} = useAuthState();
|
||||
const {customStep} = useGraphState();
|
||||
|
||||
const [isLoading, setIsLoading] = useState(false);
|
||||
const [graphData, setGraphData] = useState<MetricResult[]>();
|
||||
const [liveData, setLiveData] = useState<InstantMetricResult[]>();
|
||||
const [error, setError] = useState<string>();
|
||||
const [error, setError] = useState<ErrorTypes | string>();
|
||||
const [prevPeriod, setPrevPeriod] = useState<TimeParams>();
|
||||
|
||||
useEffect(() => {
|
||||
|
@ -40,7 +42,7 @@ export const useFetchQuery = (): {
|
|||
}, [period]);
|
||||
|
||||
const fetchData = async () => {
|
||||
if (!fetchUrl) return;
|
||||
if (!fetchUrl?.length) return;
|
||||
setIsLoading(true);
|
||||
setPrevPeriod(period);
|
||||
|
||||
|
@ -53,16 +55,25 @@ export const useFetchQuery = (): {
|
|||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(fetchUrl, { headers });
|
||||
if (response.ok) {
|
||||
const responses = await Promise.all(fetchUrl.map(url => fetch(url, {headers})));
|
||||
const tempData = [];
|
||||
let counter = 1;
|
||||
for await (const response of responses) {
|
||||
const resp = await response.json();
|
||||
setError(undefined);
|
||||
displayType === "chart" ? setGraphData(resp.data.result) : setLiveData(resp.data.result);
|
||||
} else {
|
||||
setError((await response.json())?.error);
|
||||
if (response.ok) {
|
||||
setError(undefined);
|
||||
tempData.push(...resp.data.result.map((d: MetricBase) => {
|
||||
d.group = counter;
|
||||
return d;
|
||||
}));
|
||||
counter++;
|
||||
} else {
|
||||
setError(`${resp.errorType}\r\n${resp?.error}`);
|
||||
}
|
||||
}
|
||||
displayType === "chart" ? setGraphData(tempData) : setLiveData(tempData);
|
||||
} catch (e) {
|
||||
if (e instanceof Error) setError(e.message);
|
||||
if (e instanceof Error) setError(`${e.name}: ${e.message}`);
|
||||
}
|
||||
|
||||
setIsLoading(false);
|
||||
|
@ -71,20 +82,21 @@ export const useFetchQuery = (): {
|
|||
const fetchUrl = useMemo(() => {
|
||||
if (!period) return;
|
||||
if (!serverUrl) {
|
||||
setError("Please enter Server URL");
|
||||
} else if (!query.trim()) {
|
||||
setError("Please enter a valid Query and execute it");
|
||||
setError(ErrorTypes.emptyServer);
|
||||
} else if (query.every(q => !q.trim())) {
|
||||
setError(ErrorTypes.validQuery);
|
||||
} else if (isValidHttpUrl(serverUrl)) {
|
||||
const duration = (period.end - period.start) / 2;
|
||||
const bufferPeriod = {...period, start: period.start - duration, end: period.end + duration};
|
||||
return displayType === "chart"
|
||||
? getQueryRangeUrl(serverUrl, query, bufferPeriod, nocache)
|
||||
: getQueryUrl(serverUrl, query, period);
|
||||
if (customStep.enable) bufferPeriod.step = customStep.value;
|
||||
return query.filter(q => q.trim()).map(q => displayType === "chart"
|
||||
? getQueryRangeUrl(serverUrl, q, bufferPeriod, nocache)
|
||||
: getQueryUrl(serverUrl, q, period));
|
||||
} else {
|
||||
setError("Please provide a valid URL");
|
||||
setError(ErrorTypes.validServer);
|
||||
}
|
||||
},
|
||||
[serverUrl, period, displayType]);
|
||||
[serverUrl, period, displayType, customStep]);
|
||||
|
||||
useEffect(() => {
|
||||
setPrevPeriod(undefined);
|
||||
|
@ -94,7 +106,7 @@ export const useFetchQuery = (): {
|
|||
// Doing it on each query change - looks to be a bad idea. Probably can be done on blur
|
||||
useEffect(() => {
|
||||
fetchData();
|
||||
}, [serverUrl, displayType]);
|
||||
}, [serverUrl, displayType, customStep]);
|
||||
|
||||
useEffect(() => {
|
||||
if (needUpdateData) {
|
|
@ -1,148 +0,0 @@
|
|||
import React, {FC, useRef, useState} from "react";
|
||||
import { Accordion, AccordionDetails, AccordionSummary, Box, Grid, IconButton, TextField, Typography, FormControlLabel,
|
||||
Tooltip, Switch } from "@mui/material";
|
||||
import QueryEditor from "./QueryEditor";
|
||||
import {TimeSelector} from "./TimeSelector";
|
||||
import {useAppDispatch, useAppState} from "../../../state/common/StateContext";
|
||||
import ExpandMoreIcon from "@mui/icons-material/ExpandMore";
|
||||
import SecurityIcon from "@mui/icons-material/Security";
|
||||
import {AuthDialog} from "./AuthDialog";
|
||||
import PlayCircleOutlineIcon from "@mui/icons-material/PlayCircleOutline";
|
||||
import Portal from "@mui/material/Portal";
|
||||
import {saveToStorage} from "../../../utils/storage";
|
||||
import {useGraphDispatch, useGraphState} from "../../../state/graph/GraphStateContext";
|
||||
import debounce from "lodash.debounce";
|
||||
|
||||
const QueryConfigurator: FC = () => {
|
||||
const {serverUrl, query, queryHistory, time: {duration}, queryControls: {autocomplete, nocache}} = useAppState();
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const onChangeAutocomplete = () => {
|
||||
dispatch({type: "TOGGLE_AUTOCOMPLETE"});
|
||||
saveToStorage("AUTOCOMPLETE", !autocomplete);
|
||||
};
|
||||
const onChangeCache = () => {
|
||||
dispatch({type: "NO_CACHE"});
|
||||
saveToStorage("NO_CACHE", !nocache);
|
||||
};
|
||||
|
||||
const { yaxis } = useGraphState();
|
||||
const graphDispatch = useGraphDispatch();
|
||||
|
||||
const onChangeYaxisLimits = () => { graphDispatch({type: "TOGGLE_ENABLE_YAXIS_LIMITS"}); };
|
||||
|
||||
const setMinLimit = ({target: {value}}: {target: {value: string}}) => {
|
||||
graphDispatch({type: "SET_YAXIS_LIMITS", payload: [+value, yaxis.limits.range[1]]});
|
||||
};
|
||||
const setMaxLimit = ({target: {value}}: {target: {value: string}}) => {
|
||||
graphDispatch({type: "SET_YAXIS_LIMITS", payload: [yaxis.limits.range[0], +value]});
|
||||
};
|
||||
|
||||
const [dialogOpen, setDialogOpen] = useState(false);
|
||||
const [expanded, setExpanded] = useState(true);
|
||||
|
||||
const queryContainer = useRef<HTMLDivElement>(null);
|
||||
|
||||
const onSetDuration = (dur: string) => dispatch({type: "SET_DURATION", payload: dur});
|
||||
|
||||
const onRunQuery = () => {
|
||||
const { values } = queryHistory;
|
||||
dispatch({type: "RUN_QUERY"});
|
||||
if (query === values[values.length - 1]) return;
|
||||
dispatch({type: "SET_QUERY_HISTORY_INDEX", payload: values.length});
|
||||
dispatch({type: "SET_QUERY_HISTORY_VALUES", payload: [...values, query]});
|
||||
};
|
||||
const onSetQuery = (newQuery: string) => {
|
||||
if (query === newQuery) return;
|
||||
dispatch({type: "SET_QUERY", payload: newQuery});
|
||||
};
|
||||
const setHistoryIndex = (step: number) => {
|
||||
const index = queryHistory.index + step;
|
||||
if (index < -1 || index > queryHistory.values.length) return;
|
||||
dispatch({type: "SET_QUERY_HISTORY_INDEX", payload: index});
|
||||
onSetQuery(queryHistory.values[index] || "");
|
||||
};
|
||||
const onSetServer = ({target: {value}}: {target: {value: string}}) => {
|
||||
dispatch({type: "SET_SERVER", payload: value});
|
||||
};
|
||||
|
||||
return <>
|
||||
<Accordion expanded={expanded} onChange={() => setExpanded(prev => !prev)}>
|
||||
<AccordionSummary
|
||||
expandIcon={<ExpandMoreIcon/>}
|
||||
aria-controls="panel1a-content"
|
||||
id="panel1a-header"
|
||||
>
|
||||
<Box display="flex" alignItems="center" mr={2}><Typography variant="h6" component="h2">Query Configuration</Typography></Box>
|
||||
<Box flexGrow={1} onClick={e => e.stopPropagation()} onFocusCapture={e => e.stopPropagation()}>
|
||||
<Portal disablePortal={!expanded} container={queryContainer.current}>
|
||||
<Box display="flex" alignItems="center">
|
||||
<Box width="100%">
|
||||
<QueryEditor server={serverUrl} query={query} oneLiner={!expanded} autocomplete={autocomplete}
|
||||
queryHistory={queryHistory} setHistoryIndex={setHistoryIndex} runQuery={onRunQuery} setQuery={onSetQuery}/>
|
||||
</Box>
|
||||
<Tooltip title="Execute Query">
|
||||
<IconButton onClick={onRunQuery} size="large"><PlayCircleOutlineIcon /></IconButton>
|
||||
</Tooltip>
|
||||
</Box>
|
||||
</Portal>
|
||||
</Box>
|
||||
</AccordionSummary>
|
||||
<AccordionDetails>
|
||||
<Grid container spacing={2}>
|
||||
<Grid item xs={12} md={6}>
|
||||
<Box display="grid" gap={2} gridTemplateRows="auto 1fr">
|
||||
<Box display="flex" alignItems="center">
|
||||
<TextField variant="outlined" fullWidth label="Server URL" value={serverUrl}
|
||||
inputProps={{style: {fontFamily: "Monospace"}}}
|
||||
onChange={onSetServer}/>
|
||||
<Box>
|
||||
<Tooltip title="Request Auth Settings">
|
||||
<IconButton onClick={() => setDialogOpen(true)} size="large"><SecurityIcon/></IconButton>
|
||||
</Tooltip>
|
||||
</Box>
|
||||
</Box>
|
||||
<Box flexGrow={1} ><div ref={queryContainer} />{/* for portal QueryEditor */}</Box>
|
||||
</Box>
|
||||
</Grid>
|
||||
<Grid item xs={8} md={6} >
|
||||
<Box style={{
|
||||
minHeight: "128px",
|
||||
padding: "10px 0",
|
||||
borderRadius: "4px",
|
||||
borderColor: "#b9b9b9",
|
||||
borderStyle: "solid",
|
||||
borderWidth: "1px"}}>
|
||||
<TimeSelector setDuration={onSetDuration} duration={duration}/>
|
||||
</Box>
|
||||
</Grid>
|
||||
<Grid item xs={12}>
|
||||
<Box px={1} display="flex" alignItems="center" minHeight={52}>
|
||||
<Box><FormControlLabel label="Enable autocomplete"
|
||||
control={<Switch size="small" checked={autocomplete} onChange={onChangeAutocomplete}/>}
|
||||
/></Box>
|
||||
<Box ml={2}><FormControlLabel label="Enable cache"
|
||||
control={<Switch size="small" checked={!nocache} onChange={onChangeCache}/>}
|
||||
/></Box>
|
||||
<Box ml={2} display="flex" alignItems="center">
|
||||
<FormControlLabel
|
||||
control={<Switch size="small" checked={yaxis.limits.enable} onChange={onChangeYaxisLimits}/>}
|
||||
label="Fix the limits for y-axis"
|
||||
/>
|
||||
{yaxis.limits.enable && <Box display="grid" gridTemplateColumns="120px 120px" gap={1}>
|
||||
<TextField label="Min" type="number" size="small" variant="outlined"
|
||||
defaultValue={yaxis.limits.range[0]} onChange={debounce(setMinLimit, 750)}/>
|
||||
<TextField label="Max" type="number" size="small" variant="outlined"
|
||||
defaultValue={yaxis.limits.range[1]} onChange={debounce(setMaxLimit, 750)}/>
|
||||
</Box>}
|
||||
</Box>
|
||||
</Box>
|
||||
</Grid>
|
||||
</Grid>
|
||||
</AccordionDetails>
|
||||
</Accordion>
|
||||
<AuthDialog open={dialogOpen} onClose={() => setDialogOpen(false)}/>
|
||||
</>;
|
||||
};
|
||||
|
||||
export default QueryConfigurator;
|
|
@ -1,9 +1,10 @@
|
|||
import React, {FC, useEffect, useState} from "react";
|
||||
import {Box, FormControlLabel, IconButton, Switch, Tooltip} from "@mui/material";
|
||||
import {Box, FormControlLabel, IconButton, Tooltip} from "@mui/material";
|
||||
import EqualizerIcon from "@mui/icons-material/Equalizer";
|
||||
import {useAppDispatch, useAppState} from "../../../state/common/StateContext";
|
||||
import CircularProgressWithLabel from "../../common/CircularProgressWithLabel";
|
||||
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
|
||||
import CircularProgressWithLabel from "../../../common/CircularProgressWithLabel";
|
||||
import makeStyles from "@mui/styles/makeStyles";
|
||||
import BasicSwitch from "../../../../theme/switch";
|
||||
|
||||
const useStyles = makeStyles({
|
||||
colorizing: {
|
||||
|
@ -69,7 +70,7 @@ export const ExecutionControls: FC = () => {
|
|||
|
||||
return <Box display="flex" alignItems="center">
|
||||
{<FormControlLabel
|
||||
control={<Switch size="small" className={classes.colorizing} checked={autoRefresh} onChange={handleChange} />}
|
||||
control={<BasicSwitch className={classes.colorizing} checked={autoRefresh} onChange={handleChange} />}
|
||||
label="Auto-refresh"
|
||||
/>}
|
||||
|
||||
|
@ -78,7 +79,9 @@ export const ExecutionControls: FC = () => {
|
|||
onClick={() => {iterateDelays();}} />
|
||||
<Tooltip title="Change delay refresh">
|
||||
<Box ml={1}>
|
||||
<IconButton onClick={() => {iterateDelays();}} size="large"><EqualizerIcon style={{color: "white"}} /></IconButton>
|
||||
<IconButton onClick={() => {iterateDelays();}}>
|
||||
<EqualizerIcon style={{color: "white"}} />
|
||||
</IconButton>
|
||||
</Box>
|
||||
</Tooltip>
|
||||
</>}
|
|
@ -1,6 +1,6 @@
|
|||
import React, {FC} from "react";
|
||||
import {Paper, Table, TableBody, TableCell, TableContainer, TableHead, TableRow} from "@mui/material";
|
||||
import {supportedDurations} from "../../../utils/time";
|
||||
import {supportedDurations} from "../../../../utils/time";
|
||||
|
||||
export const TimeDurationPopover: FC = () => {
|
||||
|
|
@ -2,17 +2,33 @@ import React, {FC, useEffect, useState} from "react";
|
|||
import {Box, Popover, TextField, Typography} from "@mui/material";
|
||||
import DateTimePicker from "@mui/lab/DateTimePicker";
|
||||
import {TimeDurationPopover} from "./TimeDurationPopover";
|
||||
import {useAppDispatch, useAppState} from "../../../state/common/StateContext";
|
||||
import {checkDurationLimit, dateFromSeconds, formatDateForNativeInput} from "../../../utils/time";
|
||||
import {InlineBtn} from "../../common/InlineBtn";
|
||||
import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
|
||||
import {checkDurationLimit, dateFromSeconds, formatDateForNativeInput} from "../../../../utils/time";
|
||||
import {InlineBtn} from "../../../common/InlineBtn";
|
||||
import makeStyles from "@mui/styles/makeStyles";
|
||||
|
||||
interface TimeSelectorProps {
|
||||
setDuration: (str: string) => void;
|
||||
duration: string;
|
||||
}
|
||||
|
||||
const useStyles = makeStyles({
|
||||
container: {
|
||||
display: "grid",
|
||||
gridTemplateColumns: "auto auto",
|
||||
height: "100%",
|
||||
padding: "18px 14px",
|
||||
borderRadius: "4px",
|
||||
borderColor: "#b9b9b9",
|
||||
borderStyle: "solid",
|
||||
borderWidth: "1px"
|
||||
}
|
||||
});
|
||||
|
||||
export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
|
||||
|
||||
const classes = useStyles();
|
||||
|
||||
const [durationStringFocused, setFocused] = useState(false);
|
||||
const [anchorEl, setAnchorEl] = React.useState<Element | null>(null);
|
||||
const [until, setUntil] = useState<string>();
|
||||
|
@ -60,7 +76,7 @@ export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
|
|||
|
||||
const open = Boolean(anchorEl);
|
||||
|
||||
return <Box m={1} flexDirection="row" display="flex">
|
||||
return <Box className={classes.container}>
|
||||
{/*setup duration*/}
|
||||
<Box px={1}>
|
||||
<Box>
|
||||
|
@ -72,7 +88,7 @@ export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
|
|||
onFocus={() => {setFocused(true);}}
|
||||
/>
|
||||
</Box>
|
||||
<Box my={2}>
|
||||
<Box mt={2}>
|
||||
<Typography variant="body2">
|
||||
<span aria-owns={open ? "mouse-over-popover" : undefined}
|
||||
aria-haspopup="true"
|
||||
|
@ -119,7 +135,7 @@ export const TimeSelector: FC<TimeSelectorProps> = ({setDuration}) => {
|
|||
/>
|
||||
</Box>
|
||||
|
||||
<Box my={2}>
|
||||
<Box mt={2}>
|
||||
<Typography variant="body2">
|
||||
Will be changed to current time for auto-refresh mode.
|
||||
<InlineBtn handler={() => dispatch({type: "RUN_QUERY_TO_NOW"})} text="Switch to now"/>
|
|
@ -1,20 +1,19 @@
|
|||
import React, {FC} from "react";
|
||||
import {Alert, AppBar, Box, CircularProgress, Fade, Link, Toolbar, Typography} from "@mui/material";
|
||||
import {ExecutionControls} from "./Configurator/ExecutionControls";
|
||||
import {ExecutionControls} from "./Configurator/Time/ExecutionControls";
|
||||
import {DisplayTypeSwitch} from "./Configurator/DisplayTypeSwitch";
|
||||
import GraphView from "./Views/GraphView";
|
||||
import TableView from "./Views/TableView";
|
||||
import {useAppState} from "../../state/common/StateContext";
|
||||
import QueryConfigurator from "./Configurator/QueryConfigurator";
|
||||
import {useFetchQuery} from "./Configurator/useFetchQuery";
|
||||
import QueryConfigurator from "./Configurator/Query/QueryConfigurator";
|
||||
import {useFetchQuery} from "./Configurator/Query/useFetchQuery";
|
||||
import JsonView from "./Views/JsonView";
|
||||
import {UrlCopy} from "./UrlCopy";
|
||||
|
||||
const HomeLayout: FC = () => {
|
||||
|
||||
const {displayType, time: {period}} = useAppState();
|
||||
|
||||
const {fetchUrl, isLoading, liveData, graphData, error} = useFetchQuery();
|
||||
const {isLoading, liveData, graphData, error} = useFetchQuery();
|
||||
|
||||
return (
|
||||
<>
|
||||
|
@ -46,12 +45,11 @@ const HomeLayout: FC = () => {
|
|||
<ExecutionControls/>
|
||||
</Box>
|
||||
<DisplayTypeSwitch/>
|
||||
<UrlCopy url={fetchUrl}/>
|
||||
</Toolbar>
|
||||
</AppBar>
|
||||
<Box p={2} display="grid" gridTemplateRows="auto 1fr" gap={"20px"} style={{minHeight: "calc(100vh - 64px)"}}>
|
||||
<Box p={4} display="grid" gridTemplateRows="auto 1fr" gap={"20px"} style={{minHeight: "calc(100vh - 64px)"}}>
|
||||
<Box>
|
||||
<QueryConfigurator/>
|
||||
<QueryConfigurator error={error}/>
|
||||
</Box>
|
||||
<Box height={"100%"}>
|
||||
{isLoading && <Fade in={isLoading} style={{
|
||||
|
@ -68,9 +66,9 @@ const HomeLayout: FC = () => {
|
|||
<CircularProgress/>
|
||||
</Box>
|
||||
</Fade>}
|
||||
{<Box height={"100%"} p={3} bgcolor={"#fff"}>
|
||||
{<Box height={"100%"} bgcolor={"#fff"}>
|
||||
{error &&
|
||||
<Alert color="error" severity="error" style={{fontSize: "14px"}}>
|
||||
<Alert color="error" severity="error" style={{fontSize: "14px", whiteSpace: "pre-wrap"}}>
|
||||
{error}
|
||||
</Alert>}
|
||||
{graphData && period && (displayType === "chart") &&
|
||||
|
|
|
@ -2,50 +2,51 @@ import React, {FC, useEffect, useState} from "react";
|
|||
import {MetricResult} from "../../../api/types";
|
||||
import LineChart from "../../LineChart/LineChart";
|
||||
import {AlignedData as uPlotData, Series as uPlotSeries} from "uplot";
|
||||
import {Legend, LegendItem} from "../../Legend/Legend";
|
||||
import {useGraphDispatch, useGraphState} from "../../../state/graph/GraphStateContext";
|
||||
import {getHideSeries, getLegendItem, getLimitsYAxis, getSeriesItem, getTimeSeries} from "../../../utils/uPlot";
|
||||
import Legend from "../../Legend/Legend";
|
||||
import {useGraphDispatch} from "../../../state/graph/GraphStateContext";
|
||||
import {getHideSeries, getLegendItem, getSeriesItem} from "../../../utils/uplot/series";
|
||||
import {getLimitsYAxis, getTimeSeries} from "../../../utils/uplot/axes";
|
||||
import {LegendItem} from "../../../utils/uplot/types";
|
||||
import {AxisRange} from "../../../state/graph/reducer";
|
||||
import GraphSettings from "../Configurator/Graph/GraphSettings";
|
||||
|
||||
export interface GraphViewProps {
|
||||
data?: MetricResult[];
|
||||
}
|
||||
|
||||
const GraphView: FC<GraphViewProps> = ({data = []}) => {
|
||||
const { yaxis } = useGraphState();
|
||||
const graphDispatch = useGraphDispatch();
|
||||
|
||||
const [dataChart, setDataChart] = useState<uPlotData>([[]]);
|
||||
const [series, setSeries] = useState<uPlotSeries[]>([]);
|
||||
const [legend, setLegend] = useState<LegendItem[]>([]);
|
||||
const [hideSeries, setHideSeries] = useState<string[]>([]);
|
||||
const [valuesLimit, setValuesLimit] = useState<[number, number]>([0, 1]);
|
||||
const [valuesLimit, setValuesLimit] = useState<AxisRange>({"1": [0, 1]});
|
||||
|
||||
const setLimitsYaxis = (values: number[]) => {
|
||||
if (!yaxis.limits.enable || (yaxis.limits.range.every(item => !item))) {
|
||||
const limits = getLimitsYAxis(values);
|
||||
setValuesLimit(limits);
|
||||
graphDispatch({type: "SET_YAXIS_LIMITS", payload: limits});
|
||||
}
|
||||
const setLimitsYaxis = (values: {[key: string]: number[]}) => {
|
||||
const limits = getLimitsYAxis(values);
|
||||
setValuesLimit(limits);
|
||||
graphDispatch({type: "SET_YAXIS_LIMITS", payload: limits});
|
||||
};
|
||||
|
||||
const onChangeLegend = (label: string, metaKey: boolean) => {
|
||||
setHideSeries(getHideSeries({hideSeries, label, metaKey, series}));
|
||||
const onChangeLegend = (legend: LegendItem, metaKey: boolean) => {
|
||||
setHideSeries(getHideSeries({hideSeries, legend, metaKey, series}));
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
const tempTimes: number[] = [];
|
||||
const tempValues: number[] = [];
|
||||
const tempValues: {[key: string]: number[]} = {};
|
||||
const tempLegend: LegendItem[] = [];
|
||||
const tempSeries: uPlotSeries[] = [];
|
||||
|
||||
data?.forEach(d => {
|
||||
data?.forEach((d) => {
|
||||
const seriesItem = getSeriesItem(d, hideSeries);
|
||||
tempSeries.push(seriesItem);
|
||||
tempLegend.push(getLegendItem(seriesItem));
|
||||
tempLegend.push(getLegendItem(seriesItem, d.group));
|
||||
|
||||
d.values.forEach(v => {
|
||||
tempTimes.push(v[0]);
|
||||
tempValues.push(+v[1]);
|
||||
tempValues[d.group] ? tempValues[d.group].push(+v[1]) : tempValues[d.group] = [+v[1]];
|
||||
});
|
||||
});
|
||||
|
||||
|
@ -68,7 +69,7 @@ const GraphView: FC<GraphViewProps> = ({data = []}) => {
|
|||
data?.forEach(d => {
|
||||
const seriesItem = getSeriesItem(d, hideSeries);
|
||||
tempSeries.push(seriesItem);
|
||||
tempLegend.push(getLegendItem(seriesItem));
|
||||
tempLegend.push(getLegendItem(seriesItem, d.group));
|
||||
});
|
||||
setSeries([{}, ...tempSeries]);
|
||||
setLegend(tempLegend);
|
||||
|
@ -77,6 +78,7 @@ const GraphView: FC<GraphViewProps> = ({data = []}) => {
|
|||
return <>
|
||||
{(data.length > 0)
|
||||
? <div>
|
||||
<GraphSettings/>
|
||||
<LineChart data={dataChart} series={series} metrics={data} limits={valuesLimit}/>
|
||||
<Legend labels={legend} onChange={onChangeLegend}/>
|
||||
</div>
|
||||
|
|
|
@ -1,31 +1,48 @@
|
|||
import React, {FC} from "react";
|
||||
import React, {FC, useMemo} from "react";
|
||||
import {hexToRGB} from "../../utils/color";
|
||||
import {useAppState} from "../../state/common/StateContext";
|
||||
import {LegendItem} from "../../utils/uplot/types";
|
||||
import "./legend.css";
|
||||
|
||||
export interface LegendItem {
|
||||
label: string;
|
||||
color: string;
|
||||
checked: boolean;
|
||||
}
|
||||
import {getDashLine} from "../../utils/uplot/helpers";
|
||||
|
||||
export interface LegendProps {
|
||||
labels: LegendItem[];
|
||||
onChange: (legend: string, metaKey: boolean) => void;
|
||||
onChange: (item: LegendItem, metaKey: boolean) => void;
|
||||
}
|
||||
|
||||
export const Legend: FC<LegendProps> = ({labels, onChange}) => {
|
||||
const Legend: FC<LegendProps> = ({labels, onChange}) => {
|
||||
const {query} = useAppState();
|
||||
|
||||
const groups = useMemo(() => {
|
||||
return Array.from(new Set(labels.map(l => l.group)));
|
||||
}, [labels]);
|
||||
|
||||
return <div className="legendWrapper">
|
||||
{labels.map((legendItem: LegendItem) =>
|
||||
<div className={legendItem.checked ? "legendItem" : "legendItem legendItemHide"}
|
||||
key={legendItem.label}
|
||||
onClick={(e) => onChange(legendItem.label, e.ctrlKey || e.metaKey)}>
|
||||
<div className="legendMarker"
|
||||
style={{
|
||||
borderColor: legendItem.color,
|
||||
backgroundColor: `rgba(${hexToRGB(legendItem.color)}, 0.1)`
|
||||
}}/>
|
||||
<div className="legendLabel">{legendItem.checked} {legendItem.label}</div>
|
||||
{groups.map((group) => <div className="legendGroup" key={group}>
|
||||
<div className="legendGroupTitle">
|
||||
<svg className="legendGroupLine" width="33" height="3" version="1.1" xmlns="http://www.w3.org/2000/svg">
|
||||
<line strokeWidth="3" x1="0" y1="0" x2="33" y2="0" stroke="#363636"
|
||||
strokeDasharray={getDashLine(group).join(",")}
|
||||
/>
|
||||
</svg>
|
||||
<b>"{query[group - 1]}"</b>:
|
||||
</div>
|
||||
)}
|
||||
<div>
|
||||
{labels.filter(l => l.group === group).map((legendItem: LegendItem) =>
|
||||
<div className={legendItem.checked ? "legendItem" : "legendItem legendItemHide"}
|
||||
key={`${legendItem.group}.${legendItem.label}`}
|
||||
onClick={(e) => onChange(legendItem, e.ctrlKey || e.metaKey)}>
|
||||
<div className="legendMarker"
|
||||
style={{
|
||||
borderColor: legendItem.color,
|
||||
backgroundColor: `rgba(${hexToRGB(legendItem.color)}, 0.1)`
|
||||
}}/>
|
||||
<div className="legendLabel">{legendItem.label}</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>)}
|
||||
</div>;
|
||||
};
|
||||
};
|
||||
|
||||
export default Legend;
|
|
@ -1,12 +1,31 @@
|
|||
.legendWrapper {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(400px, 1fr));
|
||||
grid-gap: 20px;
|
||||
margin-top: 20px;
|
||||
cursor: default;
|
||||
}
|
||||
|
||||
.legendGroup {
|
||||
margin-bottom: 24px;
|
||||
}
|
||||
|
||||
.legendGroupTitle {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
padding: 10px 0 5px;
|
||||
font-size: 11px;
|
||||
}
|
||||
|
||||
.legendGroupLine {
|
||||
margin: 0 10px;
|
||||
}
|
||||
|
||||
.legendItem {
|
||||
display: inline-grid;
|
||||
grid-template-columns: auto auto;
|
||||
grid-gap: 4px;
|
||||
align-items: center;
|
||||
grid-gap: 6px;
|
||||
align-items: start;
|
||||
justify-content: start;
|
||||
padding: 5px 10px;
|
||||
background-color: #FFF;
|
||||
|
@ -30,9 +49,10 @@
|
|||
border-style: solid;
|
||||
box-sizing: border-box;
|
||||
transition: 0.2s ease;
|
||||
margin: 3px 0;
|
||||
}
|
||||
|
||||
.legendLabel {
|
||||
font-size: 12px;
|
||||
font-weight: 600;
|
||||
font-size: 11px;
|
||||
font-weight: normal;
|
||||
}
|
|
@ -1,45 +1,46 @@
|
|||
import React, {FC, useCallback, useEffect, useRef, useState} from "react";
|
||||
import {useAppDispatch, useAppState} from "../../state/common/StateContext";
|
||||
import uPlot, {AlignedData as uPlotData, Options as uPlotOptions, Series as uPlotSeries, Range} from "uplot";
|
||||
import uPlot, {AlignedData as uPlotData, Options as uPlotOptions, Series as uPlotSeries, Range, Scales, Scale} from "uplot";
|
||||
import {useGraphState} from "../../state/graph/GraphStateContext";
|
||||
import {defaultOptions, dragChart, setTooltip} from "../../utils/uPlot";
|
||||
import {defaultOptions} from "../../utils/uplot/helpers";
|
||||
import {dragChart} from "../../utils/uplot/events";
|
||||
import {getAxes} from "../../utils/uplot/axes";
|
||||
import {setTooltip} from "../../utils/uplot/tooltip";
|
||||
import {MetricResult} from "../../api/types";
|
||||
import {limitsDurations} from "../../utils/time";
|
||||
import throttle from "lodash.throttle";
|
||||
import "uplot/dist/uPlot.min.css";
|
||||
import "./tooltip.css";
|
||||
import {AxisRange} from "../../state/graph/reducer";
|
||||
|
||||
export interface LineChartProps {
|
||||
metrics: MetricResult[]
|
||||
data: uPlotData;
|
||||
series: uPlotSeries[],
|
||||
limits: [number, number]
|
||||
metrics: MetricResult[];
|
||||
data: uPlotData;
|
||||
series: uPlotSeries[];
|
||||
limits: AxisRange;
|
||||
}
|
||||
|
||||
enum typeChartUpdate { xRange = "xRange", yRange = "yRange", data = "data" }
|
||||
enum typeChartUpdate {xRange = "xRange", yRange = "yRange", data = "data"}
|
||||
|
||||
const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const {time: {period}} = useAppState();
|
||||
const { yaxis } = useGraphState();
|
||||
const {yaxis} = useGraphState();
|
||||
const containerRef = useRef<HTMLDivElement>(null);
|
||||
const uPlotRef = useRef<HTMLDivElement>(null);
|
||||
const [isPanning, setPanning] = useState(false);
|
||||
const [zoomPos, setZoomPos] = useState(0);
|
||||
const [xRange, setXRange] = useState({ min: period.start, max: period.end });
|
||||
const [xRange, setXRange] = useState({min: period.start, max: period.end});
|
||||
const [uPlotInst, setUPlotInst] = useState<uPlot>();
|
||||
|
||||
const tooltip = document.createElement("div");
|
||||
tooltip.className = "u-tooltip";
|
||||
const tooltipIdx = { seriesIdx: 1, dataIdx: 0 };
|
||||
const tooltipOffset = { left: 0, top: 0 };
|
||||
const tooltipIdx = {seriesIdx: 1, dataIdx: 0};
|
||||
const tooltipOffset = {left: 0, top: 0};
|
||||
|
||||
const setScale = ({min, max}: {min: number, max: number}): void => {
|
||||
const setScale = ({min, max}: { min: number, max: number }): void => {
|
||||
dispatch({type: "SET_PERIOD", payload: {from: new Date(min * 1000), to: new Date(max * 1000)}});
|
||||
};
|
||||
const throttledSetScale = useCallback(throttle(setScale, 500), []);
|
||||
|
||||
const setPlotScale = ({u, min, max}: {u: uPlot, min: number, max: number}) => {
|
||||
const setPlotScale = ({u, min, max}: { u: uPlot, min: number, max: number }) => {
|
||||
const delta = (max - min) * 1000;
|
||||
if ((delta < limitsDurations.min) || (delta > limitsDurations.max)) return;
|
||||
u.setScale("x", {min, max});
|
||||
|
@ -52,22 +53,18 @@ const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) =>
|
|||
tooltipOffset.left = parseFloat(u.over.style.left);
|
||||
tooltipOffset.top = parseFloat(u.over.style.top);
|
||||
u.root.querySelector(".u-wrap")?.appendChild(tooltip);
|
||||
|
||||
// wheel drag pan
|
||||
u.over.addEventListener("mousedown", e => {
|
||||
dragChart({u, e, setPanning, setPlotScale, factor});
|
||||
});
|
||||
|
||||
u.over.addEventListener("mousedown", e => dragChart({u, e, setPanning, setPlotScale, factor}));
|
||||
// wheel scroll zoom
|
||||
u.over.addEventListener("wheel", e => {
|
||||
if (!e.ctrlKey && !e.metaKey) return;
|
||||
e.preventDefault();
|
||||
const {width} = u.over.getBoundingClientRect();
|
||||
if (u.cursor.left && u.cursor.left > 0) setZoomPos(u.cursor.left);
|
||||
const zoomPos = u.cursor.left && u.cursor.left > 0 ? u.cursor.left : 0;
|
||||
const xVal = u.posToVal(zoomPos, "x");
|
||||
const oxRange = (u.scales.x.max || 0) - (u.scales.x.min || 0);
|
||||
const nxRange = e.deltaY < 0 ? oxRange * factor : oxRange / factor;
|
||||
const min = xVal - (zoomPos/width) * nxRange;
|
||||
const min = xVal - (zoomPos / width) * nxRange;
|
||||
const max = min + nxRange;
|
||||
u.batch(() => setPlotScale({u, min, max}));
|
||||
});
|
||||
|
@ -88,25 +85,27 @@ const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) =>
|
|||
? setTooltip({u, tooltipIdx, metrics, series, tooltip, tooltipOffset})
|
||||
: tooltip.style.display = "none";
|
||||
};
|
||||
|
||||
const getRangeY = (u: uPlot, min = 0, max = 1): Range.MinMax => {
|
||||
if (yaxis.limits.enable) return yaxis.limits.range;
|
||||
return min && max ? [min - (min * 0.05), max + (max * 0.05)] : limits;
|
||||
const getRangeX = (): Range.MinMax => [xRange.min, xRange.max];
|
||||
const getRangeY = (u: uPlot, min = 0, max = 1, axis: string): Range.MinMax => {
|
||||
if (yaxis.limits.enable) return yaxis.limits.range[axis];
|
||||
return min && max ? [min - (min * 0.05), max + (max * 0.05)] : limits[axis];
|
||||
};
|
||||
|
||||
const getRangeX = (): Range.MinMax => {
|
||||
return [xRange.min, xRange.max];
|
||||
const getScales = (): Scales => {
|
||||
const scales: { [key: string]: { range: Scale.Range } } = {x: {range: getRangeX}};
|
||||
Object.keys(yaxis.limits.range).forEach(axis => {
|
||||
scales[axis] = {range: (u: uPlot, min = 0, max = 1) => getRangeY(u, min, max, axis)};
|
||||
});
|
||||
return scales;
|
||||
};
|
||||
|
||||
const options: uPlotOptions = {
|
||||
...defaultOptions,
|
||||
width: containerRef.current ? containerRef.current.offsetWidth : 400,
|
||||
series,
|
||||
plugins: [{ hooks: { ready: onReadyChart, setCursor, setSeries: seriesFocus }}],
|
||||
scales: {
|
||||
x: { range: getRangeX },
|
||||
y: { range: getRangeY }
|
||||
}
|
||||
axes: getAxes(series),
|
||||
scales: {...getScales()},
|
||||
width: containerRef.current ? containerRef.current.offsetWidth : 400,
|
||||
plugins: [{hooks: {ready: onReadyChart, setCursor, setSeries: seriesFocus}}],
|
||||
};
|
||||
|
||||
const updateChart = (type: typeChartUpdate): void => {
|
||||
|
@ -116,7 +115,10 @@ const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) =>
|
|||
uPlotInst.scales.x.range = getRangeX;
|
||||
break;
|
||||
case typeChartUpdate.yRange:
|
||||
uPlotInst.scales.y.range = getRangeY;
|
||||
Object.keys(yaxis.limits.range).forEach(axis => {
|
||||
if (!uPlotInst.scales[axis]) return;
|
||||
uPlotInst.scales[axis].range = (u: uPlot, min = 0, max = 1) => getRangeY(u, min, max, axis);
|
||||
});
|
||||
break;
|
||||
case typeChartUpdate.data:
|
||||
uPlotInst.setData(data);
|
||||
|
@ -125,13 +127,13 @@ const LineChart: FC<LineChartProps> = ({data, series, metrics = [], limits}) =>
|
|||
uPlotInst.redraw();
|
||||
};
|
||||
|
||||
useEffect(() => setXRange({ min: period.start, max: period.end }), [period]);
|
||||
useEffect(() => setXRange({min: period.start, max: period.end}), [period]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!uPlotRef.current) return;
|
||||
const u = new uPlot(options, data, uPlotRef.current);
|
||||
setUPlotInst(u);
|
||||
setXRange({ min: period.start, max: period.end });
|
||||
setXRange({min: period.start, max: period.end});
|
||||
return u.destroy;
|
||||
}, [uPlotRef.current, series]);
|
||||
|
||||
|
|
|
@ -7,43 +7,89 @@ body {
|
|||
}
|
||||
|
||||
code {
|
||||
font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
|
||||
monospace;
|
||||
font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', monospace;
|
||||
}
|
||||
|
||||
/*Material UI global classes*/
|
||||
|
||||
.MuiAccordionSummary-content {
|
||||
margin: 10px 0 !important;
|
||||
margin: 0 !important;
|
||||
}
|
||||
|
||||
/*Codemirror classes*/
|
||||
|
||||
/* TODO: find better way to override codemirror styles */
|
||||
.cm-activeLine {
|
||||
background-color: inherit !important;
|
||||
}
|
||||
.cm-editor {
|
||||
border: none;
|
||||
border-radius: 4px;
|
||||
border-color: #b9b9b9;
|
||||
border-style: solid;
|
||||
border-width: 1px;
|
||||
font-size: 10px;
|
||||
}
|
||||
|
||||
.one-line-scroll .cm-editor {
|
||||
height: 24px;
|
||||
}
|
||||
|
||||
.cm-gutters {
|
||||
border-radius: 4px 0 0 4px;
|
||||
height: 100%;
|
||||
overflow: hidden;
|
||||
background-color: #FFFFFF !important;
|
||||
border: none !important;
|
||||
}
|
||||
|
||||
.multi-line-scroll .cm-content,
|
||||
.multi-line-scroll .cm-gutters {
|
||||
min-height: 64px !important;
|
||||
.cm-activeLineGutter {
|
||||
background-color: #FFFFFF !important;
|
||||
}
|
||||
|
||||
.one-line-scroll .cm-content,
|
||||
.one-line-scroll .cm-gutters {
|
||||
min-height: auto;
|
||||
.query-editor .cm-scroller {
|
||||
align-items: center !important;
|
||||
}
|
||||
|
||||
.query-editor .cm-editor.cm-focused {
|
||||
outline: none;
|
||||
}
|
||||
|
||||
.query-editor-container {
|
||||
position: relative;
|
||||
padding: 12px;
|
||||
border: 1px solid #b9b9b9;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
.query-editor-container_focus {
|
||||
border: 1px solid #3F51B5;
|
||||
}
|
||||
|
||||
.query-editor-container_error {
|
||||
border-color: #FF4141;
|
||||
}
|
||||
|
||||
.query-editor-container-one-line .query-editor .cm-editor {
|
||||
height: 22px;
|
||||
}
|
||||
|
||||
.query-editor-container-one-line {
|
||||
padding: 6px;
|
||||
}
|
||||
|
||||
.query-editor-label {
|
||||
font-weight: 400;
|
||||
font-size: 12px;
|
||||
line-height: 1;
|
||||
letter-spacing: normal;
|
||||
color: rgba(0, 0, 0, 0.6);
|
||||
padding: 0 5px;
|
||||
white-space: nowrap;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
max-width: calc(133% - 24px);
|
||||
position: absolute;
|
||||
left: 4px;
|
||||
top: -0.71875em;
|
||||
z-index: 1;
|
||||
background-color: #FFFFFF;
|
||||
transform: scale(0.75);
|
||||
}
|
||||
|
||||
.query-editor-container_error .query-editor-label {
|
||||
color: #FF4141;
|
||||
}
|
||||
|
|
|
@ -4,7 +4,7 @@ import {TimeParams, TimePeriod} from "../../types";
|
|||
import {dateFromSeconds, formatDateToLocal, getDateNowUTC, getDurationFromPeriod, getTimeperiodForDuration} from "../../utils/time";
|
||||
import {getFromStorage} from "../../utils/storage";
|
||||
import {getDefaultServer} from "../../utils/default-server-url";
|
||||
import {getQueryStringValue} from "../../utils/query-string";
|
||||
import {getQueryArray, getQueryStringValue} from "../../utils/query-string";
|
||||
|
||||
export interface TimeState {
|
||||
duration: string;
|
||||
|
@ -19,9 +19,9 @@ export interface QueryHistory {
|
|||
export interface AppState {
|
||||
serverUrl: string;
|
||||
displayType: DisplayType;
|
||||
query: string;
|
||||
query: string[];
|
||||
time: TimeState;
|
||||
queryHistory: QueryHistory,
|
||||
queryHistory: QueryHistory[],
|
||||
queryControls: {
|
||||
autoRefresh: boolean;
|
||||
autocomplete: boolean,
|
||||
|
@ -32,9 +32,9 @@ export interface AppState {
|
|||
export type Action =
|
||||
| { type: "SET_DISPLAY_TYPE", payload: DisplayType }
|
||||
| { type: "SET_SERVER", payload: string }
|
||||
| { type: "SET_QUERY", payload: string }
|
||||
| { type: "SET_QUERY_HISTORY_INDEX", payload: number }
|
||||
| { type: "SET_QUERY_HISTORY_VALUES", payload: string[] }
|
||||
| { type: "SET_QUERY", payload: string[] }
|
||||
| { type: "SET_QUERY_HISTORY_BY_INDEX", payload: {value: QueryHistory, queryNumber: number} }
|
||||
| { type: "SET_QUERY_HISTORY", payload: QueryHistory[] }
|
||||
| { type: "SET_DURATION", payload: string }
|
||||
| { type: "SET_UNTIL", payload: Date }
|
||||
| { type: "SET_PERIOD", payload: TimePeriod }
|
||||
|
@ -46,13 +46,13 @@ export type Action =
|
|||
|
||||
const duration = getQueryStringValue("g0.range_input", "1h") as string;
|
||||
const endInput = formatDateToLocal(getQueryStringValue("g0.end_input", getDateNowUTC()) as Date);
|
||||
const query = getQueryStringValue("g0.expr", "") as string;
|
||||
const query = getQueryArray();
|
||||
|
||||
export const initialState: AppState = {
|
||||
serverUrl: getDefaultServer(),
|
||||
displayType: getQueryStringValue("tab", "chart") as DisplayType,
|
||||
query: query, // demo_memory_usage_bytes
|
||||
queryHistory: { index: 0, values: [query] },
|
||||
queryHistory: query.map(q => ({index: 0, values: [q]})),
|
||||
time: {
|
||||
duration,
|
||||
period: getTimeperiodForDuration(duration, new Date(endInput))
|
||||
|
@ -81,21 +81,16 @@ export function reducer(state: AppState, action: Action): AppState {
|
|||
...state,
|
||||
query: action.payload
|
||||
};
|
||||
case "SET_QUERY_HISTORY_INDEX":
|
||||
case "SET_QUERY_HISTORY":
|
||||
return {
|
||||
...state,
|
||||
queryHistory: {
|
||||
...state.queryHistory,
|
||||
index: action.payload
|
||||
}
|
||||
queryHistory: action.payload
|
||||
};
|
||||
case "SET_QUERY_HISTORY_VALUES":
|
||||
case "SET_QUERY_HISTORY_BY_INDEX":
|
||||
state.queryHistory.splice(action.payload.queryNumber, 1, action.payload.value);
|
||||
return {
|
||||
...state,
|
||||
queryHistory: {
|
||||
...state.queryHistory,
|
||||
values: action.payload
|
||||
}
|
||||
queryHistory: state.queryHistory
|
||||
};
|
||||
case "SET_DURATION":
|
||||
return {
|
||||
|
|
|
@ -1,21 +1,34 @@
|
|||
export interface AxisRange {
|
||||
[key: string]: [number, number]
|
||||
}
|
||||
|
||||
export interface YaxisState {
|
||||
limits: {
|
||||
enable: boolean,
|
||||
range: [number, number]
|
||||
}
|
||||
limits: {
|
||||
enable: boolean,
|
||||
range: AxisRange
|
||||
}
|
||||
}
|
||||
|
||||
export interface CustomStep {
|
||||
enable: boolean,
|
||||
value: number
|
||||
}
|
||||
|
||||
export interface GraphState {
|
||||
yaxis: YaxisState
|
||||
customStep: CustomStep
|
||||
yaxis: YaxisState
|
||||
}
|
||||
|
||||
export type GraphAction =
|
||||
| { type: "TOGGLE_ENABLE_YAXIS_LIMITS" }
|
||||
| { type: "SET_YAXIS_LIMITS", payload: [number, number] }
|
||||
| { type: "TOGGLE_ENABLE_YAXIS_LIMITS" }
|
||||
| { type: "SET_YAXIS_LIMITS", payload: { [key: string]: [number, number] } }
|
||||
| { type: "TOGGLE_CUSTOM_STEP" }
|
||||
| { type: "SET_CUSTOM_STEP", payload: number}
|
||||
|
||||
export const initialGraphState: GraphState = {
|
||||
customStep: {enable: false, value: 1},
|
||||
yaxis: {
|
||||
limits: {enable: false, range: [0, 0]}
|
||||
limits: {enable: false, range: {"1": [0, 0]}}
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -32,6 +45,22 @@ export function reducer(state: GraphState, action: GraphAction): GraphState {
|
|||
}
|
||||
}
|
||||
};
|
||||
case "TOGGLE_CUSTOM_STEP":
|
||||
return {
|
||||
...state,
|
||||
customStep: {
|
||||
...state.customStep,
|
||||
enable: !state.customStep.enable
|
||||
}
|
||||
};
|
||||
case "SET_CUSTOM_STEP":
|
||||
return {
|
||||
...state,
|
||||
customStep: {
|
||||
...state.customStep,
|
||||
value: action.payload
|
||||
}
|
||||
};
|
||||
case "SET_YAXIS_LIMITS":
|
||||
return {
|
||||
...state,
|
||||
|
|
25
app/vmui/packages/vmui/src/theme/switch.ts
Normal file
25
app/vmui/packages/vmui/src/theme/switch.ts
Normal file
|
@ -0,0 +1,25 @@
|
|||
import {styled} from "@mui/material/styles";
|
||||
import Switch from "@mui/material/Switch";
|
||||
|
||||
const BasicSwitch = styled(Switch)(() => ({
|
||||
padding: 10,
|
||||
"& .MuiSwitch-track": {
|
||||
borderRadius: 14,
|
||||
"&:before, &:after": {
|
||||
content: "\"\"",
|
||||
position: "absolute",
|
||||
top: "50%",
|
||||
transform: "translateY(-50%)",
|
||||
width: 14,
|
||||
height: 14,
|
||||
},
|
||||
},
|
||||
"& .MuiSwitch-thumb": {
|
||||
boxShadow: "none",
|
||||
width: 12,
|
||||
height: 12,
|
||||
margin: 4,
|
||||
},
|
||||
}));
|
||||
|
||||
export default BasicSwitch;
|
87
app/vmui/packages/vmui/src/theme/theme.ts
Normal file
87
app/vmui/packages/vmui/src/theme/theme.ts
Normal file
|
@ -0,0 +1,87 @@
|
|||
import {createTheme} from "@mui/material/styles";
|
||||
|
||||
const THEME = createTheme({
|
||||
palette: {
|
||||
primary: {
|
||||
main: "#3F51B5"
|
||||
},
|
||||
secondary: {
|
||||
main: "#F50057"
|
||||
},
|
||||
error: {
|
||||
main: "#FF4141"
|
||||
}
|
||||
},
|
||||
components: {
|
||||
MuiFormHelperText: {
|
||||
styleOverrides: {
|
||||
root: {
|
||||
position: "absolute",
|
||||
top: "36px",
|
||||
left: "2px",
|
||||
margin: 0,
|
||||
}
|
||||
}
|
||||
},
|
||||
MuiInputLabel: {
|
||||
styleOverrides: {
|
||||
root: {
|
||||
fontSize: "12px",
|
||||
letterSpacing: "normal",
|
||||
lineHeight: "1"
|
||||
}
|
||||
}
|
||||
},
|
||||
MuiInputBase: {
|
||||
styleOverrides: {
|
||||
"root": {
|
||||
"&.Mui-focused fieldset": {
|
||||
"borderWidth": "1px !important"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
MuiSwitch: {
|
||||
defaultProps: {
|
||||
color: "secondary"
|
||||
}
|
||||
},
|
||||
MuiAccordion: {
|
||||
styleOverrides: {
|
||||
root: {
|
||||
boxShadow: "rgba(0, 0, 0, 0.16) 0px 1px 4px;"
|
||||
},
|
||||
},
|
||||
},
|
||||
MuiPaper: {
|
||||
styleOverrides: {
|
||||
elevation3: {
|
||||
boxShadow: "rgba(0, 0, 0, 0.2) 0px 3px 8px;"
|
||||
},
|
||||
},
|
||||
},
|
||||
MuiIconButton: {
|
||||
defaultProps: {
|
||||
size: "large",
|
||||
},
|
||||
styleOverrides: {
|
||||
sizeLarge: {
|
||||
borderRadius: "20%",
|
||||
height: "40px",
|
||||
width: "41px"
|
||||
},
|
||||
sizeMedium: {
|
||||
borderRadius: "20%",
|
||||
},
|
||||
sizeSmall: {
|
||||
borderRadius: "20%",
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
typography: {
|
||||
"fontSize": 10
|
||||
}
|
||||
});
|
||||
|
||||
export default THEME;
|
|
@ -24,8 +24,13 @@ export interface DataSeries extends MetricBase{
|
|||
values: DataValue[]; // sorted by key which is timestamp
|
||||
}
|
||||
|
||||
|
||||
export interface InstantDataSeries {
|
||||
metadata: string[]; // just ordered columns
|
||||
value: string;
|
||||
}
|
||||
|
||||
export enum ErrorTypes {
|
||||
emptyServer = "Please enter Server URL",
|
||||
validServer = "Please provide a valid Server URL",
|
||||
validQuery = "Please enter a valid Query and execute it"
|
||||
}
|
|
@ -2,10 +2,9 @@ import qs from "qs";
|
|||
import get from "lodash.get";
|
||||
|
||||
const stateToUrlParams = {
|
||||
"query": "g0.expr",
|
||||
"time.duration": "g0.range_input",
|
||||
"time.period.date": "g0.end_input",
|
||||
"time.period.step": "g0.step_input",
|
||||
"time.duration": "range_input",
|
||||
"time.period.date": "end_input",
|
||||
"time.period.step": "step_input",
|
||||
"displayType": "tab"
|
||||
};
|
||||
|
||||
|
@ -39,15 +38,19 @@ export const setQueryStringWithoutPageReload = (qsValue: string): void => {
|
|||
|
||||
export const setQueryStringValue = (newValue: Record<string, unknown>): void => {
|
||||
const queryMap = new Map(Object.entries(stateToUrlParams));
|
||||
const query = get(newValue, "query", "") as string[];
|
||||
const newQsValue: string[] = [];
|
||||
queryMap.forEach((queryKey, stateKey) => {
|
||||
// const queryKeyEncoded = encodeURIComponent(queryKey);
|
||||
const value = get(newValue, stateKey, "") as string;
|
||||
if (value) {
|
||||
const valueEncoded = encodeURIComponent(value);
|
||||
newQsValue.push(`${queryKey}=${valueEncoded}`);
|
||||
}
|
||||
query.forEach((q, i) => {
|
||||
queryMap.forEach((queryKey, stateKey) => {
|
||||
const value = get(newValue, stateKey, "") as string;
|
||||
if (value) {
|
||||
const valueEncoded = encodeURIComponent(value);
|
||||
newQsValue.push(`g${i}.${queryKey}=${valueEncoded}`);
|
||||
}
|
||||
});
|
||||
newQsValue.push(`g${i}.expr=${q}`);
|
||||
});
|
||||
|
||||
setQueryStringWithoutPageReload(newQsValue.join("&"));
|
||||
};
|
||||
|
||||
|
@ -59,3 +62,10 @@ export const getQueryStringValue = (
|
|||
const values = qs.parse(queryString, { ignoreQueryPrefix: true });
|
||||
return get(values, key, defaultValue || "");
|
||||
};
|
||||
|
||||
export const getQueryArray = (): string[] => {
|
||||
const queryLength = window.location.search.match(/g\d+.expr/gmi)?.length || 1;
|
||||
return new Array(queryLength).fill(1).map((q, i) => {
|
||||
return getQueryStringValue(`g${i}.expr`, "") as string;
|
||||
});
|
||||
};
|
||||
|
|
|
@ -1,147 +0,0 @@
|
|||
import uPlot, {Series as uPlotSeries, Series} from "uplot";
|
||||
import {getColorFromString} from "./color";
|
||||
import dayjs from "dayjs";
|
||||
import {MetricResult} from "../api/types";
|
||||
import {LegendItem} from "../components/Legend/Legend";
|
||||
import {getNameForMetric} from "./metric";
|
||||
import {getMaxFromArray, getMinFromArray} from "./math";
|
||||
import {roundTimeSeconds} from "./time";
|
||||
import numeral from "numeral";
|
||||
|
||||
interface SetupTooltip {
|
||||
u: uPlot,
|
||||
metrics: MetricResult[],
|
||||
series: Series[],
|
||||
tooltip: HTMLDivElement,
|
||||
tooltipOffset: {left: number, top: number},
|
||||
tooltipIdx: {seriesIdx: number, dataIdx: number}
|
||||
}
|
||||
|
||||
interface HideSeriesArgs {
|
||||
hideSeries: string[],
|
||||
label: string,
|
||||
metaKey: boolean,
|
||||
series: Series[]
|
||||
}
|
||||
|
||||
interface DragArgs {
|
||||
e: MouseEvent,
|
||||
u: uPlot,
|
||||
factor: number,
|
||||
setPanning: (enable: boolean) => void,
|
||||
setPlotScale: ({u, min, max}: {u: uPlot, min: number, max: number}) => void
|
||||
}
|
||||
|
||||
const stub = (): null => null;
|
||||
|
||||
export const defaultOptions = {
|
||||
height: 500,
|
||||
legend: { show: false },
|
||||
axes: [
|
||||
{ space: 80 },
|
||||
{
|
||||
show: true,
|
||||
font: "10px Arial",
|
||||
values: (self: uPlot, ticks: number[]): (string | number)[] => ticks.map(n => n > 1000 ? numeral(n).format("0.0a") : n)
|
||||
}
|
||||
],
|
||||
cursor: {
|
||||
drag: { x: false, y: false },
|
||||
focus: { prox: 30 },
|
||||
bind: { mouseup: stub, mousedown: stub, click: stub, dblclick: stub, mouseenter: stub }
|
||||
},
|
||||
};
|
||||
|
||||
export const setTooltip = ({ u, tooltipIdx, metrics, series, tooltip, tooltipOffset }: SetupTooltip) : void => {
|
||||
const {seriesIdx, dataIdx} = tooltipIdx;
|
||||
const dataSeries = u.data[seriesIdx][dataIdx];
|
||||
const dataTime = u.data[0][dataIdx];
|
||||
const metric = metrics[seriesIdx - 1]?.metric || {};
|
||||
const color = getColorFromString(series[seriesIdx].label || "");
|
||||
|
||||
const {width, height} = u.over.getBoundingClientRect();
|
||||
const top = u.valToPos((dataSeries || 0), "y");
|
||||
const lft = u.valToPos(dataTime, "x");
|
||||
const {width: tooltipWidth, height: tooltipHeight} = tooltip.getBoundingClientRect();
|
||||
const overflowX = lft + tooltipWidth >= width;
|
||||
const overflowY = top + tooltipHeight >= height;
|
||||
|
||||
tooltip.style.display = "grid";
|
||||
tooltip.style.top = `${tooltipOffset.top + top + 10 - (overflowY ? tooltipHeight + 10 : 0)}px`;
|
||||
tooltip.style.left = `${tooltipOffset.left + lft + 10 - (overflowX ? tooltipWidth + 20 : 0)}px`;
|
||||
const date = dayjs(new Date(dataTime * 1000)).format("YYYY-MM-DD HH:mm:ss:SSS (Z)");
|
||||
const info = Object.keys(metric).filter(k => k !== "__name__").map(k => `<div><b>${k}</b>: ${metric[k]}</div>`).join("");
|
||||
const marker = `<div class="u-tooltip__marker" style="background: ${color}"></div>`;
|
||||
tooltip.innerHTML = `<div>${date}</div>
|
||||
<div class="u-tooltip-data">
|
||||
${marker}${metric.__name__ || ""}: <b class="u-tooltip-data__value">${dataSeries}</b>
|
||||
</div>
|
||||
<div class="u-tooltip__info">${info}</div>`;
|
||||
};
|
||||
|
||||
export const getHideSeries = ({hideSeries, label, metaKey, series}: HideSeriesArgs): string[] => {
|
||||
const include = hideSeries.includes(label);
|
||||
const labels = series.map(s => s.label || "").filter(l => l);
|
||||
if (metaKey && include) {
|
||||
return [...labels.filter(l => l !== label)];
|
||||
} else if (metaKey && !include) {
|
||||
return hideSeries.length === series.length - 2 ? [] : [...labels.filter(l => l !== label)];
|
||||
}
|
||||
return include ? hideSeries.filter(l => l !== label) : [...hideSeries, label];
|
||||
};
|
||||
|
||||
export const getTimeSeries = (times: number[]): number[] => {
|
||||
const allTimes = Array.from(new Set(times)).sort((a,b) => a-b);
|
||||
const step = getMinFromArray(allTimes.map((t, i) => allTimes[i + 1] - t));
|
||||
const length = allTimes.length;
|
||||
const startTime = allTimes[0] || 0;
|
||||
return new Array(length).fill(startTime).map((d, i) => roundTimeSeconds(d + (step * i)));
|
||||
};
|
||||
|
||||
export const getLimitsYAxis = (values: number[]): [number, number] => {
|
||||
const min = getMinFromArray(values);
|
||||
const max = getMaxFromArray(values);
|
||||
return [min - (min * 0.05), max + (max * 0.05)];
|
||||
};
|
||||
|
||||
export const getSeriesItem = (d: MetricResult, hideSeries: string[]): Series => {
|
||||
const label = getNameForMetric(d);
|
||||
return {
|
||||
label,
|
||||
width: 1.5,
|
||||
stroke: getColorFromString(label),
|
||||
show: !hideSeries.includes(label),
|
||||
scale: "y"
|
||||
};
|
||||
};
|
||||
|
||||
export const getLegendItem = (s: uPlotSeries): LegendItem => ({
|
||||
label: s.label || "",
|
||||
color: s.stroke as string,
|
||||
checked: s.show || false
|
||||
});
|
||||
|
||||
export const dragChart = ({e, factor = 0.85, u, setPanning, setPlotScale}: DragArgs): void => {
|
||||
if (e.button !== 0) return;
|
||||
e.preventDefault();
|
||||
setPanning(true);
|
||||
const leftStart = e.clientX;
|
||||
const xUnitsPerPx = u.posToVal(1, "x") - u.posToVal(0, "x");
|
||||
const scXMin = u.scales.x.min || 0;
|
||||
const scXMax = u.scales.x.max || 0;
|
||||
|
||||
const mouseMove = (e: MouseEvent) => {
|
||||
e.preventDefault();
|
||||
const dx = xUnitsPerPx * ((e.clientX - leftStart) * factor);
|
||||
setPlotScale({u, min: scXMin - dx, max: scXMax - dx});
|
||||
};
|
||||
|
||||
const mouseUp = () => {
|
||||
setPanning(false);
|
||||
document.removeEventListener("mousemove", mouseMove);
|
||||
document.removeEventListener("mouseup", mouseUp);
|
||||
};
|
||||
|
||||
document.addEventListener("mousemove", mouseMove);
|
||||
document.addEventListener("mouseup", mouseUp);
|
||||
};
|
30
app/vmui/packages/vmui/src/utils/uplot/axes.ts
Normal file
30
app/vmui/packages/vmui/src/utils/uplot/axes.ts
Normal file
|
@ -0,0 +1,30 @@
|
|||
import {Axis, Series} from "uplot";
|
||||
import {getMaxFromArray, getMinFromArray} from "../math";
|
||||
import {roundTimeSeconds} from "../time";
|
||||
import {AxisRange} from "../../state/graph/reducer";
|
||||
import {formatTicks} from "./helpers";
|
||||
|
||||
export const getAxes = (series: Series[]): Axis[] => Array.from(new Set(series.map(s => s.scale))).map(a => {
|
||||
const axis = {scale: a, show: true, font: "10px Arial", values: formatTicks};
|
||||
if (!a) return {space: 80};
|
||||
if (!(Number(a) % 2)) return {...axis, side: 1};
|
||||
return axis;
|
||||
});
|
||||
|
||||
export const getTimeSeries = (times: number[]): number[] => {
|
||||
const allTimes = Array.from(new Set(times)).sort((a, b) => a - b);
|
||||
const step = getMinFromArray(allTimes.map((t, i) => allTimes[i + 1] - t));
|
||||
const startTime = allTimes[0] || 0;
|
||||
return new Array(allTimes.length).fill(startTime).map((d, i) => roundTimeSeconds(d + (step * i)));
|
||||
};
|
||||
|
||||
export const getLimitsYAxis = (values: { [key: string]: number[] }): AxisRange => {
|
||||
const result: AxisRange = {};
|
||||
for (const key in values) {
|
||||
const numbers = values[key];
|
||||
const min = getMinFromArray(numbers);
|
||||
const max = getMaxFromArray(numbers);
|
||||
result[key] = [min - (min * 0.05), max + (max * 0.05)];
|
||||
}
|
||||
return result;
|
||||
};
|
25
app/vmui/packages/vmui/src/utils/uplot/events.ts
Normal file
25
app/vmui/packages/vmui/src/utils/uplot/events.ts
Normal file
|
@ -0,0 +1,25 @@
|
|||
import {DragArgs} from "./types";
|
||||
|
||||
export const dragChart = ({e, factor = 0.85, u, setPanning, setPlotScale}: DragArgs): void => {
|
||||
if (e.button !== 0) return;
|
||||
e.preventDefault();
|
||||
setPanning(true);
|
||||
const leftStart = e.clientX;
|
||||
const xUnitsPerPx = u.posToVal(1, "x") - u.posToVal(0, "x");
|
||||
const scXMin = u.scales.x.min || 0;
|
||||
const scXMax = u.scales.x.max || 0;
|
||||
|
||||
const mouseMove = (e: MouseEvent) => {
|
||||
e.preventDefault();
|
||||
const dx = xUnitsPerPx * ((e.clientX - leftStart) * factor);
|
||||
setPlotScale({u, min: scXMin - dx, max: scXMax - dx});
|
||||
};
|
||||
const mouseUp = () => {
|
||||
setPanning(false);
|
||||
document.removeEventListener("mousemove", mouseMove);
|
||||
document.removeEventListener("mouseup", mouseUp);
|
||||
};
|
||||
|
||||
document.addEventListener("mousemove", mouseMove);
|
||||
document.addEventListener("mouseup", mouseUp);
|
||||
};
|
34
app/vmui/packages/vmui/src/utils/uplot/helpers.ts
Normal file
34
app/vmui/packages/vmui/src/utils/uplot/helpers.ts
Normal file
|
@ -0,0 +1,34 @@
|
|||
import uPlot from "uplot";
|
||||
import numeral from "numeral";
|
||||
import {getColorFromString} from "../color";
|
||||
|
||||
export const defaultOptions = {
|
||||
height: 500,
|
||||
legend: {
|
||||
show: false
|
||||
},
|
||||
cursor: {
|
||||
drag: {
|
||||
x: false,
|
||||
y: false
|
||||
},
|
||||
focus: {
|
||||
prox: 30
|
||||
},
|
||||
bind: {
|
||||
mouseup: (): null => null,
|
||||
mousedown: (): null => null,
|
||||
click: (): null => null,
|
||||
dblclick: (): null => null,
|
||||
mouseenter: (): null => null
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
export const formatTicks = (u: uPlot, ticks: number[]): (string | number)[] => {
|
||||
return ticks.map(n => n > 1000 ? numeral(n).format("0.0a") : n);
|
||||
};
|
||||
|
||||
export const getColorLine = (scale: number, label: string): string => getColorFromString(`${scale}${label}`);
|
||||
|
||||
export const getDashLine = (group: number): number[] => group <= 1 ? [] : [group*4, group*1.2];
|
41
app/vmui/packages/vmui/src/utils/uplot/series.ts
Normal file
41
app/vmui/packages/vmui/src/utils/uplot/series.ts
Normal file
|
@ -0,0 +1,41 @@
|
|||
import {MetricResult} from "../../api/types";
|
||||
import {Series} from "uplot";
|
||||
import {getNameForMetric} from "../metric";
|
||||
import {LegendItem} from "./types";
|
||||
import {getColorLine, getDashLine} from "./helpers";
|
||||
import {HideSeriesArgs} from "./types";
|
||||
|
||||
export const getSeriesItem = (d: MetricResult, hideSeries: string[]): Series => {
|
||||
const label = getNameForMetric(d);
|
||||
return {
|
||||
label,
|
||||
dash: getDashLine(d.group),
|
||||
width: 1.5,
|
||||
stroke: getColorLine(d.group, label),
|
||||
show: !includesHideSeries(label, d.group, hideSeries),
|
||||
scale: String(d.group)
|
||||
};
|
||||
};
|
||||
|
||||
export const getLegendItem = (s: Series, group: number): LegendItem => ({
|
||||
group,
|
||||
label: s.label || "",
|
||||
color: s.stroke as string,
|
||||
checked: s.show || false
|
||||
});
|
||||
|
||||
export const getHideSeries = ({hideSeries, legend, metaKey, series}: HideSeriesArgs): string[] => {
|
||||
const label = `${legend.group}.${legend.label}`;
|
||||
const include = includesHideSeries(legend.label, legend.group, hideSeries);
|
||||
const labels = series.map(s => `${s.scale}.${s.label}`);
|
||||
if (metaKey && include) {
|
||||
return [...labels.filter(l => l !== label)];
|
||||
} else if (metaKey && !include) {
|
||||
return hideSeries.length >= series.length - 1 ? [] : [...labels.filter(l => l !== label)];
|
||||
}
|
||||
return include ? hideSeries.filter(l => l !== label) : [...hideSeries, label];
|
||||
};
|
||||
|
||||
export const includesHideSeries = (label: string, group: string | number, hideSeries: string[]): boolean => {
|
||||
return hideSeries.includes(`${group}.${label}`);
|
||||
};
|
30
app/vmui/packages/vmui/src/utils/uplot/tooltip.ts
Normal file
30
app/vmui/packages/vmui/src/utils/uplot/tooltip.ts
Normal file
|
@ -0,0 +1,30 @@
|
|||
import dayjs from "dayjs";
|
||||
import {SetupTooltip} from "./types";
|
||||
import {getColorLine} from "./helpers";
|
||||
|
||||
export const setTooltip = ({u, tooltipIdx, metrics, series, tooltip, tooltipOffset}: SetupTooltip): void => {
|
||||
const {seriesIdx, dataIdx} = tooltipIdx;
|
||||
const dataSeries = u.data[seriesIdx][dataIdx];
|
||||
const dataTime = u.data[0][dataIdx];
|
||||
const metric = metrics[seriesIdx - 1]?.metric || {};
|
||||
const color = getColorLine(Number(series[seriesIdx].scale || 0), series[seriesIdx].label || "");
|
||||
|
||||
const {width, height} = u.over.getBoundingClientRect();
|
||||
const top = u.valToPos((dataSeries || 0), series[seriesIdx]?.scale || "1");
|
||||
const lft = u.valToPos(dataTime, "x");
|
||||
const {width: tooltipWidth, height: tooltipHeight} = tooltip.getBoundingClientRect();
|
||||
const overflowX = lft + tooltipWidth >= width;
|
||||
const overflowY = top + tooltipHeight >= height;
|
||||
|
||||
tooltip.style.display = "grid";
|
||||
tooltip.style.top = `${tooltipOffset.top + top + 10 - (overflowY ? tooltipHeight + 10 : 0)}px`;
|
||||
tooltip.style.left = `${tooltipOffset.left + lft + 10 - (overflowX ? tooltipWidth + 20 : 0)}px`;
|
||||
const date = dayjs(new Date(dataTime * 1000)).format("YYYY-MM-DD HH:mm:ss:SSS (Z)");
|
||||
const info = Object.keys(metric).filter(k => k !== "__name__").map(k => `<div><b>${k}</b>: ${metric[k]}</div>`).join("");
|
||||
const marker = `<div class="u-tooltip__marker" style="background: ${color}"></div>`;
|
||||
tooltip.innerHTML = `<div>${date}</div>
|
||||
<div class="u-tooltip-data">
|
||||
${marker}${metric.__name__ || ""}: <b class="u-tooltip-data__value">${dataSeries}</b>
|
||||
</div>
|
||||
<div class="u-tooltip__info">${info}</div>`;
|
||||
};
|
39
app/vmui/packages/vmui/src/utils/uplot/types.ts
Normal file
39
app/vmui/packages/vmui/src/utils/uplot/types.ts
Normal file
|
@ -0,0 +1,39 @@
|
|||
import uPlot, {Series} from "uplot";
|
||||
import {MetricResult} from "../../api/types";
|
||||
|
||||
export interface SetupTooltip {
|
||||
u: uPlot,
|
||||
metrics: MetricResult[],
|
||||
series: Series[],
|
||||
tooltip: HTMLDivElement,
|
||||
tooltipOffset: {
|
||||
left: number,
|
||||
top: number
|
||||
},
|
||||
tooltipIdx: {
|
||||
seriesIdx: number,
|
||||
dataIdx: number
|
||||
}
|
||||
}
|
||||
|
||||
export interface HideSeriesArgs {
|
||||
hideSeries: string[],
|
||||
legend: LegendItem,
|
||||
metaKey: boolean,
|
||||
series: Series[]
|
||||
}
|
||||
|
||||
export interface DragArgs {
|
||||
e: MouseEvent,
|
||||
u: uPlot,
|
||||
factor: number,
|
||||
setPanning: (enable: boolean) => void,
|
||||
setPlotScale: ({u, min, max}: { u: uPlot, min: number, max: number }) => void
|
||||
}
|
||||
|
||||
export interface LegendItem {
|
||||
group: number;
|
||||
label: string;
|
||||
color: string;
|
||||
checked: boolean;
|
||||
}
|
|
@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics
|
|||
|
||||
ROOT_IMAGE ?= alpine:3.15.0
|
||||
CERTS_IMAGE := alpine:3.15.0
|
||||
GO_BUILDER_IMAGE := golang:1.17.3-alpine
|
||||
GO_BUILDER_IMAGE := golang:1.17.5-alpine
|
||||
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)
|
||||
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
|
||||
|
||||
|
|
|
@ -39,7 +39,7 @@ services:
|
|||
restart: always
|
||||
grafana:
|
||||
container_name: grafana
|
||||
image: grafana/grafana:8.2.2
|
||||
image: grafana/grafana:8.3.2
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
|
|
@ -6,6 +6,21 @@ sort: 15
|
|||
|
||||
## tip
|
||||
|
||||
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add ability to analyze the correlation between two queries on a single graph. Just click `+Query` button, enter the second query in the newly appeared input field and press `Ctrl+Enter`. Results for both queries should be displayed simultaneously on the same graph. Every query has its own vertical scale, which is displayed on the left and the right side of the graph. Lines for the second query are dashed. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1916).
|
||||
* FEATURE: accept optional `extra_filters[]=series_selector` query args at Prometheus query APIs additionally to `extra_label` query args. This allows enforcing additional filters for all the Prometheus query APIs by using [vmgateway](https://docs.victoriametrics.com/vmgateway.html) or [vmauth](https://docs.victoriametrics.com/vmauth.html). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1863).
|
||||
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): allow specifying `http` and `https` urls in `-auth.config` command-line flag. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1898). Thanks for @TFM93 .
|
||||
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow specifying `http` and `https` urls in the following command-line flags: `-promscrape.config`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`.
|
||||
* FEATURE: vminsert: allow specifying `http` and `https` urls in `-relabelConfig` command-line flag.
|
||||
* FEATURE: vminsert: add `-maxLabelValueLen` command-line flag for the ability to configure the maximum length of label value. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1908).
|
||||
* FEATURE: preserve the order of time series passed to [limit_offset](https://docs.victoriametrics.com/MetricsQL.html#limit_offset) function. This allows implementing series paging via `limit_offset(limit, offset, sort_by_label(...))`. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1920) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/951) issues.
|
||||
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add ability to override the interval between returned datapoints. By default it is automatically calculated depending on the selected time range and horizontal resolution of the graph. Now it is possible to override it with custom values. This may be useful during data exploration and debugging.
|
||||
* FEATURE: automaticall convert `(value1|...|valueN)` into `{value1,...,valueN}` inside `__graphite__` pseudo-label. This allows using [Grafana multi-value template variables](https://grafana.com/docs/grafana/latest/variables/formatting-multi-value-variables/) inside `__graphite__` pseudo-label. For example, `{__graphite__=~"foo.($bar)"}` is expanded to `{__graphite__=~"foo.{x,y}"}` if both `x` and `y` are selected for `$bar` template variable. See [these docs](https://docs.victoriametrics.com/#selecting-graphite-metrics) for details.
|
||||
|
||||
* BUGFIX: fix `unaligned 64-bit atomic operation` panic on 32-bit architectures, which has been introduced in v1.70.0. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1944).
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): restore the ability to use `$labels.alertname` in labels templating. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1921).
|
||||
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): add missing `query` caption to the input field for the query. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1900).
|
||||
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix navigation over query history with `Ctrl+up/down` and fix zoom relatively to the cursor position. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1936).
|
||||
|
||||
|
||||
## [v1.70.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.70.0)
|
||||
|
||||
|
@ -14,7 +29,7 @@ sort: 15
|
|||
* FEATURE: vmauth: allow using optional `name` field in configs. This field is then used as `username` label value for `vmauth_user_requests_total` metric. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1805).
|
||||
* FEATURE: vmagent: export `vm_persistentqueue_read_duration_seconds_total` and `vm_persistentqueue_write_duration_seconds_total` metrics, which can be used for detecting persistent queue saturation with `rate(vm_persistentqueue_write_duration_seconds_total) > 0.9` alerting rule.
|
||||
* FEATURE: export `vm_filestream_read_duration_seconds_total` and `vm_filestream_write_duration_seconds_total` metrics, which can be used for detecting persistent disk saturation with `rate(vm_filestream_read_duration_seconds_total) > 0.9` alerting rule.
|
||||
* FEATURE: export `vm_cache_size_max_bytes` metrics, which show capacity for various caches. These metrics can be used for determining caches reaches its capacity with `vm_cache_size_bytes / vm_cache_size_max_bytes > 0.9` query.
|
||||
* FEATURE: export `vm_cache_size_max_bytes` metrics, which show capacity for various caches. These metrics can be used for determining caches with reach its capacity with `vm_cache_size_bytes / vm_cache_size_max_bytes > 0.9` query.
|
||||
* FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html), [vmrestore](https://docs.victoriametrics.com/vmrestore.html): add `-s3ForcePathStyle` command-line flag, which can be used for making backups to [Aliyun OSS](https://www.aliyun.com/product/oss). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1802).
|
||||
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): improve data migration from OpenTSDB. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1809). Thanks to @johnseekins .
|
||||
* FEATURE: suppress `connection reset by peer` errors when remote client resets TCP connection to VictoriaMetrics / vmagent while ingesting the data via InfluxDB line protocol, Graphite protocol or OpenTSDB protocol. This error is expected, so there is no need in logging it.
|
||||
|
@ -22,7 +37,7 @@ sort: 15
|
|||
* FEATURE: vmalert: make `-notifier.url` command-line flag optional. This flag can be omitted if `vmalert` is used solely for recording rules and doesn't evaluate alerting rules. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1870).
|
||||
* FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html), [vmrestore](https://docs.victoriametrics.com/vmrestore.html): export internal metrics at `http://vmbackup:8420/metrics` and `http://vmrestore:8421/metrics` for better visibility of the backup/restore process.
|
||||
* FEATURE: allow trailing whitespace after the timestamp when [parsing Graphite plaintext lines](https://docs.victoriametrics.com/#how-to-send-data-from-graphite-compatible-agents-such-as-statsd). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1865).
|
||||
* FEATURE: expose `/-/healthy` and `/-/ready` endpoints as Prometheus does. This is needed for improving integration with third-party solutions, which rely on these endpoints. See [tis issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1833).
|
||||
* FEATURE: expose `/-/healthy` and `/-/ready` endpoints as Prometheus does. This is needed for improving integration with third-party solutions, which rely on these endpoints. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1833).
|
||||
|
||||
* BUGFIX: vmagent: prevent from scraping duplicate targets if `-promscrape.dropOriginalLabels` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1830). Thanks to @guidao for the fix.
|
||||
* BUGFIX: vmstorage [enterprise](https://victoriametrics.com/enterprise.html): added missing `vm_tenant_used_tenant_bytes` metric, which shows the approximate per-tenant disk usage. See [these docs](https://docs.victoriametrics.com/PerTenantStatistic.html) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1605).
|
||||
|
|
|
@ -544,8 +544,10 @@ Below is the output for `/path/to/vminsert -help`:
|
|||
-maxInsertRequestSize size
|
||||
The maximum size in bytes of a single Prometheus remote_write API request
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
||||
-maxLabelValueLen int
|
||||
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
|
||||
-maxLabelsPerTimeseries int
|
||||
The maximum number of labels accepted per time series. Superfluous labels are dropped (default 30)
|
||||
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
|
||||
-memory.allowedBytes size
|
||||
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||
|
@ -563,7 +565,7 @@ Below is the output for `/path/to/vminsert -help`:
|
|||
-opentsdbhttpTrimTimestamp duration
|
||||
Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
|
||||
-relabelConfig string
|
||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||
-relabelDebug
|
||||
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
||||
-replicationFactor int
|
||||
|
|
|
@ -30,7 +30,7 @@ MetricsQL implements [PromQL](https://medium.com/@valyala/promql-tutorial-for-be
|
|||
|
||||
This functionality can be evaluated at [an editable Grafana dashboard](https://play-grafana.victoriametrics.com/d/4ome8yJmz/node-exporter-on-victoriametrics-demo) or at your own [VictoriaMetrics instance](https://docs.victoriametrics.com/#how-to-start-victoriametrics).
|
||||
|
||||
- Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax. This is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but usually works faster and is easier to use when migrating from Graphite. VictoriaMetrics also can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details. See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
|
||||
- Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax. See [these docs](https://docs.victoriametrics.com/#selecting-graphite-metrics). VictoriaMetrics also can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details. See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
|
||||
- Lookbehind window in square brackets may be omitted. VictoriaMetrics automatically selects the lookbehind window depending on the current step used for building the graph (e.g. `step` query arg passed to [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries)). For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`. It is equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
|
||||
- [Aggregate functions](#aggregate-functions) accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point across time series returned by `q1`, `q2` and `q3`.
|
||||
- [offset](https://prometheus.io/docs/prometheus/latest/querying/basics/#offset-modifier), lookbehind window in square brackets and `step` value for [subquery](#subqueries) may refer to the current step aka `$__interval` value from Grafana with `[Ni]` syntax. For instance, `rate(metric[10i] offset 5i)` would return per-second rate over a range covering 10 previous steps with the offset of 5 steps.
|
||||
|
@ -487,6 +487,10 @@ See also [implicit query conversions](#implicit-query-conversions).
|
|||
|
||||
`keep_next_value(q)` fills gaps with the value of the next non-empty point in every time series returned by `q`. See also [keep_last_value](#keep_last_value) and [interpolate](#interpolate).
|
||||
|
||||
#### limit_offset
|
||||
|
||||
`limit_offset(limit, offset, q)` skips `offset` time series from series returned by `q` and then returns up to `limit` of the remaining time series per each group. This allows implementing simple paging for `q` time series. See also [limitk](#limitk).
|
||||
|
||||
#### ln
|
||||
|
||||
`ln(q)` calculates `ln(v)` for every point `v` of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [exp](#exp) and [log2](#log2).
|
||||
|
@ -823,11 +827,6 @@ See also [implicit query conversions](#implicit-query-conversions).
|
|||
|
||||
`histogram(q)` calculates [VictoriaMetrics histogram](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) per each group of points with the same timestamp. Useful for visualizing big number of time series via a heatmap. See [this article](https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) for more details.
|
||||
|
||||
#### limit_offset
|
||||
|
||||
`limit_offset(limit, offset, q)` skips `offset` time series from series returned by `q` and then returns up to `limit` of the remaining time series. This allows implementing simple paging for `q` time series. See also [limitk](#limitk).
|
||||
|
||||
|
||||
#### limitk
|
||||
|
||||
`limitk(k, q) by (group_labels)` returns up to `k` time series per each `group_labels` out of time series returned by `q`. The returned set of time series remains the same across calls. See also [limit_offset](#limit_offset).
|
||||
|
|
114
docs/README.md
114
docs/README.md
|
@ -13,46 +13,13 @@
|
|||
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
||||
|
||||
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
in [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), in [Snap packages](https://snapcraft.io/victoriametrics)
|
||||
and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics follow [these instructions](#how-to-start-victoriametrics).
|
||||
[Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
|
||||
and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and follow [these instructions](#how-to-start-victoriametrics).
|
||||
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
||||
|
||||
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
|
||||
See [features available in enterprise package](https://victoriametrics.com/enterprise.html).
|
||||
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||
|
||||
|
||||
## Case studies and talks
|
||||
|
||||
Case studies:
|
||||
|
||||
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
||||
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
||||
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
||||
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
||||
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
||||
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
||||
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
||||
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
||||
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
||||
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
||||
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
||||
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
||||
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
||||
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
||||
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
||||
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
||||
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
||||
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
||||
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
||||
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
||||
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
||||
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
||||
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
||||
|
||||
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/enterprise.html). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||
|
||||
|
||||
## Prominent features
|
||||
|
@ -95,6 +62,37 @@ VictoriaMetrics has the following prominent features:
|
|||
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
||||
|
||||
|
||||
## Case studies and talks
|
||||
|
||||
Case studies:
|
||||
|
||||
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
||||
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
||||
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
||||
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
||||
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
||||
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
||||
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
||||
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
||||
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
||||
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
||||
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
||||
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
||||
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
||||
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
||||
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
||||
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
||||
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
||||
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
||||
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
||||
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
||||
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
||||
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
||||
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
||||
|
||||
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
||||
|
||||
|
||||
## Operation
|
||||
|
||||
## How to start VictoriaMetrics
|
||||
|
@ -418,9 +416,15 @@ The `/api/v1/export` endpoint should return the following response:
|
|||
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
||||
|
||||
* [Graphite API](#graphite-api-usage)
|
||||
* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
||||
* [Prometheus querying API](#prometheus-querying-api-usage). See also [selecting Graphite metrics](#selecting-graphite-metrics).
|
||||
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
||||
|
||||
## Selecting Graphite metrics
|
||||
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See [docs for Graphite paths and wildcards](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
||||
|
||||
The `__graphite__` pseudo-label supports e.g. alternate regexp filters such as `(value1|...|valueN)`. They are transparently converted to `{value1,...,valueN}` syntax [used in Graphite](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). This allows using [multi-value template variables in Grafana](https://grafana.com/docs/grafana/latest/variables/formatting-multi-value-variables/) inside `__graphite__` pseudo-label. For example, Grafana expands `{__graphite__=~"foo.$bar.baz"}` into `{__graphite__=~"foo.(x|y).baz"}` if `$bar` template variable contains `x` and `y` values. In this case the query is automatically converted into `{__graphite__=~"foo.{x,y}.baz"}` before execution.
|
||||
|
||||
## How to send data from OpenTSDB-compatible agents
|
||||
|
||||
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
||||
|
@ -517,9 +521,10 @@ All the Prometheus querying API handlers can be prepended with `/prometheus` pre
|
|||
### Prometheus querying API enhancements
|
||||
|
||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
|
||||
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
|
||||
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
|
||||
in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
`/api/v1/query_range?extra_label=user_id=123&extra_label=group_id=456&query=<query>` would automatically add `{user_id="123",group_id="456"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
|
||||
VictoriaMetrics accepts optional `extra_filters[]=series_selector` query arg, which can be used for enforcing arbitrary label filters for queries. For example,
|
||||
`/api/v1/query_range?extra_filters[]={env=~"prod|staging",user="xyz"}&query=<query>` would automatically add `{env=~"prod|staging",user="xyz"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_filters[]` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
|
||||
VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
|
||||
|
@ -556,12 +561,11 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap
|
|||
|
||||
All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.
|
||||
|
||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg for all the Graphite APIs. This arg can be used for limiting the scope of time series
|
||||
visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics.
|
||||
VictoriaMetrics accepts optional query args: `extra_label=<label_name>=<label_value>` and `extra_filters[]=series_selector` query args for all the Graphite APIs. These args can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
|
||||
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
||||
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
|
||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function.
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). See [these docs](#selecting-graphite-metrics).
|
||||
|
||||
|
||||
### Graphite Render API usage
|
||||
|
@ -612,6 +616,10 @@ Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressin
|
|||
|
||||
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful to disable the response cache by clicking the `Enable cache` checkbox.
|
||||
|
||||
VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by clicking the `Override step value` checkbox.
|
||||
|
||||
VMUI allows investigating correlations between two queries on the same graph. Just click `+Query` button, enter the second query in the newly appeared input field and press `Ctrl+Enter`. Results for both queries should be displayed simultaneously on the same graph. Every query has its own vertical scale, which is displayed on the left and the right side of the graph. Lines for the second query are dashed.
|
||||
|
||||
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
|
||||
|
||||
|
||||
|
@ -1025,6 +1033,7 @@ VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-sc
|
|||
|
||||
VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
|
||||
to a file containing a list of [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) entries.
|
||||
The `-relabelConfig` can also point to an http or https URL. For example, `-relabelConfig=https://config-server/relabel_config.yml`.
|
||||
See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
|
||||
|
||||
Example contents for `-relabelConfig` file:
|
||||
|
@ -1217,7 +1226,8 @@ Consider setting the following command-line flags:
|
|||
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
||||
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
||||
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
||||
* `-configAuthKey` for pretecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||
* `-configAuthKey` for protecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||
- `-pprofAuthKey` for protecting `/debug/pprof/*` endpoints, which can be used for [profiling](#profiling).
|
||||
|
||||
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
||||
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
||||
|
@ -1372,9 +1382,7 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
|
|||
This prevents from ingesting metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total`
|
||||
metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload.
|
||||
|
||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then use `{__graphite__="foo.*.baz"}` syntax for selecting such metrics.
|
||||
This expression is equivalent to `{__name__=~"foo[.][^.]*[.]baz"}`, but it works faster and it is easier to use when migrating from Graphite.
|
||||
See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function, which allows extracting the given groups from Graphite metric names.
|
||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then `{__graphite__="foo.*.baz"}` filter can be used for selecting such metrics. See [these docs](#selecting-graphite-metrics) for details.
|
||||
|
||||
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
||||
|
||||
|
@ -1493,9 +1501,11 @@ Contact us with any questions regarding VictoriaMetrics at [info@victoriametrics
|
|||
Feel free to ask any questions regarding VictoriaMetrics:
|
||||
|
||||
* [slack](https://slack.victoriametrics.com/)
|
||||
* [linkedin](https://www.linkedin.com/company/victoriametrics/)
|
||||
* [reddit](https://www.reddit.com/r/VictoriaMetrics/)
|
||||
* [telegram-en](https://t.me/VictoriaMetrics_en)
|
||||
* [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
||||
* [articles and talks about VictoriaMetrics in Russian](https://github.com/denisgolius/victoriametrics-ru-links)
|
||||
* [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
||||
|
||||
If you like VictoriaMetrics and want to contribute, then we need the following:
|
||||
|
@ -1650,8 +1660,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-maxInsertRequestSize size
|
||||
The maximum size in bytes of a single Prometheus remote_write API request
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
||||
-maxLabelValueLen int
|
||||
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
|
||||
-maxLabelsPerTimeseries int
|
||||
The maximum number of labels accepted per time series. Superfluous labels are dropped (default 30)
|
||||
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
|
||||
-memory.allowedBytes size
|
||||
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||
|
@ -1681,7 +1693,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-promscrape.cluster.replicationFactor int
|
||||
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
||||
-promscrape.config string
|
||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. The path can point to local file and to http url. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||
-promscrape.config.dryRun
|
||||
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
||||
-promscrape.config.strictParse
|
||||
|
@ -1748,7 +1760,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-promscrape.suppressScrapeErrors
|
||||
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
|
||||
-relabelConfig string
|
||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||
-relabelDebug
|
||||
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
||||
-retentionPeriod value
|
||||
|
|
|
@ -31,10 +31,12 @@ sort: 17
|
|||
|
||||
### Public Announcement
|
||||
|
||||
1. Publish message in slack (victoriametrics.slack.com, general channel)
|
||||
2. Post twit with release notes URL
|
||||
3. Post in subreddit https://www.reddit.com/r/VictoriaMetrics/
|
||||
4. Post in linkedin
|
||||
- Publish message in Slack at https://victoriametrics.slack.com
|
||||
- Post at Twitter at https://twitter.com/MetricsVictoria
|
||||
- Post in Reddit at https://www.reddit.com/r/VictoriaMetrics/
|
||||
- Post in Linkedin at https://www.linkedin.com/company/victoriametrics/
|
||||
- Publish message in Telegram at https://t.me/VictoriaMetrics_en and https://t.me/VictoriaMetrics_ru1
|
||||
- Publish message in google groups at https://groups.google.com/forum/#!forum/victorametrics-users
|
||||
|
||||
## Helm Charts
|
||||
|
||||
|
|
|
@ -17,46 +17,13 @@ sort: 1
|
|||
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
||||
|
||||
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
in [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), in [Snap packages](https://snapcraft.io/victoriametrics)
|
||||
and in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics follow [these instructions](#how-to-start-victoriametrics).
|
||||
[Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
|
||||
and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and follow [these instructions](#how-to-start-victoriametrics).
|
||||
Then read [Prometheus setup](#prometheus-setup) and [Grafana setup](#grafana-setup) docs.
|
||||
|
||||
Cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
|
||||
See [features available in enterprise package](https://victoriametrics.com/enterprise.html).
|
||||
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||
|
||||
|
||||
## Case studies and talks
|
||||
|
||||
Case studies:
|
||||
|
||||
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
||||
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
||||
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
||||
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
||||
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
||||
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
||||
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
||||
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
||||
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
||||
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
||||
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
||||
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
||||
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
||||
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
||||
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
||||
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
||||
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
||||
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
||||
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
||||
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
||||
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
||||
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
||||
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
||||
|
||||
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. See [features available in enterprise package](https://victoriametrics.com/enterprise.html). Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||
|
||||
|
||||
## Prominent features
|
||||
|
@ -99,6 +66,37 @@ VictoriaMetrics has the following prominent features:
|
|||
See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
|
||||
|
||||
|
||||
## Case studies and talks
|
||||
|
||||
Case studies:
|
||||
|
||||
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
|
||||
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
|
||||
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
|
||||
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
|
||||
* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
|
||||
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
|
||||
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
|
||||
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
|
||||
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
|
||||
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
|
||||
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
|
||||
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
|
||||
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
|
||||
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
|
||||
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
|
||||
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
|
||||
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
|
||||
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
|
||||
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
|
||||
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
|
||||
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
|
||||
* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
|
||||
* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
|
||||
|
||||
See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
|
||||
|
||||
|
||||
## Operation
|
||||
|
||||
## How to start VictoriaMetrics
|
||||
|
@ -422,9 +420,15 @@ The `/api/v1/export` endpoint should return the following response:
|
|||
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via the following APIs:
|
||||
|
||||
* [Graphite API](#graphite-api-usage)
|
||||
* [Prometheus querying API](#prometheus-querying-api-usage). VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
||||
* [Prometheus querying API](#prometheus-querying-api-usage). See also [selecting Graphite metrics](#selecting-graphite-metrics).
|
||||
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml)
|
||||
|
||||
## Selecting Graphite metrics
|
||||
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for selecting time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See [docs for Graphite paths and wildcards](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). VictoriaMetrics also supports [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function for extracting the given groups from Graphite metric name.
|
||||
|
||||
The `__graphite__` pseudo-label supports e.g. alternate regexp filters such as `(value1|...|valueN)`. They are transparently converted to `{value1,...,valueN}` syntax [used in Graphite](https://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards). This allows using [multi-value template variables in Grafana](https://grafana.com/docs/grafana/latest/variables/formatting-multi-value-variables/) inside `__graphite__` pseudo-label. For example, Grafana expands `{__graphite__=~"foo.$bar.baz"}` into `{__graphite__=~"foo.(x|y).baz"}` if `$bar` template variable contains `x` and `y` values. In this case the query is automatically converted into `{__graphite__=~"foo.{x,y}.baz"}` before execution.
|
||||
|
||||
## How to send data from OpenTSDB-compatible agents
|
||||
|
||||
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
||||
|
@ -521,9 +525,10 @@ All the Prometheus querying API handlers can be prepended with `/prometheus` pre
|
|||
### Prometheus querying API enhancements
|
||||
|
||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
|
||||
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
|
||||
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
|
||||
in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
`/api/v1/query_range?extra_label=user_id=123&extra_label=group_id=456&query=<query>` would automatically add `{user_id="123",group_id="456"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
|
||||
VictoriaMetrics accepts optional `extra_filters[]=series_selector` query arg, which can be used for enforcing arbitrary label filters for queries. For example,
|
||||
`/api/v1/query_range?extra_filters[]={env=~"prod|staging",user="xyz"}&query=<query>` would automatically add `{env=~"prod|staging",user="xyz"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_filters[]` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
|
||||
VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
|
||||
|
@ -560,12 +565,11 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap
|
|||
|
||||
All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.
|
||||
|
||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg for all the Graphite APIs. This arg can be used for limiting the scope of time series
|
||||
visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics.
|
||||
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` and `extra_filters[]=series_selector` query args for all the Graphite APIs. These args can be used for limiting the scope of time series visible to the given tenant. It is expected that these query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
|
||||
|
||||
[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
|
||||
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
|
||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster and it is easier to use when migrating from Graphite to VictoriaMetrics. See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function.
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). See [these docs](#selecting-graphite-metrics).
|
||||
|
||||
|
||||
### Graphite Render API usage
|
||||
|
@ -616,6 +620,10 @@ Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressin
|
|||
|
||||
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful to disable the response cache by unchecking the `Enable cache` checkbox.
|
||||
|
||||
VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by clicking the `Override step value` checkbox.
|
||||
|
||||
VMUI allows investigating correlations between two queries on the same graph. Just click `+Query` button, enter the second query in the newly appeared input field and press `Ctrl+Enter`. Results for both queries should be displayed simultaneously on the same graph. Every query has its own vertical scale, which is displayed on the left and the right side of the graph. Lines for the second query are dashed.
|
||||
|
||||
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
|
||||
|
||||
|
||||
|
@ -1029,6 +1037,7 @@ VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-sc
|
|||
|
||||
VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
|
||||
to a file containing a list of [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) entries.
|
||||
The `-relabelConfig` also can point to http or https url. For example, `-relabelConfig=https://config-server/relabel_config.yml`.
|
||||
See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
|
||||
|
||||
Example contents for `-relabelConfig` file:
|
||||
|
@ -1221,7 +1230,8 @@ Consider setting the following command-line flags:
|
|||
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
||||
* `-forceMergeAuthKey` for protecting `/internal/force_merge` endpoint. See [force merge docs](#forced-merge).
|
||||
* `-search.resetCacheAuthKey` for protecting `/internal/resetRollupResultCache` endpoint. See [backfilling](#backfilling) for more details.
|
||||
* `-configAuthKey` for pretecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||
* `-configAuthKey` for protecting `/config` endpoint, since it may contain sensitive information such as passwords.
|
||||
* `-pprofAuthKey` for protecting `/debug/pprof/*` endpoints, which can be used for [profiling](#profiling).
|
||||
|
||||
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
||||
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
||||
|
@ -1376,9 +1386,7 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
|
|||
This prevents from ingesting metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total`
|
||||
metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload.
|
||||
|
||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then use `{__graphite__="foo.*.baz"}` syntax for selecting such metrics.
|
||||
This expression is equivalent to `{__name__=~"foo[.][^.]*[.]baz"}`, but it works faster and it is easier to use when migrating from Graphite.
|
||||
See also [label_graphite_group](https://docs.victoriametrics.com/MetricsQL.html#label_graphite_group) function, which allows extracting the given groups from Graphite metric names.
|
||||
* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then `{__graphite__="foo.*.baz"}` filter can be used for selecting such metrics. See [these docs](#selecting-graphite-metrics) for details.
|
||||
|
||||
* VictoriaMetrics ignores `NaN` values during data ingestion.
|
||||
|
||||
|
@ -1497,9 +1505,11 @@ Contact us with any questions regarding VictoriaMetrics at [info@victoriametrics
|
|||
Feel free asking any questions regarding VictoriaMetrics:
|
||||
|
||||
* [slack](https://slack.victoriametrics.com/)
|
||||
* [linkedin](https://www.linkedin.com/company/victoriametrics/)
|
||||
* [reddit](https://www.reddit.com/r/VictoriaMetrics/)
|
||||
* [telegram-en](https://t.me/VictoriaMetrics_en)
|
||||
* [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
||||
* [articles and talks about VictoriaMetrics in Russian](https://github.com/denisgolius/victoriametrics-ru-links)
|
||||
* [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
||||
|
||||
If you like VictoriaMetrics and want to contribute, then we need the following:
|
||||
|
@ -1654,8 +1664,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-maxInsertRequestSize size
|
||||
The maximum size in bytes of a single Prometheus remote_write API request
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
|
||||
-maxLabelValueLen int
|
||||
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
|
||||
-maxLabelsPerTimeseries int
|
||||
The maximum number of labels accepted per time series. Superfluous labels are dropped (default 30)
|
||||
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
|
||||
-memory.allowedBytes size
|
||||
Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||
|
@ -1685,7 +1697,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-promscrape.cluster.replicationFactor int
|
||||
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
||||
-promscrape.config string
|
||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. The path can point to local file and to http url. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||
-promscrape.config.dryRun
|
||||
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
||||
-promscrape.config.strictParse
|
||||
|
@ -1752,7 +1764,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-promscrape.suppressScrapeErrors
|
||||
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
|
||||
-relabelConfig string
|
||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
|
||||
-relabelDebug
|
||||
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
|
||||
-retentionPeriod value
|
||||
|
|
|
@ -50,7 +50,7 @@ to `vmagent` such as the ability to push metrics instead of pulling them. We did
|
|||
Please download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
|
||||
and configure the following flags to the `vmagent` binary in order to start scraping Prometheus targets:
|
||||
|
||||
* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`)
|
||||
* `-promscrape.config` with the path to Prometheus config file (usually located at `/etc/prometheus/prometheus.yml`). The path can point either to local file or to http url.
|
||||
* `-remoteWrite.url` with the remote storage endpoint such as VictoriaMetrics, the `-remoteWrite.url` argument can be specified multiple times to replicate data concurrently to an arbitrary number of remote storage systems.
|
||||
|
||||
Example command line:
|
||||
|
@ -218,15 +218,16 @@ The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders w
|
|||
|
||||
## Loading scrape configs from multiple files
|
||||
|
||||
`vmagent` supports loading scrape configs from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory plus a `single_scrape_config.yml` file:
|
||||
`vmagent` supports loading scrape configs from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file and from `https://config-server/scrape_config.yml` url:
|
||||
|
||||
```yml
|
||||
scrape_config_files:
|
||||
- configs/*.yml
|
||||
- single_scrape_config.yml
|
||||
- https://config-server/scrape_config.yml
|
||||
```
|
||||
|
||||
Every referred file can contain arbitrary number of any [supported scrape configs](#how-to-collect-metrics-in-prometheus-format). There is no need in specifying top-level `scrape_configs` section in these files. For example:
|
||||
Every referred file can contain arbitrary number of [supported scrape configs](#how-to-collect-metrics-in-prometheus-format). There is no need in specifying top-level `scrape_configs` section in these files. For example:
|
||||
|
||||
```yml
|
||||
- job_name: foo
|
||||
|
@ -283,7 +284,7 @@ The relabeling can be defined in the following places:
|
|||
|
||||
* At the `scrape_config -> relabel_configs` section in `-promscrape.config` file. This relabeling is applied to target labels. This relabeling can be debugged by passing `relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs target labels before and after the relabeling and then drops the logged target.
|
||||
* At the `scrape_config -> metric_relabel_configs` section in `-promscrape.config` file. This relabeling is applied to all the scraped metrics in the given `scrape_config`. This relabeling can be debugged by passing `metric_relabel_debug: true` option to the corresponding `scrape_config` section. In this case `vmagent` logs metrics before and after the relabeling and then drops the logged metrics.
|
||||
* At the `-remoteWrite.relabelConfig` file. This relabeling is aplied to all the collected metrics before sending them to remote storage. This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
|
||||
* At the `-remoteWrite.relabelConfig` file. This relabeling is applied to all the collected metrics before sending them to remote storage. This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
|
||||
* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is applied to metrics before sending them to the corresponding `-remoteWrite.url`. This relabeling can be debugged by passing `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.
|
||||
|
||||
You can read more about relabeling in the following articles:
|
||||
|
@ -810,7 +811,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
-promscrape.cluster.replicationFactor int
|
||||
The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication (default 1)
|
||||
-promscrape.config string
|
||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||
Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. The path can point to local file and to http url. See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
|
||||
-promscrape.config.dryRun
|
||||
Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output.
|
||||
-promscrape.config.strictParse
|
||||
|
@ -935,7 +936,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
Optional rate limit in bytes per second for data sent to -remoteWrite.url. By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data is sent after temporary unavailability of the remote storage
|
||||
Supports array of values separated by comma or specified via multiple flags.
|
||||
-remoteWrite.relabelConfig string
|
||||
Optional path to file with relabel_config entries. These entries are applied to all the metrics before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details
|
||||
Optional path to file with relabel_config entries. The path can point either to local file or to http url. These entries are applied to all the metrics before sending them to -remoteWrite.url. See https://docs.victoriametrics.com/vmagent.html#relabeling for details
|
||||
-remoteWrite.relabelDebug
|
||||
Whether to log metrics before and after relabeling with -remoteWrite.relabelConfig. If the -remoteWrite.relabelDebug is enabled, then the metrics aren't sent to remote storage. This is useful for debugging the relabeling configs
|
||||
-remoteWrite.roundDigits array
|
||||
|
@ -970,7 +971,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
|
|||
Remote storage URL to write data to. It must support Prometheus remote_write API. It is recommended using VictoriaMetrics as remote storage. Example url: http://<victoriametrics-host>:8428/api/v1/write . Pass multiple -remoteWrite.url flags in order to replicate data to multiple remote storage systems. See also -remoteWrite.multitenantURL
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-remoteWrite.urlRelabelConfig array
|
||||
Optional path to relabel config for the corresponding -remoteWrite.url
|
||||
Optional path to relabel config for the corresponding -remoteWrite.url. The path can point either to local file or to http url
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-remoteWrite.urlRelabelDebug array
|
||||
Whether to log metrics before and after relabeling with -remoteWrite.urlRelabelConfig. If the -remoteWrite.urlRelabelDebug is enabled, then the metrics aren't sent to the corresponding -remoteWrite.url. This is useful for debugging the relabeling configs
|
||||
|
|
|
@ -103,12 +103,24 @@ name: <string>
|
|||
# By default "prometheus" type is used.
|
||||
[ type: <string> ]
|
||||
|
||||
# Optional list of label filters applied to every rule's
|
||||
# request within a group. Is compatible only with VM datasource.
|
||||
# See more details at https://docs.victoriametrics.com#prometheus-querying-api-enhancements
|
||||
# Warning: DEPRECATED
|
||||
# Please use `params` instead:
|
||||
# params:
|
||||
# extra_label: ["job=nodeexporter", "env=prod"]
|
||||
extra_filter_labels:
|
||||
[ <labelname>: <labelvalue> ... ]
|
||||
|
||||
# Optional list of HTTP URL parameters
|
||||
# applied for all rules requests within a group
|
||||
# For example:
|
||||
# params:
|
||||
# nocache: ["1"] # disable caching for vmselect
|
||||
# denyPartialResponse: ["true"] # fail if one or more vmstorage nodes returned an error
|
||||
# extra_label: ["env=dev"] # apply additional label filter "env=dev" for all requests
|
||||
# see more details at https://docs.victoriametrics.com#prometheus-querying-api-enhancements
|
||||
params:
|
||||
[ <string>: [<string>, ...]]
|
||||
|
||||
# Optional list of labels added to every rule within a group.
|
||||
# It has priority over the external labels.
|
||||
# Labels are commonly used for adding environment
|
||||
|
@ -476,6 +488,8 @@ a review to the dashboard.
|
|||
|
||||
## Configuration
|
||||
|
||||
### Flags
|
||||
|
||||
Pass `-help` to `vmalert` in order to see the full list of supported
|
||||
command-line flags with their descriptions.
|
||||
|
||||
|
@ -697,12 +711,32 @@ The shortlist of configuration flags is the following:
|
|||
Show VictoriaMetrics version
|
||||
```
|
||||
|
||||
### Hot config reload
|
||||
`vmalert` supports "hot" config reload via the following methods:
|
||||
* send SIGHUP signal to `vmalert` process;
|
||||
* send GET request to `/-/reload` endpoint;
|
||||
* configure `-rule.configCheckInterval` flag for periodic reload
|
||||
on config change.
|
||||
|
||||
### URL params
|
||||
|
||||
To set additional URL params for `datasource.url`, `remoteWrite.url` or `remoteRead.url`
|
||||
just add them in address: `-datasource.url=http://localhost:8428?nocache=1`.
|
||||
|
||||
To set additional URL params for specific [group of rules](#Groups) modify
|
||||
the `params` group:
|
||||
```yaml
|
||||
groups:
|
||||
- name: TestGroup
|
||||
params:
|
||||
denyPartialResponse: ["true"]
|
||||
extra_label: ["env=dev"]
|
||||
```
|
||||
Please note, `params` are used only for executing rule expressions (requests to `datasource.url`).
|
||||
If there is a conflict between URL params set in the `datasource.url` flag and params in the group definition,
|
||||
the latter will have higher priority.
|
||||
|
||||
|
||||
## Contributing
|
||||
|
||||
`vmalert` is mostly designed and built by VictoriaMetrics community.
|
||||
|
@ -718,7 +752,7 @@ It is recommended using
|
|||
|
||||
### Development build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
|
||||
2. Run `make vmalert` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmalert` binary and puts it into the `bin` folder.
|
||||
|
||||
|
@ -735,7 +769,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
|||
|
||||
### Development ARM build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
|
||||
2. Run `make vmalert-arm` or `make vmalert-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmalert-arm` or `vmalert-arm64` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@ sort: 5
|
|||
`vmauth` is a simple auth proxy, router and [load balancer](#load-balancing) for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It reads auth credentials from `Authorization` http header ([Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication) and `Bearer token` is supported),
|
||||
matches them against configs pointed by [-auth.config](#auth-config) command-line flag and proxies incoming HTTP requests to the configured per-user `url_prefix` on successful match.
|
||||
|
||||
The `-auth.config` can point to either local file or to http url.
|
||||
|
||||
## Quick start
|
||||
|
||||
|
@ -30,12 +30,10 @@ Pass `-help` to `vmauth` in order to see all the supported command-line flags wi
|
|||
Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML,
|
||||
accounting and rate limiting such as [vmgateway](https://docs.victoriametrics.com/vmgateway.html).
|
||||
|
||||
|
||||
## Load balancing
|
||||
|
||||
Each `url_prefix` in the [-auth.config](#auth-config) may contain either a single url or a list of urls. In the latter case `vmauth` balances load among the configured urls in a round-robin manner. This feature is useful for balancing the load among multiple `vmselect` and/or `vminsert` nodes in [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||
|
||||
|
||||
## Auth config
|
||||
|
||||
`-auth.config` is represented in the following simple `yml` format:
|
||||
|
@ -128,7 +126,6 @@ users:
|
|||
The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
|
||||
This may be useful for passing secrets to the config.
|
||||
|
||||
|
||||
## Security
|
||||
|
||||
Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable https. This can be done by passing the following `-tls*` command-line flags to `vmauth`:
|
||||
|
@ -146,7 +143,6 @@ Alternatively, [https termination proxy](https://en.wikipedia.org/wiki/TLS_termi
|
|||
|
||||
It is recommended protecting `/-/reload` endpoint with `-reloadAuthKey` command-line flag, so external users couldn't trigger config reload.
|
||||
|
||||
|
||||
## Monitoring
|
||||
|
||||
`vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page
|
||||
|
@ -165,7 +161,6 @@ users:
|
|||
|
||||
It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there.
|
||||
|
||||
|
||||
### Development build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.16.
|
||||
|
@ -191,7 +186,6 @@ by setting it via `<ROOT_IMAGE>` environment variable. For example, the followin
|
|||
ROOT_IMAGE=scratch make package-vmauth
|
||||
```
|
||||
|
||||
|
||||
## Profiling
|
||||
|
||||
`vmauth` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
|
||||
|
@ -212,7 +206,6 @@ The command for collecting CPU profile waits for 30 seconds before returning.
|
|||
|
||||
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
||||
|
||||
|
||||
## Advanced usage
|
||||
|
||||
Pass `-help` command-line arg to `vmauth` in order to see all the configuration options:
|
||||
|
@ -225,7 +218,7 @@ vmauth authenticates and authorizes incoming requests and proxies them to Victor
|
|||
See the docs at https://docs.victoriametrics.com/vmauth.html .
|
||||
|
||||
-auth.config string
|
||||
Path to auth config. See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config
|
||||
Path to auth config. It can point either to local file or to http url. See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config
|
||||
-enableTCP6
|
||||
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
|
||||
-envflag.enable
|
||||
|
@ -253,7 +246,7 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
|||
-httpListenAddr string
|
||||
TCP address to listen for http connections (default ":8427")
|
||||
-logInvalidAuthTokens
|
||||
Whether to log requests with invalid auth tokens. Such requests are always counted at vmagent_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
|
||||
Whether to log requests with invalid auth tokens. Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
|
||||
-loggerDisableTimestamps
|
||||
Whether to disable writing timestamps in logs
|
||||
-loggerErrorsPerSecondLimit int
|
||||
|
@ -276,9 +269,9 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
|||
-memory.allowedPercent float
|
||||
Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage (default 60)
|
||||
-metricsAuthKey string
|
||||
Auth key for /metrics. It overrides httpAuth settings
|
||||
Auth key for /metrics. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||
-pprofAuthKey string
|
||||
Auth key for /debug/pprof. It overrides httpAuth settings
|
||||
Auth key for /debug/pprof. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||
-reloadAuthKey string
|
||||
Auth key for /-/reload http endpoint. It must be passed as authKey=...
|
||||
-tls
|
||||
|
|
13
go.mod
13
go.mod
|
@ -1,6 +1,7 @@
|
|||
module github.com/VictoriaMetrics/VictoriaMetrics
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.99.0 // indirect
|
||||
cloud.google.com/go/storage v1.18.2
|
||||
github.com/VictoriaMetrics/fastcache v1.8.0
|
||||
|
||||
|
@ -8,9 +9,9 @@ require (
|
|||
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
|
||||
github.com/VictoriaMetrics/fasthttp v1.1.0
|
||||
github.com/VictoriaMetrics/metrics v1.18.1
|
||||
github.com/VictoriaMetrics/metricsql v0.31.0
|
||||
github.com/VictoriaMetrics/metricsql v0.32.0
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.42.17
|
||||
github.com/aws/aws-sdk-go v1.42.22
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2
|
||||
github.com/cheggaaa/pb/v3 v3.0.8
|
||||
|
@ -34,11 +35,11 @@ require (
|
|||
github.com/valyala/fasttemplate v1.2.1
|
||||
github.com/valyala/gozstd v1.14.2
|
||||
github.com/valyala/quicktemplate v1.7.0
|
||||
golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881
|
||||
google.golang.org/api v0.60.0
|
||||
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12 // indirect
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486
|
||||
google.golang.org/api v0.62.0
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
|
||||
google.golang.org/grpc v1.42.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
|
34
go.sum
34
go.sum
|
@ -26,8 +26,10 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc
|
|||
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
|
||||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
||||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
||||
cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
|
||||
cloud.google.com/go v0.99.0 h1:y/cM2iqGgGi5D5DQZl6D9STN/3dR/Vx5Mp8s752oJTY=
|
||||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
@ -108,8 +110,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR
|
|||
github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
|
||||
github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
|
||||
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
|
||||
github.com/VictoriaMetrics/metricsql v0.31.0 h1:7cpjby64WVcRNBiMieEytuvAcU/jOOz+39RLigENz4E=
|
||||
github.com/VictoriaMetrics/metricsql v0.31.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
|
||||
github.com/VictoriaMetrics/metricsql v0.32.0 h1:yTZFB1FvbOsD5ahl6mxKYprHpZ248nVk3s8Kl7UBg5c=
|
||||
github.com/VictoriaMetrics/metricsql v0.32.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
|
||||
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
|
||||
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
|
||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||
|
@ -154,8 +156,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
|
|||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go v1.42.17 h1:NEMRZcLd+YhXhUqdjwqNGtEYthiUZ+3BudGmK4/0yaA=
|
||||
github.com/aws/aws-sdk-go v1.42.17/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go v1.42.22 h1:EwcM7/+Ytg6xK+jbeM2+f9OELHqPiEiEKetT/GgAr7I=
|
||||
github.com/aws/aws-sdk-go v1.42.22/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
|
||||
|
@ -1179,8 +1181,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c h1:WtYZ93XtWSO5KlOMgPZu7hXY9WhMZpprvlm5VwvAl8c=
|
||||
golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1306,9 +1308,10 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 h1:TyHqChC80pFkXWraUUf6RuB5IqFdQieMLwwCJokV2pc=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486 h1:5hpz5aRr+W1erYCL5JRhSUBJRph7l9XkNveoExlrKYk=
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1456,8 +1459,9 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv
|
|||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||
google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
|
||||
google.golang.org/api v0.60.0 h1:eq/zs5WPH4J9undYM9IP1O7dSr7Yh8Y0GtSCpzGzIUk=
|
||||
google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4=
|
||||
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
|
||||
google.golang.org/api v0.62.0 h1:PhGymJMXfGBzc4lBRmrx9+1w4w2wEzURHNGF/sD/xGc=
|
||||
google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1530,9 +1534,12 @@ google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEc
|
|||
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12 h1:DN5b3HU13J4sMd/QjDx34U6afpaexKTDdop+26pdjdk=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@ -1565,6 +1572,7 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
|
|||
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
|
|
39
lib/fs/fs.go
39
lib/fs/fs.go
|
@ -4,6 +4,8 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
@ -372,3 +374,40 @@ type freeSpaceEntry struct {
|
|||
updateTime uint64
|
||||
freeSpace uint64
|
||||
}
|
||||
|
||||
// ReadFileOrHTTP reads path either from local filesystem or from http if path starts with http or https.
|
||||
func ReadFileOrHTTP(path string) ([]byte, error) {
|
||||
if isHTTPURL(path) {
|
||||
// reads remote file via http or https, if url is given
|
||||
resp, err := http.Get(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot fetch %q: %w", path, err)
|
||||
}
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read %q: %s", path, err)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read %q: %w", path, err)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// GetFilepath returns full path to file for the given baseDir and path.
|
||||
func GetFilepath(baseDir, path string) string {
|
||||
if filepath.IsAbs(path) || isHTTPURL(path) {
|
||||
return path
|
||||
}
|
||||
return filepath.Join(baseDir, path)
|
||||
}
|
||||
|
||||
// isHTTPURL checks if a given targetURL is valid and contains a valid http scheme
|
||||
func isHTTPURL(targetURL string) bool {
|
||||
parsed, err := url.Parse(targetURL)
|
||||
return err == nil && (parsed.Scheme == "http" || parsed.Scheme == "https") && parsed.Host != ""
|
||||
|
||||
}
|
||||
|
|
|
@ -22,3 +22,18 @@ func TestIsTemporaryFileName(t *testing.T) {
|
|||
f("asdf.sdfds.tmp.dfd", false)
|
||||
f("dfd.sdfds.dfds.1232", false)
|
||||
}
|
||||
|
||||
func TestIsHTTPURLSuccess(t *testing.T) {
|
||||
f := func(s string, expected bool) {
|
||||
t.Helper()
|
||||
res := isHTTPURL(s)
|
||||
if res != expected {
|
||||
t.Fatalf("expecting %t, got %t", expected, res)
|
||||
}
|
||||
}
|
||||
f("http://isvalid:8000/filepath", true) // test http
|
||||
f("https://isvalid:8000/filepath", true) // test https
|
||||
f("tcp://notvalid:8000/filepath", false) // test tcp
|
||||
f("0/filepath", false) // something invalid
|
||||
f("filepath.extension", false) // something invalid
|
||||
}
|
||||
|
|
|
@ -84,8 +84,12 @@ func Serve(addr string, rh RequestHandler) {
|
|||
if *tlsEnable {
|
||||
scheme = "https"
|
||||
}
|
||||
logger.Infof("starting http server at %s://%s/", scheme, addr)
|
||||
logger.Infof("pprof handlers are exposed at %s://%s/debug/pprof/", scheme, addr)
|
||||
hostAddr := addr
|
||||
if strings.HasPrefix(hostAddr, ":") {
|
||||
hostAddr = "127.0.0.1" + hostAddr
|
||||
}
|
||||
logger.Infof("starting http server at %s://%s/", scheme, hostAddr)
|
||||
logger.Infof("pprof handlers are exposed at %s://%s/debug/pprof/", scheme, hostAddr)
|
||||
lnTmp, err := netutil.NewTCPListener(scheme, addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start http server at %s: %s", addr, err)
|
||||
|
|
|
@ -7,11 +7,11 @@ import (
|
|||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/clientcredentials"
|
||||
|
@ -161,7 +161,7 @@ func newOAuth2ConfigInternal(baseDir string, o *OAuth2Config) (*oauth2ConfigInte
|
|||
},
|
||||
}
|
||||
if o.ClientSecretFile != "" {
|
||||
oi.clientSecretFile = getFilepath(baseDir, o.ClientSecretFile)
|
||||
oi.clientSecretFile = fs.GetFilepath(baseDir, o.ClientSecretFile)
|
||||
secret, err := readPasswordFromFile(oi.clientSecretFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read OAuth2 secret from %q: %w", oi.clientSecretFile, err)
|
||||
|
@ -304,7 +304,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
|
|||
if az.Credentials != nil {
|
||||
return nil, fmt.Errorf("both `credentials`=%q and `credentials_file`=%q are set", az.Credentials, az.CredentialsFile)
|
||||
}
|
||||
filePath := getFilepath(baseDir, az.CredentialsFile)
|
||||
filePath := fs.GetFilepath(baseDir, az.CredentialsFile)
|
||||
getAuthHeader = func() string {
|
||||
token, err := readPasswordFromFile(filePath)
|
||||
if err != nil {
|
||||
|
@ -332,7 +332,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
|
|||
if basicAuth.Password != nil {
|
||||
return nil, fmt.Errorf("both `password`=%q and `password_file`=%q are set in `basic_auth` section", basicAuth.Password, basicAuth.PasswordFile)
|
||||
}
|
||||
filePath := getFilepath(baseDir, basicAuth.PasswordFile)
|
||||
filePath := fs.GetFilepath(baseDir, basicAuth.PasswordFile)
|
||||
getAuthHeader = func() string {
|
||||
password, err := readPasswordFromFile(filePath)
|
||||
if err != nil {
|
||||
|
@ -362,7 +362,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
|
|||
if bearerToken != "" {
|
||||
return nil, fmt.Errorf("both `bearer_token`=%q and `bearer_token_file`=%q are set", bearerToken, bearerTokenFile)
|
||||
}
|
||||
filePath := getFilepath(baseDir, bearerTokenFile)
|
||||
filePath := fs.GetFilepath(baseDir, bearerTokenFile)
|
||||
getAuthHeader = func() string {
|
||||
token, err := readPasswordFromFile(filePath)
|
||||
if err != nil {
|
||||
|
@ -416,8 +416,8 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
|
|||
if tlsConfig.CertFile != "" || tlsConfig.KeyFile != "" {
|
||||
getTLSCert = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
|
||||
// Re-read TLS certificate from disk. This is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1420
|
||||
certPath := getFilepath(baseDir, tlsConfig.CertFile)
|
||||
keyPath := getFilepath(baseDir, tlsConfig.KeyFile)
|
||||
certPath := fs.GetFilepath(baseDir, tlsConfig.CertFile)
|
||||
keyPath := fs.GetFilepath(baseDir, tlsConfig.KeyFile)
|
||||
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", tlsConfig.CertFile, tlsConfig.KeyFile, err)
|
||||
|
@ -431,8 +431,8 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
|
|||
tlsCertDigest = fmt.Sprintf("certFile=%q, keyFile=%q", tlsConfig.CertFile, tlsConfig.KeyFile)
|
||||
}
|
||||
if tlsConfig.CAFile != "" {
|
||||
path := getFilepath(baseDir, tlsConfig.CAFile)
|
||||
data, err := ioutil.ReadFile(path)
|
||||
path := fs.GetFilepath(baseDir, tlsConfig.CAFile)
|
||||
data, err := fs.ReadFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read `ca_file` %q: %w", tlsConfig.CAFile, err)
|
||||
}
|
||||
|
|
|
@ -1,21 +1,14 @@
|
|||
package promauth
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
)
|
||||
|
||||
func getFilepath(baseDir, path string) string {
|
||||
if filepath.IsAbs(path) {
|
||||
return path
|
||||
}
|
||||
return filepath.Join(baseDir, path)
|
||||
}
|
||||
|
||||
func readPasswordFromFile(path string) (string, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
data, err := fs.ReadFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
@ -2,12 +2,12 @@ package promrelabel
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
|
@ -123,7 +123,7 @@ func (pcs *ParsedConfigs) String() string {
|
|||
|
||||
// LoadRelabelConfigs loads relabel configs from the given path.
|
||||
func LoadRelabelConfigs(path string, relabelDebug bool) (*ParsedConfigs, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
data, err := fs.ReadFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %w", path, err)
|
||||
}
|
||||
|
|
|
@ -3,7 +3,6 @@ package promscrape
|
|||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
@ -15,6 +14,7 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
|
@ -227,7 +227,7 @@ type StaticConfig struct {
|
|||
}
|
||||
|
||||
func loadStaticConfigs(path string) ([]StaticConfig, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
data, err := fs.ReadFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read `static_configs` from %q: %w", path, err)
|
||||
}
|
||||
|
@ -241,7 +241,7 @@ func loadStaticConfigs(path string) ([]StaticConfig, error) {
|
|||
|
||||
// loadConfig loads Prometheus config from the given path.
|
||||
func loadConfig(path string) (*Config, []byte, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
data, err := fs.ReadFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %w", path, err)
|
||||
}
|
||||
|
@ -257,7 +257,7 @@ func loadScrapeConfigFiles(baseDir string, scrapeConfigFiles []string) ([]Scrape
|
|||
var scrapeConfigs []ScrapeConfig
|
||||
var scsData []byte
|
||||
for _, filePath := range scrapeConfigFiles {
|
||||
filePath := getFilepath(baseDir, filePath)
|
||||
filePath := fs.GetFilepath(baseDir, filePath)
|
||||
paths := []string{filePath}
|
||||
if strings.Contains(filePath, "*") {
|
||||
ps, err := filepath.Glob(filePath)
|
||||
|
@ -268,7 +268,7 @@ func loadScrapeConfigFiles(baseDir string, scrapeConfigFiles []string) ([]Scrape
|
|||
paths = ps
|
||||
}
|
||||
for _, path := range paths {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
data, err := fs.ReadFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("cannot load %q: %w", path, err)
|
||||
}
|
||||
|
@ -877,7 +877,7 @@ func appendScrapeWorkForTargetLabels(dst []*ScrapeWork, swc *scrapeWorkConfig, t
|
|||
|
||||
func (sdc *FileSDConfig) appendScrapeWork(dst []*ScrapeWork, swsMapPrev map[string][]*ScrapeWork, baseDir string, swc *scrapeWorkConfig) []*ScrapeWork {
|
||||
for _, file := range sdc.Files {
|
||||
pathPattern := getFilepath(baseDir, file)
|
||||
pathPattern := fs.GetFilepath(baseDir, file)
|
||||
paths := []string{pathPattern}
|
||||
if strings.Contains(pathPattern, "*") {
|
||||
var err error
|
||||
|
@ -1201,13 +1201,6 @@ func mergeLabels(swc *scrapeWorkConfig, target string, extraLabels, metaLabels m
|
|||
return result
|
||||
}
|
||||
|
||||
func getFilepath(baseDir, path string) string {
|
||||
if filepath.IsAbs(path) {
|
||||
return path
|
||||
}
|
||||
return filepath.Join(baseDir, path)
|
||||
}
|
||||
|
||||
func addMissingPort(scheme, target string) string {
|
||||
if strings.Contains(target, ":") {
|
||||
return target
|
||||
|
|
|
@ -32,6 +32,7 @@ var (
|
|||
suppressDuplicateScrapeTargetErrors = flag.Bool("promscrape.suppressDuplicateScrapeTargetErrors", false, "Whether to suppress 'duplicate scrape target' errors; "+
|
||||
"see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details")
|
||||
promscrapeConfigFile = flag.String("promscrape.config", "", "Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. "+
|
||||
"The path can point to local file and to http url. "+
|
||||
"See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details")
|
||||
|
||||
fileSDCheckInterval = flag.Duration("promscrape.fileSDCheckInterval", 30*time.Second, "Interval for checking for changes in 'file_sd_config'. "+
|
||||
|
|
|
@ -424,7 +424,18 @@ const maxLabelNameLen = 256
|
|||
// The maximum length of label value.
|
||||
//
|
||||
// Longer values are truncated.
|
||||
const maxLabelValueLen = 16 * 1024
|
||||
var maxLabelValueLen = 16 * 1024
|
||||
|
||||
// SetMaxLabelValueLen sets the limit on the label value length.
|
||||
//
|
||||
// This function can be called before using the storage package.
|
||||
//
|
||||
// Label values with longer length are truncated.
|
||||
func SetMaxLabelValueLen(n int) {
|
||||
if n > 0 {
|
||||
maxLabelValueLen = n
|
||||
}
|
||||
}
|
||||
|
||||
// The maximum number of labels per each timeseries.
|
||||
var maxLabelsPerTimeseries = 30
|
||||
|
@ -432,12 +443,13 @@ var maxLabelsPerTimeseries = 30
|
|||
// SetMaxLabelsPerTimeseries sets the limit on the number of labels
|
||||
// per each time series.
|
||||
//
|
||||
// This function can be called before using the storage package.
|
||||
//
|
||||
// Superfluous labels are dropped.
|
||||
func SetMaxLabelsPerTimeseries(maxLabels int) {
|
||||
if maxLabels <= 0 {
|
||||
logger.Panicf("BUG: maxLabels must be positive; got %d", maxLabels)
|
||||
if maxLabels > 0 {
|
||||
maxLabelsPerTimeseries = maxLabels
|
||||
}
|
||||
maxLabelsPerTimeseries = maxLabels
|
||||
}
|
||||
|
||||
// MarshalMetricNameRaw marshals labels to dst and returns the result.
|
||||
|
|
|
@ -1228,9 +1228,52 @@ func (s *Storage) SearchTagValueSuffixes(tr TimeRange, tagKey, tagValuePrefix []
|
|||
|
||||
// SearchGraphitePaths returns all the matching paths for the given graphite query on the given tr.
|
||||
func (s *Storage) SearchGraphitePaths(tr TimeRange, query []byte, maxPaths int, deadline uint64) ([]string, error) {
|
||||
query = replaceAlternateRegexpsWithGraphiteWildcards(query)
|
||||
return s.searchGraphitePaths(tr, nil, query, maxPaths, deadline)
|
||||
}
|
||||
|
||||
// replaceAlternateRegexpsWithGraphiteWildcards replaces (foo|..|bar) with {foo,...,bar} in b and returns the new value.
|
||||
func replaceAlternateRegexpsWithGraphiteWildcards(b []byte) []byte {
|
||||
var dst []byte
|
||||
for {
|
||||
n := bytes.IndexByte(b, '(')
|
||||
if n < 0 {
|
||||
if len(dst) == 0 {
|
||||
// Fast path - b doesn't contain the openining brace.
|
||||
return b
|
||||
}
|
||||
dst = append(dst, b...)
|
||||
return dst
|
||||
}
|
||||
dst = append(dst, b[:n]...)
|
||||
b = b[n+1:]
|
||||
n = bytes.IndexByte(b, ')')
|
||||
if n < 0 {
|
||||
dst = append(dst, '(')
|
||||
dst = append(dst, b...)
|
||||
return dst
|
||||
}
|
||||
x := b[:n]
|
||||
b = b[n+1:]
|
||||
if string(x) == ".*" {
|
||||
dst = append(dst, '*')
|
||||
continue
|
||||
}
|
||||
dst = append(dst, '{')
|
||||
for len(x) > 0 {
|
||||
n = bytes.IndexByte(x, '|')
|
||||
if n < 0 {
|
||||
dst = append(dst, x...)
|
||||
break
|
||||
}
|
||||
dst = append(dst, x[:n]...)
|
||||
x = x[n+1:]
|
||||
dst = append(dst, ',')
|
||||
}
|
||||
dst = append(dst, '}')
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) searchGraphitePaths(tr TimeRange, qHead, qTail []byte, maxPaths int, deadline uint64) ([]string, error) {
|
||||
n := bytes.IndexAny(qTail, "*[{")
|
||||
if n < 0 {
|
||||
|
|
|
@ -14,6 +14,24 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
|
||||
)
|
||||
|
||||
func TestReplaceAlternateRegexpsWithGraphiteWildcards(t *testing.T) {
|
||||
f := func(q, resultExpected string) {
|
||||
t.Helper()
|
||||
result := replaceAlternateRegexpsWithGraphiteWildcards([]byte(q))
|
||||
if string(result) != resultExpected {
|
||||
t.Fatalf("unexpected result for %s\ngot\n%s\nwant\n%s", q, result, resultExpected)
|
||||
}
|
||||
}
|
||||
f("", "")
|
||||
f("foo", "foo")
|
||||
f("foo(bar", "foo(bar")
|
||||
f("foo.(bar|baz", "foo.(bar|baz")
|
||||
f("foo.(bar).x", "foo.{bar}.x")
|
||||
f("foo.(bar|baz).*.{x,y}", "foo.{bar,baz}.*.{x,y}")
|
||||
f("foo.(bar|baz).*.{x,y}(z|aa)", "foo.{bar,baz}.*.{x,y}{z,aa}")
|
||||
f("foo(.*)", "foo*")
|
||||
}
|
||||
|
||||
func TestGetRegexpForGraphiteNodeQuery(t *testing.T) {
|
||||
f := func(q, expectedRegexp string) {
|
||||
t.Helper()
|
||||
|
|
|
@ -25,6 +25,9 @@ type Cache struct {
|
|||
curr atomic.Value
|
||||
prev atomic.Value
|
||||
|
||||
// cs holds cache stats
|
||||
cs fastcache.Stats
|
||||
|
||||
// mode indicates whether to use only curr and skip prev.
|
||||
//
|
||||
// This flag is set to switching if curr is filled for more than 50% space.
|
||||
|
@ -39,9 +42,6 @@ type Cache struct {
|
|||
|
||||
wg sync.WaitGroup
|
||||
stopCh chan struct{}
|
||||
|
||||
// cs holds cache stats
|
||||
cs fastcache.Stats
|
||||
}
|
||||
|
||||
// Load loads the cache from filePath and limits its size to maxBytes
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue