Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

This commit is contained in:
Aliaksandr Valialkin 2023-05-18 12:34:50 -07:00
commit 43f0baabcd
278 changed files with 6092 additions and 4129 deletions


@ -282,7 +282,11 @@ http://<victoriametrics-addr>:8428
Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.
Then build graphs and dashboards for the created datasource using [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) or [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
Then build graphs and dashboards for the created datasource using [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/)
or [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
Alternatively, use VictoriaMetrics [datasource plugin](https://github.com/VictoriaMetrics/grafana-datasource) with support for extra features.
See more in [description](https://github.com/VictoriaMetrics/grafana-datasource#victoriametrics-data-source-for-grafana).
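For a quick check that the configured datasource URL responds, the Prometheus-compatible query API can be hit directly — a minimal sketch, assuming VictoriaMetrics is reachable at `victoriametrics:8428` and the `up` metric exists:
```console
curl 'http://victoriametrics:8428/api/v1/query' --data-urlencode 'query=up'
```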
## How to upgrade VictoriaMetrics
@ -305,10 +309,14 @@ Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](
VictoriaMetrics provides a UI for query troubleshooting and exploration. The UI is available at `http://victoriametrics:8428/vmui`.
The UI allows exploring query results via graphs and tables. It also provides the following features:
- [metrics explorer](#metrics-explorer)
- [cardinality explorer](#cardinality-explorer)
- [query tracer](#query-tracing)
- [top queries explorer](#top-queries)
- Explore:
- [Metrics explorer](#metrics-explorer) - automatically builds graphs for selected metrics;
- [Cardinality explorer](#cardinality-explorer) - stats about existing metrics in TSDB;
- [Top queries](#top-queries) - shows most frequently executed queries;
- Tools:
- [Trace analyzer](#query-tracing) - playground for loading query traces in JSON format;
- [WITH expressions playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/expand-with-exprs) - test how WITH expressions work;
- [Metric relabel debugger](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/relabeling) - playground for [relabeling](#relabeling) configs.
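Most of these views are thin wrappers over plain HTTP APIs, so the underlying data can also be fetched directly. A hedged sketch of querying the data behind the top queries view (the hostname and the `topN`/`maxLifetime` values are example placeholders):
```console
curl 'http://victoriametrics:8428/api/v1/status/top_queries?topN=5&maxLifetime=30m'
```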
VMUI automatically switches from graph view to heatmap view when the query returns [histogram](https://docs.victoriametrics.com/keyConcepts.html#histogram) buckets
(both [Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram)
@ -374,8 +382,8 @@ VictoriaMetrics provides an ability to explore time series cardinality at `Explo
may show a lower than expected number of unique label values for labels with a small number of unique values.
This is because of [implementation limits](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/5a6e617b5e41c9170e7c562aecd15ee0c901d489/app/vmselect/netstorage/netstorage.go#L1039-L1045).
By default cardinality explorer analyzes time series for the current date. It provides the ability to select different day at the top right corner.
By default all the time series for the selected date are analyzed. It is possible to narrow down the analysis to series
By default, cardinality explorer analyzes time series for the current date. It provides the ability to select a different day in the top right corner.
By default, all the time series for the selected date are analyzed. It is possible to narrow down the analysis to series
matching the specified [series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors).
Cardinality explorer is built on top of [/api/v1/status/tsdb](#tsdb-stats).
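Since the explorer is built on top of [/api/v1/status/tsdb](#tsdb-stats), the same stats can be fetched with `curl` — a sketch, where the date and the series selector are example values:
```console
curl 'http://victoriametrics:8428/api/v1/status/tsdb?topN=5&date=2023-05-18'
# Narrow the stats to series matching a selector:
curl -G 'http://victoriametrics:8428/api/v1/status/tsdb' --data-urlencode 'match[]={job="victoriametrics"}'
```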
@ -813,9 +821,11 @@ in [query APIs](https://docs.victoriametrics.com/#prometheus-querying-api-usage)
in [export APIs](https://docs.victoriametrics.com/#how-to-export-time-series).
- Unix timestamps in seconds with optional milliseconds after the point. For example, `1562529662.678`.
- [RFC3339](https://www.ietf.org/rfc/rfc3339.txt). For example, '2022-03-29T01:02:03Z`.
- Partial RFC3339. Examples: `2022`, `2022-03`, `2022-03-29`, `2022-03-29T01`, `2022-03-29T01:02`.
- Relative duration comparing to the current time. For example, `1h5m` means `one hour and five minutes ago`.
- [RFC3339](https://www.ietf.org/rfc/rfc3339.txt). For example, `2022-03-29T01:02:03Z` or `2022-03-29T01:02:03+02:30`.
- Partial RFC3339. Examples: `2022`, `2022-03`, `2022-03-29`, `2022-03-29T01`, `2022-03-29T01:02`, `2022-03-29T01:02:03`.
The partial RFC3339 time is in UTC timezone by default. It is possible to specify timezone there by adding `+hh:mm` or `-hh:mm` suffix to partial time.
For example, `2022-03-01+06:30` is `2022-03-01` in the `+06:30` timezone.
- Relative duration compared to the current time. For example, `1h5m`, `-1h5m` or `now-1h5m` means `one hour and five minutes ago`, while `now` means the current time.
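For illustration, a sketch of mixing these formats in a single [export API](#how-to-export-time-series) call (the hostname and series selector are placeholders; `--data-urlencode` takes care of escaping the `+` in the timezone suffix):
```console
curl -G 'http://victoriametrics:8428/api/v1/export' \
  --data-urlencode 'match[]=up' \
  --data-urlencode 'start=2022-03-29T01:02:03+02:30' \
  --data-urlencode 'end=now-1h5m'
```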
## Graphite API usage
@ -852,7 +862,7 @@ VictoriaMetrics supports the following handlers from [Graphite Metrics API](http
VictoriaMetrics accepts the following additional query args at `/metrics/find` and `/metrics/expand`:
* `label` - for selecting arbitrary label values. By default `label=__name__`, i.e. metric names are selected.
* `label` - for selecting arbitrary label values. By default, `label=__name__`, i.e. metric names are selected.
* `delimiter` - for using different delimiters in metric name hierarchy. For example, `/metrics/find?delimiter=_&query=node_*` would return all the metric name prefixes
that start with `node_`. By default, `delimiter=.`.
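A hedged example of calling these handlers with the extra query args (hostname assumed):
```console
curl 'http://victoriametrics:8428/metrics/find?query=node_*&delimiter=_'
curl 'http://victoriametrics:8428/metrics/expand?query=*&label=instance'
```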
@ -978,7 +988,7 @@ Note that background merges may never occur for data from previous months, so st
In this case [forced merge](#forced-merge) may help free up storage space.
It is recommended to verify which metrics will be deleted with the call to `http://<victoria-metrics-addr>:8428/api/v1/series?match[]=<timeseries_selector_for_delete>`
before actually deleting the metrics. By default this query will only scan series in the past 5 minutes, so you may need to
before actually deleting the metrics. By default, this query will only scan series in the past 5 minutes, so you may need to
adjust `start` and `end` to a suitable range to achieve match hits.
The `/api/v1/admin/tsdb/delete_series` handler may be protected with `authKey` if `-deleteAuthKey` command-line flag is set.
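Putting it together, a cautious deletion workflow might look like this — a sketch where the series selector and the widened `start` are example values, and the `authKey` query arg applies only if `-deleteAuthKey` is set:
```console
# Preview which series match the selector:
curl -G 'http://victoriametrics:8428/api/v1/series' \
  --data-urlencode 'match[]=node_cpu_seconds_total{instance="host-42"}' \
  --data-urlencode 'start=2023-01-01T00:00:00Z'
# Delete them:
curl -G 'http://victoriametrics:8428/api/v1/admin/tsdb/delete_series' \
  --data-urlencode 'match[]=node_cpu_seconds_total{instance="host-42"}'
```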
@ -1344,7 +1354,8 @@ Example contents for `-relabelConfig` file:
VictoriaMetrics provides additional relabeling features such as Graphite-style relabeling.
See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details.
The relabeling can be debugged at `http://victoriametrics:8428/metric-relabel-debug` page.
The relabeling can be debugged at `http://victoriametrics:8428/metric-relabel-debug` page
or at our [public playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/relabeling).
See [these docs](https://docs.victoriametrics.com/vmagent.html#relabel-debug) for more details.
@ -1391,7 +1402,7 @@ See also [resource usage limits docs](#resource-usage-limits).
## Resource usage limits
By default VictoriaMetrics is tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
By default, VictoriaMetrics is tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
- `-memory.allowedPercent` and `-memory.allowedBytes` limit the amount of memory that may be used for various internal caches at VictoriaMetrics. Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
- `-search.maxMemoryPerQuery` limits the amount of memory that can be used for processing a single query. Queries that need more memory are rejected. Heavy queries that select a big number of time series may exceed the per-query memory limit by a small percent. The total memory limit for concurrently executed queries can be estimated as `-search.maxMemoryPerQuery` multiplied by `-search.maxConcurrentRequests`.
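A sketch of combining these limits (the numbers are illustrative, not recommendations):
```console
/path/to/victoria-metrics-prod \
  -memory.allowedPercent=60 \
  -search.maxMemoryPerQuery=500MB \
  -search.maxConcurrentRequests=8
```
With these example values, the worst-case memory usage for concurrently executed queries is roughly 500MB * 8 = 4GB, per the estimation above.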
@ -1511,8 +1522,8 @@ for fast block lookups, which belong to the given `TSID` and cover the given tim
Newly added `parts` either successfully appear in the storage or fail to appear.
The newly added `part` is atomically registered in the `parts.json` file under the corresponding partition
after it is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html) to the storage.
Thanks to this alogrithm, storage never contains partially created parts, even if hardware power off
occurrs in the middle of writing the `part` to disk - such incompletely written `parts`
Thanks to this algorithm, the storage never contains partially created parts, even if a hardware power-off
occurs in the middle of writing the `part` to disk - such incompletely written `parts`
are automatically deleted on the next VictoriaMetrics start.
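For illustration, the per-partition layout can be inspected directly on disk — assuming the default `-storageDataPath` of `victoria-metrics-data`; the partition name is an example:
```console
ls victoria-metrics-data/data/small/2023_05/
cat victoria-metrics-data/data/small/2023_05/parts.json
```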
The same applies to merge process — `parts` are either fully merged into a new `part` or fail to merge,
@ -1539,14 +1550,14 @@ Retention is configured with the `-retentionPeriod` command-line flag, which tak
Data is split in per-month partitions inside `<-storageDataPath>/data/{small,big}` folders.
Data partitions outside the configured retention are deleted on the first day of the new month.
Each partition consists of one or more data parts. Data parts outside of the configured retention are eventually deleted during
Each partition consists of one or more data parts. Data parts outside the configured retention are eventually deleted during
[background merge](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
The maximum disk space usage for a given `-retentionPeriod` is going to be (`-retentionPeriod` + 1) months.
For example, if `-retentionPeriod` is set to 1, data for January is deleted on March 1st.
Please note, the time range covered by a data part is not limited by the retention period unit. Hence, a data part may contain data
for multiple days and will be deleted only when fully outside of the configured retention.
for multiple days and will be deleted only when fully outside the configured retention.
It is safe to extend `-retentionPeriod` on existing data. If `-retentionPeriod` is set to a lower
value than before, then data outside the configured period will be eventually deleted.
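For example, a sketch of the retention flag in both unit styles (months by default; duration suffixes such as `d` are also accepted):
```console
# Keep data for 13 months:
/path/to/victoria-metrics-prod -retentionPeriod=13
# Keep data for 90 days:
/path/to/victoria-metrics-prod -retentionPeriod=90d
```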
@ -1590,7 +1601,7 @@ For example, the following config sets 3 days retention for time series with `te
Important notes:
- The data outside of the configured retention isn't deleted instantly - it is deleted eventually during [background merges](https://docs.victoriametrics.com/#storage).
- The data outside the configured retention isn't deleted instantly - it is deleted eventually during [background merges](https://docs.victoriametrics.com/#storage).
- The `-retentionFilter` doesn't remove old data from `indexdb` (aka inverted index) until the configured [-retentionPeriod](#retention).
So the `indexdb` size can grow big under [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)
even for small retentions configured via `-retentionFilter`.
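A hedged sketch of combining `-retentionFilter` with the global retention (enterprise-only; the `team` label and the durations are example values):
```console
/path/to/victoria-metrics-prod -retentionPeriod=1y -retentionFilter='{team="dev"}:3d'
```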
@ -2044,9 +2055,9 @@ Enterprise binaries can be downloaded and evaluated for free from [the releases
A single-node VictoriaMetrics is capable of proxying requests to [vmalert](https://docs.victoriametrics.com/vmalert.html)
when `-vmalert.proxyURL` flag is set. Use this feature for the following cases:
* for proxying requests from [Grafana Alerting UI](https://grafana.com/docs/grafana/latest/alerting/);
* for accessing vmalert's UI through single-node VictoriaMetrics Web interface.
For accessing vmalert's UI through single-node VictoriaMetrics, configure the `-vmalert.proxyURL` flag and visit
`http://<victoriametrics-addr>:8428/vmalert/` link.
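A minimal sketch, assuming vmalert listens at `vmalert-host:8880`:
```console
/path/to/victoria-metrics-prod -vmalert.proxyURL=http://vmalert-host:8880
# vmalert's UI is then reachable at http://<victoriametrics-addr>:8428/vmalert/
```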
## Benchmarks
@ -2194,7 +2205,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-deleteAuthKey string
authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries
-denyQueriesOutsideRetention
Whether to deny queries outside of the configured -retentionPeriod. When set, then /api/v1/query_range would return '503 Service Unavailable' error for queries with 'from' value outside -retentionPeriod. This may be useful when multiple data sources with distinct retentions are hidden behind query-tee
Whether to deny queries outside the configured -retentionPeriod. When set, then /api/v1/query_range would return '503 Service Unavailable' error for queries with 'from' value outside -retentionPeriod. This may be useful when multiple data sources with distinct retentions are hidden behind query-tee
-denyQueryTracing
Whether to disable the ability to trace queries. See https://docs.victoriametrics.com/#query-tracing
-downsampling.period array
@ -2203,7 +2214,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-dryRun
Whether to check config files without running VictoriaMetrics. The following config files are checked: -promscrape.config, -relabelConfig and -streamAggr.config. Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag
-enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
Whether to enable IPv6 for listening and dialing. By default, only IPv4 TCP and UDP is used
-envflag.enable
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string
@ -2219,7 +2230,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-forceMergeAuthKey string
authKey, which must be passed in query string to /internal/force_merge pages
-fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
Whether to use pread() instead of mmap() for reading data files. By default, mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-graphiteListenAddr string
TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty. See also -graphiteListenAddr.useProxyProtocol
-graphiteListenAddr.useProxyProtocol
@ -2229,7 +2240,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-http.connTimeout duration
Incoming http connections are closed after the configured timeout. This may help to spread the incoming load among a cluster of services behind a load balancer. Please note that the real timeout may be bigger by up to 10% as a protection against the thundering herd problem (default 2m0s)
-http.disableResponseCompression
Disable compression of HTTP responses to save CPU resources. By default compression is enabled to save network bandwidth
Disable compression of HTTP responses to save CPU resources. By default, compression is enabled to save network bandwidth
-http.idleConnTimeout duration
Timeout for incoming idle http connections (default 1m0s)
-http.maxGracefulShutdownDuration duration
@ -2266,7 +2277,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-influxSkipMeasurement
Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'
-influxSkipSingleField
Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if InfluxDB line contains only a single field
Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field
-influxTrimTimestamp duration
Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-inmemoryDataFlushInterval duration
@ -2274,7 +2285,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-insert.maxQueueDuration duration
The maximum duration to wait in the queue when -maxConcurrentInserts concurrent insert requests are executed (default 1m0s)
-internStringCacheExpireDuration duration
The expire duration for caches for interned strings. See https://en.wikipedia.org/wiki/String_interning . See also -internStringMaxLen and -internStringDisableCache (default 6m0s)
The expiry duration for caches for interned strings. See https://en.wikipedia.org/wiki/String_interning . See also -internStringMaxLen and -internStringDisableCache (default 6m0s)
-internStringDisableCache
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
-internStringMaxLen int
@ -2337,9 +2348,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.azureSDCheckInterval duration
Interval for checking for changes in Azure. This works only if azure_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#azure_sd_configs for details (default 1m0s)
-promscrape.cluster.memberNum string
The number of number in the cluster of scrapers. It must be an unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster. Can be specified as pod name of Kubernetes StatefulSet - pod-name-Num, where Num is a numeric part of pod name (default "0")
The number of the member in the cluster of scrapers. It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster. Can be specified as pod name of Kubernetes StatefulSet - pod-name-Num, where Num is a numeric part of pod name (default "0")
-promscrape.cluster.membersCount int
The number of members in a cluster of scrapers. Each member must have an unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . Each member then scrapes roughly 1/N of all the targets. By default cluster scraping is disabled, i.e. a single scraper scrapes all the targets
The number of members in a cluster of scrapers. Each member must have a unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . Each member then scrapes roughly 1/N of all the targets. By default, cluster scraping is disabled, i.e. a single scraper scrapes all the targets
-promscrape.cluster.name string
Optional name of the cluster. If multiple vmagent clusters scrape the same targets, then each cluster must have unique name in order to properly de-duplicate samples received from these clusters. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679
-promscrape.cluster.replicationFactor int
@ -2351,7 +2362,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.config.strictParse
Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields (default true)
-promscrape.configCheckInterval duration
Interval for checking for changes in '-promscrape.config' file. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
Interval for checking for changes in '-promscrape.config' file. By default, the checking is disabled. Send SIGHUP signal in order to force config check for changes
-promscrape.consul.waitTime duration
Wait time used by Consul service discovery. Default value is used if not set
-promscrape.consulSDCheckInterval duration
@ -2359,9 +2370,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.digitaloceanSDCheckInterval duration
Interval for checking for changes in digital ocean. This works only if digitalocean_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#digitalocean_sd_configs for details (default 1m0s)
-promscrape.disableCompression
Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control
-promscrape.disableKeepAlive
Whether to disable HTTP keep-alive connections when scraping all the targets. This may be useful when targets has no support for HTTP keep-alive connection. It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets
Whether to disable HTTP keep-alive connections when scraping all the targets. This may be useful when targets have no support for HTTP keep-alive connections. It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets
-promscrape.discovery.concurrency int
The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100)
-promscrape.discovery.concurrentWaitTime duration
@ -2412,7 +2423,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.seriesLimitPerTarget int
Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info
-promscrape.streamParse
Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control
-promscrape.suppressDuplicateScrapeTargetErrors
Whether to suppress 'duplicate scrape target' errors; see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details
-promscrape.suppressScrapeErrors
@ -2427,7 +2438,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-pushmetrics.interval duration
Interval for pushing metrics to -pushmetrics.url (default 10s)
-pushmetrics.url array
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
Supports an array of values separated by comma or specified via multiple flags.
-relabelConfig string
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
@ -2467,7 +2478,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-search.maxGraphiteSeries int
The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage (default 300000)
-search.maxLookback duration
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaining due to historical reasons
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
-search.maxMemoryPerQuery size
The maximum amounts of memory a single query may consume. Queries requiring more memory are rejected. The total memory limit for concurrently executed queries can be estimated as -search.maxMemoryPerQuery multiplied by -search.maxConcurrentRequests . See also -search.logQueryMemoryUsage
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)
@ -2491,7 +2502,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-search.maxSeriesPerAggrFunc int
The maximum number of time series an aggregate MetricsQL function can generate (default 1000000)
-search.maxStalenessInterval duration
The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.setLookbackToStep' flag
The maximum interval for staleness calculations. By default, it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.setLookbackToStep' flag
-search.maxStatusRequestDuration duration
The maximum duration for /api/v1/status/* requests (default 5m0s)
-search.maxStepForPointsAdjustment duration
@ -2563,7 +2574,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-streamAggr.dedupInterval duration
Input samples are de-duplicated with this interval before being aggregated. Only the last sample per each time series per each interval is aggregated if the interval is greater than zero
-streamAggr.keepInput
Whether to keep input samples after the aggregation with -streamAggr.config. By default the input is dropped after the aggregation, so only the aggregate data is stored. See https://docs.victoriametrics.com/stream-aggregation.html
Whether to keep input samples after the aggregation with -streamAggr.config. By default, the input is dropped after the aggregation, so only the aggregate data is stored. See https://docs.victoriametrics.com/stream-aggregation.html
-tls
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set
-tlsCertFile string


@ -13,7 +13,7 @@ See [Quick Start](#quick-start) for details.
While VictoriaMetrics provides an efficient solution to store and observe metrics, our users needed something fast
and RAM friendly to scrape metrics from Prometheus-compatible exporters into VictoriaMetrics.
Also, we found that our user's infrastructure are like snowflakes in that no two are alike. Therefore we decided to add more flexibility
Also, we found that our users' infrastructures are like snowflakes in that no two are alike. Therefore, we decided to add more flexibility
to `vmagent` such as the ability to [accept metrics via popular push protocols](#how-to-push-data-to-vmagent)
additionally to [discovering Prometheus-compatible targets and scraping metrics from them](#how-to-collect-metrics-in-prometheus-format).
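For instance, a hedged example of pushing a single InfluxDB line protocol sample to `vmagent` (the hostname and the measurement are placeholders):
```console
curl -d 'cpu_usage,host=host-42 value=0.42' http://vmagent:8429/write
```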
@ -56,7 +56,7 @@ and sending the data to the Prometheus-compatible remote storage:
In this case `vmagent` ignores unsupported sections. See [the list of unsupported sections](#unsupported-prometheus-config-sections).
* `-remoteWrite.url` with Prometheus-compatible remote storage endpoint such as VictoriaMetrics.
Example command for writing the data recieved via [supported push-based protocols](#how-to-push-data-to-vmagent)
Example command for writing the data received via [supported push-based protocols](#how-to-push-data-to-vmagent)
to [single-node VictoriaMetrics](https://docs.victoriametrics.com/) located at `victoria-metrics-host:8428`:
```console
@ -151,7 +151,7 @@ to other remote storage systems, which support Prometheus `remote_write` protoco
`vmagent` replicates the collected metrics among multiple remote storage instances configured via `-remoteWrite.url` args.
If a single remote storage instance temporarily is out of service, then the collected data remains available in another remote storage instance.
`vmagent` buffers the collected data in files at `-remoteWrite.tmpDataPath` until the remote storage becomes available again
`vmagent` buffers the collected data in files at `-remoteWrite.tmpDataPath` until the remote storage becomes available again,
and then it sends the buffered data to the remote storage in order to prevent data gaps.
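A sketch of replicating to two remote storages with an explicit buffer path (the hostnames and the path are placeholders):
```console
/path/to/vmagent-prod \
  -remoteWrite.tmpDataPath=/var/lib/vmagent/buffer \
  -remoteWrite.url=http://victoria-metrics-a:8428/api/v1/write \
  -remoteWrite.url=http://victoria-metrics-b:8428/api/v1/write
```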
[VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) already supports replication,
@ -170,7 +170,7 @@ Please see [these docs](#relabeling) for details.
`vmagent` supports splitting the collected data between multiple destinations with the help of `-remoteWrite.urlRelabelConfig`,
which is applied independently for each configured `-remoteWrite.url` destination. For example, it is possible to replicate or split
data among long-term remote storage, short-term remote storage and a real-time analytical system [built on top of Kafka](https://github.com/Telefonica/prometheus-kafka-adapter).
Note that each destination can receive it's own subset of the collected data due to per-destination relabeling via `-remoteWrite.urlRelabelConfig`.
Note that each destination can receive its own subset of the collected data due to per-destination relabeling via `-remoteWrite.urlRelabelConfig`.
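A sketch of per-destination relabeling; the relabel config file names are hypothetical, and each Nth `-remoteWrite.urlRelabelConfig` applies to the Nth `-remoteWrite.url` by position:
```console
/path/to/vmagent-prod \
  -remoteWrite.url=http://long-term-storage:8428/api/v1/write \
  -remoteWrite.url=http://short-term-storage:8428/api/v1/write \
  -remoteWrite.urlRelabelConfig=long-term.yml \
  -remoteWrite.urlRelabelConfig=short-term.yml
```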
### Prometheus remote_write proxy
@ -182,7 +182,7 @@ Also, Basic Auth can be enabled for the incoming `remote_write` requests with `-
### remote_write for clustered version
While `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets,
writes are always performed in Promethes remote_write protocol. Therefore for the [clustered version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html),
writes are always performed in Prometheus remote_write protocol. Therefore, for the [clustered version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html),
the `-remoteWrite.url` command-line flag should be configured as `<schema>://<vminsert-host>:8480/insert/<accountID>/prometheus/api/v1/write`
according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format).
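For example, a minimal sketch for writing to tenant `0` of a cluster (the `vminsert-host` name is a placeholder):
```console
/path/to/vmagent-prod \
  -remoteWrite.url=http://vminsert-host:8480/insert/0/prometheus/api/v1/write
```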
There is also support for multitenant writes. See [these docs](#multitenancy).
@ -236,7 +236,7 @@ This allows using a single `vmagent` instance in front of multiple VictoriaMetri
If `-remoteWrite.multitenantURL` command-line flag is set and `vmagent` is configured to scrape Prometheus-compatible targets
(e.g. if `-promscrape.config` command-line flag is set) then `vmagent` reads tenantID from `__tenant_id__` label
for the discovered targets and routes all the metrics from this target to the given `__tenant_id__`,
e.g. to the url `<-remoteWrite.multitnenatURL>/insert/<__tenant_id__>/prometheus/api/v1/write`.
e.g. to the url `<-remoteWrite.multitenantURL>/insert/<__tenant_id__>/prometheus/api/v1/write`.
For example, the following relabeling rule instructs sending metrics to tenantID defined in the `prometheus.io/tenant` annotation of Kubernetes pod deployment:
@ -282,14 +282,14 @@ scrape_configs:
- "My-Auth: TopSecret"
```
* `disable_compression: true` for disabling response compression on a per-job basis. By default `vmagent` requests compressed responses
* `disable_compression: true` for disabling response compression on a per-job basis. By default, `vmagent` requests compressed responses
from scrape targets for saving network bandwidth.
* `disable_keepalive: true` for disabling [HTTP keep-alive connections](https://en.wikipedia.org/wiki/HTTP_persistent_connection)
on a per-job basis. By default `vmagent` uses keep-alive connections to scrape targets for reducing overhead on connection re-establishing.
on a per-job basis. By default, `vmagent` uses keep-alive connections to scrape targets for reducing overhead on connection re-establishing.
* `series_limit: N` for limiting the number of unique time series a single scrape target can expose. See [these docs](#cardinality-limiter).
* `stream_parse: true` for scraping targets in a streaming manner. This may be useful when targets export a big number of metrics. See [these docs](#stream-parsing-mode).
* `scrape_align_interval: duration` for aligning scrapes to the given interval instead of using random offset
in the range `[0 ... scrape_interval]` for scraping each target. The random offset helps spreading scrapes evenly in time.
in the range `[0 ... scrape_interval]` for scraping each target. The random offset helps to spread scrapes evenly in time.
* `scrape_offset: duration` for specifying the exact offset for scraping instead of using random offset in the range `[0 ... scrape_interval]`.
See [scrape_configs docs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) for more details on all the supported options.
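To illustrate a couple of these options together, a hedged sketch of a scrape config (the job name, target and limit values are examples):
```console
cat > scrape.yml <<'EOF'
scrape_configs:
  - job_name: node
    disable_compression: true
    series_limit: 10000
    static_configs:
      - targets: ["node-exporter:9100"]
EOF
/path/to/vmagent-prod -promscrape.config=scrape.yml \
  -remoteWrite.url=http://victoria-metrics:8428/api/v1/write
```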
@ -338,7 +338,7 @@ There is no need in specifying top-level `scrape_configs` section in these files
The list of supported service discovery types is available [here](#how-to-collect-metrics-in-prometheus-format).
Additionally `vmagent` doesn't support `refresh_interval` option at service discovery sections.
Additionally, `vmagent` doesn't support `refresh_interval` option at service discovery sections.
This option is substituted with `-promscrape.*CheckInterval` command-line options, which are specific per each service discovery type.
See [the full list of command-line flags for vmagent](#advanced-usage).
@ -460,7 +460,7 @@ VictoriaMetrics components support [Prometheus-compatible relabeling](https://pr
with [additional enhancements](#relabeling-enhancements). The relabeling can be defined in the following places processed by `vmagent`:
* At the `scrape_config -> relabel_configs` section in `-promscrape.config` file.
This relabeling is used for modifying labels in discovered targets and for dropping unneded targets.
This relabeling is used for modifying labels in discovered targets and for dropping unneeded targets.
See [relabeling cookbook](https://docs.victoriametrics.com/relabeling.html) for details.
This relabeling can be debugged by clicking the `debug` link at the corresponding target on the `http://vmagent:8429/targets` page
@ -547,7 +547,7 @@ The following articles contain useful information about Prometheus relabeling:
* VictoriaMetrics provides the following additional relabeling actions on top of standard actions
from the [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config):
* `replace_all` replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement`
* `replace_all` replaces all the occurrences of `regex` in the values of `source_labels` with the `replacement`
and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurrences
of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`):
@ -559,7 +559,7 @@ The following articles contain useful information about Prometheus relabeling:
replacement: "_"
```
* `labelmap_all` replaces all of the occurrences of `regex` in all the label names with the `replacement`.
* `labelmap_all` replaces all the occurrences of `regex` in all the label names with the `replacement`.
For example, the following relabeling config replaces all the occurrences of `-` char in all the label names
with `_` char (e.g. `foo-bar-baz` label name is transformed into `foo_bar_baz`):
@ -679,7 +679,7 @@ e.g. it sets `scrape_series_added` metric to zero. See [these docs](#automatical
## Stream parsing mode
By default `vmagent` reads the full response body from scrape target into memory, then parses it, applies [relabeling](#relabeling)
By default, `vmagent` reads the full response body from scrape target into memory, then parses it, applies [relabeling](#relabeling)
and then pushes the resulting metrics to the configured `-remoteWrite.url`. This mode works well for the majority of cases
when the scrape target exposes a small number of metrics (e.g. less than 10 thousand). But this mode may use big amounts of memory
when the scrape target exposes a big number of metrics. In this case it is recommended to enable stream parsing mode.
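Stream parsing can also be enabled globally for all the targets via a command-line flag instead of per-job `stream_parse: true` — a sketch, where the config path and remote write URL are placeholders:
```console
/path/to/vmagent-prod -promscrape.config=scrape.yml -promscrape.streamParse \
  -remoteWrite.url=http://victoria-metrics:8428/api/v1/write
```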
@ -707,8 +707,8 @@ scrape_configs:
stream_parse: true
static_configs:
- targets:
- big-prometeus1
- big-prometeus2
- big-prometheus1
- big-prometheus2
honor_labels: true
metrics_path: /federate
params:
@ -716,7 +716,7 @@ scrape_configs:
```
Note that `vmagent` in stream parsing mode sends up to `sample_limit` samples to the configured `-remoteWrite.url`
instead of droping all the samples read from the target, because the parsed data is sent to the remote storage
instead of dropping all the samples read from the target, because the parsed data is sent to the remote storage
as soon as it is parsed in stream parsing mode.
## Scraping big number of targets
@ -736,7 +736,7 @@ spread scrape targets among a cluster of two `vmagent` instances:
The `-promscrape.cluster.memberNum` can be set to a StatefulSet pod name when `vmagent` runs in Kubernetes.
The pod name must end with a number in the range `0 ... promscrape.cluster.membersCount-1`. For example, `-promscrape.cluster.memberNum=vmagent-0`.
By default each scrape target is scraped only by a single `vmagent` instance in the cluster. If there is a need for replicating scrape targets among multiple `vmagent` instances,
By default, each scrape target is scraped only by a single `vmagent` instance in the cluster. If there is a need for replicating scrape targets among multiple `vmagent` instances,
then `-promscrape.cluster.replicationFactor` command-line flag must be set to the desired number of replicas. For example, the following commands
start a cluster of three `vmagent` instances, where each target is scraped by two `vmagent` instances:
@ -802,7 +802,7 @@ scrape_configs:
## Cardinality limiter
By default `vmagent` doesn't limit the number of time series each scrape target can expose.
By default, `vmagent` doesn't limit the number of time series each scrape target can expose.
The limit can be enforced in the following places:
* Via `-promscrape.seriesLimitPerTarget` command-line option. This limit is applied individually
@ -830,7 +830,7 @@ These metrics allow building the following alerting rules:
See also `sample_limit` option at [scrape_config section](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
By default `vmagent` doesn't limit the number of time series written to remote storage systems specified at `-remoteWrite.url`.
By default, `vmagent` doesn't limit the number of time series written to remote storage systems specified at `-remoteWrite.url`.
The limit can be enforced by setting the following command-line flags:
* `-remoteWrite.maxHourlySeries` - limits the number of unique time series `vmagent` can write to remote storage systems during the last hour.
@ -873,7 +873,7 @@ If you have suggestions for improvements or have found a bug - please open an is
* `http://vmagent-host:8429/api/v1/targets`. This handler returns JSON response
compatible with [the corresponding page from Prometheus API](https://prometheus.io/docs/prometheus/latest/querying/api/#targets).
* `http://vmagent-host:8429/ready`. This handler returns http 200 status code when `vmagent` finishes
it's initialization for all the [service_discovery configs](https://docs.victoriametrics.com/sd_configs.html).
its initialization for all the [service_discovery configs](https://docs.victoriametrics.com/sd_configs.html).
It may be useful to perform `vmagent` rolling update without any scrape loss.
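For example, a readiness probe sketch for a rolling update (hostname assumed):
```console
curl -sf http://vmagent-host:8429/ready || echo "vmagent is not ready yet"
```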
## Troubleshooting
@ -901,9 +901,9 @@ If you have suggestions for improvements or have found a bug - please open an is
* The `/service-discovery` page could be useful for debugging relabeling process for scrape targets.
This page contains original labels for targets dropped during relabeling.
By default the `-promscrape.maxDroppedTargets` targets are shown here. If your setup drops more targets during relabeling,
By default, the `-promscrape.maxDroppedTargets` targets are shown here. If your setup drops more targets during relabeling,
then increase `-promscrape.maxDroppedTargets` command-line flag value to see all the dropped targets.
Note that tracking each dropped target requires up to 10Kb of RAM. Therefore big values for `-promscrape.maxDroppedTargets`
Note that tracking each dropped target requires up to 10Kb of RAM. Therefore, big values for `-promscrape.maxDroppedTargets`
may result in increased memory usage if a big number of scrape targets are dropped during relabeling.
* We recommend you increase `-remoteWrite.queues` if `vmagent_remotewrite_pending_data_bytes` metric exported
@ -913,7 +913,7 @@ If you have suggestions for improvements or have found a bug - please open an is
* If you see gaps in the data pushed by `vmagent` to remote storage when `-remoteWrite.maxDiskUsagePerURL` is set,
try increasing `-remoteWrite.queues`. Such gaps may appear because `vmagent` cannot keep up with sending the collected data to remote storage.
Therefore it starts dropping the buffered data if the on-disk buffer size exceeds `-remoteWrite.maxDiskUsagePerURL`.
Therefore, it starts dropping the buffered data if the on-disk buffer size exceeds `-remoteWrite.maxDiskUsagePerURL`.
* `vmagent` drops data blocks if remote storage replies with `400 Bad Request` and `409 Conflict` HTTP responses.
The number of dropped blocks can be monitored via `vmagent_remotewrite_packets_dropped_total` metric exported at [/metrics page](#monitoring).
@ -979,7 +979,7 @@ See also [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting
* [Writing metrics to Kafka](#writing-metrics-to-kafka)
The enterprise version of vmagent is available for evaluation at [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page
in `vmutils-...-enteprise.tar.gz` archives and in [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.
in `vmutils-...-enterprise.tar.gz` archives and in [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.
### Reading metrics from Kafka
@ -1027,7 +1027,7 @@ data_format = "influx"
These command-line flags are available only in [enterprise](https://docs.victoriametrics.com/enterprise.html) version of `vmagent`,
which can be downloaded for evaluation from [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page
(see `vmutils-...-enteprise.tar.gz` archives) and from [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.
(see `vmutils-...-enterprise.tar.gz` archives) and from [docker images](https://hub.docker.com/r/victoriametrics/vmagent/tags) with tags containing `enterprise` suffix.
```
-kafka.consumer.topic array
@ -1163,7 +1163,7 @@ It is safe sharing the collected profiles from security point of view, since the
## Advanced usage
`vmagent` can be fine-tuned with various command-line flags. Run `./vmagent -help` in order to see the full list of these flags with their desciptions and default values:
`vmagent` can be fine-tuned with various command-line flags. Run `./vmagent -help` in order to see the full list of these flags with their descriptions and default values:
```
./vmagent -help
@ -1188,7 +1188,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-dryRun
Whether to check config files without running vmagent. The following files are checked: -promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig, -remoteWrite.streamAggr.config . Unknown config entries aren't allowed in -promscrape.config by default. This can be changed by passing -promscrape.config.strictParse=false command-line flag
-enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
Whether to enable IPv6 for listening and dialing. By default, only IPv4 TCP and UDP is used
-envflag.enable
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string
@ -1198,7 +1198,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-flagsAuthKey string
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
-fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
Whether to use pread() instead of mmap() for reading data files. By default, mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-graphiteListenAddr string
TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty. See also -graphiteListenAddr.useProxyProtocol
-graphiteListenAddr.useProxyProtocol
@ -1208,7 +1208,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-http.connTimeout duration
Incoming http connections are closed after the configured timeout. This may help to spread the incoming load among a cluster of services behind a load balancer. Please note that the real timeout may be bigger by up to 10% as a protection against the thundering herd problem (default 2m0s)
-http.disableResponseCompression
Disable compression of HTTP responses to save CPU resources. By default compression is enabled to save network bandwidth
Disable compression of HTTP responses to save CPU resources. By default, compression is enabled to save network bandwidth
-http.idleConnTimeout duration
Timeout for incoming idle http connections (default 1m0s)
-http.maxGracefulShutdownDuration duration
@ -1245,13 +1245,13 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-influxSkipMeasurement
Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'
-influxSkipSingleField
Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if InfluxDB line contains only a single field
Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field
-influxTrimTimestamp duration
Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-insert.maxQueueDuration duration
The maximum duration to wait in the queue when -maxConcurrentInserts concurrent insert requests are executed (default 1m0s)
-internStringCacheExpireDuration duration
The expire duration for caches for interned strings. See https://en.wikipedia.org/wiki/String_interning . See also -internStringMaxLen and -internStringDisableCache (default 6m0s)
The expiry duration for caches for interned strings. See https://en.wikipedia.org/wiki/String_interning . See also -internStringMaxLen and -internStringDisableCache (default 6m0s)
-internStringDisableCache
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
-internStringMaxLen int
@ -1332,9 +1332,9 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-promscrape.azureSDCheckInterval duration
Interval for checking for changes in Azure. This works only if azure_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#azure_sd_configs for details (default 1m0s)
-promscrape.cluster.memberNum string
The number of number in the cluster of scrapers. It must be an unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster. Can be specified as pod name of Kubernetes StatefulSet - pod-name-Num, where Num is a numeric part of pod name (default "0")
The number of the member in the cluster of scrapers. It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster. Can be specified as pod name of Kubernetes StatefulSet - pod-name-Num, where Num is a numeric part of pod name (default "0")
-promscrape.cluster.membersCount int
The number of members in a cluster of scrapers. Each member must have an unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . Each member then scrapes roughly 1/N of all the targets. By default cluster scraping is disabled, i.e. a single scraper scrapes all the targets
The number of members in a cluster of scrapers. Each member must have a unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . Each member then scrapes roughly 1/N of all the targets. By default, cluster scraping is disabled, i.e. a single scraper scrapes all the targets
-promscrape.cluster.name string
Optional name of the cluster. If multiple vmagent clusters scrape the same targets, then each cluster must have unique name in order to properly de-duplicate samples received from these clusters. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679
-promscrape.cluster.replicationFactor int
@ -1346,7 +1346,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-promscrape.config.strictParse
Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields (default true)
-promscrape.configCheckInterval duration
Interval for checking for changes in '-promscrape.config' file. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
Interval for checking for changes in '-promscrape.config' file. By default, the checking is disabled. Send SIGHUP signal in order to force config check for changes
-promscrape.consul.waitTime duration
Wait time used by Consul service discovery. Default value is used if not set
-promscrape.consulSDCheckInterval duration
@ -1356,9 +1356,9 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-promscrape.digitaloceanSDCheckInterval duration
Interval for checking for changes in digital ocean. This works only if digitalocean_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#digitalocean_sd_configs for details (default 1m0s)
-promscrape.disableCompression
Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control
-promscrape.disableKeepAlive
Whether to disable HTTP keep-alive connections when scraping all the targets. This may be useful when targets has no support for HTTP keep-alive connection. It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets
Whether to disable HTTP keep-alive connections when scraping all the targets. This may be useful when targets have no support for HTTP keep-alive connections. It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets
-promscrape.discovery.concurrency int
The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100)
-promscrape.discovery.concurrentWaitTime duration
@ -1409,7 +1409,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-promscrape.seriesLimitPerTarget int
Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info
-promscrape.streamParse
Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control
-promscrape.suppressDuplicateScrapeTargetErrors
Whether to suppress 'duplicate scrape target' errors; see https://docs.victoriametrics.com/vmagent.html#troubleshooting for details
-promscrape.suppressScrapeErrors
@ -1424,7 +1424,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-pushmetrics.interval duration
Interval for pushing metrics to -pushmetrics.url (default 10s)
-pushmetrics.url array
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
Supports an array of values separated by comma or specified via multiple flags.
-remoteWrite.aws.accessKey array
Optional AWS AccessKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set
@ -1516,14 +1516,14 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-remoteWrite.queues int
The number of concurrent queues to each -remoteWrite.url. Set more queues if default number of queues isn't enough for sending high volume of collected data to remote storage. Default value is 2 * numberOfAvailableCPUs (default 8)
-remoteWrite.rateLimit array
Optional rate limit in bytes per second for data sent to the corresponding -remoteWrite.url. By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data is sent after temporary unavailability of the remote storage
Optional rate limit in bytes per second for data sent to the corresponding -remoteWrite.url. By default, the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data is sent after temporary unavailability of the remote storage
Supports array of values separated by comma or specified via multiple flags.
-remoteWrite.relabelConfig string
Optional path to file with relabeling configs, which are applied to all the metrics before sending them to -remoteWrite.url. See also -remoteWrite.urlRelabelConfig. The path can point either to local file or to http url. See https://docs.victoriametrics.com/vmagent.html#relabeling
-remoteWrite.keepDanglingQueues
Keep persistent queues contents at -remoteWrite.tmpDataPath in case there are no matching -remoteWrite.url. Useful when -remoteWrite.url is changed temporarily and persistent queue files will be needed later on.
-remoteWrite.roundDigits array
Round metric values to this number of decimal digits after the point before writing them to remote storage. Examples: -remoteWrite.roundDigits=2 would round 1.236 to 1.24, while -remoteWrite.roundDigits=-1 would round 126.78 to 130. By default digits rounding is disabled. Set it to 100 for disabling it for a particular remote storage. This option may be used for improving data compression for the stored metrics
Round metric values to this number of decimal digits after the point before writing them to remote storage. Examples: -remoteWrite.roundDigits=2 would round 1.236 to 1.24, while -remoteWrite.roundDigits=-1 would round 126.78 to 130. By default, digits rounding is disabled. Set it to 100 for disabling it for a particular remote storage. This option may be used for improving data compression for the stored metrics
Supports array of values separated by comma or specified via multiple flags.
-remoteWrite.sendTimeout array
Timeout for sending a single block of data to the corresponding -remoteWrite.url
@@ -1540,10 +1540,10 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
Input samples are de-duplicated with this interval before being aggregated. Only the last sample per each time series per each interval is aggregated if the interval is greater than zero
Supports array of values separated by comma or specified via multiple flags.
-remoteWrite.streamAggr.keepInput array
Whether to keep input samples after the aggregation with -remoteWrite.streamAggr.config. By default the input is dropped after the aggregation, so only the aggregate data is sent to the -remoteWrite.url. See https://docs.victoriametrics.com/stream-aggregation.html
Whether to keep input samples after the aggregation with -remoteWrite.streamAggr.config. By default, the input is dropped after the aggregation, so only the aggregate data is sent to the -remoteWrite.url. See https://docs.victoriametrics.com/stream-aggregation.html
Supports array of values separated by comma or specified via multiple flags.
-remoteWrite.tlsCAFile array
Optional path to TLS CA file to use for verifying connections to the corresponding -remoteWrite.url. By default system CA is used
Optional path to TLS CA file to use for verifying connections to the corresponding -remoteWrite.url. By default, system CA is used
Supports an array of values separated by comma or specified via multiple flags.
-remoteWrite.tlsCertFile array
Optional path to client-side TLS certificate file to use when connecting to the corresponding -remoteWrite.url
@@ -1555,7 +1555,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
Optional path to client-side TLS certificate key to use when connecting to the corresponding -remoteWrite.url
Supports an array of values separated by comma or specified via multiple flags.
-remoteWrite.tlsServerName array
Optional TLS server name to use for connections to the corresponding -remoteWrite.url. By default the server name from -remoteWrite.url is used
Optional TLS server name to use for connections to the corresponding -remoteWrite.url. By default, the server name from -remoteWrite.url is used
Supports an array of values separated by comma or specified via multiple flags.
-remoteWrite.tmpDataPath string
Path to directory where temporary data for remote write component is stored. See also -remoteWrite.maxDiskUsagePerURL (default "vmagent-remotewrite-data")

View file

@@ -22,7 +22,7 @@ import (
var (
measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for '{measurement}{separator}{field_name}' metric name when inserted via InfluxDB line protocol")
skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if InfluxDB line contains only a single field")
skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field")
skipMeasurement = flag.Bool("influxSkipMeasurement", false, "Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'")
dbLabel = flag.String("influxDBLabel", "db", "Default label for the DB name sent over '?db={db_name}' query parameter")
)

View file

@@ -27,7 +27,7 @@ var (
"to the corresponding -remoteWrite.url . See https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol")
rateLimit = flagutil.NewArrayInt("remoteWrite.rateLimit", "Optional rate limit in bytes per second for data sent to the corresponding -remoteWrite.url. "+
"By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data "+
"By default, the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data "+
"is sent after temporary unavailability of the remote storage")
sendTimeout = flagutil.NewArrayDuration("remoteWrite.sendTimeout", "Timeout for sending a single block of data to the corresponding -remoteWrite.url")
proxyURL = flagutil.NewArrayString("remoteWrite.proxyURL", "Optional proxy URL for writing data to the corresponding -remoteWrite.url. "+
@@ -38,9 +38,9 @@ var (
"to the corresponding -remoteWrite.url")
tlsKeyFile = flagutil.NewArrayString("remoteWrite.tlsKeyFile", "Optional path to client-side TLS certificate key to use when connecting to the corresponding -remoteWrite.url")
tlsCAFile = flagutil.NewArrayString("remoteWrite.tlsCAFile", "Optional path to TLS CA file to use for verifying connections to the corresponding -remoteWrite.url. "+
"By default system CA is used")
"By default, system CA is used")
tlsServerName = flagutil.NewArrayString("remoteWrite.tlsServerName", "Optional TLS server name to use for connections to the corresponding -remoteWrite.url. "+
"By default the server name from -remoteWrite.url is used")
"By default, the server name from -remoteWrite.url is used")
headers = flagutil.NewArrayString("remoteWrite.headers", "Optional HTTP headers to send with each request to the corresponding -remoteWrite.url. "+
"For example, -remoteWrite.headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding -remoteWrite.url. "+

View file

@@ -54,7 +54,7 @@ var (
"This option may be used for improving data compression for the stored metrics. See also -remoteWrite.roundDigits")
roundDigits = flagutil.NewArrayInt("remoteWrite.roundDigits", "Round metric values to this number of decimal digits after the point before writing them to remote storage. "+
"Examples: -remoteWrite.roundDigits=2 would round 1.236 to 1.24, while -remoteWrite.roundDigits=-1 would round 126.78 to 130. "+
"By default digits rounding is disabled. Set it to 100 for disabling it for a particular remote storage. "+
"By default, digits rounding is disabled. Set it to 100 for disabling it for a particular remote storage. "+
"This option may be used for improving data compression for the stored metrics")
sortLabels = flag.Bool("sortLabels", false, `Whether to sort labels for incoming samples before writing them to all the configured remote storage systems. `+
`This may be needed for reducing memory usage at remote storage when the order of labels in incoming samples is random. `+
@@ -69,7 +69,7 @@ var (
"See https://docs.victoriametrics.com/stream-aggregation.html . "+
"See also -remoteWrite.streamAggr.keepInput and -remoteWrite.streamAggr.dedupInterval")
streamAggrKeepInput = flagutil.NewArrayBool("remoteWrite.streamAggr.keepInput", "Whether to keep input samples after the aggregation with -remoteWrite.streamAggr.config. "+
"By default the input is dropped after the aggregation, so only the aggregate data is sent to the -remoteWrite.url. "+
"By default, the input is dropped after the aggregation, so only the aggregate data is sent to the -remoteWrite.url. "+
"See https://docs.victoriametrics.com/stream-aggregation.html")
streamAggrDedupInterval = flagutil.NewArrayDuration("remoteWrite.streamAggr.dedupInterval", "Input samples are de-duplicated with this interval before being aggregated. "+
"Only the last sample per each time series per each interval is aggregated if the interval is greater than zero")

View file

@@ -14,7 +14,7 @@ or [cluster version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.ht
of VictoriaMetrics are capable of proxying requests to vmalert via `-vmalert.proxyURL` command-line flag.
Use this feature for the following cases:
* for proxying requests from [Grafana Alerting UI](https://grafana.com/docs/grafana/latest/alerting/);
* for accessing vmalert's UI through VictoriaMetrics Web interface.
* for accessing vmalert's UI through VictoriaMetrics Web interface.
## Features
@@ -23,7 +23,7 @@ Use this feature for the following cases:
support and expressions validation;
* Prometheus [alerting rules definition format](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/#defining-alerting-rules)
support;
* Integration with [Alertmanager](https://github.com/prometheus/alertmanager) starting from [Alertmanager v0.16.0-aplha](https://github.com/prometheus/alertmanager/releases/tag/v0.16.0-alpha.0);
* Integration with [Alertmanager](https://github.com/prometheus/alertmanager) starting from [Alertmanager v0.16.0-alpha](https://github.com/prometheus/alertmanager/releases/tag/v0.16.0-alpha.0);
* Keeps the alerts [state on restarts](#alerts-state-on-restarts);
* Graphite datasource can be used for alerting and recording rules. See [these docs](#graphite);
* Recording and Alerting rules backfilling (aka `replay`). See [these docs](#rules-backfilling);
@@ -271,7 +271,7 @@ Additionally, `vmalert` provides some extra templating functions listed [here](#
query at `-datasource.url` and returns the first result.
- `queryEscape` - escapes the input string, so it can be safely put inside [query arg](https://en.wikipedia.org/wiki/Percent-encoding) part of URL.
- `quotesEscape` - escapes the input string, so it can be safely embedded into JSON string.
- `reReplaceAll regex repl` - replaces all the occurences of the `regex` in input string with the `repl`.
- `reReplaceAll regex repl` - replaces all the occurrences of the `regex` in input string with the `repl`.
- `safeHtml` - marks the input string as safe to use in HTML context without the need to html-escape it.
- `sortByLabel name` - sorts the input query results by the label with the given `name`.
- `stripDomain` - leaves the first part of the domain. For example, `foo.bar.baz` is converted to `foo`.
@@ -483,7 +483,7 @@ Cluster mode could have multiple `vminsert` and `vmselect` components.
<img alt="vmalert cluster" src="vmalert_cluster.png">
If you want to spread the load on these components, add balancers before them and configure
`vmalert` with balancer's addresses. Please, see more about VM's cluster architecture
`vmalert` with balancer's addresses. Please see more about VM's cluster architecture
[here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#architecture-overview).
#### HA vmalert
@@ -508,7 +508,7 @@ Alertmanagers.
To avoid recording rules results and alerts state duplication in VictoriaMetrics server
don't forget to configure [deduplication](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#deduplication).
The recommended value for `-dedup.minScrapeInterval` must be greater or equal to vmalert's `evaluation_interval`.
The recommended value for `-dedup.minScrapeInterval` must be greater or equal to vmalert's `evaluation_interval`.
If you observe inconsistent or "jumping" values in series produced by vmalert, try disabling `-datasource.queryTimeAlignment`
command line flag. Because of alignment, two or more vmalert HA pairs will produce results with the same timestamps.
But due to backfilling (data delivered to the datasource with some delay) values of such results may differ,
@@ -518,7 +518,7 @@ Alertmanager will automatically deduplicate alerts with identical labels, so ens
all `vmalert`s have the same config.
Don't forget to configure [cluster mode](https://prometheus.io/docs/alerting/latest/alertmanager/)
for Alertmanagers for better reliability. List all Alertmanager URLs in vmalert's `-notifier.url`
for Alertmanagers for better reliability. List all Alertmanager URLs in vmalert's `-notifier.url`
to ensure [high availability](https://github.com/prometheus/alertmanager#high-availability).
This example uses single-node VM server for the sake of simplicity.
@@ -610,7 +610,7 @@ or time series modification via [relabeling](https://docs.victoriametrics.com/vm
`vmalert` web UI can be accessed from [single-node version of VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html)
and from [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
This may be used for better integraion with Grafana unified alerting system. See the following docs for details:
This may be used for better integration with Grafana unified alerting system. See the following docs for details:
* [How to query vmalert from single-node VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#vmalert)
* [How to query vmalert from VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#vmalert)
@@ -762,11 +762,11 @@ Try the following recommendations to reduce the chance of hitting the data delay
[time series resolution](https://docs.victoriametrics.com/keyConcepts.html#time-series-resolution). For example,
if expression is `rate(my_metric[2m]) > 0` then ensure that `my_metric` resolution is at least `1m` or better `30s`.
If you use VictoriaMetrics as datasource, `[duration]` can be omitted and VictoriaMetrics will adjust it automatically.
* If you know in advance, that data in datasource is delayed - try changing vmalert's `-datasource.lookback`
* If you know in advance that data in datasource is delayed - try changing vmalert's `-datasource.lookback`
command-line flag to add a time shift for evaluations. Or extend `[duration]` to tolerate the delay.
For example, `max_over_time(errors_total[10m]) > 0` will be active even if there is no data in datasource for last `9m`.
* If [time series resolution](https://docs.victoriametrics.com/keyConcepts.html#time-series-resolution)
in datasource is inconsistent or `>=5min` - try changing vmalert's `-datasource.queryStep` command-line flag to specify
in datasource is inconsistent or `>=5min` - try changing vmalert's `-datasource.queryStep` command-line flag to specify
how far search query can lookback for the recent datapoint. The recommendation is to have the step
at least two times bigger than the resolution.
@@ -818,8 +818,8 @@ and vmalert will start printing additional log messages:
vmalert can detect if alert's expression doesn't match any time series at runtime. This problem usually happens
when alerting expression selects time series which aren't present in the datasource (e.g. wrong `job` label)
or there is a typo in the series selector (e.g. `env=rpod`). Such alerting rules will be marked with special icon in
vmalert's UI and exposed via `vmalert_alerting_rules_last_evaluation_series_fetched` metric. The metric's value will
show how many time series were matched before the filtering by rule's expression. If metric's value is `-1`, then
vmalert's UI and exposed via `vmalert_alerting_rules_last_evaluation_series_fetched` metric. The metric's value will
show how many time series were matched before the filtering by rule's expression. If metric's value is `-1`, then
this feature is not supported by the datasource (old versions of VictoriaMetrics). The following expression can be
used to detect rules matching no series:
```
@@ -872,7 +872,7 @@ The shortlist of configuration flags is the following:
-clusterMode
If clusterMode is enabled, then vmalert automatically adds the tenant specified in config groups to -datasource.url, -remoteWrite.url and -remoteRead.url. See https://docs.victoriametrics.com/vmalert.html#multitenancy . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-configCheckInterval duration
Interval for checking for changes in '-rule' or '-notifier.config' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes.
Interval for checking for changes in '-rule' or '-notifier.config' files. By default, the checking is disabled. Send SIGHUP signal in order to force config check for changes.
-datasource.appendTypePrefix
Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.
-datasource.basicAuth.password string
@@ -906,7 +906,7 @@ The shortlist of configuration flags is the following:
-datasource.queryStep duration
How far a value can fall back to when evaluating queries. For example, if -datasource.queryStep=15s then param "step" with value "15s" will be added to every query. If set to 0, rule's evaluation interval will be used instead. (default 5m0s)
-datasource.queryTimeAlignment
Whether to align "time" parameter with evaluation interval.Alignment supposed to produce deterministic results despite of number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257 (default true)
Whether to align "time" parameter with evaluation interval. Alignment is supposed to produce deterministic results regardless of the number of vmalert replicas or the time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257 (default true)
-datasource.roundDigits int
Adds "round_digits" GET param to datasource requests. In VM "round_digits" limits the number of digits after the decimal point in response values.
-datasource.showURL
@@ -932,7 +932,7 @@ The shortlist of configuration flags is the following:
-dryRun
Whether to check only config files without running vmalert. The rules files are validated. The -rule flag must be specified.
-enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
Whether to enable IPv6 for listening and dialing. By default, only IPv4 TCP and UDP is used
-envflag.enable
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string
@@ -951,11 +951,11 @@ The shortlist of configuration flags is the following:
-flagsAuthKey string
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
-fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
Whether to use pread() instead of mmap() for reading data files. By default, mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-http.connTimeout duration
Incoming http connections are closed after the configured timeout. This may help to spread the incoming load among a cluster of services behind a load balancer. Please note that the real timeout may be bigger by up to 10% as a protection against the thundering herd problem (default 2m0s)
-http.disableResponseCompression
Disable compression of HTTP responses to save CPU resources. By default compression is enabled to save network bandwidth
Disable compression of HTTP responses to save CPU resources. By default, compression is enabled to save network bandwidth
-http.idleConnTimeout duration
Timeout for incoming idle http connections (default 1m0s)
-http.maxGracefulShutdownDuration duration
@@ -1036,7 +1036,7 @@ The shortlist of configuration flags is the following:
-notifier.suppressDuplicateTargetErrors
Whether to suppress 'duplicate target' errors during discovery
-notifier.tlsCAFile array
Optional path to TLS CA file to use for verifying connections to -notifier.url. By default system CA is used
Optional path to TLS CA file to use for verifying connections to -notifier.url. By default, system CA is used
Supports an array of values separated by comma or specified via multiple flags.
-notifier.tlsCertFile array
Optional path to client-side TLS certificate file to use when connecting to -notifier.url
@@ -1048,7 +1048,7 @@ The shortlist of configuration flags is the following:
Optional path to client-side TLS certificate key to use when connecting to -notifier.url
Supports an array of values separated by comma or specified via multiple flags.
-notifier.tlsServerName array
Optional TLS server name to use for connections to -notifier.url. By default the server name from -notifier.url is used
Optional TLS server name to use for connections to -notifier.url. By default, the server name from -notifier.url is used
Supports an array of values separated by comma or specified via multiple flags.
-notifier.url array
Prometheus Alertmanager URL, e.g. http://127.0.0.1:9093. List all Alertmanager URLs if it runs in the cluster mode to ensure high availability.
@@ -1071,7 +1071,7 @@ The shortlist of configuration flags is the following:
-pushmetrics.interval duration
Interval for pushing metrics to -pushmetrics.url (default 10s)
-pushmetrics.url array
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
Supports an array of values separated by comma or specified via multiple flags.
-remoteRead.basicAuth.password string
Optional basic auth password for -remoteRead.url
@@ -1104,7 +1104,7 @@ The shortlist of configuration flags is the following:
-remoteRead.showURL
Whether to show -remoteRead.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
-remoteRead.tlsCAFile string
Optional path to TLS CA file to use for verifying connections to -remoteRead.url. By default system CA is used
Optional path to TLS CA file to use for verifying connections to -remoteRead.url. By default, system CA is used
-remoteRead.tlsCertFile string
Optional path to client-side TLS certificate file to use when connecting to -remoteRead.url
-remoteRead.tlsInsecureSkipVerify
@@ -1112,7 +1112,7 @@ The shortlist of configuration flags is the following:
-remoteRead.tlsKeyFile string
Optional path to client-side TLS certificate key to use when connecting to -remoteRead.url
-remoteRead.tlsServerName string
Optional TLS server name to use for connections to -remoteRead.url. By default the server name from -remoteRead.url is used
Optional TLS server name to use for connections to -remoteRead.url. By default, the server name from -remoteRead.url is used
-remoteRead.url vmalert
Optional URL to datasource compatible with Prometheus HTTP API. It can be single-node VictoriaMetrics or vmselect. Remote read is used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has successfully persisted its state. E.g. http://127.0.0.1:8428. See also '-remoteRead.disablePathAppend', '-remoteRead.showURL'.
-remoteWrite.basicAuth.password string
@@ -1134,7 +1134,7 @@ The shortlist of configuration flags is the following:
-remoteWrite.headers string
Optional HTTP headers to send with each request to the corresponding -remoteWrite.url. For example, -remoteWrite.headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding -remoteWrite.url. Multiple headers must be delimited by '^^': -remoteWrite.headers='header1:value1^^header2:value2'
-remoteWrite.maxBatchSize int
Defines defines max number of timeseries to be flushed at once (default 1000)
Defines max number of timeseries to be flushed at once (default 1000)
-remoteWrite.maxQueueSize int
Defines the max number of pending datapoints to remote write endpoint (default 100000)
-remoteWrite.oauth2.clientID string
@@ -1152,7 +1152,7 @@ The shortlist of configuration flags is the following:
-remoteWrite.showURL
Whether to show -remoteWrite.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
-remoteWrite.tlsCAFile string
Optional path to TLS CA file to use for verifying connections to -remoteWrite.url. By default system CA is used
Optional path to TLS CA file to use for verifying connections to -remoteWrite.url. By default, system CA is used
-remoteWrite.tlsCertFile string
Optional path to client-side TLS certificate file to use when connecting to -remoteWrite.url
-remoteWrite.tlsInsecureSkipVerify
@@ -1160,7 +1160,7 @@ The shortlist of configuration flags is the following:
-remoteWrite.tlsKeyFile string
Optional path to client-side TLS certificate key to use when connecting to -remoteWrite.url
-remoteWrite.tlsServerName string
Optional TLS server name to use for connections to -remoteWrite.url. By default the server name from -remoteWrite.url is used
Optional TLS server name to use for connections to -remoteWrite.url. By default, the server name from -remoteWrite.url is used
-remoteWrite.url string
Optional URL to VictoriaMetrics or vminsert where to persist alerts state and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend, '-remoteWrite.showURL'.
-replay.disableProgressBar
@@ -1170,7 +1170,7 @@ The shortlist of configuration flags is the following:
-replay.ruleRetryAttempts int
Defines how many retries to make before giving up on rule if request for it returns an error. (default 5)
-replay.rulesDelay duration
Delay between rules evaluation within the group. Could be important if there are chained rules inside of the groupand processing need to wait for previous rule results to be persisted by remote storage before evaluating the next rule.Keep it equal or bigger than -remoteWrite.flushInterval. (default 1s)
Delay between rules evaluation within the group. Could be important if there are chained rules inside the group and processing needs to wait for previous rule results to be persisted by remote storage before evaluating the next rule. Keep it equal or bigger than -remoteWrite.flushInterval. (default 1s)
-replay.timeFrom string
The time filter in RFC3339 format to select time series with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'
-replay.timeTo string
@@ -1193,7 +1193,7 @@ The shortlist of configuration flags is the following:
Supports an array of values separated by comma or specified via multiple flags.
-rule.configCheckInterval duration
Interval for checking for changes in '-rule' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead
Interval for checking for changes in '-rule' files. By default, the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead
-rule.maxResolveDuration duration
Limits the maximum duration for automatic alert expiration, which by default is 4 times evaluationInterval of the parent group.
-rule.resendDelay duration
@@ -1208,7 +1208,7 @@ The shortlist of configuration flags is the following:
-rule.templates="dir/**/*.tpl". Includes all the .tpl files in "dir" subfolders recursively.
Supports an array of values separated by comma or specified via multiple flags.
-rule.updateEntriesLimit int
Defines the max number of rule's state updates stored in-memory. Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overriden per rule via update_entries_limit param. (default 20)
Defines the max number of rule's state updates stored in-memory. Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overridden per rule via update_entries_limit param. (default 20)
-rule.validateExpressions
Whether to validate rules expressions via MetricsQL engine (default true)
-rule.validateTemplates

View file

@@ -129,7 +129,13 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
// means seriesFetched is unsupported
return -1
}
return float64(*e.seriesFetched)
seriesFetched := float64(*e.seriesFetched)
if seriesFetched == 0 && e.samples > 0 {
// `alert: 0.95` will fetch no series
// but will get one time series in response.
seriesFetched = float64(e.samples)
}
return seriesFetched
})
return ar
}
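For illustration, a self-contained sketch of the fallback added above (names here are hypothetical, not part of the patch): a constant expression such as `alert: 0.95` selects no series yet still returns a sample, so zero fetched series with non-zero samples should not be reported as "no match".

```go
package main

import "fmt"

// reportedSeriesFetched mirrors the gauge callback above: fall back to the
// sample count when the expression fetched no series but produced samples.
func reportedSeriesFetched(seriesFetched, samples int) float64 {
	if seriesFetched == 0 && samples > 0 {
		return float64(samples) // constant expression: not a genuine "no match"
	}
	return float64(seriesFetched)
}

func main() {
	fmt.Println(reportedSeriesFetched(0, 1)) // 1: `alert: 0.95`-style rule
	fmt.Println(reportedSeriesFetched(0, 0)) // 0: genuine "no match"
	fmt.Println(reportedSeriesFetched(5, 5)) // 5: regular rule
}
```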

View file

@@ -102,7 +102,7 @@ func TestParseBad(t *testing.T) {
},
{
[]string{"http://unreachable-url"},
"no such host",
"failed to read",
},
}
for _, tc := range testCases {

View file

@@ -47,7 +47,7 @@ var (
"For example, if -datasource.queryStep=15s then param \"step\" with value \"15s\" will be added to every query. "+
"If set to 0, rule's evaluation interval will be used instead.")
queryTimeAlignment = flag.Bool("datasource.queryTimeAlignment", true, `Whether to align "time" parameter with evaluation interval.`+
"Alignment supposed to produce deterministic results despite of number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257")
"Alignment supposed to produce deterministic results despite number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257")
maxIdleConnections = flag.Int("datasource.maxIdleConnections", 100, `Defines the number of idle (keep-alive connections) to each configured datasource. Consider setting this value equal to the value: groups_total * group.concurrency. Too low a value may result in a high number of sockets in TIME_WAIT state.`)
disableKeepAlive = flag.Bool("datasource.disableKeepAlive", false, `Whether to disable long-lived connections to the datasource. `+
`If true, disables HTTP keep-alives and will only use the connection to the server for a single HTTP request.`)

View file

@@ -55,10 +55,10 @@ absolute path to all .tpl files in root.
`)
rulesCheckInterval = flag.Duration("rule.configCheckInterval", 0, "Interval for checking for changes in '-rule' files. "+
"By default the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead")
"By default, the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead")
configCheckInterval = flag.Duration("configCheckInterval", 0, "Interval for checking for changes in '-rule' or '-notifier.config' files. "+
"By default the checking is disabled. Send SIGHUP signal in order to force config check for changes.")
"By default, the checking is disabled. Send SIGHUP signal in order to force config check for changes.")
httpListenAddr = flag.String("httpListenAddr", ":8880", "Address to listen for http connections. See also -httpListenAddr.useProxyProtocol")
useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
@@ -72,7 +72,7 @@ absolute path to all .tpl files in root.
"which by default is 4 times evaluationInterval of the parent group.")
resendDelay = flag.Duration("rule.resendDelay", 0, "Minimum amount of time to wait before resending an alert to notifier")
ruleUpdateEntriesLimit = flag.Int("rule.updateEntriesLimit", 20, "Defines the max number of rule's state updates stored in-memory. "+
"Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overriden per rule via update_entries_limit param.")
"Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overridden per rule via update_entries_limit param.")
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier")
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager `+

View file

@@ -31,9 +31,9 @@ var (
tlsCertFile = flagutil.NewArrayString("notifier.tlsCertFile", "Optional path to client-side TLS certificate file to use when connecting to -notifier.url")
tlsKeyFile = flagutil.NewArrayString("notifier.tlsKeyFile", "Optional path to client-side TLS certificate key to use when connecting to -notifier.url")
tlsCAFile = flagutil.NewArrayString("notifier.tlsCAFile", "Optional path to TLS CA file to use for verifying connections to -notifier.url. "+
"By default system CA is used")
"By default, system CA is used")
tlsServerName = flagutil.NewArrayString("notifier.tlsServerName", "Optional TLS server name to use for connections to -notifier.url. "+
"By default the server name from -notifier.url is used")
"By default, the server name from -notifier.url is used")
oauth2ClientID = flagutil.NewArrayString("notifier.oauth2.clientID", "Optional OAuth2 clientID to use for -notifier.url. "+
"If multiple args are set, then they are applied independently for the corresponding -notifier.url")

View file

@@ -34,9 +34,9 @@ var (
tlsCertFile = flag.String("remoteRead.tlsCertFile", "", "Optional path to client-side TLS certificate file to use when connecting to -remoteRead.url")
tlsKeyFile = flag.String("remoteRead.tlsKeyFile", "", "Optional path to client-side TLS certificate key to use when connecting to -remoteRead.url")
tlsCAFile = flag.String("remoteRead.tlsCAFile", "", "Optional path to TLS CA file to use for verifying connections to -remoteRead.url. "+
"By default system CA is used")
"By default, system CA is used")
tlsServerName = flag.String("remoteRead.tlsServerName", "", "Optional TLS server name to use for connections to -remoteRead.url. "+
"By default the server name from -remoteRead.url is used")
"By default, the server name from -remoteRead.url is used")
oauth2ClientID = flag.String("remoteRead.oauth2.clientID", "", "Optional OAuth2 clientID to use for -remoteRead.url.")
oauth2ClientSecret = flag.String("remoteRead.oauth2.clientSecret", "", "Optional OAuth2 clientSecret to use for -remoteRead.url.")

View file

@@ -29,7 +29,7 @@ var (
bearerTokenFile = flag.String("remoteWrite.bearerTokenFile", "", "Optional path to bearer token file to use for -remoteWrite.url.")
maxQueueSize = flag.Int("remoteWrite.maxQueueSize", 1e5, "Defines the max number of pending datapoints to remote write endpoint")
maxBatchSize = flag.Int("remoteWrite.maxBatchSize", 1e3, "Defines defines max number of timeseries to be flushed at once")
maxBatchSize = flag.Int("remoteWrite.maxBatchSize", 1e3, "Defines max number of timeseries to be flushed at once")
concurrency = flag.Int("remoteWrite.concurrency", 1, "Defines number of writers for concurrent writing into remote querier")
flushInterval = flag.Duration("remoteWrite.flushInterval", 5*time.Second, "Defines interval of flushes to remote write endpoint")
@@ -37,9 +37,9 @@ var (
tlsCertFile = flag.String("remoteWrite.tlsCertFile", "", "Optional path to client-side TLS certificate file to use when connecting to -remoteWrite.url")
tlsKeyFile = flag.String("remoteWrite.tlsKeyFile", "", "Optional path to client-side TLS certificate key to use when connecting to -remoteWrite.url")
tlsCAFile = flag.String("remoteWrite.tlsCAFile", "", "Optional path to TLS CA file to use for verifying connections to -remoteWrite.url. "+
"By default system CA is used")
"By default, system CA is used")
tlsServerName = flag.String("remoteWrite.tlsServerName", "", "Optional TLS server name to use for connections to -remoteWrite.url. "+
"By default the server name from -remoteWrite.url is used")
"By default, the server name from -remoteWrite.url is used")
oauth2ClientID = flag.String("remoteWrite.oauth2.clientID", "", "Optional OAuth2 clientID to use for -remoteWrite.url.")
oauth2ClientSecret = flag.String("remoteWrite.oauth2.clientSecret", "", "Optional OAuth2 clientSecret to use for -remoteWrite.url.")

View file

@@ -186,7 +186,7 @@ var (
)
// flush is a blocking function that marshals WriteRequest and sends
// it to remote write endpoint. Flush performs limited amount of retries
// it to remote-write endpoint. Flush performs limited amount of retries
// if request fails.
func (c *Client) flush(ctx context.Context, wr *prompbmarshal.WriteRequest) {
if len(wr.Timeseries) < 1 {
@@ -201,9 +201,14 @@ func (c *Client) flush(ctx context.Context, wr *prompbmarshal.WriteRequest) {
return
}
const attempts = 5
b := snappy.Encode(nil, data)
for i := 0; i < attempts; i++ {
const (
retryCount = 5
retryBackoff = time.Second
)
for attempts := 0; attempts < retryCount; attempts++ {
err := c.send(ctx, b)
if err == nil {
sentRows.Add(len(wr.Timeseries))
@@ -211,16 +216,29 @@ func (c *Client) flush(ctx context.Context, wr *prompbmarshal.WriteRequest) {
return
}
logger.Warnf("attempt %d to send request failed: %s", i+1, err)
_, isRetriable := err.(*retriableError)
logger.Warnf("attempt %d to send request failed: %s (retriable: %v)", attempts+1, err, isRetriable)
if !isRetriable {
// exit fast if error isn't retriable
break
}
// stop retrying if the request has been cancelled before backoff
// (note: a plain `break` inside `select` exits only the select statement, not the retry loop)
if ctx.Err() != nil {
break
}
// sleeping to avoid remote db hammering
time.Sleep(time.Second)
continue
time.Sleep(retryBackoff)
}
droppedRows.Add(len(wr.Timeseries))
droppedBytes.Add(len(b))
logger.Errorf("all %d attempts to send request failed - dropping %d time series",
attempts, len(wr.Timeseries))
logger.Errorf("attempts to send remote-write request failed - dropping %d time series",
len(wr.Timeseries))
}
func (c *Client) send(ctx context.Context, data []byte) error {
@@ -249,10 +267,31 @@ func (c *Client) send(ctx context.Context, data []byte) error {
req.URL.Redacted(), err, len(data), r.Size())
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
body, _ := io.ReadAll(resp.Body)
// according to https://prometheus.io/docs/concepts/remote_write_spec/
// Prometheus Remote Write compatible receivers MUST
switch resp.StatusCode / 100 {
case 2:
// respond with a HTTP 2xx status code when the write is successful.
return nil
case 5:
// respond with HTTP status code 5xx when the write fails and SHOULD be retried.
return &retriableError{fmt.Errorf("unexpected response code %d for %s. Response body %q",
resp.StatusCode, req.URL.Redacted(), body)}
default:
// respond with HTTP status code 4xx when the request is invalid, will never be able to succeed
// and should not be retried.
return fmt.Errorf("unexpected response code %d for %s. Response body %q",
resp.StatusCode, req.URL.Redacted(), body)
}
return nil
}
type retriableError struct {
err error
}
func (e *retriableError) Error() string {
return e.err.Error()
}
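A minimal standalone sketch of how the `retriableError` wrapper above drives the retry decision in `flush`, assuming the simplified `classify` helper below (hypothetical, not part of the patch): 5xx responses are wrapped and retried with backoff, 2xx succeed, and everything else fails fast.

```go
package main

import (
	"errors"
	"fmt"
)

// retriableError marks errors (HTTP 5xx responses) that are worth retrying.
type retriableError struct{ err error }

func (e *retriableError) Error() string { return e.err.Error() }

// classify maps a response code per the remote write spec:
// 2xx -> success, 5xx -> retriable error, anything else -> permanent error.
func classify(statusCode int) error {
	switch statusCode / 100 {
	case 2:
		return nil
	case 5:
		return &retriableError{fmt.Errorf("unexpected response code %d", statusCode)}
	default:
		return fmt.Errorf("unexpected response code %d", statusCode)
	}
}

func main() {
	for _, code := range []int{204, 503, 400} {
		err := classify(code)
		var re *retriableError
		fmt.Printf("code=%d retriable=%v err=%v\n", code, errors.As(err, &re), err)
	}
}
```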

View file

@@ -22,7 +22,7 @@ var (
replayTo = flag.String("replay.timeTo", "",
"The time filter in RFC3339 format to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'")
replayRulesDelay = flag.Duration("replay.rulesDelay", time.Second,
"Delay between rules evaluation within the group. Could be important if there are chained rules inside of the group"+
"Delay between rules evaluation within the group. Could be important if there are chained rules inside the group "+
"and processing need to wait for previous rule results to be persisted by remote storage before evaluating the next rule."+
"Keep it equal or bigger than -remoteWrite.flushInterval.")
replayMaxDatapoints = flag.Int("replay.maxDatapointsPerQuery", 1e3,

View file

@@ -0,0 +1,39 @@
function expandAll() {
$('.collapse').addClass('show');
}
function collapseAll() {
$('.collapse').removeClass('show');
}
function toggleByID(id) {
let el = $("#" + id);
if (el.length > 0) {
el.click();
}
}
$(document).ready(function () {
$(".group-heading a").click(function (e) {
e.stopPropagation(); // prevent collapse logic on link click
let target = $(this).attr('href');
if (target.length > 0) {
toggleByID(target.substr(1));
}
});
$(".group-heading").click(function (e) {
let target = $(this).attr('data-bs-target');
let el = $("#" + target);
new bootstrap.Collapse(el, {
toggle: true
});
});
let hash = window.location.hash.substr(1);
toggleByID(hash);
});
$(document).ready(function () {
$('[data-bs-toggle="tooltip"]').tooltip();
});

View file

@@ -10,39 +10,7 @@
</main>
<script src="{%s prefix %}static/js/jquery-3.6.0.min.js" type="text/javascript"></script>
<script src="{%s prefix %}static/js/bootstrap.bundle.min.js" type="text/javascript"></script>
<script type="text/javascript">
function expandAll() {
$('.collapse').addClass('show');
}
function collapseAll() {
$('.collapse').removeClass('show');
}
$(document).ready(function() {
// prevent collapse logic on link click
$(".group-heading a").click(function(e) {
e.stopPropagation();
});
$(".group-heading").click(function(e) {
let target = $(this).attr('data-bs-target');
let el = $("#"+target);
new bootstrap.Collapse(el, {
toggle: true
});
});
var hash = window.location.hash.substr(1);
let group = $("#"+hash);
if (group.length > 0) {
group.click();
}
});
$(document).ready(function() {
$('[data-bs-toggle="tooltip"]').tooltip();
});
</script>
<script src="{%s prefix %}static/js/custom.js" type="text/javascript"></script>
</body>
</html>
{% endfunc %}

View file

@@ -45,67 +45,39 @@ func StreamFooter(qw422016 *qt422016.Writer, r *http.Request) {
qw422016.E().S(prefix)
//line app/vmalert/tpl/footer.qtpl:12
qw422016.N().S(`static/js/bootstrap.bundle.min.js" type="text/javascript"></script>
<script type="text/javascript">
function expandAll() {
$('.collapse').addClass('show');
}
function collapseAll() {
$('.collapse').removeClass('show');
}
$(document).ready(function() {
// prevent collapse logic on link click
$(".group-heading a").click(function(e) {
e.stopPropagation();
});
$(".group-heading").click(function(e) {
let target = $(this).attr('data-bs-target');
let el = $("#"+target);
new bootstrap.Collapse(el, {
toggle: true
});
});
var hash = window.location.hash.substr(1);
let group = $("#"+hash);
if (group.length > 0) {
group.click();
}
});
$(document).ready(function() {
$('[data-bs-toggle="tooltip"]').tooltip();
});
</script>
<script src="`)
//line app/vmalert/tpl/footer.qtpl:13
qw422016.E().S(prefix)
//line app/vmalert/tpl/footer.qtpl:13
qw422016.N().S(`static/js/custom.js" type="text/javascript"></script>
</body>
</html>
`)
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
}
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
func WriteFooter(qq422016 qtio422016.Writer, r *http.Request) {
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
StreamFooter(qw422016, r)
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
}
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
func Footer(r *http.Request) string {
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
WriteFooter(qb422016, r)
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
qs422016 := string(qb422016.B)
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
return qs422016
//line app/vmalert/tpl/footer.qtpl:48
//line app/vmalert/tpl/footer.qtpl:16
}

View file

@@ -30,104 +30,132 @@
{%= tpl.Footer(r) %}
{% endfunc %}
{% func ListGroups(r *http.Request, groups []APIGroup) %}
{% func buttonActive(filter, expValue string) %}
{% if filter != expValue %}
btn-secondary
{% else %}
btn-primary
{% endif %}
{% endfunc %}
{% func ListGroups(r *http.Request, originGroups []APIGroup) %}
{%code prefix := utils.Prefix(r.URL.Path) %}
{%= tpl.Header(r, navItems, "Groups") %}
{% if len(groups) > 0 %}
{%code
filter := r.URL.Query().Get("filter")
rOk := make(map[string]int)
rNotOk := make(map[string]int)
for _, g := range groups {
rNoMatch := make(map[string]int)
var groups []APIGroup
for _, g := range originGroups {
var rules []APIRule
for _, r := range g.Rules {
if r.LastError != "" {
rNotOk[g.ID]++
} else {
rOk[g.ID]++
}
if isNoMatch(r) {
rNoMatch[g.ID]++
}
if (filter == "unhealthy" && r.LastError == "") ||
(filter == "noMatch" && !isNoMatch(r)) {
continue
}
rules = append(rules, r)
}
if len(rules) > 0 {
g.Rules = rules
groups = append(groups, g)
}
}
%}
<a class="btn {%= buttonActive(filter, "") %}" role="button" onclick="window.location = window.location.pathname">All</a>
<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
<a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
{% for _, g := range groups %}
<div class="group-heading{% if rNotOk[g.ID] > 0 %} alert-danger{% endif %}" data-bs-target="rules-{%s g.ID %}">
<span class="anchor" id="group-{%s g.ID %}"></span>
<a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %} (every {%f.0 g.Interval %}s) #</a>
{% if rNotOk[g.ID] > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d rNotOk[g.ID] %}</span> {% endif %}
<span class="badge bg-success" title="Number of rules withs status Ok">{%d rOk[g.ID] %}</span>
<p class="fs-6 fw-lighter">{%s g.File %}</p>
{% if len(g.Params) > 0 %}
<div class="fs-6 fw-lighter">Extra params
{% for _, param := range g.Params %}
<span class="float-left badge bg-primary">{%s param %}</span>
{% endfor %}
</div>
{% endif %}
{% if len(g.Headers) > 0 %}
<div class="fs-6 fw-lighter">Extra headers
{% for _, header := range g.Headers %}
<span class="float-left badge bg-primary">{%s header %}</span>
{% endfor %}
</div>
{% endif %}
</div>
<div class="collapse" id="rules-{%s g.ID %}">
<table class="table table-striped table-hover table-sm">
<thead>
<tr>
<th scope="col" style="width: 60%">Rule</th>
<th scope="col" style="width: 20%" class="text-center" title="How many samples were produced by the rule">Samples</th>
<th scope="col" style="width: 20%" class="text-center" title="How many seconds ago rule was executed">Updated</th>
</tr>
</thead>
<tbody>
{% for _, r := range g.Rules %}
<tr{% if r.LastError != "" %} class="alert-danger"{% endif %}>
<td>
<div class="row">
<div class="col-12 mb-2">
{% if r.Type == "alerting" %}
<b>alert:</b> {%s r.Name %} (for: {%v r.Duration %} seconds)
{% else %}
<b>record:</b> {%s r.Name %}
{% endif %}
|
{%= seriesFetchedWarn(r) %}
<span><a target="_blank" href="{%s prefix+r.WebLink() %}">Details</a></span>
</div>
<div class="col-12">
<code><pre>{%s r.Query %}</pre></code>
</div>
<div class="col-12 mb-2">
{% if len(r.Labels) > 0 %} <b>Labels:</b>{% endif %}
{% for k, v := range r.Labels %}
<span class="ms-1 badge bg-primary">{%s k %}={%s v %}</span>
{% endfor %}
</div>
{% if r.LastError != "" %}
<div class="col-12">
<b>Error:</b>
<div class="error-cell">
{%s r.LastError %}
<a class="btn {%= buttonActive(filter, "unhealthy") %}" role="button" onclick="location.href='?filter=unhealthy'" title="Show only rules with errors">Unhealthy</a>
<a class="btn {%= buttonActive(filter, "noMatch") %}" role="button" onclick="location.href='?filter=noMatch'" title="Show only rules matching no time series during last evaluation">NoMatch</a>
{% if len(groups) > 0 %}
{% for _, g := range groups %}
<div
class="group-heading{% if rNotOk[g.ID] > 0 %} alert-danger{%endif%}" data-bs-target="rules-{%s g.ID %}">
<span class="anchor" id="group-{%s g.ID %}"></span>
<a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %} (every {%f.0 g.Interval %}s) #</a>
{% if rNotOk[g.ID] > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d rNotOk[g.ID] %}</span> {% endif %}
{% if rNoMatch[g.ID] > 0 %}<span class="badge bg-warning" title="Number of rules with status NoMatch">{%d rNoMatch[g.ID] %}</span> {% endif %}
<span class="badge bg-success" title="Number of rules withs status Ok">{%d rOk[g.ID] %}</span>
<p class="fs-6 fw-lighter">{%s g.File %}</p>
{% if len(g.Params) > 0 %}
<div class="fs-6 fw-lighter">Extra params
{% for _, param := range g.Params %}
<span class="float-left badge bg-primary">{%s param %}</span>
{% endfor %}
</div>
{% endif %}
{% if len(g.Headers) > 0 %}
<div class="fs-6 fw-lighter">Extra headers
{% for _, header := range g.Headers %}
<span class="float-left badge bg-primary">{%s header %}</span>
{% endfor %}
</div>
{% endif %}
</div>
<div class="collapse" id="rules-{%s g.ID %}">
<table class="table table-striped table-hover table-sm">
<thead>
<tr>
<th scope="col" style="width: 60%">Rule</th>
<th scope="col" style="width: 20%" class="text-center" title="How many samples were produced by the rule">Samples</th>
<th scope="col" style="width: 20%" class="text-center" title="How many seconds ago rule was executed">Updated</th>
</tr>
</thead>
<tbody>
{% for _, r := range g.Rules %}
<tr{% if r.LastError != "" %} class="alert-danger"{% endif %}>
<td>
<div class="row">
<div class="col-12 mb-2">
{% if r.Type == "alerting" %}
<b>alert:</b> {%s r.Name %} (for: {%v r.Duration %} seconds)
{% else %}
<b>record:</b> {%s r.Name %}
{% endif %}
|
{%= seriesFetchedWarn(r) %}
<span><a target="_blank" href="{%s prefix+r.WebLink() %}">Details</a></span>
</div>
<div class="col-12">
<code><pre>{%s r.Query %}</pre></code>
</div>
<div class="col-12 mb-2">
{% if len(r.Labels) > 0 %} <b>Labels:</b>{% endif %}
{% for k, v := range r.Labels %}
<span class="ms-1 badge bg-primary">{%s k %}={%s v %}</span>
{% endfor %}
</div>
{% if r.LastError != "" %}
<div class="col-12">
<b>Error:</b>
<div class="error-cell">
{%s r.LastError %}
</div>
</div>
{% endif %}
</div>
{% endif %}
</div>
</td>
<td class="text-center">{%d r.LastSamples %}</td>
<td class="text-center">{%f.3 time.Since(r.LastEvaluation).Seconds() %}s ago</td>
</tr>
{% endfor %}
</tbody>
</table>
</td>
<td class="text-center">{%d r.LastSamples %}</td>
<td class="text-center">{%f.3 time.Since(r.LastEvaluation).Seconds() %}s ago</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endfor %}
{% else %}
<div>
<p>No groups...</p>
</div>
{% endfor %}
{% else %}
<div>
<p>No groups...</p>
</div>
{% endif %}
{% endif %}
{%= tpl.Footer(r) %}
@@ -239,9 +267,9 @@
{%code typeK, ns := keys[i], targets[notifier.TargetType(keys[i])]
count := len(ns)
%}
<div class="group-heading data-bs-target="rules-{%s typeK %}">
<span class="anchor" id="notifiers-{%s typeK %}"></span>
<a href="#notifiers-{%s typeK %}">{%s typeK %} ({%d count %})</a>
<div class="group-heading" data-bs-target="notifiers-{%s typeK %}">
<span class="anchor" id="group-{%s typeK %}"></span>
<a href="#group-{%s typeK %}">{%s typeK %} ({%d count %})</a>
</div>
<div class="collapse show" id="notifiers-{%s typeK %}">
<table class="table table-striped table-hover table-sm">
@@ -534,14 +562,20 @@
{% endfunc %}
{% func seriesFetchedWarn(r APIRule) %}
{% if r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0 %}
{% if isNoMatch(r) %}
<svg xmlns="http://www.w3.org/2000/svg"
data-bs-toggle="tooltip"
title="This rule last evaluation hasn't selected any time series from the datasource.
title="No match! This rule last evaluation hasn't selected any time series from the datasource.
It might be that either this data is missing in the datasource or there is a typo in rule's expression.
See more in Details."
width="18" height="18" fill="currentColor" class="bi bi-exclamation-triangle-fill flex-shrink-0 me-2" viewBox="0 0 16 16" role="img" aria-label="Warning:">
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
</svg>
{% endif %}
{% endfunc %}
{% endfunc %}
{%code
func isNoMatch (r APIRule) bool {
return r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0
}
%}

File diff suppressed because it is too large

View file

@@ -334,7 +334,7 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
-configCheckInterval duration
Interval for config file re-read. Zero value disables config re-reading. By default, refreshing is disabled, send SIGHUP for config refresh.
-enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
Whether to enable IPv6 for listening and dialing. By default, only IPv4 TCP and UDP is used
-envflag.enable
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string
@@ -344,11 +344,11 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
-flagsAuthKey string
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
-fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
Whether to use pread() instead of mmap() for reading data files. By default, mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-http.connTimeout duration
Incoming http connections are closed after the configured timeout. This may help to spread the incoming load among a cluster of services behind a load balancer. Please note that the real timeout may be bigger by up to 10% as a protection against the thundering herd problem (default 2m0s)
-http.disableResponseCompression
Disable compression of HTTP responses to save CPU resources. By default compression is enabled to save network bandwidth
Disable compression of HTTP responses to save CPU resources. By default, compression is enabled to save network bandwidth
-http.idleConnTimeout duration
Timeout for incoming idle http connections (default 1m0s)
-http.maxGracefulShutdownDuration duration

View file

@@ -392,8 +392,8 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
uis := ac.Users
if len(uis) == 0 {
return nil, fmt.Errorf("`users` section cannot be empty in AuthConfig")
if len(uis) == 0 && ac.UnauthorizedUser == nil {
return nil, fmt.Errorf("Missing `users` or `unauthorized_user` sections")
}
byAuthToken := make(map[string]*UserInfo, len(uis))
for i := range uis {

View file

@ -105,15 +105,15 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
ui := ac[authToken]
if ui == nil {
invalidAuthTokenRequests.Inc()
err := fmt.Errorf("cannot find the provided auth token %q in config", authToken)
if *logInvalidAuthTokens {
err := fmt.Errorf("cannot find the provided auth token %q in config", authToken)
err = &httpserver.ErrorWithStatusCode{
Err: err,
StatusCode: http.StatusUnauthorized,
}
httpserver.Errorf(w, r, "%s", err)
} else {
http.Error(w, err.Error(), http.StatusUnauthorized)
http.Error(w, "Unauthorized", http.StatusUnauthorized)
}
return true
}
@ -158,6 +158,9 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
up, headers = ui.DefaultURL, ui.Headers
isDefault = true
}
r.Body = &readTrackingBody{
r: r.Body,
}
maxAttempts := up.getBackendsCount()
for i := 0; i < maxAttempts; i++ {
@ -195,11 +198,10 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
transportOnce.Do(transportInit)
res, err := transport.RoundTrip(req)
if err != nil {
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
if r.Method == http.MethodPost || r.Method == http.MethodPut {
// It is impossible to retry POST and PUT requests,
// since we already proxied the request body to the backend.
rtb := req.Body.(*readTrackingBody)
if rtb.readStarted {
// Request body has been already read, so it is impossible to retry the request.
// Return the error to the client then.
err = &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("cannot proxy the request to %q: %w", targetURL, err),
StatusCode: http.StatusServiceUnavailable,
@ -207,7 +209,11 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
httpserver.Errorf(w, r, "%s", err)
return true
}
logger.Warnf("remoteAddr: %s; requestURI: %s; error when proxying the request to %q: %s", remoteAddr, requestURI, targetURL, err)
// Retry the request if its body wasn't read yet. This usually means that the backend isn't reachable.
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
// NOTE: do not use httpserver.GetRequestURI here,
// since it explicitly reads the request body and breaks retries.
logger.Warnf("remoteAddr: %s; requestURI: %s; error when proxying the request to %q: %s", remoteAddr, req.URL, targetURL, err)
return false
}
removeHopHeaders(res.Header)
@ -218,7 +224,6 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
copyBuf.B = bytesutil.ResizeNoCopyNoOverallocate(copyBuf.B, 16*1024)
_, err = io.CopyBuffer(w, res.Body, copyBuf.B)
copyBufPool.Put(copyBuf)
_ = res.Body.Close()
if err != nil && !netutil.IsTrivialNetworkError(err) {
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
@ -350,3 +355,28 @@ func handleConcurrencyLimitError(w http.ResponseWriter, r *http.Request, err err
}
httpserver.Errorf(w, r, "%s", err)
}
type readTrackingBody struct {
r io.ReadCloser
readStarted bool
}
// Read implements the io.Reader interface.
// It records whether reading of the request body has started.
func (rtb *readTrackingBody) Read(p []byte) (int, error) {
if len(p) > 0 {
rtb.readStarted = true
}
return rtb.r.Read(p)
}
// Close implements io.Closer interface.
func (rtb *readTrackingBody) Close() error {
// Close rtb.r only if at least a single Read call was performed.
// http.Transport.RoundTrip calls body.Close even when the body wasn't read,
// so this trick allows reusing the request body on retries.
if rtb.readStarted {
return rtb.r.Close()
}
return nil
}
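To make the retry contract concrete, here is a minimal, self-contained sketch (illustrative only, not the vmauth source; the wrapper simply mirrors the `readTrackingBody` semantics above) showing how a proxy loop can use the `readStarted` flag to decide whether a failed attempt may be retried:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readTrackingBody mirrors the wrapper above: it remembers whether
// any bytes of the request body have been consumed.
type readTrackingBody struct {
	r           io.ReadCloser
	readStarted bool
}

func (rtb *readTrackingBody) Read(p []byte) (int, error) {
	if len(p) > 0 {
		rtb.readStarted = true
	}
	return rtb.r.Read(p)
}

func (rtb *readTrackingBody) Close() error {
	// Close the underlying body only after it has been read,
	// so the body can be replayed on retries.
	if rtb.readStarted {
		return rtb.r.Close()
	}
	return nil
}

func main() {
	body := &readTrackingBody{r: io.NopCloser(bytes.NewBufferString("payload"))}

	// A failure before any Read (e.g. the backend is unreachable)
	// leaves the body untouched, so the request is still retryable.
	fmt.Println("retryable:", !body.readStarted) // retryable: true

	// Once the body has been (even partially) proxied, a retry would replay
	// an incomplete payload, so the proxy must report the error instead.
	_, _ = io.Copy(io.Discard, body)
	fmt.Println("retryable:", !body.readStarted) // retryable: false
}
```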

View file

@ -82,7 +82,7 @@ The command will upload only changed data to `gs://<bucket>/latest`.
Where `<daily-snapshot>` is the snapshot for the last day `<YYYYMMDD>`.
This apporach saves network bandwidth costs on hourly backups (since they are incremental) and allows recovering data from either the last hour (`latest` backup)
This approach saves network bandwidth costs on hourly backups (since they are incremental) and allows recovering data from either the last hour (`latest` backup)
or from any day (`YYYYMMDD` backups). Note that the hourly backup shouldn't run while the daily backup is being created.
Do not forget to remove old backups when they are no longer needed in order to save storage costs.
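For illustration, a pair of cron entries implementing this scheme might look as follows (this is a sketch: the schedule, binary path, bucket name and snapshot URL are placeholders, and `-origin` is assumed to point at the `latest` backup for server-side copying so the daily upload stays incremental):

```console
# hourly incremental backup to gs://<bucket>/latest
0 * * * * /usr/local/bin/vmbackup -storageDataPath=/victoria-metrics-data -snapshot.createURL=http://localhost:8428/snapshot/create -dst=gs://<bucket>/latest
# daily backup to gs://<bucket>/<YYYYMMDD>, reusing data already uploaded to `latest`
30 0 * * * /usr/local/bin/vmbackup -storageDataPath=/victoria-metrics-data -snapshot.createURL=http://localhost:8428/snapshot/create -origin=gs://<bucket>/latest -dst=gs://<bucket>/<YYYYMMDD>
```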
@ -103,7 +103,7 @@ The backup algorithm is the following:
7. Delete the created snapshot.
The algorithm splits source files into 1 GiB chunks in the backup. Each chunk is stored as a separate file in the backup.
Such splitting balances between the number of files in the backup and the amounts of data that needs to be re-transfered after temporary errors.
Such splitting balances between the number of files in the backup and the amounts of data that needs to be re-transferred after temporary errors.
`vmbackup` relies on [instant snapshot](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) properties:
@ -190,7 +190,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
Where to put the backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://container/path/to/backup or fs:///path/to/local/backup/dir
-dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded
-enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
Whether to enable IPv6 for listening and dialing. By default, only IPv4 TCP and UDP is used
-envflag.enable
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string
@ -200,11 +200,11 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
-flagsAuthKey string
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
-fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
Whether to use pread() instead of mmap() for reading data files. By default, mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-http.connTimeout duration
Incoming http connections are closed after the configured timeout. This may help to spread the incoming load among a cluster of services behind a load balancer. Please note that the real timeout may be bigger by up to 10% as a protection against the thundering herd problem (default 2m0s)
-http.disableResponseCompression
Disable compression of HTTP responses to save CPU resources. By default compression is enabled to save network bandwidth
Disable compression of HTTP responses to save CPU resources. By default, compression is enabled to save network bandwidth
-http.idleConnTimeout duration
Timeout for incoming idle http connections (default 1m0s)
-http.maxGracefulShutdownDuration duration
@ -255,7 +255,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
-pushmetrics.interval duration
Interval for pushing metrics to -pushmetrics.url (default 10s)
-pushmetrics.url array
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
Supports an array of values separated by comma or specified via multiple flags.
-s3ForcePathStyle
Prefixing endpoint with bucket name when set false, true by default. (default true)

View file

@ -48,7 +48,7 @@ Backup manager uploads only the data that has been changed or created since the
This reduces the consumed network traffic and the time needed for performing the backup.
See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for details.
*Please take into account that the first backup upload could take a significant amount of time as it needs to upload all of the data.*
*Please take into account that the first backup upload could take a significant amount of time as it needs to upload all the data.*
There are two flags which could help with performance tuning:
@ -150,6 +150,30 @@ The result on the GCS bucket. We see only 3 daily backups:
![daily](vmbackupmanager_rp_daily_2.png)
### Protecting backups against deletion by retention policy
You can protect any backup against deletion by retention policy with the `vmbackupmanager backup lock` command.
For instance:
```console
./vmbackupmanager backup lock daily/2021-02-13 -dst=<DST_PATH> -storageDataPath=/vmstorage-data -eula
```
After that, the backup won't be deleted by the retention policy.
You can view the `locked` attribute in the backup list:
```console
./vmbackupmanager backup list -dst=<DST_PATH> -storageDataPath=/vmstorage-data -eula
```
To remove protection, you can use the `vmbackupmanager backup unlock` command.
For example:
```console
./vmbackupmanager backup unlock daily/2021-02-13 -dst=<DST_PATH> -storageDataPath=/vmstorage-data -eula
```
## API methods
@ -160,7 +184,23 @@ The result on the GCS bucket. We see only 3 daily backups:
```json
[{"name":"daily/2023-04-07","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:07+00:00"},{"name":"hourly/2023-04-07:11","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:06+00:00"},{"name":"latest","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:04+00:00"},{"name":"monthly/2023-04","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:10+00:00"},{"name":"weekly/2023-14","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:09+00:00"}]
```
> Note: `created_at` field is in RFC3339 format.
* GET `/api/v1/backups/<BACKUP_NAME>` - returns backup info by name.
Example output:
```json
{"name":"daily/2023-04-07","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:07+00:00","locked":true}
```
* PUT `/api/v1/backups/<BACKUP_NAME>` - updates the "locked" attribute for a backup by name.
Example request body:
```json
{"locked":true}
```
Example response:
```json
{"name":"daily/2023-04-07","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:07+00:00", "locked": true}
```
* POST `/api/v1/restore` - saves backup name to restore when [performing restore](#restore-commands).
Example request body:
@ -187,7 +227,13 @@ vmbackupmanager backup
vmbackupmanager backup list
List backups in remote storage
vmbackupmanager restore
vmbackupmanager backup lock
Locks backup in remote storage against deletion
vmbackupmanager backup unlock
Unlocks backup in remote storage for deletion
vmbackupmanager restore
Restore backup specified by restore mark if it exists
vmbackupmanager restore get
@ -377,7 +423,7 @@ command-line flags:
-dst string
The root folder of Victoria Metrics backups. Example: gs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir
-enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
Whether to enable IPv6 for listening and dialing. By default, only IPv4 TCP and UDP is used
-envflag.enable
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string
@ -387,11 +433,11 @@ command-line flags:
-flagsAuthKey string
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
-fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
Whether to use pread() instead of mmap() for reading data files. By default, mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-http.connTimeout duration
Incoming http connections are closed after the configured timeout. This may help to spread the incoming load among a cluster of services behind a load balancer. Please note that the real timeout may be bigger by up to 10% as a protection against the thundering herd problem (default 2m0s)
-http.disableResponseCompression
Disable compression of HTTP responses to save CPU resources. By default compression is enabled to save network bandwidth
Disable compression of HTTP responses to save CPU resources. By default, compression is enabled to save network bandwidth
-http.idleConnTimeout duration
Timeout for incoming idle http connections (default 1m0s)
-http.maxGracefulShutdownDuration duration
@ -447,16 +493,16 @@ command-line flags:
-pushmetrics.interval duration
Interval for pushing metrics to -pushmetrics.url (default 10s)
-pushmetrics.url array
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
Supports an array of values separated by comma or specified via multiple flags.
-runOnStart
Upload backups immediately after start of the service. Otherwise the backup starts on new hour
Upload backups immediately after start of the service. Otherwise, the backup starts on the new hour
-s3ForcePathStyle
Prefixing endpoint with bucket name when set false, true by default. (default true)
-snapshot.createURL string
VictoriaMetrics create snapshot url. When this is given, a snapshot will automatically be created during backup. Example: http://victoriametrics:8428/snapshot/create
-snapshot.deleteURL string
VictoriaMetrics delete snapshot url. Optional. Will be generated from snapshot.createURL if not provided. All created snaphosts will be automatically deleted.Example: http://victoriametrics:8428/snapshot/delete
VictoriaMetrics delete snapshot url. Optional. Will be generated from snapshot.createURL if not provided. All created snapshots will be automatically deleted. Example: http://victoriametrics:8428/snapshot/delete
-storageDataPath string
Path to VictoriaMetrics data. Must match -storageDataPath from VictoriaMetrics or vmstorage (default "victoria-metrics-data")
-tls

View file

@ -93,7 +93,7 @@ OpenTSDB migration works like so:
- e.g. `curl -Ss "http://opentsdb:4242/api/search/lookup?m=system.load5&limit=1000000"`
Here `results` return field should not be empty. Otherwise it means that meta tables are absent and needs to be turned on previously.
Here the `results` return field should not be empty. Otherwise, it means that meta tables are absent and need to be turned on beforehand.
3. Download data for each series in chunks defined in the CLI switches
@ -146,7 +146,7 @@ Retention strings essentially define the two levels of aggregation for our colle
First-order aggregation addresses how to aggregate any un-mentioned tags.
This is, conceptually, directly opposite to how PromQL deals with tags. In OpenTSDB, if a tag isn't explicitly mentioned, all values assocaited with that tag will be aggregated.
This is, conceptually, directly opposite to how PromQL deals with tags. In OpenTSDB, if a tag isn't explicitly mentioned, all values associated with that tag will be aggregated.
It is recommended to use `sum` for the first aggregation because it is relatively quick and should not cause any changes to the incoming data (because we collect each individual series).
@ -154,9 +154,9 @@ It is recommended to use `sum` for the first aggregation because it is relativel
Second-order aggregation (`1m-avg` in our example) defines any windowing that should occur before returning the data
It is recommended to match the stat collection interval so we again avoid transforming incoming data.
It is recommended to match the stat collection interval, so we again avoid transforming incoming data.
We do not allow for defining the "null value" portion of the rollup window (e.g. in the aggreagtion, `1m-avg-none`, the user cannot change `none`), as the goal of this tool is to avoid modifying incoming data.
We do not allow for defining the "null value" portion of the rollup window (e.g. in the aggregation, `1m-avg-none`, the user cannot change `none`), as the goal of this tool is to avoid modifying incoming data.
#### Windows
@ -173,9 +173,9 @@ The window `1h` means that each individual query to OpenTSDB should only span 1
It is important to ensure this window somewhat matches the row size in HBase to help improve query times.
For example, if the query is hitting a rollup table with a 4 hour row size, we should set a chunk size of a multiple of 4 hours (e.g. `4h`, `8h`, etc.) to avoid requesting data across row boundaries. Landing on row boundaries allows for more consistent request times to HBase.
For example, if the query is hitting a rollup table with a 4-hour row size, we should set a chunk size of a multiple of 4 hours (e.g. `4h`, `8h`, etc.) to avoid requesting data across row boundaries. Landing on row boundaries allows for more consistent request times to HBase.
The default table created in HBase for OpenTSDB has a 1 hour row size, so if you aren't sure on a correct row size to use, `1h` is a reasonable choice.
The default table created in HBase for OpenTSDB has a 1-hour row size, so if you aren't sure on a correct row size to use, `1h` is a reasonable choice.
##### Time range
@ -197,7 +197,7 @@ Chunking the data like this means each individual query returns faster, so we ca
### Restarting OpenTSDB migrations
One important note for OpenTSDB migration: Queries/HBase scans can "get stuck" within OpenTSDB itself. This can cause instability and performance issues within an OpenTSDB cluster, so stopping the migrator to deal with it may be necessary. Because of this, we provide the timstamp we started collecting data from at thebeginning of the run. You can stop and restart the importer using this "hard timestamp" to ensure you collect data from the same time range over multiple runs.
One important note for OpenTSDB migration: Queries/HBase scans can "get stuck" within OpenTSDB itself. This can cause instability and performance issues within an OpenTSDB cluster, so stopping the migrator to deal with it may be necessary. Because of this, we provide the timestamp we started collecting data from at the beginning of the run. You can stop and restart the importer using this "hard timestamp" to ensure you collect data from the same time range over multiple runs.
## Migrating data from InfluxDB (1.x)
@ -376,7 +376,7 @@ The configuration flags should contain self-explanatory descriptions.
The filtering consists of two parts: by timeseries and by time.
Filtering by time may be configured via flags `--prom-filter-time-start` and `--prom-filter-time-end`
in in RFC3339 format. This filter applied twice: to drop blocks out of range and to filter timeseries in blocks with
in RFC3339 format. This filter is applied twice: to drop blocks out of range and to filter timeseries in blocks with
overlapping time range.
Example of applying time filter:
@ -403,7 +403,7 @@ since this is heavy operation and will be done during import process.
Filtering by timeseries is configured with following flags:
- `--prom-filter-label` - the label name, e.g. `__name__` or `instance`;
- `--prom-filter-label-value` - the regular expression to filter the label value. By default matches all `.*`
- `--prom-filter-label-value` - the regular expression to filter the label value. By default, matches all `.*`
For example:
@ -659,7 +659,7 @@ requires an Authentication header like `X-Scope-OrgID`. You can define it via th
## Migrating data from Mimir
Mimir has similar implemintation as Cortex and also support of the Prometheus remote read protocol. That means
Mimir has a similar implementation to Cortex and also supports the Prometheus remote read protocol. That means
`vmctl` in mode `remote-read` may also be used for Mimir historical data migration.
These instructions may vary based on the details of your Mimir configuration.
Please read carefully and verify as you go.
@ -727,13 +727,9 @@ requires an Authentication header like `X-Scope-OrgID`. You can define it via th
## Migrating data from VictoriaMetrics
### Native protocol
The [native binary protocol](https://docs.victoriametrics.com/#how-to-export-data-in-native-format)
was introduced in [1.42.0 release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0)
and provides the most efficient way to migrate data between VM instances: single to single, cluster to cluster,
single to cluster and vice versa. Please note that both instances (source and destination) should be of v1.42.0
or higher.
vmctl uses [native binary protocol](https://docs.victoriametrics.com/#how-to-export-data-in-native-format)
(available since [1.42.0 release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0))
to migrate data between VM instances: single to single, cluster to cluster, single to cluster and vice versa.
See `./vmctl vm-native --help` for details and full list of flags.
@ -743,19 +739,20 @@ Migration in `vm-native` mode takes two steps:
```
./vmctl vm-native \
--vm-native-src-addr=http://127.0.0.1:8481/select/0/prometheus \
--vm-native-dst-addr=http://localhost:8428 \
--vm-native-filter-time-start='2022-11-20T00:00:00Z' \
--vm-native-filter-match='{__name__=~"vm_cache_.*"}'
--vm-native-src-addr=http://127.0.0.1:8481/select/0/prometheus \ # migrate from
--vm-native-dst-addr=http://localhost:8428 \ # migrate to
--vm-native-filter-time-start='2022-11-20T00:00:00Z' \ # starting from
--vm-native-filter-match='{__name__=~"vm_cache_.*"}' # only metrics matching the selector
VictoriaMetrics Native import mode
2023/03/02 09:22:02 Initing import process from "http://127.0.0.1:8481/select/0/prometheus/api/v1/export/native" to "http://localhost:8428/api/v1/import/native" with filter
2023/03/02 09:22:02 Initing import process from "http://127.0.0.1:8481/select/0/prometheus/api/v1/export/native"
to "http://localhost:8428/api/v1/import/native" with filter
filter: match[]={__name__=~"vm_cache_.*"}
start: 2022-11-20T00:00:00Z
2023/03/02 09:22:02 Exploring metrics...
Found 9 metrics to import. Continue? [Y/n]
2023/03/02 09:22:04 Requests to make: 9
Requests to make: 9 / 9 [███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
Requests to make: 9 / 9 [█████████████████████████████████████████████████████████████████████████████] 100.00%
2023/03/02 09:22:06 Import finished!
2023/03/02 09:22:06 VictoriaMetrics importer stats:
time spent while importing: 3.632638875s;
@ -765,109 +762,59 @@ Requests to make: 9 / 9 [██████████████████
requests retries: 0;
2023/03/02 09:22:06 Total time: 3.633127625s
```
`vmctl` uses retries with backoff policy by default.
The benefits of this retry backoff policy include:
1. Improved success rates:
With each retry attempt, the migration process has a higher chance of success.
By increasing the delay between retries, the system can avoid overwhelming the service with too many requests at once.
2. Reduced load on the system:
By increasing the delay between retries, the system can reduce the load on the service by limiting the number of
requests made in a short amount of time.
3. Better chances of migrating large volumes of data successfully.
However, there are also some potential penalties associated with using a backoff retry policy, including:
1. Increased migration process latency:
`vmctl` needs to make an additional call to the `api/v1/label/__name__/values` API with the defined `--vm-native-filter-match` flag,
and then process all metric names with additional filters.
If retries with backoff policy aren't needed, the `--vm-native-disable-retries` command-line flag can be used.
When this flag is set to `true`, `vmctl` skips the additional call to the `api/v1/label/__name__/values` API and starts the
migration process by making calls to `/api/v1/export` and `/api/v1/import`. If any error happens, `vmctl` immediately
stops the migration process.
```
./vmctl vm-native --vm-native-src-addr=http://127.0.0.1:8481/select/0/prometheus \
--vm-native-dst-addr=http://127.0.0.1:8428 \
--vm-native-filter-match='{__name__!=""}' \
--vm-native-filter-time-start='2023-04-08T11:30:30Z' \
--vm-native-disable-retries=true
VictoriaMetrics Native import mode
2023/04/11 10:17:14 Initing import process from "http://127.0.0.1:8481/select/0/prometheus/api/v1/export/native" to "http://localhost:8428/api/v1/import/native" with filter
filter: match[]={__name__!=""}
start: 2023-04-08T11:30:30Z
. Continue? [Y/n]
2023/04/11 10:17:15 Requests to make: 1
2023/04/11 10:17:15 number of workers decreased to 1, because vmctl calculated requests to make 1
Total: 0 ↙ Speed: ? p/s Continue import process with filter
filter: match[]={__name__!=""}
start: 2023-04-08T11:30:30Z
end: 2023-04-11T07:17:14Z:
Total: 1.64 GiB ↖ Speed: 11.20 MiB p/s
2023/04/11 10:19:45 Import finished!
2023/04/11 10:19:45 VictoriaMetrics importer stats:
time spent while importing: 2m30.813841541s;
total bytes: 1.8 GB;
bytes/s: 11.7 MB;
requests: 1;
requests retries: 0;
2023/04/11 10:19:45 Total time: 2m30.814721125s
```
_To disable the explore phase and switch to the old way of data migration via a single connection, use the
`--vm-native-disable-retries` cmd-line flag. Please note that in this mode `vmctl` won't be able to retry failed requests._
Importing tips:
1. Migrating big volumes of data may result in reaching the safety limits on the `src` side.
Please verify that `-search.maxExportDuration` and `-search.maxExportSeries` were set with
proper values for `src`. If hitting the limits, follow the recommendations [here](https://docs.victoriametrics.com/#how-to-export-data-in-native-format).
If hitting `the number of matching timeseries exceeds...` error, adjust filters to match less time series or update `-search.maxSeries` command-line flag on vmselect/vmsingle;
proper values for `src`. If hitting the limits, follow the recommendations
[here](https://docs.victoriametrics.com/#how-to-export-data-in-native-format).
If hitting `the number of matching timeseries exceeds...` error, adjust filters to match fewer time series or
update the `-search.maxSeries` command-line flag on vmselect/vmsingle;
2. Migrating all the metrics from one VM to another may collide with existing application metrics
(prefixed with `vm_`) at the destination and lead to confusion when using
[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
To avoid such situation try to filter out VM process metrics via `--vm-native-filter-match` flag.
3. Migration is a backfilling process, so it is recommended to read
To avoid such a situation, try to filter out VM process metrics via the `--vm-native-filter-match='{__name__!~"vm_.*"}'` flag.
3. Migrating data with an overlapping time range or via an unstable network can produce duplicate series at the destination.
To avoid duplicates, set `-dedup.minScrapeInterval=1ms` for `vmselect`/`vmstorage` at the destination.
This will instruct `vmselect`/`vmstorage` to ignore duplicates with identical timestamps.
4. When migrating large volumes of data use `--vm-native-step-interval` flag to split migration [into steps](#using-time-based-chunking-of-migration).
5. When migrating data from one VM cluster to another, consider using [cluster-to-cluster mode](#cluster-to-cluster-migration-mode).
Or manually specify addresses according to [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format):
```console
# Migrating from cluster specific tenantID to single
--vm-native-src-addr=http://<src-vmselect>:8481/select/0/prometheus
--vm-native-dst-addr=http://<dst-vmsingle>:8428
# Migrating from single to cluster specific tenantID
--vm-native-src-addr=http://<src-vmsingle>:8428
--vm-native-dst-addr=http://<dst-vminsert>:8480/insert/0/prometheus
# Migrating single to single
--vm-native-src-addr=http://<src-vmsingle>:8428
--vm-native-dst-addr=http://<dst-vmsingle>:8428
# Migrating cluster to cluster for specific tenant ID
--vm-native-src-addr=http://<src-vmselect>:8481/select/0/prometheus
--vm-native-dst-addr=http://<dst-vminsert>:8480/insert/0/prometheus
```
6. Migration speed can be adjusted via `--vm-concurrency` cmd-line flag, which controls the number of concurrent
workers busy with processing. Please note that each worker can load up to a single vCPU core on VictoriaMetrics.
So try to set it according to the allocated CPU resources of your VictoriaMetrics destination installation.
7. Migration is a backfilling process, so it is recommended to read
[Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
4. `vmctl` doesn't provide relabeling or other types of labels management in this mode.
8. `vmctl` doesn't provide relabeling or other types of labels management.
Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).
5. When importing in or from cluster version remember to use correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
and specify `accountID` param. Example formats:
```console
# Migrating from cluster to single
--vm-native-src-addr=http://<src-vmselect>:8481/select/0/prometheus
--vm-native-dst-addr=http://<dst-vmsingle>:8428
# Migrating from single to cluster
--vm-native-src-addr=http://<src-vmsingle>:8428
--vm-native-src-addr=http://<dst-vminsert>:8480/insert/0/prometheus
# Migrating single to single
--vm-native-src-addr=http://<src-vmsingle>:8428
--vm-native-dst-addr=http://<dst-vmsingle>:8428
# Migrating cluster to cluster
--vm-native-src-addr=http://<src-vmselect>:8481/select/0/prometheus
--vm-native-dst-addr=http://<dst-vminsert>:8480/insert/0/prometheus
```
6. When migrating large volumes of data it might be useful to use `--vm-native-step-interval` flag to split single process into smaller steps.
7. `vmctl` supports `--vm-concurrency` which controls the number of concurrent workers that process the input from source query results.
Please note that each import request can load up to a single vCPU core on VictoriaMetrics. So try to set it according
to allocated CPU resources of your VictoriaMetrics installation.
8. `vmctl` supports `--vm-native-src-headers` and `--vm-native-dst-headers` which defines headers to send with each request
9. `vmctl` supports `--vm-native-src-headers` and `--vm-native-dst-headers` to define headers sent with each request
to the corresponding source or destination address.
9. `vmctl` supports `--vm-native-disable-http-keep-alive` to allow `vmctl` to use non-persistent HTTP connections to avoid
10. `vmctl` supports `--vm-native-disable-http-keep-alive` to allow `vmctl` to use non-persistent HTTP connections to avoid
error `use of closed network connection` when running a longer export.
10. Migrating data with overlapping time range for destination data can produce duplicates series at destination.
To avoid duplicates on the destination set `-dedup.minScrapeInterval=1ms` for `vmselect` and `vmstorage`.
This will instruct `vmselect` and `vmstorage` to ignore duplicates with match timestamps.
In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by "source" (`src`)
and processing is done by "destination" (`dst`). So no extra memory or CPU resources required on `vmctl` side. Only
`src` and `dst` resource matter.
#### Using time-based chunking of migration
### Using time-based chunking of migration
It is possible to split the migration process into a set of smaller batches based on time. This is especially useful when
migrating large volumes of data, as it adds an indication of progress and the ability to restore the process from a certain point
@ -912,7 +859,7 @@ Requests to make: 45 / 45 [█████████████████
2023/03/02 09:18:12 Total time: 7.112405875s
```
#### Cluster-to-cluster migration mode
### Cluster-to-cluster migration mode
Using cluster-to-cluster migration mode helps to migrate all tenants' data in a single `vmctl` run.
@ -960,7 +907,9 @@ Requests to make for tenant 1:0: 28 / 28 [████████████
## Verifying exported blocks from VictoriaMetrics
In this mode, `vmctl` allows verifying correctness and integrity of data exported via [native format](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-export-data-in-native-format) from VictoriaMetrics.
In this mode, `vmctl` allows verifying correctness and integrity of data exported via
[native format](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-export-data-in-native-format)
from VictoriaMetrics.
You can verify exported data on disk before uploading it with the `vmctl verify-block` command:
```console
@ -995,7 +944,7 @@ to number of free CPU cores.
The flag `--vm-concurrency` controls the number of concurrent workers that process the input from InfluxDB query results.
Please note that each import request can load up to a single vCPU core on VictoriaMetrics. So try to set it according
to allocated CPU resources of your VictoriMetrics installation.
to allocated CPU resources of your VictoriaMetrics installation.
The flag `--vm-batch-size` controls the maximum number of samples collected before sending the import request.
For example, if `--influx-chunk-size=500` and `--vm-batch-size=2000`, then the importer will process no more
@ -1040,7 +989,7 @@ according to [information theory](https://en.wikipedia.org/wiki/Information_theo
`vmctl` provides the following flags for improving data compression:
- `--vm-round-digits` flag for rounding processed values to the given number of decimal digits after the point.
For example, `--vm-round-digits=2` would round `1.2345` to `1.23`. By default the rounding is disabled.
For example, `--vm-round-digits=2` would round `1.2345` to `1.23`. By default, the rounding is disabled.
- `--vm-significant-figures` flag for limiting the number of significant figures in processed values. It has no effect if set
to 0 (the default), but with `--vm-significant-figures=5` the value `102.342305` will be rounded to `102.34`.
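As a sketch, rounding InfluxDB samples to two decimal digits during import could look like the following (addresses and the database name are placeholders; `--influx-addr` and `--influx-database` are vmctl's InfluxDB-mode flags, assumed here for illustration):

```console
./vmctl influx --influx-addr=http://<influx>:8086 \
  --influx-database=benchmark \
  --vm-addr=http://<victoriametrics>:8428 \
  --vm-round-digits=2
```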
@ -1050,7 +999,7 @@ results such as `average`, `rate`, etc.
### Adding extra labels
`vmctl` allows to add extra labels to all imported series. It can be achived with flag `--vm-extra-label label=value`.
`vmctl` allows adding extra labels to all imported series. This can be achieved with the flag `--vm-extra-label label=value`.
If multiple labels need to be added, set the flag for each label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`.
If a time series already has a label that is also set via the `--vm-extra-label` flag, the flag takes priority and overrides the label value from the time series.
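For example, a hypothetical import run attaching two extra labels to every series (addresses, database name and label values are placeholders) might look like:

```console
./vmctl influx --influx-addr=http://<influx>:8086 \
  --influx-database=benchmark \
  --vm-addr=http://<victoriametrics>:8428 \
  --vm-extra-label source=influx \
  --vm-extra-label env=prod
```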

View file

@ -113,7 +113,7 @@ var (
&cli.Int64Flag{
Name: vmRateLimit,
Usage: "Optional data transfer rate limit in bytes per second.\n" +
"By default the rate limit is disabled. It can be useful for limiting load on configured via '--vmAddr' destination.",
"By default, the rate limit is disabled. It can be useful for limiting load on configured via '--vmAddr' destination.",
},
&cli.BoolFlag{
Name: vmDisableProgressBar,
@ -362,6 +362,7 @@ var (
&cli.StringFlag{
Name: vmNativeStepInterval,
Usage: fmt.Sprintf("Split export data into chunks. Requires setting --%s. Valid values are '%s','%s','%s','%s'.", vmNativeFilterTimeStart, stepper.StepMonth, stepper.StepDay, stepper.StepHour, stepper.StepMinute),
Value: stepper.StepMonth,
},
&cli.BoolFlag{
Name: vmNativeDisableHTTPKeepAlive,
@ -431,7 +432,7 @@ var (
&cli.Int64Flag{
Name: vmRateLimit,
Usage: "Optional data transfer rate limit in bytes per second.\n" +
"By default the rate limit is disabled. It can be useful for limiting load on source or destination databases.",
"By default, the rate limit is disabled. It can be useful for limiting load on source or destination databases.",
},
&cli.BoolFlag{
Name: vmInterCluster,
@ -503,7 +504,7 @@ var (
},
&cli.BoolFlag{
Name: remoteReadUseStream,
Usage: "Defines whether to use SAMPLES or STREAMED_XOR_CHUNKS mode. By default is uses SAMPLES mode. See https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/#streamed-chunks",
Usage: "Defines whether to use SAMPLES or STREAMED_XOR_CHUNKS mode. By default, is uses SAMPLES mode. See https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/#streamed-chunks",
Value: false,
},
&cli.StringFlag{

View file

@ -121,6 +121,55 @@ func Test_splitDateRange(t *testing.T) {
},
wantErr: false,
},
{
name: "month chunking with one day time range",
args: args{
start: "2022-01-03T11:11:11Z",
end: "2022-01-04T12:12:12Z",
granularity: StepMonth,
},
want: []testTimeRange{
{
"2022-01-03T11:11:11Z",
"2022-01-04T12:12:12Z",
},
},
wantErr: false,
},
{
name: "month chunking with same day time range",
args: args{
start: "2022-01-03T11:11:11Z",
end: "2022-01-03T12:12:12Z",
granularity: StepMonth,
},
want: []testTimeRange{
{
"2022-01-03T11:11:11Z",
"2022-01-03T12:12:12Z",
},
},
wantErr: false,
},
{
name: "month chunking with one month and two days range",
args: args{
start: "2022-01-03T11:11:11Z",
end: "2022-02-03T00:00:00Z",
granularity: StepMonth,
},
want: []testTimeRange{
{
"2022-01-03T11:11:11Z",
"2022-01-31T23:59:59.999999999Z",
},
{
"2022-02-01T00:00:00Z",
"2022-02-03T00:00:00Z",
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View file

@ -104,7 +104,7 @@ func (p *vmNativeProcessor) do(ctx context.Context, f native.Filter, srcURL, dst
p.s.retries += attempts
p.s.Unlock()
if err != nil {
return fmt.Errorf("failed to migrate from %s to %s (retry attempts: %d): %w\nwith fileter %s", srcURL, dstURL, attempts, err, f)
return fmt.Errorf("failed to migrate from %s to %s (retry attempts: %d): %w\nwith filter %s", srcURL, dstURL, attempts, err, f)
}
return nil

View file

@ -44,7 +44,7 @@ jwt token must be in following format:
Where:
* `exp` - required, expire time in unix_timestamp. If the token expires then `vmgateway` rejects the request.
* `vm_access` - required, dict with claim info, minimum form: `{"vm_access": {"tenand_id": {}}`
* `vm_access` - required, dict with claim info, minimum form: `{"vm_access": {"tenant_id": {}}}`
* `tenant_id` - optional, for cluster mode, routes requests to the corresponding tenant.
* `extra_labels` - optional, key-value pairs for label filters added to the ingested or selected metrics. Multiple filters are added with `and` operation. If defined, `extra_label` from original request removed.
* `extra_filters` - optional, [series selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) added to the select query requests. Multiple selectors are added with `or` operation. If defined, `extra_filter` from original request removed.
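For illustration, a minimal payload combining these claims might look as follows (a sketch: the expiry, tenant and label values are placeholders, and the `account_id`/`project_id` fields are assumed to follow the cluster tenant notation):

```json
{
  "exp": 1893456000,
  "vm_access": {
    "tenant_id": {"account_id": 0, "project_id": 0},
    "extra_labels": {"team": "dev"},
    "extra_filters": ["{env=~\"prod|dev\"}"]
  }
}
```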
@ -144,7 +144,7 @@ EOF
./bin/vmselect -eula -storageNode 127.0.0.1:8401
./bin/vminsert -eula -storageNode 127.0.0.1:8400
# create base rate limitng config:
# create base rate limiting config:
cat << EOF > limit.yaml
limits:
- type: queries
@ -310,7 +310,7 @@ The shortlist of configuration flags include the following:
-datasource.queryStep duration
How far a value can fall back to when evaluating queries. For example, if -datasource.queryStep=15s then param "step" with value "15s" will be added to every query. If set to 0, rule's evaluation interval will be used instead. (default 5m0s)
-datasource.queryTimeAlignment
Whether to align "time" parameter with evaluation interval.Alignment supposed to produce deterministic results despite of number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257 (default true)
Whether to align "time" parameter with evaluation interval.Alignment supposed to produce deterministic results despite number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257 (default true)
-datasource.roundDigits int
Adds "round_digits" GET param to datasource requests. In VM "round_digits" limits the number of digits after the decimal point in response values.
-datasource.showURL
@ -332,7 +332,7 @@ The shortlist of configuration flags include the following:
-enable.rateLimit
enables rate limiter
-enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
Whether to enable IPv6 for listening and dialing. By default, only IPv4 TCP and UDP is used
-envflag.enable
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string
@ -342,11 +342,11 @@ The shortlist of configuration flags include the following:
-flagsAuthKey string
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
-fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
Whether to use pread() instead of mmap() for reading data files. By default, mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-http.connTimeout duration
Incoming http connections are closed after the configured timeout. This may help to spread the incoming load among a cluster of services behind a load balancer. Please note that the real timeout may be bigger by up to 10% as a protection against the thundering herd problem (default 2m0s)
-http.disableResponseCompression
Disable compression of HTTP responses to save CPU resources. By default compression is enabled to save network bandwidth
Disable compression of HTTP responses to save CPU resources. By default, compression is enabled to save network bandwidth
-http.idleConnTimeout duration
Timeout for incoming idle http connections (default 1m0s)
-http.maxGracefulShutdownDuration duration
@ -396,7 +396,7 @@ The shortlist of configuration flags include the following:
-pushmetrics.interval duration
Interval for pushing metrics to -pushmetrics.url (default 10s)
-pushmetrics.url array
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
Supports an array of values separated by comma or specified via multiple flags.
-ratelimit.config string
path for configuration file. Accepts url address

View file

@ -23,7 +23,7 @@ var (
"See https://docs.victoriametrics.com/stream-aggregation.html . "+
"See also -remoteWrite.streamAggr.keepInput and -streamAggr.dedupInterval")
streamAggrKeepInput = flag.Bool("streamAggr.keepInput", false, "Whether to keep input samples after the aggregation with -streamAggr.config. "+
"By default the input is dropped after the aggregation, so only the aggregate data is stored. "+
"By default, the input is dropped after the aggregation, so only the aggregate data is stored. "+
"See https://docs.victoriametrics.com/stream-aggregation.html")
streamAggrDedupInterval = flag.Duration("streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before being aggregated. "+
"Only the last sample per each time series per each interval is aggregated if the interval is greater than zero")

View file

@ -21,7 +21,7 @@ import (
var (
measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for '{measurement}{separator}{field_name}' metric name when inserted via InfluxDB line protocol")
skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if InfluxDB line contains only a single field")
skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if InfluxDB line contains only a single field")
skipMeasurement = flag.Bool("influxSkipMeasurement", false, "Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'")
dbLabel = flag.String("influxDBLabel", "db", "Default label for the DB name sent over '?db={db_name}' query parameter")
)

View file

@ -94,7 +94,7 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
-customS3Endpoint string
Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO). S3 is used if not set
-enableTCP6
Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
Whether to enable IPv6 for listening and dialing. By default, only IPv4 TCP and UDP is used
-envflag.enable
Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set. See https://docs.victoriametrics.com/#environment-variables for more details
-envflag.prefix string
@ -104,11 +104,11 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
-flagsAuthKey string
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
-fs.disableMmap
Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
Whether to use pread() instead of mmap() for reading data files. By default, mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-http.connTimeout duration
Incoming http connections are closed after the configured timeout. This may help to spread the incoming load among a cluster of services behind a load balancer. Please note that the real timeout may be bigger by up to 10% as a protection against the thundering herd problem (default 2m0s)
-http.disableResponseCompression
Disable compression of HTTP responses to save CPU resources. By default compression is enabled to save network bandwidth
Disable compression of HTTP responses to save CPU resources. By default, compression is enabled to save network bandwidth
-http.idleConnTimeout duration
Timeout for incoming idle http connections (default 1m0s)
-http.maxGracefulShutdownDuration duration
@ -157,7 +157,7 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
-pushmetrics.interval duration
Interval for pushing metrics to -pushmetrics.url (default 10s)
-pushmetrics.url array
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
Supports an array of values separated by comma or specified via multiple flags.
-s3ForcePathStyle
Prefixing endpoint with bucket name when set false, true by default. (default true)

View file

@ -35,9 +35,9 @@ var (
maxQueryLen = flagutil.NewBytes("search.maxQueryLen", 16*1024, "The maximum search query length in bytes")
maxLookback = flag.Duration("search.maxLookback", 0, "Synonym to -search.lookback-delta from Prometheus. "+
"The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. "+
"See also '-search.maxStalenessInterval' flag, which has the same meaining due to historical reasons")
"See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons")
maxStalenessInterval = flag.Duration("search.maxStalenessInterval", 0, "The maximum interval for staleness calculations. "+
"By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning "+
"By default, it is automatically calculated from the median interval between samples. This flag could be useful for tuning "+
"Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. "+
"See also '-search.setLookbackToStep' flag")
setLookbackToStep = flag.Bool("search.setLookbackToStep", false, "Whether to fix lookback interval to 'step' query arg value. "+

View file

@ -1,14 +1,14 @@
{
"files": {
"main.css": "./static/css/main.0d9f8101.css",
"main.js": "./static/js/main.ba695a31.js",
"main.css": "./static/css/main.f31d05d1.css",
"main.js": "./static/js/main.ff1e4560.js",
"static/js/27.c1ccfd29.chunk.js": "./static/js/27.c1ccfd29.chunk.js",
"static/media/Lato-Regular.ttf": "./static/media/Lato-Regular.d714fec1633b69a9c2e9.ttf",
"static/media/Lato-Bold.ttf": "./static/media/Lato-Bold.32360ba4b57802daa4d6.ttf",
"index.html": "./index.html"
},
"entrypoints": [
"static/css/main.0d9f8101.css",
"static/js/main.ba695a31.js"
"static/css/main.f31d05d1.css",
"static/js/main.ff1e4560.js"
]
}

View file

@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.ba695a31.js"></script><link href="./static/css/main.0d9f8101.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.ff1e4560.js"></script><link href="./static/css/main.f31d05d1.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -7,7 +7,7 @@
/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */
/**
* @remix-run/router v1.3.0
* @remix-run/router v1.5.0
*
* Copyright (c) Remix Software Inc.
*
@ -18,7 +18,7 @@
*/
/**
* React Router DOM v6.7.0
* React Router DOM v6.10.0
*
* Copyright (c) Remix Software Inc.
*
@ -29,7 +29,7 @@
*/
/**
* React Router v6.7.0
* React Router v6.10.0
*
* Copyright (c) Remix Software Inc.
*

View file

@ -49,7 +49,7 @@ var (
logNewSeries = flag.Bool("logNewSeries", false, "Whether to log new series. This option is for debug purposes only. It can lead to performance issues "+
"when big number of new series are ingested into VictoriaMetrics")
denyQueriesOutsideRetention = flag.Bool("denyQueriesOutsideRetention", false, "Whether to deny queries outside of the configured -retentionPeriod. "+
denyQueriesOutsideRetention = flag.Bool("denyQueriesOutsideRetention", false, "Whether to deny queries outside the configured -retentionPeriod. "+
"When set, then /api/v1/query_range would return '503 Service Unavailable' error for queries with 'from' value outside -retentionPeriod. "+
"This may be useful when multiple data sources with distinct retentions are hidden behind query-tee")
maxHourlySeries = flag.Int("storage.maxHourlySeries", 0, "The maximum number of unique series can be added to the storage during the last hour. "+

View file

@ -6,7 +6,7 @@ COPY web/ /build/
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/
FROM alpine:3.17.3
FROM alpine:3.18.0
USER root
COPY --from=build-web-stage /build/web-amd64 /app/web

File diff suppressed because it is too large

View file

@ -20,3 +20,8 @@ export interface TracingData {
duration_msec: number;
children: TracingData[];
}
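/**
 * Query execution stats attached to query responses.
 * Assumed semantics: `seriesFetched` arrives from the API as a string;
 * `resultLength` is the number of series in the rendered result.
 */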
export interface QueryStats {
seriesFetched?: string;
resultLength?: number;
}

View file

@ -1,19 +1,17 @@
import React, { FC, useEffect, useRef, useState } from "preact/compat";
import uPlot, { Options as uPlotOptions } from "uplot";
import useResize from "../../../hooks/useResize";
import { BarChartProps } from "./types";
import "./style.scss";
import { useAppState } from "../../../state/common/StateContext";
const BarChart: FC<BarChartProps> = ({
data,
container,
layoutSize,
configs }) => {
const { isDarkTheme } = useAppState();
const uPlotRef = useRef<HTMLDivElement>(null);
const [uPlotInst, setUPlotInst] = useState<uPlot>();
const layoutSize = useResize(container);
const options: uPlotOptions ={
...configs,

View file

@ -1,7 +1,8 @@
import { AlignedData as uPlotData, Options as uPlotOptions } from "uplot";
import { ElementSize } from "../../../hooks/useElementSize";
export interface BarChartProps {
data: uPlotData;
container: HTMLDivElement | null,
layoutSize: ElementSize,
configs: uPlotOptions,
}
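`ElementSize` comes from a new `useElementSize` hook that the diff does not include. Judging by this type and the tuple call site in the GraphView hunk further down (`const [containerRef, containerSize] = useElementSize();`), a minimal sketch could look like the following; the `ResizeObserver` implementation detail is an assumption:

```ts
// Hypothetical sketch of hooks/useElementSize.ts; not part of this commit.
import { useCallback, useRef, useState } from "preact/compat";

export interface ElementSize {
  width: number;
  height: number;
}

const useElementSize = <T extends HTMLElement>(): [(node: T | null) => void, ElementSize] => {
  const [size, setSize] = useState<ElementSize>({ width: 0, height: 0 });
  const observerRef = useRef<ResizeObserver>();

  // Callback ref: re-attach a ResizeObserver whenever the observed node changes.
  const ref = useCallback((node: T | null) => {
    observerRef.current?.disconnect();
    if (!node) return;
    observerRef.current = new ResizeObserver(([entry]) => {
      const { width, height } = entry.contentRect;
      setSize({ width, height });
    });
    observerRef.current.observe(node);
  }, []);

  return [ref, size];
};

export default useElementSize;
```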

View file

@ -19,7 +19,7 @@
&__description {
display: inline-block;
line-height: 20px;
line-height: 1.5;
svg,
code {

View file

@ -1,4 +1,4 @@
import React, { FC, useEffect, useMemo, useRef, useState } from "preact/compat";
import React, { FC, useCallback, useEffect, useRef, useState } from "preact/compat";
import uPlot from "uplot";
import ReactDOM from "react-dom";
import Button from "../../../Main/Button/Button";
@ -6,6 +6,7 @@ import { CloseIcon, DragIcon } from "../../../Main/Icons";
import classNames from "classnames";
import { MouseEvent as ReactMouseEvent } from "react";
import "../../Line/ChartTooltip/style.scss";
import useEventListener from "../../../../hooks/useEventListener";
export interface TooltipHeatmapProps {
cursor: {left: number, top: number}
@ -45,24 +46,22 @@ const ChartTooltipHeatmap: FC<ChartTooltipHeatmapProps> = ({
const [moving, setMoving] = useState(false);
const [moved, setMoved] = useState(false);
const targetPortal = useMemo(() => u.root.querySelector(".u-wrap"), [u]);
const handleClose = () => {
onClose && onClose(id);
};
const handleMouseDown = (e: ReactMouseEvent<HTMLButtonElement, MouseEvent>) => {
const handleMouseDown = (e: ReactMouseEvent) => {
setMoved(true);
setMoving(true);
const { clientX, clientY } = e;
setPosition({ top: clientY, left: clientX });
};
const handleMouseMove = (e: MouseEvent) => {
const handleMouseMove = useCallback((e: MouseEvent) => {
if (!moving) return;
const { clientX, clientY } = e;
setPosition({ top: clientY, left: clientX });
};
}, [moving]);
const handleMouseUp = () => {
setMoving(false);
@ -88,19 +87,10 @@ const ChartTooltipHeatmap: FC<ChartTooltipHeatmapProps> = ({
useEffect(calcPosition, [u, cursor, tooltipOffset, tooltipRef]);
useEffect(() => {
if (moving) {
document.addEventListener("mousemove", handleMouseMove);
document.addEventListener("mouseup", handleMouseUp);
}
useEventListener("mousemove", handleMouseMove);
useEventListener("mouseup", handleMouseUp);
return () => {
document.removeEventListener("mousemove", handleMouseMove);
document.removeEventListener("mouseup", handleMouseUp);
};
}, [moving]);
if (!targetPortal || !cursor.left || !cursor.top || !value) return null;
if (!cursor?.left || !cursor?.top || !value) return null;
return ReactDOM.createPortal((
<div
@ -146,7 +136,7 @@ const ChartTooltipHeatmap: FC<ChartTooltipHeatmapProps> = ({
{bucket}
</div>
</div>
), targetPortal);
), u.root);
};
export default ChartTooltipHeatmap;
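The `useEventListener` hook referenced throughout this commit is not included in the diff. A minimal sketch consistent with its call sites (an event name, a handler that is usually memoized with `useCallback`, and an optional target ref, with `window` assumed as the default target):

```ts
// Hypothetical sketch of hooks/useEventListener.ts; not part of this commit.
import { useEffect, useRef } from "preact/compat";

const useEventListener = <K extends keyof WindowEventMap>(
  eventName: K,
  handler: (e: WindowEventMap[K]) => void,
  elementRef?: { current: EventTarget | null },
) => {
  // Keep the latest handler in a ref so the subscription only needs to be
  // re-created when the event name or target changes, not on every render.
  const savedHandler = useRef(handler);

  useEffect(() => {
    savedHandler.current = handler;
  }, [handler]);

  useEffect(() => {
    const target = elementRef?.current ?? window;
    const listener = (e: Event) => savedHandler.current(e as WindowEventMap[K]);
    target.addEventListener(eventName, listener);
    return () => target.removeEventListener(eventName, listener);
  }, [eventName, elementRef]);
};

export default useEventListener;
```

This pattern explains why the effects that manually called `addEventListener`/`removeEventListener` can be deleted in the hunks above and below.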

View file

@ -10,7 +10,6 @@ import { getAxes } from "../../../../utils/uplot/axes";
import { MetricResult } from "../../../../api/types";
import { dateFromSeconds, formatDateForNativeInput, limitsDurations } from "../../../../utils/time";
import throttle from "lodash.throttle";
import useResize from "../../../../hooks/useResize";
import { TimeParams } from "../../../../types";
import { YaxisState } from "../../../../state/graph/reducer";
import "uplot/dist/uPlot.min.css";
@ -23,6 +22,8 @@ import ChartTooltipHeatmap, {
ChartTooltipHeatmapProps,
TooltipHeatmapProps
} from "../ChartTooltipHeatmap/ChartTooltipHeatmap";
import { ElementSize } from "../../../../hooks/useElementSize";
import useEventListener from "../../../../hooks/useEventListener";
export interface HeatmapChartProps {
metrics: MetricResult[];
@ -31,7 +32,7 @@ export interface HeatmapChartProps {
yaxis: YaxisState;
unit?: string;
setPeriod: ({ from, to }: {from: Date, to: Date}) => void;
container: HTMLDivElement | null;
layoutSize: ElementSize,
height?: number;
onChangeLegend: (val: TooltipHeatmapProps) => void;
}
@ -45,7 +46,7 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
yaxis,
unit,
setPeriod,
container,
layoutSize,
height,
onChangeLegend,
}) => {
@ -56,7 +57,6 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
const [xRange, setXRange] = useState({ min: period.start, max: period.end });
const [uPlotInst, setUPlotInst] = useState<uPlot>();
const [startTouchDistance, setStartTouchDistance] = useState(0);
const layoutSize = useResize(container);
const [tooltipProps, setTooltipProps] = useState<TooltipHeatmapProps | null>(null);
const [tooltipOffset, setTooltipOffset] = useState({ left: 0, top: 0 });
@ -116,7 +116,7 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
});
};
const handleKeyDown = (e: KeyboardEvent) => {
const handleKeyDown = useCallback((e: KeyboardEvent) => {
const { target, ctrlKey, metaKey, key } = e;
const isInput = target instanceof HTMLInputElement || target instanceof HTMLTextAreaElement;
if (!uPlotInst || isInput) return;
@ -131,10 +131,10 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
max: xRange.max - factor
});
}
};
}, [uPlotInst, xRange]);
const handleClick = () => {
if (!tooltipProps) return;
const handleClick = useCallback(() => {
if (!tooltipProps?.value) return;
const id = `${tooltipProps?.bucket}_${tooltipProps?.startDate}`;
const props = {
id,
@ -147,13 +147,12 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
const res = JSON.parse(JSON.stringify(props));
setStickyToolTips(prev => [...prev, res]);
}
};
}, [stickyTooltips, tooltipProps, tooltipOffset, unit]);
const handleUnStick = (id:string) => {
const handleUnStick = (id: string) => {
setStickyToolTips(prev => prev.filter(t => t.id !== id));
};
const setCursor = (u: uPlot) => {
const left = u.cursor.left && u.cursor.left > 0 ? u.cursor.left : 0;
const top = u.cursor.top && u.cursor.top > 0 ? u.cursor.top : 0;
@ -263,21 +262,14 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
useEffect(() => {
setStickyToolTips([]);
setTooltipProps(null);
if (!uPlotRef.current || !layoutSize.width || !layoutSize.height) return;
const isValidData = data[0] === null && Array.isArray(data[1]);
if (!uPlotRef.current || !layoutSize.width || !layoutSize.height || !isValidData) return;
const u = new uPlot(options, data, uPlotRef.current);
setUPlotInst(u);
setXRange({ min: period.start, max: period.end });
return u.destroy;
}, [uPlotRef.current, layoutSize, height, isDarkTheme, data]);
useEffect(() => {
window.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
};
}, [xRange]);
const handleTouchStart = (e: TouchEvent) => {
if (e.touches.length !== 2) return;
e.preventDefault();
@ -287,7 +279,7 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
setStartTouchDistance(Math.sqrt(dx * dx + dy * dy));
};
const handleTouchMove = (e: TouchEvent) => {
const handleTouchMove = useCallback((e: TouchEvent) => {
if (e.touches.length !== 2 || !uPlotInst) return;
e.preventDefault();
@ -307,34 +299,20 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
min: min + zoomFactor,
max: max - zoomFactor
}));
};
useEffect(() => {
window.addEventListener("touchmove", handleTouchMove);
window.addEventListener("touchstart", handleTouchStart);
return () => {
window.removeEventListener("touchmove", handleTouchMove);
window.removeEventListener("touchstart", handleTouchStart);
};
}, [uPlotInst, startTouchDistance]);
}, [uPlotInst, startTouchDistance, xRange]);
useEffect(() => updateChart(typeChartUpdate.xRange), [xRange]);
useEffect(() => updateChart(typeChartUpdate.yRange), [yaxis]);
useEffect(() => {
const show = !!tooltipProps?.value;
if (show) window.addEventListener("click", handleClick);
return () => {
window.removeEventListener("click", handleClick);
};
}, [tooltipProps, stickyTooltips]);
useEffect(() => {
if (tooltipProps) onChangeLegend(tooltipProps);
}, [tooltipProps]);
useEventListener("click", handleClick);
useEventListener("keydown", handleKeyDown);
useEventListener("touchmove", handleTouchMove);
useEventListener("touchstart", handleTouchStart);
return (
<div
className={classNames({

View file

@ -1,4 +1,4 @@
import React, { FC, useEffect, useMemo, useRef, useState } from "preact/compat";
import React, { FC, useCallback, useEffect, useMemo, useRef, useState } from "preact/compat";
import uPlot from "uplot";
import { MetricResult } from "../../../../api/types";
import { formatPrettyNumber } from "../../../../utils/uplot/helpers";
@ -12,6 +12,7 @@ import classNames from "classnames";
import { MouseEvent as ReactMouseEvent } from "react";
import "./style.scss";
import { SeriesItem } from "../../../../utils/uplot/series";
import useEventListener from "../../../../hooks/useEventListener";
export interface ChartTooltipProps {
id: string,
@ -47,8 +48,6 @@ const ChartTooltip: FC<ChartTooltipProps> = ({
const [seriesIdx, setSeriesIdx] = useState(tooltipIdx.seriesIdx);
const [dataIdx, setDataIdx] = useState(tooltipIdx.dataIdx);
const targetPortal = useMemo(() => u.root.querySelector(".u-wrap"), [u]);
const value = get(u, ["data", seriesIdx, dataIdx], 0);
const valueFormat = formatPrettyNumber(value, get(yRange, [0]), get(yRange, [1]));
const dataTime = u.data[0][dataIdx];
@ -85,11 +84,11 @@ const ChartTooltip: FC<ChartTooltipProps> = ({
setPosition({ top: clientY, left: clientX });
};
const handleMouseMove = (e: MouseEvent) => {
const handleMouseMove = useCallback((e: MouseEvent) => {
if (!moving) return;
const { clientX, clientY } = e;
setPosition({ top: clientY, left: clientX });
};
}, [moving]);
const handleMouseUp = () => {
setMoving(false);
@ -125,19 +124,10 @@ const ChartTooltip: FC<ChartTooltipProps> = ({
setDataIdx(tooltipIdx.dataIdx);
}, [tooltipIdx]);
useEffect(() => {
if (moving) {
document.addEventListener("mousemove", handleMouseMove);
document.addEventListener("mouseup", handleMouseUp);
}
useEventListener("mousemove", handleMouseMove);
useEventListener("mouseup", handleMouseUp);
return () => {
document.removeEventListener("mousemove", handleMouseMove);
document.removeEventListener("mouseup", handleMouseUp);
};
}, [moving]);
if (!targetPortal || tooltipIdx.seriesIdx < 0 || tooltipIdx.dataIdx < 0) return null;
if (tooltipIdx.seriesIdx < 0 || tooltipIdx.dataIdx < 0) return null;
return ReactDOM.createPortal((
<div
@ -190,7 +180,7 @@ const ChartTooltip: FC<ChartTooltipProps> = ({
{fullMetricName}
</div>
</div>
), targetPortal);
), u.root);
};
export default ChartTooltip;

View file

@ -66,7 +66,7 @@ $chart-tooltip-y: -1 * ($padding-small + $chart-tooltip-half-icon);
gap: $padding-small;
align-items: flex-start;
word-break: break-all;
line-height: 12px;
line-height: $font-size;
&__marker {
width: 12px;

View file

@ -5,6 +5,7 @@ import "./style.scss";
import classNames from "classnames";
import Tooltip from "../../../../Main/Tooltip/Tooltip";
import { getFreeFields } from "./helpers";
import useCopyToClipboard from "../../../../../hooks/useCopyToClipboard";
interface LegendItemProps {
legend: LegendItemType;
@ -13,16 +14,20 @@ interface LegendItemProps {
}
const LegendItem: FC<LegendItemProps> = ({ legend, onChange, isHeatmap }) => {
const copyToClipboard = useCopyToClipboard();
const [copiedValue, setCopiedValue] = useState("");
const freeFormFields = useMemo(() => {
const result = getFreeFields(legend);
return isHeatmap ? result.filter(f => f.key !== "vmrange") : result;
}, [legend, isHeatmap]);
const calculations = legend.calculations;
const showCalculations = Object.values(calculations).some(v => v);
const handleClickFreeField = async (val: string, id: string) => {
await navigator.clipboard.writeText(val);
const copied = await copyToClipboard(val);
if (!copied) return;
setCopiedValue(id);
setTimeout(() => setCopiedValue(""), 2000);
};
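`useCopyToClipboard` itself is not shown in this commit. From the call sites (awaited with a value plus an optional success message, returning a boolean) a plausible sketch is the following; the snackbar wiring and import path are assumptions based on the `useSnack` usages this hook replaces:

```ts
// Hypothetical sketch of hooks/useCopyToClipboard.ts; not part of this commit.
import { useCallback } from "preact/compat";
import { useSnack } from "../contexts/Snackbar"; // path is illustrative

const useCopyToClipboard = () => {
  const { showInfoMessage } = useSnack();

  return useCallback(async (text: string, msgInfo?: string): Promise<boolean> => {
    try {
      await navigator.clipboard.writeText(text);
      if (msgInfo) showInfoMessage({ text: msgInfo, type: "success" });
      return true;
    } catch (e) {
      if (e instanceof Error) {
        showInfoMessage({ text: `${e.name}: ${e.message}`, type: "error" });
      }
      return false;
    }
  }, [showInfoMessage]);
};

export default useCopyToClipboard;
```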

View file

@ -11,6 +11,7 @@
cursor: pointer;
transition: 0.2s ease;
margin-bottom: $padding-small;
font-size: $font-size-small;
&:hover {
background-color: rgba(0, 0, 0, 0.1);

View file

@ -13,7 +13,6 @@ import { getAxes, getMinMaxBuffer } from "../../../../utils/uplot/axes";
import { MetricResult } from "../../../../api/types";
import { dateFromSeconds, formatDateForNativeInput, limitsDurations } from "../../../../utils/time";
import throttle from "lodash.throttle";
import useResize from "../../../../hooks/useResize";
import { TimeParams } from "../../../../types";
import { YaxisState } from "../../../../state/graph/reducer";
import "uplot/dist/uPlot.min.css";
@ -23,6 +22,8 @@ import ChartTooltip, { ChartTooltipProps } from "../ChartTooltip/ChartTooltip";
import dayjs from "dayjs";
import { useAppState } from "../../../../state/common/StateContext";
import { SeriesItem } from "../../../../utils/uplot/series";
import { ElementSize } from "../../../../hooks/useElementSize";
import useEventListener from "../../../../hooks/useEventListener";
export interface LineChartProps {
metrics: MetricResult[];
@ -32,7 +33,7 @@ export interface LineChartProps {
series: uPlotSeries[];
unit?: string;
setPeriod: ({ from, to }: {from: Date, to: Date}) => void;
container: HTMLDivElement | null;
layoutSize: ElementSize;
height?: number;
}
@ -46,7 +47,7 @@ const LineChart: FC<LineChartProps> = ({
yaxis,
unit,
setPeriod,
container,
layoutSize,
height
}) => {
const { isDarkTheme } = useAppState();
@ -57,7 +58,6 @@ const LineChart: FC<LineChartProps> = ({
const [yRange, setYRange] = useState([0, 1]);
const [uPlotInst, setUPlotInst] = useState<uPlot>();
const [startTouchDistance, setStartTouchDistance] = useState(0);
const layoutSize = useResize(container);
const [showTooltip, setShowTooltip] = useState(false);
const [tooltipIdx, setTooltipIdx] = useState({ seriesIdx: -1, dataIdx: -1 });
@ -115,7 +115,7 @@ const LineChart: FC<LineChartProps> = ({
});
};
const handleKeyDown = (e: KeyboardEvent) => {
const handleKeyDown = useCallback((e: KeyboardEvent) => {
const { target, ctrlKey, metaKey, key } = e;
const isInput = target instanceof HTMLInputElement || target instanceof HTMLTextAreaElement;
if (!uPlotInst || isInput) return;
@ -130,9 +130,10 @@ const LineChart: FC<LineChartProps> = ({
max: xRange.max - factor
});
}
};
}, [uPlotInst, xRange]);
const handleClick = () => {
const handleClick = useCallback(() => {
if (!showTooltip) return;
const id = `${tooltipIdx.seriesIdx}_${tooltipIdx.dataIdx}`;
const props = {
id,
@ -148,7 +149,7 @@ const LineChart: FC<LineChartProps> = ({
const tooltipProps = JSON.parse(JSON.stringify(props));
setStickyToolTips(prev => [...prev, tooltipProps]);
}
};
}, [metrics, series, stickyTooltips, tooltipIdx, tooltipOffset, showTooltip, unit, yRange]);
const handleUnStick = (id:string) => {
setStickyToolTips(prev => prev.filter(t => t.id !== id));
@ -231,14 +232,6 @@ const LineChart: FC<LineChartProps> = ({
return u.destroy;
}, [uPlotRef.current, series, layoutSize, height, isDarkTheme]);
useEffect(() => {
window.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
};
}, [xRange]);
const handleTouchStart = (e: TouchEvent) => {
if (e.touches.length !== 2) return;
e.preventDefault();
@ -248,7 +241,7 @@ const LineChart: FC<LineChartProps> = ({
setStartTouchDistance(Math.sqrt(dx * dx + dy * dy));
};
const handleTouchMove = (e: TouchEvent) => {
const handleTouchMove = useCallback((e: TouchEvent) => {
if (e.touches.length !== 2 || !uPlotInst) return;
e.preventDefault();
@ -268,17 +261,7 @@ const LineChart: FC<LineChartProps> = ({
min: min + zoomFactor,
max: max - zoomFactor
}));
};
useEffect(() => {
window.addEventListener("touchmove", handleTouchMove);
window.addEventListener("touchstart", handleTouchStart);
return () => {
window.removeEventListener("touchmove", handleTouchMove);
window.removeEventListener("touchstart", handleTouchStart);
};
}, [uPlotInst, startTouchDistance]);
}, [uPlotInst, startTouchDistance, xRange]);
useEffect(() => updateChart(typeChartUpdate.xRange), [xRange]);
useEffect(() => updateChart(typeChartUpdate.yRange), [yaxis]);
@ -286,14 +269,13 @@ const LineChart: FC<LineChartProps> = ({
useEffect(() => {
const show = tooltipIdx.dataIdx !== -1 && tooltipIdx.seriesIdx !== -1;
setShowTooltip(show);
if (show) window.addEventListener("click", handleClick);
return () => {
window.removeEventListener("click", handleClick);
};
}, [tooltipIdx, stickyTooltips]);
useEventListener("click", handleClick);
useEventListener("keydown", handleKeyDown);
useEventListener("touchmove", handleTouchMove);
useEventListener("touchstart", handleTouchStart);
return (
<div
className={classNames({

View file

@ -7,13 +7,13 @@ $color-bar-highest: #F79420;
display: grid;
grid-template-columns: auto 1fr;
height: 100%;
padding-bottom: #{$font-size-small/2};
padding-bottom: calc($font-size-small/2);
overflow: hidden;
&-y-axis {
position: relative;
display: grid;
transform: translateY(#{$font-size-small});
transform: translateY($font-size-small);
&__tick {
position: relative;

View file

@ -1,4 +1,4 @@
import React, { FC, useRef, useState } from "preact/compat";
import React, { FC, useRef } from "preact/compat";
import { useCustomPanelDispatch, useCustomPanelState } from "../../../state/customPanel/CustomPanelStateContext";
import { useQueryDispatch, useQueryState } from "../../../state/query/QueryStateContext";
import "./style.scss";
@ -8,6 +8,7 @@ import Popper from "../../Main/Popper/Popper";
import { TuneIcon } from "../../Main/Icons";
import Button from "../../Main/Button/Button";
import classNames from "classnames";
import useBoolean from "../../../hooks/useBoolean";
const AdditionalSettingsControls: FC<{isMobile?: boolean}> = ({ isMobile }) => {
const { autocomplete } = useQueryState();
@ -59,16 +60,13 @@ const AdditionalSettingsControls: FC<{isMobile?: boolean}> = ({ isMobile }) => {
const AdditionalSettings: FC = () => {
const { isMobile } = useDeviceDetect();
const [openList, setOpenList] = useState(false);
const targetRef = useRef<HTMLDivElement>(null);
const handleToggleList = () => {
setOpenList(prev => !prev);
};
const handleCloseList = () => {
setOpenList(false);
};
const {
value: openList,
toggle: handleToggleList,
setFalse: handleCloseList,
} = useBoolean(false);
if (isMobile) {
return (
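The `useBoolean` hook that replaces the repeated open/close `useState` boilerplate in this and the following hunks is not part of the diff. Judging by the fields destructured at the call sites (`value`, `setValue`, `setTrue`, `setFalse`, `toggle`), a minimal sketch might be:

```ts
// Hypothetical sketch of hooks/useBoolean.ts; not part of this commit.
import { useCallback, useState } from "preact/compat";

const useBoolean = (initialValue = false) => {
  const [value, setValue] = useState(initialValue);

  // Stable callbacks, safe to pass directly as event handlers.
  const setTrue = useCallback(() => setValue(true), []);
  const setFalse = useCallback(() => setValue(false), []);
  const toggle = useCallback(() => setValue(prev => !prev), []);

  return { value, setValue, setTrue, setFalse, toggle };
};

export default useBoolean;
```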

View file

@ -5,7 +5,7 @@
align-items: center;
justify-content: flex-start;
flex-wrap: wrap;
gap: $padding-global;
gap: $padding-medium;
&__input {
flex-basis: 160px;

View file

@ -15,6 +15,7 @@ import Timezones from "./Timezones/Timezones";
import { useTimeDispatch, useTimeState } from "../../../state/time/TimeStateContext";
import ThemeControl from "../ThemeControl/ThemeControl";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import useBoolean from "../../../hooks/useBoolean";
const title = "Settings";
@ -40,15 +41,14 @@ const GlobalSettings: FC = () => {
setTimezone(stateTimezone);
};
const [open, setOpen] = useState(false);
const handleOpen = () => setOpen(true);
const handleClose = () => {
setOpen(false);
setDefaultsValues();
};
const {
value: open,
setTrue: handleOpen,
setFalse: handleClose,
} = useBoolean(false);
const handleCloseForce = () => {
setOpen(false);
const handleCloseAndReset = () => {
handleClose();
setDefaultsValues();
};
@ -60,7 +60,7 @@ const GlobalSettings: FC = () => {
dispatch({ type: "SET_SERVER", payload: serverUrl });
timeDispatch({ type: "SET_TIMEZONE", payload: timezone });
customPanelDispatch({ type: "SET_SERIES_LIMITS", payload: limits });
setOpen(false);
handleClose();
};
useEffect(() => {
@ -97,7 +97,7 @@ const GlobalSettings: FC = () => {
{open && (
<Modal
title={title}
onClose={handleClose}
onClose={handleCloseAndReset}
>
<div
className={classNames({
@ -140,7 +140,7 @@ const GlobalSettings: FC = () => {
<Button
color="error"
variant="outlined"
onClick={handleCloseForce}
onClick={handleCloseAndReset}
>
Cancel
</Button>

View file

@ -11,6 +11,7 @@ import Tooltip from "../../../Main/Tooltip/Tooltip";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import TextField from "../../../Main/TextField/TextField";
import { getTenantIdFromUrl, replaceTenantId } from "../../../../utils/tenants";
import useBoolean from "../../../../hooks/useBoolean";
const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
const appModeEnable = getAppModeEnable();
@ -21,9 +22,14 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
const timeDispatch = useTimeDispatch();
const [search, setSearch] = useState("");
const [openOptions, setOpenOptions] = useState(false);
const optionsButtonRef = useRef<HTMLDivElement>(null);
const {
value: openOptions,
toggle: toggleOpenOptions,
setFalse: handleCloseOptions,
} = useBoolean(false);
const accountIdsFiltered = useMemo(() => {
if (!search) return accountIds;
try {
@ -37,14 +43,6 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
const showTenantSelector = useMemo(() => accountIds.length > 1, [accountIds]);
const toggleOpenOptions = () => {
setOpenOptions(prev => !prev);
};
const handleCloseOptions = () => {
setOpenOptions(false);
};
const createHandlerChange = (value: string) => () => {
const tenant = value;
dispatch({ type: "SET_TENANT_ID", payload: tenant });

View file

@ -9,6 +9,7 @@ import TextField from "../../../Main/TextField/TextField";
import { Timezone } from "../../../../types";
import "./style.scss";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import useBoolean from "../../../../hooks/useBoolean";
interface TimezonesProps {
timezoneState: string
@ -19,10 +20,15 @@ const Timezones: FC<TimezonesProps> = ({ timezoneState, onChange }) => {
const { isMobile } = useDeviceDetect();
const timezones = getTimezoneList();
const [openList, setOpenList] = useState(false);
const [search, setSearch] = useState("");
const targetRef = useRef<HTMLDivElement>(null);
const {
value: openList,
toggle: toggleOpenList,
setFalse: handleCloseList,
} = useBoolean(false);
const searchTimezones = useMemo(() => {
if (!search) return timezones;
try {
@ -44,14 +50,6 @@ const Timezones: FC<TimezonesProps> = ({ timezoneState, onChange }) => {
utc: getUTCByTimezone(timezoneState)
}), [timezoneState]);
const toggleOpenList = () => {
setOpenList(prev => !prev);
};
const handleCloseList = () => {
setOpenList(false);
};
const handleChangeSearch = (val: string) => {
setSearch(val);
};

View file

@ -1,4 +1,4 @@
import React, { FC, useRef, useState } from "preact/compat";
import React, { FC, useRef } from "preact/compat";
import AxesLimitsConfigurator from "./AxesLimitsConfigurator/AxesLimitsConfigurator";
import { AxisRange, YaxisState } from "../../../state/graph/reducer";
import { SettingsIcon } from "../../Main/Icons";
@ -6,6 +6,7 @@ import Button from "../../Main/Button/Button";
import Popper from "../../Main/Popper/Popper";
import "./style.scss";
import Tooltip from "../../Main/Tooltip/Tooltip";
import useBoolean from "../../../hooks/useBoolean";
const title = "Axes settings";
@ -17,16 +18,13 @@ interface GraphSettingsProps {
const GraphSettings: FC<GraphSettingsProps> = ({ yaxis, setYaxisLimits, toggleEnableLimits }) => {
const popperRef = useRef<HTMLDivElement>(null);
const [openPopper, setOpenPopper] = useState(false);
const buttonRef = useRef<HTMLDivElement>(null);
const toggleOpen = () => {
setOpenPopper(prev => !prev);
};
const handleClose = () => {
setOpenPopper(false);
};
const {
value: openPopper,
toggle: toggleOpen,
setFalse: handleClose,
} = useBoolean(false);
return (
<div className="vm-graph-settings">

View file

@ -4,6 +4,9 @@ import { ErrorTypes } from "../../../types";
import TextField from "../../Main/TextField/TextField";
import Autocomplete from "../../Main/Autocomplete/Autocomplete";
import "./style.scss";
import { QueryStats } from "../../../api/types";
import Tooltip from "../../Main/Tooltip/Tooltip";
import { WarningIcon } from "../../Main/Icons";
export interface QueryEditorProps {
onChange: (query: string) => void;
@ -14,6 +17,7 @@ export interface QueryEditorProps {
oneLiner?: boolean;
autocomplete: boolean;
error?: ErrorTypes | string;
stats?: QueryStats;
options: string[];
label: string;
disabled?: boolean
@ -27,6 +31,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
onArrowDown,
autocomplete,
error,
stats,
options,
label,
disabled = false
@ -34,6 +39,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
const [openAutocomplete, setOpenAutocomplete] = useState(false);
const autocompleteAnchorEl = useRef<HTMLDivElement>(null);
const showSeriesFetchedWarning = stats?.seriesFetched === "0" && !stats.resultLength;
const handleSelect = (val: string) => {
onChange(val);
@ -90,6 +96,23 @@ const QueryEditor: FC<QueryEditorProps> = ({
onOpenAutocomplete={setOpenAutocomplete}
/>
)}
{showSeriesFetchedWarning && (
<div className="vm-query-editor-warning">
<Tooltip
placement="bottom-right"
title={(
<span className="vm-query-editor-warning__tooltip">
{`No match!
This query hasn't selected any time series from the database.
Either the requested metrics are missing in the database,
or there is a typo in the series selector.`}
</span>
)}
>
<WarningIcon/>
</Tooltip>
</div>
)}
</div>;
};
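Together with the `QueryStats` addition earlier in this commit, the editor can now surface a warning whenever `stats.seriesFetched` is `"0"` and the result set is empty, i.e. when the series selector matched nothing in the TSDB.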

View file

@ -1,9 +1,27 @@
@use "src/styles/variables" as *;
.vm-query-editor {
position: relative;
&-autocomplete {
max-height: 300px;
overflow: auto;
}
&-warning {
position: absolute;
top: 50%;
right: $padding-global;
transform: translateY(-50%);
display: grid;
align-items: center;
justify-content: center;
width: 18px;
height: 18px;
color: $color-warning;
&__tooltip {
white-space: pre-line;
}
}
}

View file

@ -13,6 +13,7 @@ import { getAppModeEnable } from "../../../utils/app-mode";
import Popper from "../../Main/Popper/Popper";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import classNames from "classnames";
import useBoolean from "../../../hooks/useBoolean";
const StepConfigurator: FC = () => {
const appModeEnable = getAppModeEnable();
@ -28,20 +29,17 @@ const StepConfigurator: FC = () => {
return getStepFromDuration(end - start, isHistogram);
}, [step, isHistogram]);
const [openOptions, setOpenOptions] = useState(false);
const [customStep, setCustomStep] = useState(value || defaultStep);
const [error, setError] = useState("");
const {
value: openOptions,
toggle: toggleOpenOptions,
setFalse: handleCloseOptions,
} = useBoolean(false);
const buttonRef = useRef<HTMLDivElement>(null);
const toggleOpenOptions = () => {
setOpenOptions(prev => !prev);
};
const handleCloseOptions = () => {
setOpenOptions(false);
};
const handleApply = (value?: string) => {
const step = value || customStep || defaultStep || "1s";
const durations = step.match(/[a-zA-Z]+/g) || [];

View file

@ -34,16 +34,15 @@
&-info {
font-size: $font-size-small;
line-height: 1.6;
line-height: 1.8;
a {
margin: 0 0.2em;
margin: 0 0.4em;
}
code {
padding: 0.2em 0.4em;
margin: 0 0.2em;
font-size: 85%;
background-color: $color-hover-black;
border-radius: 6px;
}

View file

@ -8,6 +8,7 @@ import "./style.scss";
import classNames from "classnames";
import Tooltip from "../../../Main/Tooltip/Tooltip";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import useBoolean from "../../../../hooks/useBoolean";
interface AutoRefreshOption {
seconds: number
@ -38,12 +39,19 @@ export const ExecutionControls: FC = () => {
const [selectedDelay, setSelectedDelay] = useState<AutoRefreshOption>(delayOptions[0]);
const {
value: openOptions,
toggle: toggleOpenOptions,
setFalse: handleCloseOptions,
} = useBoolean(false);
const optionsButtonRef = useRef<HTMLDivElement>(null);
const handleChange = (d: AutoRefreshOption) => {
if ((autoRefresh && !d.seconds) || (!autoRefresh && d.seconds)) {
setAutoRefresh(prev => !prev);
}
setSelectedDelay(d);
setOpenOptions(false);
handleCloseOptions();
};
const handleUpdate = () => {
@ -65,17 +73,6 @@ export const ExecutionControls: FC = () => {
};
}, [selectedDelay, autoRefresh]);
const [openOptions, setOpenOptions] = useState(false);
const optionsButtonRef = useRef<HTMLDivElement>(null);
const toggleOpenOptions = () => {
setOpenOptions(prev => !prev);
};
const handleCloseOptions = () => {
setOpenOptions(false);
};
const createHandlerChange = (d: AutoRefreshOption) => () => {
handleChange(d);
};

View file

@ -9,20 +9,21 @@ import Button from "../../../Main/Button/Button";
import Popper from "../../../Main/Popper/Popper";
import Tooltip from "../../../Main/Tooltip/Tooltip";
import { DATE_TIME_FORMAT } from "../../../../constants/date";
import useResize from "../../../../hooks/useResize";
import "./style.scss";
import useClickOutside from "../../../../hooks/useClickOutside";
import classNames from "classnames";
import { useAppState } from "../../../../state/common/StateContext";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import DateTimeInput from "../../../Main/DatePicker/DateTimeInput/DateTimeInput";
import useBoolean from "../../../../hooks/useBoolean";
import useWindowSize from "../../../../hooks/useWindowSize";
export const TimeSelector: FC = () => {
const { isMobile } = useDeviceDetect();
const { isDarkTheme } = useAppState();
const wrapperRef = useRef<HTMLDivElement>(null);
const documentSize = useResize(document.body);
const displayFullDate = useMemo(() => documentSize.width > 1280, [documentSize]);
const documentSize = useWindowSize();
const displayFullDate = useMemo(() => documentSize.width > 1120, [documentSize]);
const [until, setUntil] = useState<string>();
const [from, setFrom] = useState<string>();
@ -31,6 +32,12 @@ export const TimeSelector: FC = () => {
const dispatch = useTimeDispatch();
const appModeEnable = getAppModeEnable();
const {
value: openOptions,
toggle: toggleOpenOptions,
setFalse: handleCloseOptions,
} = useBoolean(false);
const activeTimezone = useMemo(() => ({
region: timezone,
utc: getUTCByTimezone(timezone)
@ -46,7 +53,7 @@ export const TimeSelector: FC = () => {
const setDuration = ({ duration, until, id }: {duration: string, until: Date, id: string}) => {
dispatch({ type: "SET_RELATIVE_TIME", payload: { duration, until, id } });
setOpenOptions(false);
handleCloseOptions();
};
const formatRange = useMemo(() => {
@ -62,7 +69,6 @@ export const TimeSelector: FC = () => {
const fromPickerRef = useRef<HTMLDivElement>(null);
const untilPickerRef = useRef<HTMLDivElement>(null);
const [openOptions, setOpenOptions] = useState(false);
const buttonRef = useRef<HTMLDivElement>(null);
const setTimeAndClosePicker = () => {
@ -72,7 +78,7 @@ export const TimeSelector: FC = () => {
to: dayjs.tz(until).toDate()
} });
}
setOpenOptions(false);
handleCloseOptions();
};
const onSwitchToNow = () => dispatch({ type: "RUN_QUERY_TO_NOW" });
@ -80,15 +86,7 @@ export const TimeSelector: FC = () => {
const onCancelClick = () => {
setUntil(formatDateForNativeInput(dateFromSeconds(end)));
setFrom(formatDateForNativeInput(dateFromSeconds(start)));
setOpenOptions(false);
};
const toggleOpenOptions = () => {
setOpenOptions(prev => !prev);
};
const handleCloseOptions = () => {
setOpenOptions(false);
handleCloseOptions();
};
useEffect(() => {
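`useWindowSize`, which replaces `useResize(document.body)` at several call sites in this commit, is likewise absent from the diff. A minimal sketch under the assumption that it tracks the window dimensions:

```ts
// Hypothetical sketch of hooks/useWindowSize.ts; not part of this commit.
import { useEffect, useState } from "preact/compat";

const useWindowSize = () => {
  const getSize = () => ({
    width: window.innerWidth,
    height: window.innerHeight,
  });

  const [size, setSize] = useState(getSize);

  useEffect(() => {
    const handleResize = () => setSize(getSize());
    window.addEventListener("resize", handleResize);
    return () => window.removeEventListener("resize", handleResize);
  }, []);

  return size;
};

export default useWindowSize;
```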

View file

@ -11,6 +11,7 @@ import "./style.scss";
import classNames from "classnames";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import { getDurationFromMilliseconds, getSecondsFromDuration, getStepFromDuration } from "../../../utils/time";
import useBoolean from "../../../hooks/useBoolean";
interface ExploreMetricItemGraphProps {
name: string,
@ -41,7 +42,10 @@ const ExploreMetricItem: FC<ExploreMetricItemGraphProps> = ({
const [isHeatmap, setIsHeatmap] = useState(false);
const step = isHeatmap && customStep === defaultStep ? heatmapStep : customStep;
const [showAllSeries, setShowAllSeries] = useState(false);
const {
value: showAllSeries,
setTrue: handleShowAll,
} = useBoolean(false);
const query = useMemo(() => {
const params = Object.entries({ job, instance })
@ -81,10 +85,6 @@ with (q = ${queryBase}) (
timeDispatch({ type: "SET_PERIOD", payload: { from, to } });
};
const handleShowAll = () => {
setShowAllSeries(true);
};
useEffect(() => {
setIsHeatmap(isHistogram);
}, [isHistogram]);

View file

@ -2,8 +2,8 @@ import React, { FC, useEffect, useMemo, useState } from "preact/compat";
import ExploreMetricItemGraph from "../ExploreMetricGraph/ExploreMetricItemGraph";
import ExploreMetricItemHeader from "../ExploreMetricItemHeader/ExploreMetricItemHeader";
import "./style.scss";
import useResize from "../../../hooks/useResize";
import { GraphSize } from "../../../types";
import useWindowSize from "../../../hooks/useWindowSize";
interface ExploreMetricItemProps {
name: string
@ -32,7 +32,7 @@ const ExploreMetricItem: FC<ExploreMetricItemProps> = ({
const [rateEnabled, setRateEnabled] = useState(isCounter);
const windowSize = useResize(document.body);
const windowSize = useWindowSize();
const graphHeight = useMemo(size.height, [size, windowSize]);
useEffect(() => {

View file

@ -1,4 +1,4 @@
import React, { FC, useState } from "preact/compat";
import React, { FC } from "preact/compat";
import "./style.scss";
import Switch from "../../Main/Switch/Switch";
import Tooltip from "../../Main/Tooltip/Tooltip";
@ -6,6 +6,7 @@ import Button from "../../Main/Button/Button";
import { ArrowDownIcon, CloseIcon, MinusIcon, MoreIcon, PlusIcon } from "../../Main/Icons";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import Modal from "../../Main/Modal/Modal";
import useBoolean from "../../../hooks/useBoolean";
interface ExploreMetricItemControlsProps {
name: string
@ -30,7 +31,12 @@ const ExploreMetricItemHeader: FC<ExploreMetricItemControlsProps> = ({
onChangeOrder,
}) => {
const { isMobile } = useDeviceDetect();
const [openOptions, setOpenOptions] = useState(false);
const {
value: openOptions,
setTrue: handleOpenOptions,
setFalse: handleCloseOptions,
} = useBoolean(false);
const handleClickRemove = () => {
onRemoveItem(name);
@ -44,14 +50,6 @@ const ExploreMetricItemHeader: FC<ExploreMetricItemControlsProps> = ({
onChangeOrder(name, index, index - 1);
};
const handleOpenOptions = () => {
setOpenOptions(true);
};
const handleCloseOptions = () => {
setOpenOptions(false);
};
if (isMobile) {
return (
<div className="vm-explore-metrics-item-header vm-explore-metrics-item-header_mobile">

View file

@ -8,15 +8,15 @@ import "./style.scss";
import classNames from "classnames";
import { useAppState } from "../../../state/common/StateContext";
import HeaderNav from "./HeaderNav/HeaderNav";
import useResize from "../../../hooks/useResize";
import SidebarHeader from "./SidebarNav/SidebarHeader";
import HeaderControls from "./HeaderControls/HeaderControls";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import useWindowSize from "../../../hooks/useWindowSize";
const Header: FC = () => {
const { isMobile } = useDeviceDetect();
const windowSize = useResize(document.body);
const windowSize = useWindowSize();
const displaySidebar = useMemo(() => window.innerWidth < 1000, [windowSize]);
const { isDarkTheme } = useAppState();

View file

@ -1,4 +1,4 @@
import React, { FC, useMemo, useState } from "preact/compat";
import React, { FC, useMemo } from "preact/compat";
import { RouterOptions, routerOptions, RouterOptionsHeader } from "../../../../router";
import TenantsConfiguration from "../../../Configurators/GlobalSettings/TenantsConfiguration/TenantsConfiguration";
import StepConfigurator from "../../../Configurators/StepConfigurator/StepConfigurator";
@ -15,6 +15,7 @@ import "./style.scss";
import classNames from "classnames";
import { getAppModeEnable } from "../../../../utils/app-mode";
import Modal from "../../../Main/Modal/Modal";
import useBoolean from "../../../../hooks/useBoolean";
interface HeaderControlsProp {
displaySidebar: boolean
@ -50,22 +51,19 @@ const Controls: FC<HeaderControlsProp> = ({
const HeaderControls: FC<HeaderControlsProp> = (props) => {
const appModeEnable = getAppModeEnable();
const [openList, setOpenList] = useState(false);
const { pathname } = useLocation();
const { accountIds } = useFetchAccountIds();
const {
value: openList,
toggle: handleToggleList,
setFalse: handleCloseList,
} = useBoolean(false);
const headerSetup = useMemo(() => {
return ((routerOptions[pathname] || {}) as RouterOptions).header || {};
}, [pathname]);
const handleToggleList = () => {
setOpenList(prev => !prev);
};
const handleCloseList = () => {
setOpenList(false);
};
if (props.isMobile) {
return (
<>

View file

@ -5,6 +5,7 @@ import { ArrowDropDownIcon } from "../../../Main/Icons";
import Popper from "../../../Main/Popper/Popper";
import NavItem from "./NavItem";
import { useEffect } from "react";
import useBoolean from "../../../../hooks/useBoolean";
interface NavItemProps {
activeMenu: string,
@ -25,17 +26,18 @@ const NavSubItem: FC<NavItemProps> = ({
}) => {
const { pathname } = useLocation();
const [openSubmenu, setOpenSubmenu] = useState(false);
const [menuTimeout, setMenuTimeout] = useState<NodeJS.Timeout | null>(null);
const buttonRef = useRef<HTMLDivElement>(null);
const handleOpenSubmenu = () => {
setOpenSubmenu(true);
if (menuTimeout) clearTimeout(menuTimeout);
};
const {
value: openSubmenu,
setFalse: handleCloseSubmenu,
setTrue: setOpenSubmenu,
} = useBoolean(false);
const handleCloseSubmenu = () => {
setOpenSubmenu(false);
const handleOpenSubmenu = () => {
setOpenSubmenu();
if (menuTimeout) clearTimeout(menuTimeout);
};
const handleMouseLeave = () => {

View file

@ -5,8 +5,6 @@
align-items: center;
justify-content: flex-start;
gap: $padding-global;
font-size: $font-size-small;
font-weight: bold;
&_column {
flex-direction: column;
@ -25,10 +23,12 @@
&-item {
position: relative;
padding: $padding-global $padding-small;
opacity: 0.5;
opacity: 0.7;
cursor: pointer;
transition: opacity 200ms ease-in;
text-transform: uppercase;
text-transform: capitalize;
font-size: $font-size;
font-weight: normal;
&_sub {
display: grid;
@ -63,11 +63,9 @@
white-space: nowrap;
padding: $padding-small;
color: $color-white;
border-radius: 2px;
border-radius: $border-radius-small;
opacity: 1;
transform-origin: top center;
font-size: $font-size-small;
font-weight: bold;
&-item {
cursor: pointer;

View file

@ -1,4 +1,4 @@
import React, { FC, useEffect, useRef, useState } from "preact/compat";
import React, { FC, useEffect, useRef } from "preact/compat";
import { useLocation } from "react-router-dom";
import ShortcutKeys from "../../../Main/ShortcutKeys/ShortcutKeys";
import classNames from "classnames";
@ -7,6 +7,7 @@ import useClickOutside from "../../../../hooks/useClickOutside";
import MenuBurger from "../../../Main/MenuBurger/MenuBurger";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import "./style.scss";
import useBoolean from "../../../../hooks/useBoolean";
interface SidebarHeaderProps {
background: string
@ -21,15 +22,12 @@ const SidebarHeader: FC<SidebarHeaderProps> = ({
const { isMobile } = useDeviceDetect();
const sidebarRef = useRef<HTMLDivElement>(null);
const [openMenu, setOpenMenu] = useState(false);
const handleToggleMenu = () => {
setOpenMenu(prev => !prev);
};
const handleCloseMenu = () => {
setOpenMenu(false);
};
const {
value: openMenu,
toggle: handleToggleMenu,
setFalse: handleCloseMenu,
} = useBoolean(false);
useEffect(handleCloseMenu, [pathname]);

View file

@ -63,9 +63,4 @@
margin: 0 auto;
}
}
&-nav {
font-size: $font-size-small;
font-weight: bold;
}
}

View file

@ -26,16 +26,15 @@ const Layout: FC = () => {
// for support old links with search params
const redirectSearchToHashParams = () => {
const { search } = window.location;
const { search, href } = window.location;
if (search) {
const query = qs.parse(search, { ignoreQueryPrefix: true });
Object.entries(query).forEach(([key, value]) => {
searchParams.set(key, value as string);
setSearchParams(searchParams);
});
Object.entries(query).forEach(([key, value]) => searchParams.set(key, value as string));
setSearchParams(searchParams);
window.location.search = "";
}
window.location.replace(window.location.href.replace(/\/\?#\//, "/#/"));
const newHref = href.replace(/\/\?#\//, "/#/");
if (newHref !== href) window.location.replace(newHref);
};
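For illustration (these URLs are made up): a legacy link such as `https://vmui.example.com/?#/graph` is now rewritten to `https://vmui.example.com/#/graph`, and `window.location.replace` is called only when the rewrite actually changes the href, avoiding a redundant reload.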
useEffect(setDocumentTitle, [pathname]);

View file

@ -10,10 +10,10 @@
background-color: $color-background-block;
border-radius: $border-radius-medium;
box-shadow: $box-shadow;
font-size: $font-size-medium;
font-size: $font-size;
font-weight: normal;
color: $color-text;
line-height: 20px;
line-height: 1.5;
&_mobile {
align-items: flex-start;

View file

@ -1,9 +1,11 @@
import React, { FC, Ref, useEffect, useMemo, useRef, useState } from "preact/compat";
import React, { FC, Ref, useCallback, useEffect, useMemo, useRef, useState } from "preact/compat";
import classNames from "classnames";
import Popper from "../Popper/Popper";
import "./style.scss";
import { DoneIcon } from "../Icons";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import useBoolean from "../../../hooks/useBoolean";
import useEventListener from "../../../hooks/useEventListener";
interface AutocompleteProps {
value: string
@ -39,9 +41,14 @@ const Autocomplete: FC<AutocompleteProps> = ({
const { isMobile } = useDeviceDetect();
const wrapperEl = useRef<HTMLDivElement>(null);
const [openAutocomplete, setOpenAutocomplete] = useState(false);
const [focusOption, setFocusOption] = useState(-1);
const {
value: openAutocomplete,
setValue: setOpenAutocomplete,
setFalse: handleCloseAutocomplete,
} = useBoolean(false);
const foundOptions = useMemo(() => {
if (!openAutocomplete) return [];
try {
@ -57,10 +64,6 @@ const Autocomplete: FC<AutocompleteProps> = ({
return noOptionsText && !foundOptions.length;
}, [noOptionsText,foundOptions]);
const handleCloseAutocomplete = () => {
setOpenAutocomplete(false);
};
const createHandlerSelect = (item: string) => () => {
if (disabled) return;
onSelect(item);
@ -73,7 +76,7 @@ const Autocomplete: FC<AutocompleteProps> = ({
if (target?.scrollIntoView) target.scrollIntoView({ block: "center" });
};
const handleKeyDown = (e: KeyboardEvent) => {
const handleKeyDown = useCallback((e: KeyboardEvent) => {
const { key, ctrlKey, metaKey, shiftKey } = e;
const modifiers = ctrlKey || metaKey || shiftKey;
const hasOptions = foundOptions.length;
@ -98,22 +101,16 @@ const Autocomplete: FC<AutocompleteProps> = ({
if (key === "Escape") {
handleCloseAutocomplete();
}
};
}, [focusOption, foundOptions, handleCloseAutocomplete, onSelect, selected]);
useEffect(() => {
const words = (value.match(/[a-zA-Z_:.][a-zA-Z0-9_:.]*/gm) || []).length;
setOpenAutocomplete(value.length > minLength && words <= maxWords);
}, [value]);
useEffect(() => {
scrollToValue();
useEventListener("keydown", handleKeyDown);
window.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
};
}, [focusOption, foundOptions]);
useEffect(scrollToValue, [focusOption, foundOptions]);
useEffect(() => {
setFocusOption(-1);

View file

@ -9,7 +9,7 @@ $button-radius: 6px;
justify-content: center;
padding: 6px 14px;
font-size: $font-size-small;
line-height: 15px;
line-height: 1.3;
font-weight: normal;
min-height: 31px;
border-radius: $button-radius;
@ -49,6 +49,7 @@ $button-radius: 6px;
display: grid;
align-items: center;
justify-content: center;
transform: translateZ(1px);
svg {
width: 15px;
@ -79,7 +80,7 @@ $button-radius: 6px;
/* size SMALL */
&_small {
padding: 4px 6px;
padding: 4px 8px;
min-height: 25px;
span {

View file

@ -1,9 +1,11 @@
import React, { Ref, useEffect, useMemo, useState, forwardRef } from "preact/compat";
import React, { Ref, useMemo, forwardRef } from "preact/compat";
import Calendar from "../../Main/DatePicker/Calendar/Calendar";
import dayjs, { Dayjs } from "dayjs";
import Popper from "../../Main/Popper/Popper";
import { DATE_TIME_FORMAT } from "../../../constants/date";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import useBoolean from "../../../hooks/useBoolean";
import useEventListener from "../../../hooks/useEventListener";
interface DatePickerProps {
date: string | Date | Dayjs,
@ -20,17 +22,14 @@ const DatePicker = forwardRef<HTMLDivElement, DatePickerProps>(({
onChange,
label
}, ref) => {
const [openCalendar, setOpenCalendar] = useState(false);
const dateDayjs = useMemo(() => dayjs(date).isValid() ? dayjs.tz(date) : dayjs().tz(), [date]);
const { isMobile } = useDeviceDetect();
const toggleOpenCalendar = () => {
setOpenCalendar(prev => !prev);
};
const handleCloseCalendar = () => {
setOpenCalendar(false);
};
const {
value: openCalendar,
toggle: toggleOpenCalendar,
setFalse: handleCloseCalendar,
} = useBoolean(false);
const handleChangeDate = (val: string) => {
onChange(val);
@ -41,21 +40,8 @@ const DatePicker = forwardRef<HTMLDivElement, DatePickerProps>(({
if (e.key === "Escape" || e.key === "Enter") handleCloseCalendar();
};
useEffect(() => {
targetRef.current?.addEventListener("click", toggleOpenCalendar);
return () => {
targetRef.current?.removeEventListener("click", toggleOpenCalendar);
};
}, [targetRef]);
useEffect(() => {
window.addEventListener("keyup", handleKeyUp);
return () => {
window.removeEventListener("keyup", handleKeyUp);
};
}, []);
useEventListener("click", toggleOpenCalendar, targetRef);
useEventListener("keyup", handleKeyUp);
return (<>
<Popper

View file

@ -1,18 +1,13 @@
import React, { FC } from "preact/compat";
import * as icons from "./index";
import { useSnack } from "../../../contexts/Snackbar";
import useCopyToClipboard from "../../../hooks/useCopyToClipboard";
import "./style.scss";
const PreviewIcons: FC = () => {
const { showInfoMessage } = useSnack();
const copyToClipboard = useCopyToClipboard();
const handleClickIcon = (copyValue: string) => {
navigator.clipboard.writeText(`<${copyValue}/>`);
showInfoMessage({ text: `<${copyValue}/> has been copied`, type: "success" });
};
const createHandlerClickIcon = (key: string) => () => {
handleClickIcon(key);
const createHandlerClickIcon = (key: string) => async () => {
await copyToClipboard(`<${key}/>`, `<${key}/> has been copied`);
};
return (

View file

@ -1,4 +1,4 @@
import React, { FC, useEffect } from "preact/compat";
import React, { FC, useCallback, useEffect } from "preact/compat";
import ReactDOM from "react-dom";
import { CloseIcon } from "../Icons";
import Button from "../Button/Button";
@ -7,6 +7,7 @@ import "./style.scss";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import classNames from "classnames";
import { useLocation, useNavigate } from "react-router-dom";
import useEventListener from "../../../hooks/useEventListener";
interface ModalProps {
title?: string
@ -21,48 +22,42 @@ const Modal: FC<ModalProps> = ({
children,
onClose,
className,
isOpen= true
isOpen = true
}) => {
const { isMobile } = useDeviceDetect();
const navigate = useNavigate();
const location = useLocation();
const handleKeyUp = (e: KeyboardEvent) => {
const handleKeyUp = useCallback((e: KeyboardEvent) => {
if (!isOpen) return;
if (e.key === "Escape") onClose();
};
}, [isOpen]);
const handleMouseDown = (e: MouseEvent<HTMLDivElement>) => {
e.stopPropagation();
};
const handlePopstate = () => {
const handlePopstate = useCallback(() => {
if (isOpen) {
navigate(location, { replace: true });
onClose();
}
};
useEffect(() => {
window.addEventListener("popstate", handlePopstate);
return () => {
window.removeEventListener("popstate", handlePopstate);
};
}, [isOpen, location]);
}, [isOpen, location, onClose]);
const handleDisplayModal = () => {
if (!isOpen) return;
document.body.style.overflow = "hidden";
window.addEventListener("keyup", handleKeyUp);
return () => {
document.body.style.overflow = "auto";
window.removeEventListener("keyup", handleKeyUp);
};
};
useEffect(handleDisplayModal, [isOpen]);
useEventListener("popstate", handlePopstate);
useEventListener("keyup", handleKeyUp);
return ReactDOM.createPortal((
<div
className={classNames({

View file

@ -7,6 +7,9 @@ import useDeviceDetect from "../../../hooks/useDeviceDetect";
import Button from "../Button/Button";
import { CloseIcon } from "../Icons";
import { useLocation, useNavigate } from "react-router-dom";
import useBoolean from "../../../hooks/useBoolean";
import useEventListener from "../../../hooks/useEventListener";
import { useCallback } from "preact/compat";
interface PopperProps {
children: ReactNode
@ -37,23 +40,16 @@ const Popper: FC<PopperProps> = ({
const { isMobile } = useDeviceDetect();
const navigate = useNavigate();
const location = useLocation();
const [isOpen, setIsOpen] = useState(false);
const [popperSize, setPopperSize] = useState({ width: 0, height: 0 });
const {
value: isOpen,
setValue: setIsOpen,
setFalse: handleClose,
} = useBoolean(false);
const popperRef = useRef<HTMLDivElement>(null);
const onScrollWindow = () => {
setIsOpen(false);
};
useEffect(() => {
window.addEventListener("scroll", onScrollWindow);
return () => {
window.removeEventListener("scroll", onScrollWindow);
};
}, []);
useEffect(() => {
setIsOpen(open);
}, [open]);
@ -137,32 +133,25 @@ const Popper: FC<PopperProps> = ({
}
}, [isOpen, popperRef]);
const handlePopstate = () => {
const handlePopstate = useCallback(() => {
if (isOpen && isMobile && !disabledFullScreen) {
navigate(location, { replace: true });
onClose();
}
};
}, [isOpen, isMobile, disabledFullScreen, location, onClose]);
useEffect(() => {
window.addEventListener("popstate", handlePopstate);
return () => {
window.removeEventListener("popstate", handlePopstate);
};
}, [isOpen, isMobile, disabledFullScreen, location]);
const popperClasses = classNames({
"vm-popper": true,
"vm-popper_mobile": isMobile && !disabledFullScreen,
"vm-popper_open": (isMobile || Object.keys(popperStyle).length) && isOpen,
});
useEventListener("scroll", handleClose);
useEventListener("popstate", handlePopstate);
return (
<>
{(isOpen || !popperSize.width) && ReactDOM.createPortal((
<div
className={popperClasses}
className={classNames({
"vm-popper": true,
"vm-popper_mobile": isMobile && !disabledFullScreen,
"vm-popper_open": (isMobile || Object.keys(popperStyle).length) && isOpen,
})}
ref={popperRef}
style={(isMobile && !disabledFullScreen) ? {} : popperStyle}
>

View file

@ -7,6 +7,7 @@ import { useAppState } from "../../../state/common/StateContext";
import "./style.scss";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import MultipleSelectedValue from "./MultipleSelectedValue/MultipleSelectedValue";
import useEventListener from "../../../hooks/useEventListener";
interface SelectProps {
value: string | string[]
@ -105,13 +106,7 @@ const Select: FC<SelectProps> = ({
inputRef.current.focus();
}, [autofocus, inputRef]);
useEffect(() => {
window.addEventListener("keyup", handleKeyUp);
return () => {
window.removeEventListener("keyup", handleKeyUp);
};
}, []);
useEventListener("keyup", handleKeyUp);
return (
<div

View file

@ -5,12 +5,11 @@
position: relative;
display: flex;
align-items: center;
justify-content: space-between;
padding: 5px 0 5px $padding-global;
padding: 8px 0 8px $padding-global;
cursor: pointer;
border: $border-divider;
border-radius: $border-radius-small;
min-height: 36px;
min-height: 40px;
&-content {
display: flex;
@ -18,8 +17,7 @@
justify-content: flex-start;
flex-wrap: wrap;
gap: $padding-small;
width: 100%;
max-width: calc(100% - ($padding-global + 61px));
flex-grow: 1;
&_mobile {
flex-wrap: nowrap;

View file

@ -1,4 +1,4 @@
import React, { FC, useEffect, useState } from "preact/compat";
import React, { FC, useCallback } from "preact/compat";
import { getAppModeEnable } from "../../../utils/app-mode";
import Button from "../Button/Button";
import { KeyboardIcon } from "../Icons";
@ -7,38 +7,31 @@ import "./style.scss";
import Tooltip from "../Tooltip/Tooltip";
import keyList from "./constants/keyList";
import { isMacOs } from "../../../utils/detect-device";
import useBoolean from "../../../hooks/useBoolean";
import useEventListener from "../../../hooks/useEventListener";
const title = "Shortcut keys";
const isMac = isMacOs();
const keyOpenHelp = isMac ? "Cmd + /" : "F1";
const ShortcutKeys: FC<{ showTitle?: boolean }> = ({ showTitle }) => {
const [openList, setOpenList] = useState(false);
const appModeEnable = getAppModeEnable();
const handleOpen = () => {
setOpenList(true);
};
const {
value: openList,
setTrue: handleOpen,
setFalse: handleClose,
} = useBoolean(false);
const handleClose = () => {
setOpenList(false);
};
const handleKeyDown = (e: KeyboardEvent) => {
const handleKeyDown = useCallback((e: KeyboardEvent) => {
const openOnMac = isMac && e.key === "/" && e.metaKey;
const openOnOther = !isMac && e.key === "F1" && !e.metaKey;
if (openOnMac || openOnOther) {
handleOpen();
}
};
}, [handleOpen]);
useEffect(() => {
window.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
};
}, []);
useEventListener("keydown", handleKeyDown);
return <>
<Tooltip

View file

@ -64,6 +64,7 @@
svg {
width: 24px;
padding: 4px;
color: $color-primary;
}
}

View file

@ -73,8 +73,12 @@ $switch-border-radius: $switch-handle-size + ($switch-padding * 2);
&__label {
white-space: nowrap;
font-size: inherit;
color: inherit;
margin-left: $padding-small;
transition: color 200ms ease;
color: $color-text-secondary;
}
&_active &__label {
color: $color-text;
}
}

View file

@ -1,9 +1,9 @@
import React, { Component, FC, useRef, useState } from "preact/compat";
import { ReactNode, useEffect } from "react";
import { getCssVariable } from "../../../utils/theme";
import useResize from "../../../hooks/useResize";
import TabItem from "./TabItem";
import "./style.scss";
import useWindowSize from "../../../hooks/useWindowSize";
export interface TabItemType {
value: string
@ -29,7 +29,7 @@ const Tabs: FC<TabsProps> = ({
indicatorPlacement = "bottom",
isNavLink,
}) => {
const windowSize = useResize(document.body);
const windowSize = useWindowSize();
const activeNavRef = useRef<Component>(null);
const [indicatorPosition, setIndicatorPosition] = useState({ left: 0, width: 0, bottom: 0 });

View file

@ -16,7 +16,7 @@
padding: $padding-global $padding-small;
color: inherit;
text-decoration: none;
text-transform: uppercase;
text-transform: capitalize;
font-size: inherit;
font-weight: inherit;
opacity: 0.6;
@ -33,7 +33,7 @@
&__icon {
display: grid;
width: 15px;
width: 16px;
margin-right: $padding-small;
&_single {

View file

@ -18,7 +18,7 @@
padding: $padding-small $padding-global;
border: $border-divider;
background-color: transparent;
font-size: 12px;
font-size: $font-size;
line-height: 18px;
grid-area: 1 / 1 / 2 / 2;
overflow: hidden;
@ -71,7 +71,7 @@
display: block;
border-radius: $border-radius-small;
transition: border 200ms ease;
min-height: 34px;
min-height: 40px;
resize: none;
overflow: hidden;
background-color: transparent;

View file

@ -7,7 +7,7 @@ import { darkPalette, lightPalette } from "../../../constants/palette";
import { Theme } from "../../../types";
import { useAppDispatch, useAppState } from "../../../state/common/StateContext";
import useSystemTheme from "../../../hooks/useSystemTheme";
import useResize from "../../../hooks/useResize";
import useWindowSize from "../../../hooks/useWindowSize";
interface ThemeProviderProps {
onLoaded: (val: boolean) => void
@ -29,7 +29,7 @@ export const ThemeProvider: FC<ThemeProviderProps> = ({ onLoaded }) => {
const { theme } = useAppState();
const isDarkTheme = useSystemTheme();
const dispatch = useAppDispatch();
const windowSize = useResize(document.body);
const windowSize = useWindowSize();
const [palette, setPalette] = useState({
[Theme.dark]: darkPalette,

View file

@ -4,6 +4,7 @@ import "./style.scss";
import { ReactNode } from "react";
import { ExoticComponent } from "react";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import useEventListener from "../../../hooks/useEventListener";
interface TooltipProps {
children: ReactNode
@ -29,14 +30,7 @@ const Tooltip: FC<TooltipProps> = ({
const popperRef = useRef<HTMLDivElement>(null);
const onScrollWindow = () => setIsOpen(false);
useEffect(() => {
window.addEventListener("scroll", onScrollWindow);
return () => {
window.removeEventListener("scroll", onScrollWindow);
};
}, []);
useEventListener("scroll", onScrollWindow);
useEffect(() => {
if (!popperRef.current || !isOpen) return;

View file

@ -88,11 +88,12 @@ const NestedNav: FC<RecursiveProps> = ({ trace, totalMsec }) => {
</div>
{(isExpanded || showFullMessage) && (
<Button
variant="text"
variant="outlined"
size="small"
color="secondary"
onClick={handleClickShowMore}
>
{showFullMessage ? "Hide" : "Show more"}
{showFullMessage ? "Hide" : "Show full query"}
</Button>
)}
</div>

View file

@ -1,4 +1,4 @@
import React, { FC, useCallback, useEffect, useMemo, useRef, useState } from "preact/compat";
import React, { FC, useCallback, useEffect, useMemo, useState } from "preact/compat";
import { MetricResult } from "../../../api/types";
import LineChart from "../../Chart/Line/LineChart/LineChart";
import { AlignedData as uPlotData, Series as uPlotSeries } from "uplot";
@ -18,6 +18,7 @@ import { promValueToNumber } from "../../../utils/metric";
import { normalizeData } from "../../../utils/uplot/heatmap";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import { TooltipHeatmapProps } from "../../Chart/Heatmap/ChartTooltipHeatmap/ChartTooltipHeatmap";
import useElementSize from "../../../hooks/useElementSize";
export interface GraphViewProps {
data?: MetricResult[];
@@ -164,7 +165,7 @@ const GraphView: FC<GraphViewProps> = ({
     setLegend(tempLegend);
   }, [hideSeries]);

-  const containerRef = useRef<HTMLDivElement>(null);
+  const [containerRef, containerSize] = useElementSize();

   return (
     <div
@@ -175,7 +176,7 @@
       })}
       ref={containerRef}
     >
-      {containerRef?.current && !isHistogram && (
+      {!isHistogram && (
         <LineChart
           data={dataChart}
           series={series}
@@ -184,11 +185,11 @@
           yaxis={yaxis}
           unit={unit}
           setPeriod={setPeriod}
-          container={containerRef?.current}
+          layoutSize={containerSize}
           height={height}
         />
       )}
-      {containerRef?.current && isHistogram && (
+      {isHistogram && (
         <HeatmapChart
           data={dataChart}
           metrics={data}
@@ -196,7 +197,7 @@
           yaxis={yaxis}
           unit={unit}
           setPeriod={setPeriod}
-          container={containerRef?.current}
+          layoutSize={containerSize}
           height={height}
           onChangeLegend={handleChangeLegend}
         />

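The `useElementSize` hook replaces the manual `useRef` plus `containerRef?.current` guards: both charts now render unconditionally and receive a measured `layoutSize` instead of a raw DOM node. The hook itself is outside this excerpt; a rough sketch of the assumed return shape (a ref plus the observed size):

```ts
// Hypothetical sketch of hooks/useElementSize; the actual hook added by this
// commit may be implemented differently.
import { useEffect, useRef, useState } from "preact/compat";
import { RefObject } from "react";

interface ElementSize {
  width: number;
  height: number;
}

const useElementSize = <T extends HTMLElement = HTMLDivElement>(): [RefObject<T>, ElementSize] => {
  const ref = useRef<T>(null);
  const [size, setSize] = useState<ElementSize>({ width: 0, height: 0 });

  useEffect(() => {
    if (!ref.current) return;
    // ResizeObserver reports the element's content box on every layout change,
    // so consumers re-render with fresh dimensions instead of reading the DOM.
    const observer = new ResizeObserver(([entry]) => {
      const { width, height } = entry.contentRect;
      setSize({ width, height });
    });
    observer.observe(ref.current);
    return () => observer.disconnect();
  }, []);

  return [ref, size];
};

export default useElementSize;
```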
View file

@@ -1,6 +1,6 @@
 import React, { FC, useMemo } from "preact/compat";
 import { InstantMetricResult } from "../../../api/types";
-import { useSnack } from "../../../contexts/Snackbar";
+import useCopyToClipboard from "../../../hooks/useCopyToClipboard";
 import { TopQuery } from "../../../types";
 import Button from "../../Main/Button/Button";
 import "./style.scss";
@@ -10,13 +10,12 @@ export interface JsonViewProps {
 }

 const JsonView: FC<JsonViewProps> = ({ data }) => {
-  const { showInfoMessage } = useSnack();
+  const copyToClipboard = useCopyToClipboard();

   const formattedJson = useMemo(() => JSON.stringify(data, null, 2), [data]);

-  const handlerCopy = () => {
-    navigator.clipboard.writeText(formattedJson);
-    showInfoMessage({ text: "Formatted JSON has been copied", type: "success" });
+  const handlerCopy = async () => {
+    await copyToClipboard(formattedJson, "Formatted JSON has been copied");
   };

   return (

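`useCopyToClipboard` bundles the `navigator.clipboard` call and the snackbar notification that `JsonView` previously wired up by hand. Its source is not in this excerpt; here is a minimal sketch consistent with the visible call sites (`copyToClipboard(text, message)`), with the import path and error handling assumed:

```ts
// Hypothetical sketch of hooks/useCopyToClipboard; relative paths and the
// error branch are assumptions, not taken from the diff.
import { useCallback } from "preact/compat";
import { useSnack } from "../contexts/Snackbar";

const useCopyToClipboard = () => {
  const { showInfoMessage } = useSnack();

  return useCallback(async (text: string, successMessage: string) => {
    try {
      await navigator.clipboard.writeText(text);
      showInfoMessage({ text: successMessage, type: "success" });
    } catch (e) {
      // Clipboard access can fail (insecure context, denied permission).
      if (e instanceof Error) {
        showInfoMessage({ text: `${e.name}: ${e.message}`, type: "error" });
      }
    }
  }, [showInfoMessage]);
};

export default useCopyToClipboard;
```

A side benefit visible in the diff: `handlerCopy` is now async and awaits the copy, so the success message can appear only after the clipboard write has actually completed.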
View file

@@ -1,4 +1,4 @@
-import React, { FC, useEffect, useMemo, useRef, useState } from "preact/compat";
+import React, { FC, useCallback, useEffect, useMemo, useRef, useState } from "preact/compat";
 import { InstantMetricResult } from "../../../api/types";
 import { InstantDataSeries } from "../../../types";
 import { useSortedCategories } from "../../../hooks/useSortedCategories";
@@ -7,12 +7,13 @@ import classNames from "classnames";
 import { ArrowDropDownIcon, CopyIcon } from "../../Main/Icons";
 import Tooltip from "../../Main/Tooltip/Tooltip";
 import Button from "../../Main/Button/Button";
-import { useSnack } from "../../../contexts/Snackbar";
+import useCopyToClipboard from "../../../hooks/useCopyToClipboard";
 import { getNameForMetric } from "../../../utils/metric";
 import { useCustomPanelState } from "../../../state/customPanel/CustomPanelStateContext";
 import "./style.scss";
-import useResize from "../../../hooks/useResize";
 import useDeviceDetect from "../../../hooks/useDeviceDetect";
+import useWindowSize from "../../../hooks/useWindowSize";
+import useEventListener from "../../../hooks/useEventListener";

 export interface GraphViewProps {
   data: InstantMetricResult[];
@@ -20,11 +21,11 @@
 }

 const TableView: FC<GraphViewProps> = ({ data, displayColumns }) => {
-  const { showInfoMessage } = useSnack();
+  const copyToClipboard = useCopyToClipboard();
   const { isMobile } = useDeviceDetect();

   const { tableCompact } = useCustomPanelState();
-  const windowSize = useResize(document.body);
+  const windowSize = useWindowSize();
   const tableRef = useRef<HTMLTableElement>(null);

   const [tableTop, setTableTop] = useState(0);
   const [headTop, setHeadTop] = useState(0);
@@ -74,33 +75,22 @@ const TableView: FC<GraphViewProps> = ({ data, displayColumns }) => {
     setOrderBy(key);
   };

-  const copyHandler = async (copyValue: string) => {
-    await navigator.clipboard.writeText(copyValue);
-    showInfoMessage({ text: "Row has been copied", type: "success" });
-  };
-
   const createSortHandler = (key: string) => () => {
     sortHandler(key);
   };

-  const createCopyHandler = (copyValue: string) => () => {
-    copyHandler(copyValue);
+  const createCopyHandler = (copyValue: string) => async () => {
+    await copyToClipboard(copyValue, "Row has been copied");
   };

-  const handleScroll = () => {
+  const handleScroll = useCallback(() => {
     if (!tableRef.current) return;
     const { top } = tableRef.current.getBoundingClientRect();
     setHeadTop(top < 0 ? window.scrollY - tableTop : 0);
-  };
-
-  useEffect(() => {
-    window.addEventListener("scroll", handleScroll);
-    return () => {
-      window.removeEventListener("scroll", handleScroll);
-    };
   }, [tableRef, tableTop, windowSize]);

+  useEventListener("scroll", handleScroll);

   useEffect(() => {
     if (!tableRef.current) return;
     const { top } = tableRef.current.getBoundingClientRect();

View file

@@ -1,4 +1,6 @@
-import { useEffect, RefObject } from "react";
+import { RefObject } from "react";
+import useEventListener from "./useEventListener";
+import { useCallback } from "preact/compat";

 type Event = MouseEvent | TouchEvent;
@@ -7,26 +9,19 @@ const useClickOutside = <T extends HTMLElement = HTMLElement>(
   handler: (event: Event) => void,
   preventRef?: RefObject<T>
 ) => {
-  useEffect(() => {
-    const listener = (event: Event) => {
-      const el = ref?.current;
-      const target = event.target as HTMLElement;
-      const isPreventRef = preventRef?.current && preventRef.current.contains(target);
-      if (!el || el.contains((event?.target as Node) || null) || isPreventRef) {
-        return;
-      }
+  const listener = useCallback((event: Event) => {
+    const el = ref?.current;
+    const target = event.target as HTMLElement;
+    const isPreventRef = preventRef?.current && preventRef.current.contains(target);
+    if (!el || el.contains((event?.target as Node) || null) || isPreventRef) {
+      return;
+    }
-      handler(event); // Call the handler only if the click is outside of the element passed.
-    };
+    handler(event); // Call the handler only if the click is outside of the element passed.
+  }, [ref, handler]);

-    document.addEventListener("mousedown", listener);
-    document.addEventListener("touchstart", listener);
-    return () => {
-      document.removeEventListener("mousedown", listener);
-      document.removeEventListener("touchstart", listener);
-    };
-  }, [ref, handler]); // Reload only if ref or handler changes
+  useEventListener("mousedown", listener);
+  useEventListener("touchstart", listener);
 };

 export default useClickOutside;

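For reference, a usage sketch of the refactored `useClickOutside` (an illustrative component, not part of this commit):

```tsx
// Hypothetical consumer: close a menu when the user clicks or taps outside it.
import React, { FC, useRef, useState } from "preact/compat";
import useClickOutside from "../hooks/useClickOutside";

const Menu: FC = () => {
  const [isOpen, setIsOpen] = useState(false);
  const menuRef = useRef<HTMLDivElement>(null);

  // The hook memoizes its listener via useCallback with [ref, handler] deps,
  // so useEventListener re-subscribes only when the ref or handler changes.
  useClickOutside(menuRef, () => setIsOpen(false));

  return (
    <div ref={menuRef}>
      <button onClick={() => setIsOpen(prev => !prev)}>menu</button>
      {isOpen && <div>menu items</div>}
    </div>
  );
};

export default Menu;
```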
Some files were not shown because too many files have changed in this diff.