mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2025-01-20 15:16:42 +00:00
Merge tag 'v1.91.2' into pmm-6401-read-prometheus-data-files
This commit is contained in:
commit
a2d68d249b
66 changed files with 612 additions and 258 deletions
40
Makefile
40
Makefile
|
@ -418,27 +418,37 @@ check-licenses: install-wwhrd
|
||||||
wwhrd check -f .wwhrd.yml
|
wwhrd check -f .wwhrd.yml
|
||||||
|
|
||||||
copy-docs:
|
copy-docs:
|
||||||
echo '' > ${DST}
|
echo "---" > ${DST}
|
||||||
@if [ ${ORDER} -ne 0 ]; then \
|
@if [ ${ORDER} -ne 0 ]; then \
|
||||||
echo "---\nsort: ${ORDER}\n---\n" > ${DST}; \
|
echo "sort: ${ORDER}" >> ${DST}; \
|
||||||
|
echo "weight: ${ORDER}" >> ${DST}; \
|
||||||
|
echo "menu:\n docs:\n parent: 'victoriametrics'\n weight: ${ORDER}" >> ${DST}; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
echo "title: ${TITLE}" >> ${DST}
|
||||||
|
@if [ ${OLD_URL} ]; then \
|
||||||
|
echo "aliases:\n - ${OLD_URL}" >> ${DST}; \
|
||||||
|
fi
|
||||||
|
echo "---" >> ${DST}
|
||||||
cat ${SRC} >> ${DST}
|
cat ${SRC} >> ${DST}
|
||||||
sed -i='.tmp' 's/<img src=\"docs\//<img src=\"/' ${DST}
|
sed -i='.tmp' 's/<img src=\"docs\//<img src=\"/' ${DST}
|
||||||
rm -rf docs/*.tmp
|
rm -rf docs/*.tmp
|
||||||
|
|
||||||
# Copies docs for all components and adds the order tag.
|
# Copies docs for all components and adds the order/weight tag, title, menu position and alias with the backward compatible link for the old site.
|
||||||
# For ORDER=0 it adds no order tag.
|
# For ORDER=0 it adds no order tag/weight tag.
|
||||||
|
# FOR OLD_URL - relative link, used for backward compatibility with the link from documentation based on GitHub pages (old one)
|
||||||
|
# FOR OLD_URL='' it adds no alias, it should be empty for every new page, don't change it for already existing links.
|
||||||
# Images starting with <img src="docs/ are replaced with <img src="
|
# Images starting with <img src="docs/ are replaced with <img src="
|
||||||
# Cluster docs are supposed to be ordered as 9th.
|
# Cluster docs are supposed to be ordered as 2nd.
|
||||||
# The rest of docs is ordered manually.
|
# The rest of docs is ordered manually.
|
||||||
docs-sync:
|
docs-sync:
|
||||||
SRC=README.md DST=docs/README.md ORDER=0 $(MAKE) copy-docs
|
SRC=README.md DST=docs/README.md OLD_URL='' ORDER=0 TITLE=VictoriaMetrics $(MAKE) copy-docs
|
||||||
SRC=README.md DST=docs/Single-server-VictoriaMetrics.md ORDER=1 $(MAKE) copy-docs
|
SRC=README.md DST=docs/Single-server-VictoriaMetrics.md OLD_URL='/Single-server-VictoriaMetrics.html' TITLE=VictoriaMetrics ORDER=1 $(MAKE) copy-docs
|
||||||
SRC=app/vmagent/README.md DST=docs/vmagent.md ORDER=3 $(MAKE) copy-docs
|
SRC=app/vmagent/README.md DST=docs/vmagent.md OLD_URL='/vmagent.html' ORDER=3 TITLE=vmagent $(MAKE) copy-docs
|
||||||
SRC=app/vmalert/README.md DST=docs/vmalert.md ORDER=4 $(MAKE) copy-docs
|
SRC=app/vmalert/README.md DST=docs/vmalert.md OLD_URL='/vmalert.html' ORDER=4 TITLE=vmalert $(MAKE) copy-docs
|
||||||
SRC=app/vmauth/README.md DST=docs/vmauth.md ORDER=5 $(MAKE) copy-docs
|
SRC=app/vmauth/README.md DST=docs/vmauth.md OLD_URL='/vmauth.html' ORDER=5 TITLE=vmauth $(MAKE) copy-docs
|
||||||
SRC=app/vmbackup/README.md DST=docs/vmbackup.md ORDER=6 $(MAKE) copy-docs
|
SRC=app/vmbackup/README.md DST=docs/vmbackup.md OLD_URL='/vmbackup.html' ORDER=6 TITLE=vmbackup $(MAKE) copy-docs
|
||||||
SRC=app/vmrestore/README.md DST=docs/vmrestore.md ORDER=7 $(MAKE) copy-docs
|
SRC=app/vmrestore/README.md DST=docs/vmrestore.md OLD_URL='/vmrestore.html' ORDER=7 TITLE=vmrestore $(MAKE) copy-docs
|
||||||
SRC=app/vmctl/README.md DST=docs/vmctl.md ORDER=8 $(MAKE) copy-docs
|
SRC=app/vmctl/README.md DST=docs/vmctl.md OLD_URL='/vmctl.html' ORDER=8 TITLE=vmctl $(MAKE) copy-docs
|
||||||
SRC=app/vmgateway/README.md DST=docs/vmgateway.md ORDER=9 $(MAKE) copy-docs
|
SRC=app/vmgateway/README.md DST=docs/vmgateway.md OLD_URL='/vmgateway.html' ORDER=9 TITLE=vmgateway $(MAKE) copy-docs
|
||||||
SRC=app/vmbackupmanager/README.md DST=docs/vmbackupmanager.md ORDER=10 $(MAKE) copy-docs
|
SRC=app/vmbackupmanager/README.md DST=docs/vmbackupmanager.md OLD_URL='/vmbackupmanager.html' ORDER=10 TITLE=vmbackupmanager $(MAKE) copy-docs
|
||||||
|
|
45
README.md
45
README.md
|
@ -528,8 +528,10 @@ and stream plain InfluxDB line protocol data to the configured TCP and/or UDP ad
|
||||||
|
|
||||||
VictoriaMetrics performs the following transformations to the ingested InfluxDB data:
|
VictoriaMetrics performs the following transformations to the ingested InfluxDB data:
|
||||||
|
|
||||||
* [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value
|
* [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db`
|
||||||
unless `db` tag exists in the InfluxDB line. The `db` label name can be overridden via `-influxDBLabel` command-line flag.
|
[label](https://docs.victoriametrics.com/keyConcepts.html#labels) value unless `db` tag exists in the InfluxDB line.
|
||||||
|
The `db` label name can be overridden via `-influxDBLabel` command-line flag. If more strict data isolation is required,
|
||||||
|
read more about multi-tenancy [here](https://docs.victoriametrics.com/keyConcepts.html#multi-tenancy).
|
||||||
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names.
|
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names.
|
||||||
* Field values are mapped to time series values.
|
* Field values are mapped to time series values.
|
||||||
* Tags are mapped to Prometheus labels as-is.
|
* Tags are mapped to Prometheus labels as-is.
|
||||||
|
@ -1464,22 +1466,37 @@ with the enabled de-duplication. See [this section](#deduplication) for details.
|
||||||
|
|
||||||
## Deduplication
|
## Deduplication
|
||||||
|
|
||||||
VictoriaMetrics leaves a single raw sample with the biggest timestamp per each `-dedup.minScrapeInterval` discrete interval
|
VictoriaMetrics leaves a single [raw sample](https://docs.victoriametrics.com/keyConcepts.html#raw-samples)
|
||||||
if `-dedup.minScrapeInterval` is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would leave a single
|
with the biggest [timestamp](https://en.wikipedia.org/wiki/Unix_time) for each [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
|
||||||
raw sample with the biggest timestamp per each discrete 60s interval.
|
per each `-dedup.minScrapeInterval` discrete interval if `-dedup.minScrapeInterval` is set to positive duration.
|
||||||
|
For example, `-dedup.minScrapeInterval=60s` would leave a single raw sample with the biggest timestamp per each discrete
|
||||||
|
`60s` interval.
|
||||||
This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).
|
This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).
|
||||||
|
|
||||||
If multiple raw samples have the same biggest timestamp on the given `-dedup.minScrapeInterval` discrete interval, then the sample with the biggest value is left.
|
If multiple raw samples have **the same timestamp** on the given `-dedup.minScrapeInterval` discrete interval,
|
||||||
|
then the sample with **the biggest value** is kept.
|
||||||
|
|
||||||
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. So it is safe to use deduplication and downsampling simultaneously.
|
Please note, [labels](https://docs.victoriametrics.com/keyConcepts.html#labels) of raw samples should be identical
|
||||||
|
in order to be deduplicated. For example, this is why [HA pair of vmagents](https://docs.victoriametrics.com/vmagent.html#high-availability)
|
||||||
|
needs to be identically configured.
|
||||||
|
|
||||||
The recommended value for `-dedup.minScrapeInterval` must equal to `scrape_interval` config from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
|
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled.
|
||||||
|
So it is safe to use deduplication and downsampling simultaneously.
|
||||||
|
|
||||||
The de-duplication reduces disk space usage if multiple identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus instances in HA pair
|
The recommended value for `-dedup.minScrapeInterval` must equal to `scrape_interval` config from Prometheus configs.
|
||||||
write data to the same VictoriaMetrics instance. These vmagent or Prometheus instances must have identical
|
It is recommended to have a single `scrape_interval` across all the scrape targets.
|
||||||
`external_labels` section in their configs, so they write data to the same time series. See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
|
See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
|
||||||
|
|
||||||
It is recommended passing different `-promscrape.cluster.name` values to HA pairs of `vmagent` instances, so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples from other `vmagent` instances. See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.
|
The de-duplication reduces disk space usage if multiple **identically configured** [vmagent](https://docs.victoriametrics.com/vmagent.html)
|
||||||
|
or Prometheus instances in HA pair write data to the same VictoriaMetrics instance.
|
||||||
|
These vmagent or Prometheus instances must have **identical** `external_labels` section in their configs,
|
||||||
|
so they write data to the same time series.
|
||||||
|
See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
|
||||||
|
|
||||||
|
It is recommended passing different `-promscrape.cluster.name` values to each distinct HA pair of `vmagent` instances,
|
||||||
|
so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples
|
||||||
|
from other `vmagent` instances.
|
||||||
|
See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.
|
||||||
|
|
||||||
## Storage
|
## Storage
|
||||||
|
|
||||||
|
@ -1623,6 +1640,10 @@ Retention filters can be evaluated for free by downloading and using enterprise
|
||||||
|
|
||||||
Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series. It is possible to use recording rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) in order to reduce the number of time series. See [these docs](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).
|
Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series. It is possible to use recording rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) in order to reduce the number of time series. See [these docs](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).
|
||||||
|
|
||||||
|
Downsampling happens during [background merges](https://docs.victoriametrics.com/#storage)
|
||||||
|
and can't be performed if there is not enough of free disk space or if vmstorage
|
||||||
|
is in [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
|
||||||
|
|
||||||
The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||||
|
|
||||||
## Multi-tenancy
|
## Multi-tenancy
|
||||||
|
|
|
@ -752,14 +752,18 @@ See [these docs](https://docs.victoriametrics.com/#deduplication) for details.
|
||||||
|
|
||||||
## High availability
|
## High availability
|
||||||
|
|
||||||
It is possible to run multiple identically configured `vmagent` instances or `vmagent` [clusters](#scraping-big-number-of-targets),
|
It is possible to run multiple **identically configured** `vmagent` instances or `vmagent`
|
||||||
so they [scrape](#how-to-collect-metrics-in-prometheus-format) the same set of targets and push the collected data to the same set of VictoriaMetrics remote storage systems.
|
[clusters](#scraping-big-number-of-targets), so they [scrape](#how-to-collect-metrics-in-prometheus-format)
|
||||||
|
the same set of targets and push the collected data to the same set of VictoriaMetrics remote storage systems.
|
||||||
|
Two **identically configured** vmagent instances or clusters is usually called an HA pair.
|
||||||
|
|
||||||
In this case the deduplication must be configured at VictoriaMetrics in order to de-duplicate samples received from multiple identically configured `vmagent` instances or clusters.
|
When running HA pairs, [deduplication](https://docs.victoriametrics.com/#deduplication) must be configured
|
||||||
|
at VictoriaMetrics side in order to de-duplicate received samples.
|
||||||
See [these docs](https://docs.victoriametrics.com/#deduplication) for details.
|
See [these docs](https://docs.victoriametrics.com/#deduplication) for details.
|
||||||
|
|
||||||
It is also recommended passing different values to `-promscrape.cluster.name` command-line flag per each `vmagent` instance or per each `vmagent` cluster in HA setup.
|
It is also recommended passing different values to `-promscrape.cluster.name` command-line flag per each `vmagent`
|
||||||
This is needed for proper data de-duplication. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679) for details.
|
instance or per each `vmagent` cluster in HA setup. This is needed for proper data de-duplication.
|
||||||
|
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679) for details.
|
||||||
|
|
||||||
## Scraping targets via a proxy
|
## Scraping targets via a proxy
|
||||||
|
|
||||||
|
|
|
@ -73,6 +73,7 @@ test-vmalert:
|
||||||
go test -v -race -cover ./app/vmalert/notifier
|
go test -v -race -cover ./app/vmalert/notifier
|
||||||
go test -v -race -cover ./app/vmalert/config
|
go test -v -race -cover ./app/vmalert/config
|
||||||
go test -v -race -cover ./app/vmalert/remotewrite
|
go test -v -race -cover ./app/vmalert/remotewrite
|
||||||
|
go test -v -race -cover ./app/vmalert/utils
|
||||||
|
|
||||||
run-vmalert: vmalert
|
run-vmalert: vmalert
|
||||||
./bin/vmalert -rule=app/vmalert/config/testdata/rules/rules2-good.rules \
|
./bin/vmalert -rule=app/vmalert/config/testdata/rules/rules2-good.rules \
|
||||||
|
|
|
@ -947,7 +947,7 @@ The shortlist of configuration flags is the following:
|
||||||
Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
|
Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
|
||||||
Supports an array of values separated by comma or specified via multiple flags.
|
Supports an array of values separated by comma or specified via multiple flags.
|
||||||
-external.url string
|
-external.url string
|
||||||
External URL is used as alert's source for sent alerts to the notifier
|
External URL is used as alert's source for sent alerts to the notifier. By default, hostname is used as address.
|
||||||
-flagsAuthKey string
|
-flagsAuthKey string
|
||||||
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||||
-fs.disableMmap
|
-fs.disableMmap
|
||||||
|
|
|
@ -29,6 +29,7 @@ func toDatasourceType(s string) datasourceType {
|
||||||
}
|
}
|
||||||
|
|
||||||
// VMStorage represents vmstorage entity with ability to read and write metrics
|
// VMStorage represents vmstorage entity with ability to read and write metrics
|
||||||
|
// WARN: when adding a new field, remember to update Clone() method.
|
||||||
type VMStorage struct {
|
type VMStorage struct {
|
||||||
c *http.Client
|
c *http.Client
|
||||||
authCfg *promauth.Config
|
authCfg *promauth.Config
|
||||||
|
@ -54,29 +55,54 @@ type keyValue struct {
|
||||||
|
|
||||||
// Clone makes clone of VMStorage, shares http client.
|
// Clone makes clone of VMStorage, shares http client.
|
||||||
func (s *VMStorage) Clone() *VMStorage {
|
func (s *VMStorage) Clone() *VMStorage {
|
||||||
return &VMStorage{
|
ns := &VMStorage{
|
||||||
c: s.c,
|
c: s.c,
|
||||||
authCfg: s.authCfg,
|
authCfg: s.authCfg,
|
||||||
datasourceURL: s.datasourceURL,
|
datasourceURL: s.datasourceURL,
|
||||||
|
appendTypePrefix: s.appendTypePrefix,
|
||||||
lookBack: s.lookBack,
|
lookBack: s.lookBack,
|
||||||
queryStep: s.queryStep,
|
queryStep: s.queryStep,
|
||||||
appendTypePrefix: s.appendTypePrefix,
|
|
||||||
dataSourceType: s.dataSourceType,
|
dataSourceType: s.dataSourceType,
|
||||||
|
evaluationInterval: s.evaluationInterval,
|
||||||
|
|
||||||
|
// init map so it can be populated below
|
||||||
|
extraParams: url.Values{},
|
||||||
|
|
||||||
|
debug: s.debug,
|
||||||
}
|
}
|
||||||
|
if len(s.extraHeaders) > 0 {
|
||||||
|
ns.extraHeaders = make([]keyValue, len(s.extraHeaders))
|
||||||
|
copy(ns.extraHeaders, s.extraHeaders)
|
||||||
|
}
|
||||||
|
for k, v := range s.extraParams {
|
||||||
|
ns.extraParams[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
return ns
|
||||||
}
|
}
|
||||||
|
|
||||||
// ApplyParams - changes given querier params.
|
// ApplyParams - changes given querier params.
|
||||||
func (s *VMStorage) ApplyParams(params QuerierParams) *VMStorage {
|
func (s *VMStorage) ApplyParams(params QuerierParams) *VMStorage {
|
||||||
s.dataSourceType = toDatasourceType(params.DataSourceType)
|
s.dataSourceType = toDatasourceType(params.DataSourceType)
|
||||||
s.evaluationInterval = params.EvaluationInterval
|
s.evaluationInterval = params.EvaluationInterval
|
||||||
s.extraParams = params.QueryParams
|
if params.QueryParams != nil {
|
||||||
s.debug = params.Debug
|
if s.extraParams == nil {
|
||||||
|
s.extraParams = url.Values{}
|
||||||
|
}
|
||||||
|
for k, vl := range params.QueryParams {
|
||||||
|
for _, v := range vl { // custom query params are prior to default ones
|
||||||
|
s.extraParams.Set(k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
if params.Headers != nil {
|
if params.Headers != nil {
|
||||||
for key, value := range params.Headers {
|
for key, value := range params.Headers {
|
||||||
kv := keyValue{key: key, value: value}
|
kv := keyValue{key: key, value: value}
|
||||||
s.extraHeaders = append(s.extraHeaders, kv)
|
s.extraHeaders = append(s.extraHeaders, kv)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
s.debug = params.Debug
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -95,6 +121,7 @@ func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Durati
|
||||||
lookBack: lookBack,
|
lookBack: lookBack,
|
||||||
queryStep: queryStep,
|
queryStep: queryStep,
|
||||||
dataSourceType: datasourcePrometheus,
|
dataSourceType: datasourcePrometheus,
|
||||||
|
extraParams: url.Values{},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -378,6 +378,9 @@ func TestRequestParams(t *testing.T) {
|
||||||
}
|
}
|
||||||
query := "up"
|
query := "up"
|
||||||
timestamp := time.Date(2001, 2, 3, 4, 5, 6, 0, time.UTC)
|
timestamp := time.Date(2001, 2, 3, 4, 5, 6, 0, time.UTC)
|
||||||
|
storage := VMStorage{
|
||||||
|
extraParams: url.Values{"round_digits": {"10"}},
|
||||||
|
}
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
queryRange bool
|
queryRange bool
|
||||||
|
@ -574,6 +577,17 @@ func TestRequestParams(t *testing.T) {
|
||||||
checkEqualString(t, exp, r.URL.RawQuery)
|
checkEqualString(t, exp, r.URL.RawQuery)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"custom params overrides the original params",
|
||||||
|
false,
|
||||||
|
storage.Clone().ApplyParams(QuerierParams{
|
||||||
|
QueryParams: url.Values{"round_digits": {"2"}},
|
||||||
|
}),
|
||||||
|
func(t *testing.T, r *http.Request) {
|
||||||
|
exp := fmt.Sprintf("query=%s&round_digits=2&time=%d", query, timestamp.Unix())
|
||||||
|
checkEqualString(t, exp, r.URL.RawQuery)
|
||||||
|
},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"graphite extra params",
|
"graphite extra params",
|
||||||
false,
|
false,
|
||||||
|
|
|
@ -74,7 +74,7 @@ absolute path to all .tpl files in root.
|
||||||
ruleUpdateEntriesLimit = flag.Int("rule.updateEntriesLimit", 20, "Defines the max number of rule's state updates stored in-memory. "+
|
ruleUpdateEntriesLimit = flag.Int("rule.updateEntriesLimit", 20, "Defines the max number of rule's state updates stored in-memory. "+
|
||||||
"Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overridden per rule via update_entries_limit param.")
|
"Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overridden per rule via update_entries_limit param.")
|
||||||
|
|
||||||
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier")
|
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier. By default, hostname is used as address.")
|
||||||
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager `+
|
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager `+
|
||||||
`for cases where you want to build a custom link to Grafana, Prometheus or any other service. `+
|
`for cases where you want to build a custom link to Grafana, Prometheus or any other service. `+
|
||||||
`Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . `+
|
`Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . `+
|
||||||
|
|
|
@ -188,6 +188,7 @@ func (g *Group) toAPI() APIGroup {
|
||||||
|
|
||||||
Labels: g.Labels,
|
Labels: g.Labels,
|
||||||
}
|
}
|
||||||
|
ag.Rules = make([]APIRule, 0)
|
||||||
for _, r := range g.Rules {
|
for _, r := range g.Rules {
|
||||||
ag.Rules = append(ag.Rules, r.ToAPI())
|
ag.Rules = append(ag.Rules, r.ToAPI())
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,5 +1,4 @@
|
||||||
{% import (
|
{% import (
|
||||||
"strings"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"path"
|
"path"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
@ -85,10 +84,7 @@ type NavItem struct {
|
||||||
|
|
||||||
{% func printNavItems(r *http.Request, current string, items []NavItem) %}
|
{% func printNavItems(r *http.Request, current string, items []NavItem) %}
|
||||||
{%code
|
{%code
|
||||||
prefix := "/vmalert/"
|
prefix := utils.Prefix(r.URL.Path)
|
||||||
if strings.HasPrefix(r.URL.Path, prefix) {
|
|
||||||
prefix = ""
|
|
||||||
}
|
|
||||||
%}
|
%}
|
||||||
<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
|
<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
|
||||||
<div class="container-fluid">
|
<div class="container-fluid">
|
||||||
|
|
|
@ -9,52 +9,51 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:10
|
//line app/vmalert/tpl/header.qtpl:9
|
||||||
import (
|
import (
|
||||||
qtio422016 "io"
|
qtio422016 "io"
|
||||||
|
|
||||||
qt422016 "github.com/valyala/quicktemplate"
|
qt422016 "github.com/valyala/quicktemplate"
|
||||||
)
|
)
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:10
|
//line app/vmalert/tpl/header.qtpl:9
|
||||||
var (
|
var (
|
||||||
_ = qtio422016.Copy
|
_ = qtio422016.Copy
|
||||||
_ = qt422016.AcquireByteBuffer
|
_ = qt422016.AcquireByteBuffer
|
||||||
)
|
)
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:10
|
//line app/vmalert/tpl/header.qtpl:9
|
||||||
func StreamHeader(qw422016 *qt422016.Writer, r *http.Request, navItems []NavItem, title string) {
|
func StreamHeader(qw422016 *qt422016.Writer, r *http.Request, navItems []NavItem, title string) {
|
||||||
//line app/vmalert/tpl/header.qtpl:10
|
//line app/vmalert/tpl/header.qtpl:9
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
`)
|
`)
|
||||||
//line app/vmalert/tpl/header.qtpl:11
|
//line app/vmalert/tpl/header.qtpl:10
|
||||||
prefix := utils.Prefix(r.URL.Path)
|
prefix := utils.Prefix(r.URL.Path)
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:11
|
//line app/vmalert/tpl/header.qtpl:10
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
<!DOCTYPE html>
|
<!DOCTYPE html>
|
||||||
<html lang="en">
|
<html lang="en">
|
||||||
<head>
|
<head>
|
||||||
<title>vmalert`)
|
<title>vmalert`)
|
||||||
//line app/vmalert/tpl/header.qtpl:15
|
//line app/vmalert/tpl/header.qtpl:14
|
||||||
if title != "" {
|
if title != "" {
|
||||||
//line app/vmalert/tpl/header.qtpl:15
|
//line app/vmalert/tpl/header.qtpl:14
|
||||||
qw422016.N().S(` - `)
|
qw422016.N().S(` - `)
|
||||||
//line app/vmalert/tpl/header.qtpl:15
|
//line app/vmalert/tpl/header.qtpl:14
|
||||||
qw422016.E().S(title)
|
qw422016.E().S(title)
|
||||||
//line app/vmalert/tpl/header.qtpl:15
|
//line app/vmalert/tpl/header.qtpl:14
|
||||||
}
|
}
|
||||||
//line app/vmalert/tpl/header.qtpl:15
|
//line app/vmalert/tpl/header.qtpl:14
|
||||||
qw422016.N().S(`</title>
|
qw422016.N().S(`</title>
|
||||||
<link href="`)
|
<link href="`)
|
||||||
//line app/vmalert/tpl/header.qtpl:16
|
//line app/vmalert/tpl/header.qtpl:15
|
||||||
qw422016.E().S(prefix)
|
qw422016.E().S(prefix)
|
||||||
//line app/vmalert/tpl/header.qtpl:16
|
//line app/vmalert/tpl/header.qtpl:15
|
||||||
qw422016.N().S(`static/css/bootstrap.min.css" rel="stylesheet" />
|
qw422016.N().S(`static/css/bootstrap.min.css" rel="stylesheet" />
|
||||||
<style>
|
<style>
|
||||||
body{
|
body{
|
||||||
|
@ -114,139 +113,136 @@ func StreamHeader(qw422016 *qt422016.Writer, r *http.Request, navItems []NavItem
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
`)
|
`)
|
||||||
//line app/vmalert/tpl/header.qtpl:74
|
//line app/vmalert/tpl/header.qtpl:73
|
||||||
streamprintNavItems(qw422016, r, title, navItems)
|
streamprintNavItems(qw422016, r, title, navItems)
|
||||||
//line app/vmalert/tpl/header.qtpl:74
|
//line app/vmalert/tpl/header.qtpl:73
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
<main class="px-2">
|
<main class="px-2">
|
||||||
`)
|
`)
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
func WriteHeader(qq422016 qtio422016.Writer, r *http.Request, navItems []NavItem, title string) {
|
func WriteHeader(qq422016 qtio422016.Writer, r *http.Request, navItems []NavItem, title string) {
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
StreamHeader(qw422016, r, navItems, title)
|
StreamHeader(qw422016, r, navItems, title)
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
qt422016.ReleaseWriter(qw422016)
|
qt422016.ReleaseWriter(qw422016)
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
func Header(r *http.Request, navItems []NavItem, title string) string {
|
func Header(r *http.Request, navItems []NavItem, title string) string {
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
qb422016 := qt422016.AcquireByteBuffer()
|
qb422016 := qt422016.AcquireByteBuffer()
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
WriteHeader(qb422016, r, navItems, title)
|
WriteHeader(qb422016, r, navItems, title)
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
qs422016 := string(qb422016.B)
|
qs422016 := string(qb422016.B)
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
qt422016.ReleaseByteBuffer(qb422016)
|
qt422016.ReleaseByteBuffer(qb422016)
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
return qs422016
|
return qs422016
|
||||||
//line app/vmalert/tpl/header.qtpl:76
|
//line app/vmalert/tpl/header.qtpl:75
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:80
|
//line app/vmalert/tpl/header.qtpl:79
|
||||||
type NavItem struct {
|
type NavItem struct {
|
||||||
Name string
|
Name string
|
||||||
Url string
|
Url string
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:86
|
//line app/vmalert/tpl/header.qtpl:85
|
||||||
func streamprintNavItems(qw422016 *qt422016.Writer, r *http.Request, current string, items []NavItem) {
|
func streamprintNavItems(qw422016 *qt422016.Writer, r *http.Request, current string, items []NavItem) {
|
||||||
//line app/vmalert/tpl/header.qtpl:86
|
//line app/vmalert/tpl/header.qtpl:85
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
`)
|
`)
|
||||||
//line app/vmalert/tpl/header.qtpl:88
|
//line app/vmalert/tpl/header.qtpl:87
|
||||||
prefix := "/vmalert/"
|
prefix := utils.Prefix(r.URL.Path)
|
||||||
if strings.HasPrefix(r.URL.Path, prefix) {
|
|
||||||
prefix = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:92
|
//line app/vmalert/tpl/header.qtpl:88
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
|
<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
|
||||||
<div class="container-fluid">
|
<div class="container-fluid">
|
||||||
<div class="collapse navbar-collapse" id="navbarCollapse">
|
<div class="collapse navbar-collapse" id="navbarCollapse">
|
||||||
<ul class="navbar-nav me-auto mb-2 mb-md-0">
|
<ul class="navbar-nav me-auto mb-2 mb-md-0">
|
||||||
`)
|
`)
|
||||||
//line app/vmalert/tpl/header.qtpl:97
|
//line app/vmalert/tpl/header.qtpl:93
|
||||||
for _, item := range items {
|
for _, item := range items {
|
||||||
//line app/vmalert/tpl/header.qtpl:97
|
//line app/vmalert/tpl/header.qtpl:93
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
<li class="nav-item">
|
<li class="nav-item">
|
||||||
`)
|
`)
|
||||||
//line app/vmalert/tpl/header.qtpl:100
|
//line app/vmalert/tpl/header.qtpl:96
|
||||||
u, _ := url.Parse(item.Url)
|
u, _ := url.Parse(item.Url)
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:101
|
//line app/vmalert/tpl/header.qtpl:97
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
<a class="nav-link`)
|
<a class="nav-link`)
|
||||||
//line app/vmalert/tpl/header.qtpl:102
|
//line app/vmalert/tpl/header.qtpl:98
|
||||||
if current == item.Name {
|
if current == item.Name {
|
||||||
//line app/vmalert/tpl/header.qtpl:102
|
//line app/vmalert/tpl/header.qtpl:98
|
||||||
qw422016.N().S(` active`)
|
qw422016.N().S(` active`)
|
||||||
//line app/vmalert/tpl/header.qtpl:102
|
//line app/vmalert/tpl/header.qtpl:98
|
||||||
}
|
}
|
||||||
//line app/vmalert/tpl/header.qtpl:102
|
//line app/vmalert/tpl/header.qtpl:98
|
||||||
qw422016.N().S(`"
|
qw422016.N().S(`"
|
||||||
href="`)
|
href="`)
|
||||||
//line app/vmalert/tpl/header.qtpl:103
|
//line app/vmalert/tpl/header.qtpl:99
|
||||||
if u.IsAbs() {
|
if u.IsAbs() {
|
||||||
//line app/vmalert/tpl/header.qtpl:103
|
//line app/vmalert/tpl/header.qtpl:99
|
||||||
qw422016.E().S(item.Url)
|
qw422016.E().S(item.Url)
|
||||||
//line app/vmalert/tpl/header.qtpl:103
|
//line app/vmalert/tpl/header.qtpl:99
|
||||||
} else {
|
} else {
|
||||||
//line app/vmalert/tpl/header.qtpl:103
|
//line app/vmalert/tpl/header.qtpl:99
|
||||||
qw422016.E().S(path.Join(prefix, item.Url))
|
qw422016.E().S(path.Join(prefix, item.Url))
|
||||||
//line app/vmalert/tpl/header.qtpl:103
|
//line app/vmalert/tpl/header.qtpl:99
|
||||||
}
|
}
|
||||||
//line app/vmalert/tpl/header.qtpl:103
|
//line app/vmalert/tpl/header.qtpl:99
|
||||||
qw422016.N().S(`">
|
qw422016.N().S(`">
|
||||||
`)
|
`)
|
||||||
//line app/vmalert/tpl/header.qtpl:104
|
//line app/vmalert/tpl/header.qtpl:100
|
||||||
qw422016.E().S(item.Name)
|
qw422016.E().S(item.Name)
|
||||||
//line app/vmalert/tpl/header.qtpl:104
|
//line app/vmalert/tpl/header.qtpl:100
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
</a>
|
</a>
|
||||||
</li>
|
</li>
|
||||||
`)
|
`)
|
||||||
//line app/vmalert/tpl/header.qtpl:107
|
//line app/vmalert/tpl/header.qtpl:103
|
||||||
}
|
}
|
||||||
//line app/vmalert/tpl/header.qtpl:107
|
//line app/vmalert/tpl/header.qtpl:103
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
</ul>
|
</ul>
|
||||||
</div>
|
</div>
|
||||||
</nav>
|
</nav>
|
||||||
`)
|
`)
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
func writeprintNavItems(qq422016 qtio422016.Writer, r *http.Request, current string, items []NavItem) {
|
func writeprintNavItems(qq422016 qtio422016.Writer, r *http.Request, current string, items []NavItem) {
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
streamprintNavItems(qw422016, r, current, items)
|
streamprintNavItems(qw422016, r, current, items)
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
qt422016.ReleaseWriter(qw422016)
|
qt422016.ReleaseWriter(qw422016)
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
func printNavItems(r *http.Request, current string, items []NavItem) string {
|
func printNavItems(r *http.Request, current string, items []NavItem) string {
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
qb422016 := qt422016.AcquireByteBuffer()
|
qb422016 := qt422016.AcquireByteBuffer()
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
writeprintNavItems(qb422016, r, current, items)
|
writeprintNavItems(qb422016, r, current, items)
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
qs422016 := string(qb422016.B)
|
qs422016 := string(qb422016.B)
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
qt422016.ReleaseByteBuffer(qb422016)
|
qt422016.ReleaseByteBuffer(qb422016)
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
return qs422016
|
return qs422016
|
||||||
//line app/vmalert/tpl/header.qtpl:111
|
//line app/vmalert/tpl/header.qtpl:107
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,13 +1,24 @@
|
||||||
package utils
|
package utils
|
||||||
|
|
||||||
import "strings"
|
import (
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||||
|
)
|
||||||
|
|
||||||
const prefix = "/vmalert/"
|
const prefix = "/vmalert/"
|
||||||
|
|
||||||
// Prefix returns "/vmalert/" prefix if it is missing in the path.
|
// Prefix returns "/vmalert/" prefix if it is missing in the path.
|
||||||
func Prefix(path string) string {
|
func Prefix(path string) string {
|
||||||
|
pp := httpserver.GetPathPrefix()
|
||||||
|
path = strings.TrimLeft(path, pp)
|
||||||
if strings.HasPrefix(path, prefix) {
|
if strings.HasPrefix(path, prefix) {
|
||||||
return ""
|
return pp
|
||||||
}
|
}
|
||||||
return prefix
|
res, err := url.JoinPath(pp, prefix)
|
||||||
|
if err != nil {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
return res
|
||||||
}
|
}
|
||||||
|
|
|
@ -565,7 +565,7 @@ btn-primary
|
||||||
{% if isNoMatch(r) %}
|
{% if isNoMatch(r) %}
|
||||||
<svg xmlns="http://www.w3.org/2000/svg"
|
<svg xmlns="http://www.w3.org/2000/svg"
|
||||||
data-bs-toggle="tooltip"
|
data-bs-toggle="tooltip"
|
||||||
title="No match! This rule last evaluation hasn't selected any time series from the datasource.
|
title="No match! This rule's last evaluation hasn't selected any time series from the datasource.
|
||||||
It might be that either this data is missing in the datasource or there is a typo in rule's expression.
|
It might be that either this data is missing in the datasource or there is a typo in rule's expression.
|
||||||
See more in Details."
|
See more in Details."
|
||||||
width="18" height="18" fill="currentColor" class="bi bi-exclamation-triangle-fill flex-shrink-0 me-2" viewBox="0 0 16 16" role="img" aria-label="Warning:">
|
width="18" height="18" fill="currentColor" class="bi bi-exclamation-triangle-fill flex-shrink-0 me-2" viewBox="0 0 16 16" role="img" aria-label="Warning:">
|
||||||
|
|
|
@ -1759,7 +1759,7 @@ func streamseriesFetchedWarn(qw422016 *qt422016.Writer, r APIRule) {
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
<svg xmlns="http://www.w3.org/2000/svg"
|
<svg xmlns="http://www.w3.org/2000/svg"
|
||||||
data-bs-toggle="tooltip"
|
data-bs-toggle="tooltip"
|
||||||
title="No match! This rule last evaluation hasn't selected any time series from the datasource.
|
title="No match! This rule's last evaluation hasn't selected any time series from the datasource.
|
||||||
It might be that either this data is missing in the datasource or there is a typo in rule's expression.
|
It might be that either this data is missing in the datasource or there is a typo in rule's expression.
|
||||||
See more in Details."
|
See more in Details."
|
||||||
width="18" height="18" fill="currentColor" class="bi bi-exclamation-triangle-fill flex-shrink-0 me-2" viewBox="0 0 16 16" role="img" aria-label="Warning:">
|
width="18" height="18" fill="currentColor" class="bi bi-exclamation-triangle-fill flex-shrink-0 me-2" viewBox="0 0 16 16" role="img" aria-label="Warning:">
|
||||||
|
|
|
@ -165,8 +165,8 @@ func TestHandler(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEmptyResponse(t *testing.T) {
|
func TestEmptyResponse(t *testing.T) {
|
||||||
rh := &requestHandler{m: &manager{groups: make(map[uint64]*Group)}}
|
rhWithNoGroups := &requestHandler{m: &manager{groups: make(map[uint64]*Group)}}
|
||||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rh.handler(w, r) }))
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rhWithNoGroups.handler(w, r) }))
|
||||||
defer ts.Close()
|
defer ts.Close()
|
||||||
|
|
||||||
getResp := func(url string, to interface{}, code int) {
|
getResp := func(url string, to interface{}, code int) {
|
||||||
|
@ -190,7 +190,7 @@ func TestEmptyResponse(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("/api/v1/alerts", func(t *testing.T) {
|
t.Run("no groups /api/v1/alerts", func(t *testing.T) {
|
||||||
lr := listAlertsResponse{}
|
lr := listAlertsResponse{}
|
||||||
getResp(ts.URL+"/api/v1/alerts", &lr, 200)
|
getResp(ts.URL+"/api/v1/alerts", &lr, 200)
|
||||||
if lr.Data.Alerts == nil {
|
if lr.Data.Alerts == nil {
|
||||||
|
@ -204,7 +204,7 @@ func TestEmptyResponse(t *testing.T) {
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("/api/v1/rules", func(t *testing.T) {
|
t.Run("no groups /api/v1/rules", func(t *testing.T) {
|
||||||
lr := listGroupsResponse{}
|
lr := listGroupsResponse{}
|
||||||
getResp(ts.URL+"/api/v1/rules", &lr, 200)
|
getResp(ts.URL+"/api/v1/rules", &lr, 200)
|
||||||
if lr.Data.Groups == nil {
|
if lr.Data.Groups == nil {
|
||||||
|
@ -217,4 +217,26 @@ func TestEmptyResponse(t *testing.T) {
|
||||||
t.Errorf("expected /api/v1/rules response to have non-nil data")
|
t.Errorf("expected /api/v1/rules response to have non-nil data")
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
rhWithEmptyGroup := &requestHandler{m: &manager{groups: map[uint64]*Group{0: {Name: "test"}}}}
|
||||||
|
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rhWithEmptyGroup.handler(w, r) })
|
||||||
|
|
||||||
|
t.Run("empty group /api/v1/rules", func(t *testing.T) {
|
||||||
|
lr := listGroupsResponse{}
|
||||||
|
getResp(ts.URL+"/api/v1/rules", &lr, 200)
|
||||||
|
if lr.Data.Groups == nil {
|
||||||
|
t.Fatalf("expected /api/v1/rules response to have non-nil data")
|
||||||
|
}
|
||||||
|
|
||||||
|
lr = listGroupsResponse{}
|
||||||
|
getResp(ts.URL+"/vmalert/api/v1/rules", &lr, 200)
|
||||||
|
if lr.Data.Groups == nil {
|
||||||
|
t.Fatalf("expected /api/v1/rules response to have non-nil data")
|
||||||
|
}
|
||||||
|
|
||||||
|
group := lr.Data.Groups[0]
|
||||||
|
if group.Rules == nil {
|
||||||
|
t.Fatalf("expected /api/v1/rules response to have non-nil rules for group")
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
## vmbackupmanager
|
# vmbackupmanager
|
||||||
|
|
||||||
***vmbackupmanager is a part of [enterprise package](https://docs.victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
***vmbackupmanager is a part of [enterprise package](https://docs.victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
||||||
|
|
||||||
|
@ -104,11 +104,11 @@ The result on the GCS bucket
|
||||||
|
|
||||||
* The root folder
|
* The root folder
|
||||||
|
|
||||||
![root](vmbackupmanager_root_folder.png)
|
<img alt="root folder" src="vmbackupmanager_root_folder.png">
|
||||||
|
|
||||||
* The latest folder
|
* The latest folder
|
||||||
|
|
||||||
![latest](vmbackupmanager_latest_folder.png)
|
<img alt="latest folder" src="vmbackupmanager_latest_folder.png">
|
||||||
|
|
||||||
## Backup Retention Policy
|
## Backup Retention Policy
|
||||||
|
|
||||||
|
@ -123,7 +123,7 @@ Backup retention policy is controlled by:
|
||||||
|
|
||||||
Let’s assume we have a backup manager collecting daily backups for the past 10 days.
|
Let’s assume we have a backup manager collecting daily backups for the past 10 days.
|
||||||
|
|
||||||
![daily](vmbackupmanager_rp_daily_1.png)
|
<img alt="retention policy daily before retention cycle" src="vmbackupmanager_rp_daily_1.png">
|
||||||
|
|
||||||
We enable backup retention policy for backup manager by using following configuration:
|
We enable backup retention policy for backup manager by using following configuration:
|
||||||
|
|
||||||
|
@ -148,7 +148,7 @@ info app/vmbackupmanager/retention.go:106 daily backups to delete [daily/2
|
||||||
|
|
||||||
The result on the GCS bucket. We see only 3 daily backups:
|
The result on the GCS bucket. We see only 3 daily backups:
|
||||||
|
|
||||||
![daily](vmbackupmanager_rp_daily_2.png)
|
<img alt="retention policy daily after retention cycle" src="vmbackupmanager_rp_daily_2.png">
|
||||||
|
|
||||||
### Protection backups against deletion by retention policy
|
### Protection backups against deletion by retention policy
|
||||||
|
|
||||||
|
|
|
@ -2,6 +2,7 @@ package remote_read_integration
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
|
@ -10,6 +11,7 @@ import (
|
||||||
"reflect"
|
"reflect"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -169,14 +171,21 @@ func (rws *RemoteWriteServer) valuesHandler() http.Handler {
|
||||||
Data: metricNames,
|
Data: metricNames,
|
||||||
}
|
}
|
||||||
|
|
||||||
err := json.NewEncoder(w).Encode(resp)
|
buf := bytes.NewBuffer(nil)
|
||||||
|
err := json.NewEncoder(buf).Encode(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("error send series: %s", err)
|
log.Printf("error send series: %s", err)
|
||||||
w.WriteHeader(http.StatusInternalServerError)
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusOK)
|
||||||
|
_, err = w.Write(buf.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("error send series: %s", err)
|
||||||
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
return
|
return
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -191,7 +200,6 @@ func (rws *RemoteWriteServer) exportNativeHandler() http.Handler {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
|
||||||
return
|
return
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -202,6 +210,7 @@ func (rws *RemoteWriteServer) importNativeHandler(t *testing.T) http.Handler {
|
||||||
defer common.StopUnmarshalWorkers()
|
defer common.StopUnmarshalWorkers()
|
||||||
|
|
||||||
var gotTimeSeries []vm.TimeSeries
|
var gotTimeSeries []vm.TimeSeries
|
||||||
|
var mx sync.RWMutex
|
||||||
|
|
||||||
err := stream.Parse(r.Body, false, func(block *stream.Block) error {
|
err := stream.Parse(r.Body, false, func(block *stream.Block) error {
|
||||||
mn := &block.MetricName
|
mn := &block.MetricName
|
||||||
|
@ -218,7 +227,9 @@ func (rws *RemoteWriteServer) importNativeHandler(t *testing.T) http.Handler {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mx.Lock()
|
||||||
gotTimeSeries = append(gotTimeSeries, timeseries)
|
gotTimeSeries = append(gotTimeSeries, timeseries)
|
||||||
|
mx.Unlock()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
@ -244,7 +255,8 @@ func (rws *RemoteWriteServer) importNativeHandler(t *testing.T) http.Handler {
|
||||||
|
|
||||||
if !reflect.DeepEqual(gotTimeSeries, rws.expectedSeries) {
|
if !reflect.DeepEqual(gotTimeSeries, rws.expectedSeries) {
|
||||||
w.WriteHeader(http.StatusInternalServerError)
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
t.Fatalf("datasets not equal, expected: %#v;\n got: %#v", rws.expectedSeries, gotTimeSeries)
|
t.Errorf("datasets not equal, expected: %#v;\n got: %#v", rws.expectedSeries, gotTimeSeries)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
|
|
@ -3977,6 +3977,7 @@ func compareSeries(ss, ssExpected []*series, expr graphiteql.Expr) error {
|
||||||
if !reflect.DeepEqual(s.Timestamps, sExpected.Timestamps) {
|
if !reflect.DeepEqual(s.Timestamps, sExpected.Timestamps) {
|
||||||
return fmt.Errorf("unexpected timestamps for series %q\ngot\n%d\nwant\n%d", s.Name, s.Timestamps, sExpected.Timestamps)
|
return fmt.Errorf("unexpected timestamps for series %q\ngot\n%d\nwant\n%d", s.Name, s.Timestamps, sExpected.Timestamps)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !equalFloats(s.Values, sExpected.Values) {
|
if !equalFloats(s.Values, sExpected.Values) {
|
||||||
return fmt.Errorf("unexpected values for series %q\ngot\n%g\nwant\n%g", s.Name, s.Values, sExpected.Values)
|
return fmt.Errorf("unexpected values for series %q\ngot\n%g\nwant\n%g", s.Name, s.Values, sExpected.Values)
|
||||||
}
|
}
|
||||||
|
@ -4009,7 +4010,7 @@ func equalFloats(a, b []float64) bool {
|
||||||
} else if math.IsNaN(v2) {
|
} else if math.IsNaN(v2) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
eps := math.Abs(v1) / 1e15
|
eps := math.Abs(v1) / 1e9
|
||||||
if math.Abs(v1-v2) > eps {
|
if math.Abs(v1-v2) > eps {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
|
@ -545,7 +545,7 @@ func isGraphiteTagsPath(path string) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
func sendPrometheusError(w http.ResponseWriter, r *http.Request, err error) {
|
func sendPrometheusError(w http.ResponseWriter, r *http.Request, err error) {
|
||||||
logger.Warnf("error in %q: %s", httpserver.GetRequestURI(r), err)
|
logger.WarnfSkipframes(1, "error in %q: %s", httpserver.GetRequestURI(r), err)
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
statusCode := http.StatusUnprocessableEntity
|
statusCode := http.StatusUnprocessableEntity
|
||||||
|
|
|
@ -16,6 +16,7 @@ import { useTimeDispatch, useTimeState } from "../../../state/time/TimeStateCont
|
||||||
import ThemeControl from "../ThemeControl/ThemeControl";
|
import ThemeControl from "../ThemeControl/ThemeControl";
|
||||||
import useDeviceDetect from "../../../hooks/useDeviceDetect";
|
import useDeviceDetect from "../../../hooks/useDeviceDetect";
|
||||||
import useBoolean from "../../../hooks/useBoolean";
|
import useBoolean from "../../../hooks/useBoolean";
|
||||||
|
import { getTenantIdFromUrl } from "../../../utils/tenants";
|
||||||
|
|
||||||
const title = "Settings";
|
const title = "Settings";
|
||||||
|
|
||||||
|
@ -57,6 +58,10 @@ const GlobalSettings: FC = () => {
|
||||||
};
|
};
|
||||||
|
|
||||||
const handlerApply = () => {
|
const handlerApply = () => {
|
||||||
|
const tenantIdFromUrl = getTenantIdFromUrl(serverUrl);
|
||||||
|
if (tenantIdFromUrl !== "") {
|
||||||
|
dispatch({ type: "SET_TENANT_ID", payload: tenantIdFromUrl });
|
||||||
|
}
|
||||||
dispatch({ type: "SET_SERVER", payload: serverUrl });
|
dispatch({ type: "SET_SERVER", payload: serverUrl });
|
||||||
timeDispatch({ type: "SET_TIMEZONE", payload: timezone });
|
timeDispatch({ type: "SET_TIMEZONE", payload: timezone });
|
||||||
customPanelDispatch({ type: "SET_SERIES_LIMITS", payload: limits });
|
customPanelDispatch({ type: "SET_SERIES_LIMITS", payload: limits });
|
||||||
|
|
|
@ -2,7 +2,7 @@ version: '3.5'
|
||||||
services:
|
services:
|
||||||
vmagent:
|
vmagent:
|
||||||
container_name: vmagent
|
container_name: vmagent
|
||||||
image: victoriametrics/vmagent:v1.90.0
|
image: victoriametrics/vmagent:v1.91.1
|
||||||
depends_on:
|
depends_on:
|
||||||
- "vminsert"
|
- "vminsert"
|
||||||
ports:
|
ports:
|
||||||
|
@ -32,7 +32,7 @@ services:
|
||||||
|
|
||||||
vmstorage-1:
|
vmstorage-1:
|
||||||
container_name: vmstorage-1
|
container_name: vmstorage-1
|
||||||
image: victoriametrics/vmstorage:v1.90.0-cluster
|
image: victoriametrics/vmstorage:v1.91.1-cluster
|
||||||
ports:
|
ports:
|
||||||
- 8482
|
- 8482
|
||||||
- 8400
|
- 8400
|
||||||
|
@ -44,7 +44,7 @@ services:
|
||||||
restart: always
|
restart: always
|
||||||
vmstorage-2:
|
vmstorage-2:
|
||||||
container_name: vmstorage-2
|
container_name: vmstorage-2
|
||||||
image: victoriametrics/vmstorage:v1.90.0-cluster
|
image: victoriametrics/vmstorage:v1.91.1-cluster
|
||||||
ports:
|
ports:
|
||||||
- 8482
|
- 8482
|
||||||
- 8400
|
- 8400
|
||||||
|
@ -56,7 +56,7 @@ services:
|
||||||
restart: always
|
restart: always
|
||||||
vminsert:
|
vminsert:
|
||||||
container_name: vminsert
|
container_name: vminsert
|
||||||
image: victoriametrics/vminsert:v1.90.0-cluster
|
image: victoriametrics/vminsert:v1.91.1-cluster
|
||||||
depends_on:
|
depends_on:
|
||||||
- "vmstorage-1"
|
- "vmstorage-1"
|
||||||
- "vmstorage-2"
|
- "vmstorage-2"
|
||||||
|
@ -68,7 +68,7 @@ services:
|
||||||
restart: always
|
restart: always
|
||||||
vmselect:
|
vmselect:
|
||||||
container_name: vmselect
|
container_name: vmselect
|
||||||
image: victoriametrics/vmselect:v1.90.0-cluster
|
image: victoriametrics/vmselect:v1.91.1-cluster
|
||||||
depends_on:
|
depends_on:
|
||||||
- "vmstorage-1"
|
- "vmstorage-1"
|
||||||
- "vmstorage-2"
|
- "vmstorage-2"
|
||||||
|
@ -82,7 +82,7 @@ services:
|
||||||
|
|
||||||
vmalert:
|
vmalert:
|
||||||
container_name: vmalert
|
container_name: vmalert
|
||||||
image: victoriametrics/vmalert:v1.90.0
|
image: victoriametrics/vmalert:v1.91.1
|
||||||
depends_on:
|
depends_on:
|
||||||
- "vmselect"
|
- "vmselect"
|
||||||
ports:
|
ports:
|
||||||
|
|
|
@ -2,7 +2,7 @@ version: "3.5"
|
||||||
services:
|
services:
|
||||||
vmagent:
|
vmagent:
|
||||||
container_name: vmagent
|
container_name: vmagent
|
||||||
image: victoriametrics/vmagent:v1.90.0
|
image: victoriametrics/vmagent:v1.91.1
|
||||||
depends_on:
|
depends_on:
|
||||||
- "victoriametrics"
|
- "victoriametrics"
|
||||||
ports:
|
ports:
|
||||||
|
@ -18,7 +18,7 @@ services:
|
||||||
restart: always
|
restart: always
|
||||||
victoriametrics:
|
victoriametrics:
|
||||||
container_name: victoriametrics
|
container_name: victoriametrics
|
||||||
image: victoriametrics/victoria-metrics:v1.90.0
|
image: victoriametrics/victoria-metrics:v1.91.1
|
||||||
ports:
|
ports:
|
||||||
- 8428:8428
|
- 8428:8428
|
||||||
- 8089:8089
|
- 8089:8089
|
||||||
|
@ -56,7 +56,7 @@ services:
|
||||||
restart: always
|
restart: always
|
||||||
vmalert:
|
vmalert:
|
||||||
container_name: vmalert
|
container_name: vmalert
|
||||||
image: victoriametrics/vmalert:v1.90.0
|
image: victoriametrics/vmalert:v1.91.1
|
||||||
depends_on:
|
depends_on:
|
||||||
- "victoriametrics"
|
- "victoriametrics"
|
||||||
- "alertmanager"
|
- "alertmanager"
|
||||||
|
|
|
@ -8,7 +8,7 @@
|
||||||
4. Set variables `DIGITALOCEAN_API_TOKEN` with `VM_VERSION` for `packer` environment and run make from example below:
|
4. Set variables `DIGITALOCEAN_API_TOKEN` with `VM_VERSION` for `packer` environment and run make from example below:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="dop_v23_2e46f4759ceeeba0d0248" VM_VERSION="1.90.0"
|
make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="dop_v23_2e46f4759ceeeba0d0248" VM_VERSION="1.91.1"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -19,8 +19,8 @@ On the server:
|
||||||
* VictoriaMetrics is running on ports: 8428, 8089, 4242, 2003 and they are bound to the local interface.
|
* VictoriaMetrics is running on ports: 8428, 8089, 4242, 2003 and they are bound to the local interface.
|
||||||
|
|
||||||
********************************************************************************
|
********************************************************************************
|
||||||
# This image includes 1.90.0 version of VictoriaMetrics.
|
# This image includes 1.91.1 version of VictoriaMetrics.
|
||||||
# See Release notes https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.90.0
|
# See Release notes https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.91.1
|
||||||
|
|
||||||
# Welcome to VictoriaMetrics droplet!
|
# Welcome to VictoriaMetrics droplet!
|
||||||
|
|
||||||
|
|
|
@ -24,6 +24,22 @@ The following tip changes can be tested by building VictoriaMetrics components f
|
||||||
|
|
||||||
## tip
|
## tip
|
||||||
|
|
||||||
|
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): fix nil map assignment panic in runtime introduced in this [change](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4341).
|
||||||
|
|
||||||
|
|
||||||
|
## [v1.91.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.91.1)
|
||||||
|
|
||||||
|
Released at 2023-06-01
|
||||||
|
|
||||||
|
* FEATURE:[vmagent](https://docs.victoriametrics.com/vmagent.html): Adds `follow_redirects` at service discovery level of scrape configuration. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4282). Thanks to @Haleygo for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4286).
|
||||||
|
* FEATURE: vmselect: Decreases startup time for vmselect with a big number of vmstorage nodes. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4364). Thanks to @Haleygo for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4366).
|
||||||
|
|
||||||
|
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): Properly form path to static assets in WEB UI if `http.pathPrefix` set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4349).
|
||||||
|
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): Properly set datasource query params. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4340). Thanks to @gsakun for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4341).
|
||||||
|
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly return empty slices instead of nil for `/api/v1/rules` for groups with present name but absent `rules`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4221).
|
||||||
|
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): Properly handle LOCAL command for proxy protocol. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3335#issuecomment-1569864108).
|
||||||
|
* BUGFIX: [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html): Fixes crash on startup. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4378).
|
||||||
|
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix bug with custom URL in global settings not respecting tenantID change. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4322).
|
||||||
|
|
||||||
## [v1.91.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.91.0)
|
## [v1.91.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.91.0)
|
||||||
|
|
||||||
|
@ -39,7 +55,7 @@ Released at 2023-05-18
|
||||||
* FEATURE: expose `process_*` metrics at `/metrics` page of all the VictoriaMetrics components under Windows OS. See [this pull request](https://github.com/VictoriaMetrics/metrics/pull/47).
|
* FEATURE: expose `process_*` metrics at `/metrics` page of all the VictoriaMetrics components under Windows OS. See [this pull request](https://github.com/VictoriaMetrics/metrics/pull/47).
|
||||||
* FEATURE: reduce the amounts of unimportant `INFO` logging during VictoriaMetrics startup / shutdown. This should improve visibility for potentially important logs.
|
* FEATURE: reduce the amounts of unimportant `INFO` logging during VictoriaMetrics startup / shutdown. This should improve visibility for potentially important logs.
|
||||||
* FEATURE: upgrade base docker image (alpine) from 3.17.3 to 3.18.0. See [alpine 3.18.0 release notes](https://www.alpinelinux.org/posts/Alpine-3.18.0-released.html).
|
* FEATURE: upgrade base docker image (alpine) from 3.17.3 to 3.18.0. See [alpine 3.18.0 release notes](https://www.alpinelinux.org/posts/Alpine-3.18.0-released.html).
|
||||||
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): do not pullute logs with `cannot read hello: cannot read message with size 11: EOF` messages at `vmstorage` during TCP health checks performed by [Consul](https://developer.hashicorp.com/consul/docs/services/usage/checks) or [other services](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-health-check/). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1762).
|
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): do not pollute logs with `cannot read hello: cannot read message with size 11: EOF` messages at `vmstorage` during TCP health checks performed by [Consul](https://developer.hashicorp.com/consul/docs/services/usage/checks) or [other services](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-health-check/). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1762).
|
||||||
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): support the ability to filter [consul_sd_configs](https://docs.victoriametrics.com/sd_configs.html#consul_sd_configs) targets in more optimal way via new `filter` option. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4183).
|
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): support the ability to filter [consul_sd_configs](https://docs.victoriametrics.com/sd_configs.html#consul_sd_configs) targets in more optimal way via new `filter` option. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4183).
|
||||||
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for [consulagent_sd_configs](https://docs.victoriametrics.com/sd_configs.html#consulagent_sd_configs). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3953).
|
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for [consulagent_sd_configs](https://docs.victoriametrics.com/sd_configs.html#consulagent_sd_configs). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3953).
|
||||||
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): emit a warning if too small value is passed to `-remoteWrite.maxDiskUsagePerURL` command-line flag. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4195).
|
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): emit a warning if too small value is passed to `-remoteWrite.maxDiskUsagePerURL` command-line flag. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4195).
|
||||||
|
@ -231,6 +247,33 @@ Released at 2023-02-24
|
||||||
* BUGFIX: properly parse timestamps in milliseconds when [ingesting data via OpenTSDB telnet put protocol](https://docs.victoriametrics.com/#sending-data-via-telnet-put-protocol). Previously timestamps in milliseconds were mistakenly multiplied by 1000. Thanks to @Droxenator for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3810).
|
* BUGFIX: properly parse timestamps in milliseconds when [ingesting data via OpenTSDB telnet put protocol](https://docs.victoriametrics.com/#sending-data-via-telnet-put-protocol). Previously timestamps in milliseconds were mistakenly multiplied by 1000. Thanks to @Droxenator for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3810).
|
||||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): do not add extrapolated points outside the real points when using [interpolate()](https://docs.victoriametrics.com/MetricsQL.html#interpolate) function. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3816).
|
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): do not add extrapolated points outside the real points when using [interpolate()](https://docs.victoriametrics.com/MetricsQL.html#interpolate) function. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3816).
|
||||||
|
|
||||||
|
## [v1.87.6](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.6)
|
||||||
|
|
||||||
|
Released at 2023-05-18
|
||||||
|
|
||||||
|
**v1.87.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
|
||||||
|
The v1.87.x line will be supported for at least 12 months since [v1.87.0](https://docs.victoriametrics.com/CHANGELOG.html#v1870) release**
|
||||||
|
|
||||||
|
* SECURITY: upgrade Go builder from Go1.20.3 to Go1.20.4. See [the list of issues addressed in Go1.20.4](https://github.com/golang/go/issues?q=milestone%3AGo1.20.4+label%3ACherryPickApproved).
|
||||||
|
* SECURITY: upgrade base docker image (alpine) from 3.17.3 to 3.18.0. See [alpine 3.18.0 release notes](https://www.alpinelinux.org/posts/Alpine-3.18.0-released.html).
|
||||||
|
* SECURITY: serve `/robots.txt` content to disallow indexing of the exposed instances by search engines. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4128) for details.
|
||||||
|
|
||||||
|
* BUGFIX: reduce the probability of sudden increase in the number of small parts on systems with small number of CPU cores.
|
||||||
|
* BUGFIX: reduce the possibility of increased CPU usage when data with timestamps older than one hour is ingested into VictoriaMetrics. This reduces spikes for the graph `sum(rate(vm_slow_per_day_index_inserts_total))`. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4258).
|
||||||
|
* BUGFIX: do not ignore trailing empty field in CSV lines when [importing data in CSV format](https://docs.victoriametrics.com/#how-to-import-csv-data). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4048).
|
||||||
|
* BUGFIX: disallow `"` chars when parsing Prometheus label names, since they aren't allowed by [Prometheus text exposition format](https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-format-example). Previously this could result in silent incorrect parsing of incorrect Prometheus labels such as `foo{"bar"="baz"}` or `{foo:"bar",baz="aaa"}`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4284).
|
||||||
|
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): fix a panic when the duration in the query contains uppercase `M` suffix. Such a suffix isn't allowed to use in durations, since it clashes with `a million` suffix, e.g. it isn't clear whether `rate(metric[5M])` means rate over 5 minutes, 5 months or 5 million seconds. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3589) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4120) issues.
|
||||||
|
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): prevent from possible panic when the number of vmstorage nodes increases when [automatic vmstorage discovery](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#automatic-vmstorage-discovery) is enabled.
|
||||||
|
* BUGFIX: properly limit the number of [OpenTSDB HTTP](https://docs.victoriametrics.com/#sending-opentsdb-data-via-http-apiput-requests) concurrent requests specified via `-maxConcurrentInserts` command-line flag. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4204). Thanks to @zouxiang1993 for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4208).
|
||||||
|
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly return empty slices instead of nil for `/api/v1/rules` and `/api/v1/alerts` API handlers. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4221).
|
||||||
|
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `__meta_kubernetes_endpoints_name` label for all ports discovered from endpoint. Previously, ports not matched by `Service` did not have this label. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4154) for details. Thanks to @thunderbird86 for discovering and [fixing](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4253) the issue.
|
||||||
|
* BUGFIX: fix possible infinite loop during `indexdb` rotation when `-retentionTimezoneOffset` command-line flag is set and the local timezone is not UTC. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4207). Thanks to @faceair for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4206).
|
||||||
|
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): do not return invalid auth credentials in http response by default, since it may be logged by client. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4188).
|
||||||
|
* BUGFIX: [alerts-health](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts-health.yml): update threshold for `TooHighMemoryUsage` alert from 90% to 80%, since 90% is too high for production environments.
|
||||||
|
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly handle the `vm_promscrape_config_last_reload_successful` metric after config reload. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4260).
|
||||||
|
* BUGFIX: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html): fix bug with duplicated labels during stream aggregation via single-node VictoriaMetrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4277).
|
||||||
|
* BUGFIX: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html): suppress `series after dedup` error message in logs when `-remoteWrite.streamAggr.dedupInterval` command-line flag is set at [vmagent](https://docs.victoriametrics.com/vmagent.html) or when `-streamAggr.dedupInterval` command-line flag is set at [single-node VictoriaMetrics](https://docs.victoriametrics.com/).
|
||||||
|
|
||||||
## [v1.87.5](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.5)
|
## [v1.87.5](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.5)
|
||||||
|
|
||||||
Released at 2023-04-06
|
Released at 2023-04-06
|
||||||
|
@ -802,6 +845,17 @@ Released at 2022-08-08
|
||||||
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly show date picker at `Table` tab. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2874).
|
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly show date picker at `Table` tab. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2874).
|
||||||
* BUGFIX: properly generate http redirects if `-http.pathPrefix` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918).
|
* BUGFIX: properly generate http redirects if `-http.pathPrefix` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918).
|
||||||
|
|
||||||
|
## [v1.79.13](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.13)
|
||||||
|
|
||||||
|
Released at 2023-05-18
|
||||||
|
|
||||||
|
**v1.79.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
|
||||||
|
The v1.79.x line will be supported for at least 12 months since [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release**
|
||||||
|
|
||||||
|
* SECURITY: upgrade Go builder from Go1.20.3 to Go1.20.4. See [the list of issues addressed in Go1.20.4](https://github.com/golang/go/issues?q=milestone%3AGo1.20.4+label%3ACherryPickApproved).
|
||||||
|
* SECURITY: upgrade base docker image (alpine) from 3.17.3 to 3.18.0. See [alpine 3.18.0 release notes](https://www.alpinelinux.org/posts/Alpine-3.18.0-released.html).
|
||||||
|
* SECURITY: serve `/robots.txt` content to disallow indexing of the exposed instances by search engines. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4128) for details.
|
||||||
|
|
||||||
## [v1.79.12](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.12)
|
## [v1.79.12](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.12)
|
||||||
|
|
||||||
Released at 2023-04-06
|
Released at 2023-04-06
|
||||||
|
|
|
@ -74,7 +74,7 @@ file.
|
||||||
<div class="with-copy" markdown="1">
|
<div class="with-copy" markdown="1">
|
||||||
|
|
||||||
```console
|
```console
|
||||||
git clone https://github.com/VictoriaMetrics/VictoriaMetrics &&
|
git clone https://github.com/VictoriaMetrics/VictoriaMetrics && cd VictoriaMetrics
|
||||||
make docker-cluster-up
|
make docker-cluster-up
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,6 @@
|
||||||
|
---
|
||||||
|
title: VictoriaMetrics
|
||||||
|
---
|
||||||
# VictoriaMetrics
|
# VictoriaMetrics
|
||||||
|
|
||||||
[![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
|
[![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
|
||||||
|
@ -529,8 +531,10 @@ and stream plain InfluxDB line protocol data to the configured TCP and/or UDP ad
|
||||||
|
|
||||||
VictoriaMetrics performs the following transformations to the ingested InfluxDB data:
|
VictoriaMetrics performs the following transformations to the ingested InfluxDB data:
|
||||||
|
|
||||||
* [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value
|
* [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db`
|
||||||
unless `db` tag exists in the InfluxDB line. The `db` label name can be overridden via `-influxDBLabel` command-line flag.
|
[label](https://docs.victoriametrics.com/keyConcepts.html#labels) value unless `db` tag exists in the InfluxDB line.
|
||||||
|
The `db` label name can be overridden via `-influxDBLabel` command-line flag. If more strict data isolation is required,
|
||||||
|
read more about multi-tenancy [here](https://docs.victoriametrics.com/keyConcepts.html#multi-tenancy).
|
||||||
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names.
|
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names.
|
||||||
* Field values are mapped to time series values.
|
* Field values are mapped to time series values.
|
||||||
* Tags are mapped to Prometheus labels as-is.
|
* Tags are mapped to Prometheus labels as-is.
|
||||||
|
@ -1465,22 +1469,37 @@ with the enabled de-duplication. See [this section](#deduplication) for details.
|
||||||
|
|
||||||
## Deduplication
|
## Deduplication
|
||||||
|
|
||||||
VictoriaMetrics leaves a single raw sample with the biggest timestamp per each `-dedup.minScrapeInterval` discrete interval
|
VictoriaMetrics leaves a single [raw sample](https://docs.victoriametrics.com/keyConcepts.html#raw-samples)
|
||||||
if `-dedup.minScrapeInterval` is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would leave a single
|
with the biggest [timestamp](https://en.wikipedia.org/wiki/Unix_time) for each [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
|
||||||
raw sample with the biggest timestamp per each discrete 60s interval.
|
per each `-dedup.minScrapeInterval` discrete interval if `-dedup.minScrapeInterval` is set to positive duration.
|
||||||
|
For example, `-dedup.minScrapeInterval=60s` would leave a single raw sample with the biggest timestamp per each discrete
|
||||||
|
`60s` interval.
|
||||||
This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).
|
This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).
|
||||||
|
|
||||||
If multiple raw samples have the same biggest timestamp on the given `-dedup.minScrapeInterval` discrete interval, then the sample with the biggest value is left.
|
If multiple raw samples have **the same timestamp** on the given `-dedup.minScrapeInterval` discrete interval,
|
||||||
|
then the sample with **the biggest value** is kept.
|
||||||
|
|
||||||
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. So it is safe to use deduplication and downsampling simultaneously.
|
Please note, [labels](https://docs.victoriametrics.com/keyConcepts.html#labels) of raw samples should be identical
|
||||||
|
in order to be deduplicated. For example, this is why [HA pair of vmagents](https://docs.victoriametrics.com/vmagent.html#high-availability)
|
||||||
|
needs to be identically configured.
|
||||||
|
|
||||||
The recommended value for `-dedup.minScrapeInterval` must equal to `scrape_interval` config from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
|
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled.
|
||||||
|
So it is safe to use deduplication and downsampling simultaneously.
|
||||||
|
|
||||||
The de-duplication reduces disk space usage if multiple identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus instances in HA pair
|
The recommended value for `-dedup.minScrapeInterval` must be equal to `scrape_interval` config from Prometheus configs.
|
||||||
write data to the same VictoriaMetrics instance. These vmagent or Prometheus instances must have identical
|
It is recommended to have a single `scrape_interval` across all the scrape targets.
|
||||||
`external_labels` section in their configs, so they write data to the same time series. See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
|
See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
|
||||||
|
|
||||||
It is recommended passing different `-promscrape.cluster.name` values to HA pairs of `vmagent` instances, so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples from other `vmagent` instances. See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.
|
The de-duplication reduces disk space usage if multiple **identically configured** [vmagent](https://docs.victoriametrics.com/vmagent.html)
|
||||||
|
or Prometheus instances in HA pair write data to the same VictoriaMetrics instance.
|
||||||
|
These vmagent or Prometheus instances must have **identical** `external_labels` section in their configs,
|
||||||
|
so they write data to the same time series.
|
||||||
|
See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
|
||||||
|
|
||||||
|
It is recommended passing different `-promscrape.cluster.name` values to each distinct HA pair of `vmagent` instances,
|
||||||
|
so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples
|
||||||
|
from other `vmagent` instances.
|
||||||
|
See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.
|
||||||
|
|
||||||
## Storage
|
## Storage
|
||||||
|
|
||||||
|
@ -1624,6 +1643,10 @@ Retention filters can be evaluated for free by downloading and using enterprise
|
||||||
|
|
||||||
Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series. It is possible to use recording rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) in order to reduce the number of time series. See [these docs](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).
|
Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series. It is possible to use recording rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) in order to reduce the number of time series. See [these docs](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).
|
||||||
|
|
||||||
|
Downsampling happens during [background merges](https://docs.victoriametrics.com/#storage)
|
||||||
|
and can't be performed if there is not enough free disk space or if vmstorage
|
||||||
|
is in [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
|
||||||
|
|
||||||
The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||||
|
|
||||||
## Multi-tenancy
|
## Multi-tenancy
|
||||||
|
|
|
@ -64,7 +64,7 @@ git remote add enterprise <url>
|
||||||
and this release contains all the needed binaries and checksums.
|
and this release contains all the needed binaries and checksums.
|
||||||
7. Update the release description with the [CHANGELOG](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md) for this release.
|
7. Update the release description with the [CHANGELOG](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md) for this release.
|
||||||
8. Remove the `draft` checkbox for the `TAG` release and manually publish it.
|
8. Remove the `draft` checkbox for the `TAG` release and manually publish it.
|
||||||
9. Bump version of the VictoriaMetrics cluster in the [sandbox environment](https://github.com/VictoriaMetrics/ops/blob/main/sandbox/manifests/benchmark-vm/vmcluster.yaml)
|
9. Bump version of the VictoriaMetrics cluster in the [sandbox environment](https://github.com/VictoriaMetrics/ops/blob/main/gcp-test/sandbox/manifests/benchmark-vm/vmcluster.yaml)
|
||||||
by [opening and merging PR](https://github.com/VictoriaMetrics/ops/pull/58).
|
by [opening and merging PR](https://github.com/VictoriaMetrics/ops/pull/58).
|
||||||
10. Bump VictoriaMetrics version at `deployment/docker/docker-compose.yml` and at `deployment/docker/docker-compose-cluster.yml`.
|
10. Bump VictoriaMetrics version at `deployment/docker/docker-compose.yml` and at `deployment/docker/docker-compose-cluster.yml`.
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,14 @@
|
||||||
---
|
---
|
||||||
sort: 1
|
sort: 1
|
||||||
|
weight: 1
|
||||||
|
menu:
|
||||||
|
docs:
|
||||||
|
parent: 'victoriametrics'
|
||||||
|
weight: 1
|
||||||
|
title: VictoriaMetrics
|
||||||
|
aliases:
|
||||||
|
- /Single-server-VictoriaMetrics.html
|
||||||
---
|
---
|
||||||
|
|
||||||
# VictoriaMetrics
|
# VictoriaMetrics
|
||||||
|
|
||||||
[![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
|
[![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
|
||||||
|
@ -532,8 +539,10 @@ and stream plain InfluxDB line protocol data to the configured TCP and/or UDP ad
|
||||||
|
|
||||||
VictoriaMetrics performs the following transformations to the ingested InfluxDB data:
|
VictoriaMetrics performs the following transformations to the ingested InfluxDB data:
|
||||||
|
|
||||||
* [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value
|
* [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db`
|
||||||
unless `db` tag exists in the InfluxDB line. The `db` label name can be overridden via `-influxDBLabel` command-line flag.
|
[label](https://docs.victoriametrics.com/keyConcepts.html#labels) value unless `db` tag exists in the InfluxDB line.
|
||||||
|
The `db` label name can be overridden via `-influxDBLabel` command-line flag. If more strict data isolation is required,
|
||||||
|
read more about multi-tenancy [here](https://docs.victoriametrics.com/keyConcepts.html#multi-tenancy).
|
||||||
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names.
|
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names.
|
||||||
* Field values are mapped to time series values.
|
* Field values are mapped to time series values.
|
||||||
* Tags are mapped to Prometheus labels as-is.
|
* Tags are mapped to Prometheus labels as-is.
|
||||||
|
@ -1468,22 +1477,37 @@ with the enabled de-duplication. See [this section](#deduplication) for details.
|
||||||
|
|
||||||
## Deduplication
|
## Deduplication
|
||||||
|
|
||||||
VictoriaMetrics leaves a single raw sample with the biggest timestamp per each `-dedup.minScrapeInterval` discrete interval
|
VictoriaMetrics leaves a single [raw sample](https://docs.victoriametrics.com/keyConcepts.html#raw-samples)
|
||||||
if `-dedup.minScrapeInterval` is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would leave a single
|
with the biggest [timestamp](https://en.wikipedia.org/wiki/Unix_time) for each [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
|
||||||
raw sample with the biggest timestamp per each discrete 60s interval.
|
per each `-dedup.minScrapeInterval` discrete interval if `-dedup.minScrapeInterval` is set to positive duration.
|
||||||
|
For example, `-dedup.minScrapeInterval=60s` would leave a single raw sample with the biggest timestamp per each discrete
|
||||||
|
`60s` interval.
|
||||||
This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).
|
This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).
|
||||||
|
|
||||||
If multiple raw samples have the same biggest timestamp on the given `-dedup.minScrapeInterval` discrete interval, then the sample with the biggest value is left.
|
If multiple raw samples have **the same timestamp** on the given `-dedup.minScrapeInterval` discrete interval,
|
||||||
|
then the sample with **the biggest value** is kept.
|
||||||
|
|
||||||
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. So it is safe to use deduplication and downsampling simultaneously.
|
Please note, [labels](https://docs.victoriametrics.com/keyConcepts.html#labels) of raw samples should be identical
|
||||||
|
in order to be deduplicated. For example, this is why [HA pair of vmagents](https://docs.victoriametrics.com/vmagent.html#high-availability)
|
||||||
|
needs to be identically configured.
|
||||||
|
|
||||||
The recommended value for `-dedup.minScrapeInterval` must equal to `scrape_interval` config from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
|
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled.
|
||||||
|
So it is safe to use deduplication and downsampling simultaneously.
|
||||||
|
|
||||||
The de-duplication reduces disk space usage if multiple identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus instances in HA pair
|
The recommended value for `-dedup.minScrapeInterval` must be equal to `scrape_interval` config from Prometheus configs.
|
||||||
write data to the same VictoriaMetrics instance. These vmagent or Prometheus instances must have identical
|
It is recommended to have a single `scrape_interval` across all the scrape targets.
|
||||||
`external_labels` section in their configs, so they write data to the same time series. See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
|
See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
|
||||||
|
|
||||||
It is recommended passing different `-promscrape.cluster.name` values to HA pairs of `vmagent` instances, so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples from other `vmagent` instances. See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.
|
The de-duplication reduces disk space usage if multiple **identically configured** [vmagent](https://docs.victoriametrics.com/vmagent.html)
|
||||||
|
or Prometheus instances in HA pair write data to the same VictoriaMetrics instance.
|
||||||
|
These vmagent or Prometheus instances must have **identical** `external_labels` section in their configs,
|
||||||
|
so they write data to the same time series.
|
||||||
|
See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
|
||||||
|
|
||||||
|
It is recommended passing different `-promscrape.cluster.name` values to each distinct HA pair of `vmagent` instances,
|
||||||
|
so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples
|
||||||
|
from other `vmagent` instances.
|
||||||
|
See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.
|
||||||
|
|
||||||
## Storage
|
## Storage
|
||||||
|
|
||||||
|
@ -1627,6 +1651,10 @@ Retention filters can be evaluated for free by downloading and using enterprise
|
||||||
|
|
||||||
Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series. It is possible to use recording rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) in order to reduce the number of time series. See [these docs](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).
|
Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with big number of samples per each series. The downsampling doesn't improve query performance if the database contains big number of time series with small number of samples per each series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series. It is possible to use recording rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) in order to reduce the number of time series. See [these docs](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).
|
||||||
|
|
||||||
|
Downsampling happens during [background merges](https://docs.victoriametrics.com/#storage)
|
||||||
|
and can't be performed if there is not enough free disk space or if vmstorage
|
||||||
|
is in [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
|
||||||
|
|
||||||
The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||||
|
|
||||||
## Multi-tenancy
|
## Multi-tenancy
|
||||||
|
|
|
@ -10,7 +10,7 @@ aliases:
|
||||||
- /enterprise.html
|
- /enterprise.html
|
||||||
---
|
---
|
||||||
|
|
||||||
# VictoriaMetrics enterprise
|
# VictoriaMetrics Enterprise
|
||||||
|
|
||||||
VictoriaMetrics components are provided in two kinds - [community edition](https://victoriametrics.com/products/open-source/)
|
VictoriaMetrics components are provided in two kinds - [community edition](https://victoriametrics.com/products/open-source/)
|
||||||
and [enterprise edition](https://victoriametrics.com/products/enterprise/).
|
and [enterprise edition](https://victoriametrics.com/products/enterprise/).
|
||||||
|
|
|
@ -10,14 +10,14 @@ menu:
|
||||||
|
|
||||||
# Guides
|
# Guides
|
||||||
|
|
||||||
1. [K8s monitoring via VM Single](https://docs.victoriametrics.com/guides/k8s-monitoring-via-vm-single.html)
|
1. [K8s monitoring via VM Single](k8s-monitoring-via-vm-single.html)
|
||||||
2. [K8s monitoring via VM Cluster](https://docs.victoriametrics.com/guides/k8s-monitoring-via-vm-cluster.html)
|
2. [K8s monitoring via VM Cluster](k8s-monitoring-via-vm-cluster.html)
|
||||||
3. [HA monitoring setup in K8s via VM Cluster](https://docs.victoriametrics.com/guides/k8s-ha-monitoring-via-vm-cluster.html)
|
3. [HA monitoring setup in K8s via VM Cluster](k8s-ha-monitoring-via-vm-cluster.html)
|
||||||
4. [Getting started with VM Operator](https://docs.victoriametrics.com/guides/getting-started-with-vm-operator.html)
|
4. [Getting started with VM Operator](getting-started-with-vm-operator.html)
|
||||||
5. [Multi Retention Setup within VictoriaMetrics Cluster](https://docs.victoriametrics.com/guides/guide-vmcluster-multiple-retention-setup.html)
|
5. [Multi Retention Setup within VictoriaMetrics Cluster](guide-vmcluster-multiple-retention-setup.html)
|
||||||
6. [Migrate from InfluxDB to VictoriaMetrics](https://docs.victoriametrics.com/guides/migrate-from-influx.html)
|
6. [Migrate from InfluxDB to VictoriaMetrics](migrate-from-influx.html)
|
||||||
7. [Multi-regional setup with VictoriaMetrics: Dedicated regions for monitoring](https://docs.victoriametrics.com/guides/multi-regional-setup-dedicated-regions.html)
|
7. [Multi-regional setup with VictoriaMetrics: Dedicated regions for monitoring](multi-regional-setup-dedicated-regions.html)
|
||||||
8. [How to delete or replace metrics in VictoriaMetrics](https://docs.victoriametrics.com/guides/guide-delete-or-replace-metrics.html)
|
8. [How to delete or replace metrics in VictoriaMetrics](guide-delete-or-replace-metrics.html)
|
||||||
9. [How to monitor kubernetes cluster using Managed VictoriaMetrics](https://docs.victoriametrics.com/managed-victoriametrics/how-to-monitor-k8s.html)
|
9. [How to monitor kubernetes cluster using Managed VictoriaMetrics](/managed-victoriametrics/how-to-monitor-k8s.html)
|
||||||
10. [How to configure vmgateway for multi-tenant access using Grafana and OpenID Connect](https://docs.victoriametrics.com/guides/grafana-vmgateway-openid-configuration.html)
|
10. [How to configure vmgateway for multi-tenant access using Grafana and OpenID Connect](grafana-vmgateway-openid-configuration.html)
|
||||||
11. [How to setup vmanomaly together with vmalert](https://docs.victoriametrics.com/guide/guide-vmanomaly-vmalert.html)
|
11. [How to setup vmanomaly together with vmalert](guide-vmanomaly-vmalert.html)
|
||||||
|
|
|
@ -239,27 +239,27 @@ services:
|
||||||
- grafana_data:/var/lib/grafana/
|
- grafana_data:/var/lib/grafana/
|
||||||
|
|
||||||
vmsingle:
|
vmsingle:
|
||||||
image: victoriametrics/victoria-metrics:v1.90.0
|
image: victoriametrics/victoria-metrics:v1.91.0
|
||||||
command:
|
command:
|
||||||
- -httpListenAddr=0.0.0.0:8429
|
- -httpListenAddr=0.0.0.0:8429
|
||||||
|
|
||||||
vmstorage:
|
vmstorage:
|
||||||
image: victoriametrics/vmstorage:v1.90.0-cluster
|
image: victoriametrics/vmstorage:v1.91.0-cluster
|
||||||
|
|
||||||
vminsert:
|
vminsert:
|
||||||
image: victoriametrics/vminsert:v1.90.0-cluster
|
image: victoriametrics/vminsert:v1.91.0-cluster
|
||||||
command:
|
command:
|
||||||
- -storageNode=vmstorage:8400
|
- -storageNode=vmstorage:8400
|
||||||
- -httpListenAddr=0.0.0.0:8480
|
- -httpListenAddr=0.0.0.0:8480
|
||||||
|
|
||||||
vmselect:
|
vmselect:
|
||||||
image: victoriametrics/vmselect:v1.90.0-cluster
|
image: victoriametrics/vmselect:v1.91.0-cluster
|
||||||
command:
|
command:
|
||||||
- -storageNode=vmstorage:8401
|
- -storageNode=vmstorage:8401
|
||||||
- -httpListenAddr=0.0.0.0:8481
|
- -httpListenAddr=0.0.0.0:8481
|
||||||
|
|
||||||
vmagent:
|
vmagent:
|
||||||
image: victoriametrics/vmagent:v1.90.0
|
image: victoriametrics/vmagent:v1.91.0
|
||||||
volumes:
|
volumes:
|
||||||
- ./scrape.yaml:/etc/vmagent/config.yaml
|
- ./scrape.yaml:/etc/vmagent/config.yaml
|
||||||
command:
|
command:
|
||||||
|
@ -268,7 +268,7 @@ services:
|
||||||
- -remoteWrite.url=http://vmsingle:8429/api/v1/write
|
- -remoteWrite.url=http://vmsingle:8429/api/v1/write
|
||||||
|
|
||||||
vmgateway-cluster:
|
vmgateway-cluster:
|
||||||
image: victoriametrics/vmgateway:v1.90.0-enterprise
|
image: victoriametrics/vmgateway:v1.91.0-enterprise
|
||||||
ports:
|
ports:
|
||||||
- 8431:8431
|
- 8431:8431
|
||||||
command:
|
command:
|
||||||
|
@ -281,7 +281,7 @@ services:
|
||||||
- -auth.oidcDiscoveryEndpoints=http://keycloak:8080/realms/master/.well-known/openid-configuration
|
- -auth.oidcDiscoveryEndpoints=http://keycloak:8080/realms/master/.well-known/openid-configuration
|
||||||
|
|
||||||
vmgateway-single:
|
vmgateway-single:
|
||||||
image: victoriametrics/vmgateway:v1.90.0-enterprise
|
image: victoriametrics/vmgateway:v1.91.0-enterprise
|
||||||
ports:
|
ports:
|
||||||
- 8432:8431
|
- 8432:8431
|
||||||
command:
|
command:
|
||||||
|
|
|
@ -81,7 +81,7 @@ These modes are mutually exclusive. A high _iowait_ means that you are disk or n
|
||||||
|
|
||||||
The metric `node_cpu_seconds_total` is a [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) type of metric. If we'd like to see how much time CPU spent in each of the nodes, we need to calculate the per-second values change via [rate function](https://docs.victoriametrics.com/MetricsQL.html#rate): `rate(node_cpu_seconds_total)`.
|
The metric `node_cpu_seconds_total` is a [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) type of metric. If we'd like to see how much time CPU spent in each of the nodes, we need to calculate the per-second values change via [rate function](https://docs.victoriametrics.com/MetricsQL.html#rate): `rate(node_cpu_seconds_total)`.
|
||||||
Here is how this query may look in Grafana:
|
Here is how this query may look in Grafana:
|
||||||
![node_cpu_rate_graph](guide-vmanomaly-node-cpu-rate-graph.png "node_cpu_rate_graph")
|
<img alt="node_cpu_rate_graph" src="guide-vmanomaly-node-cpu-rate-graph.png">
|
||||||
|
|
||||||
This query result will generate 8 time series per each cpu, and we will use them as an input for our VM Anomaly Detection. vmanomaly will start learning configured model type separately for each of the time series.
|
This query result will generate 8 time series per each cpu, and we will use them as an input for our VM Anomaly Detection. vmanomaly will start learning configured model type separately for each of the time series.
|
||||||
|
|
||||||
|
@ -401,26 +401,29 @@ Each of these metrics will contain same labels our query `rate(node_cpu_seconds_
|
||||||
### Anomaly scores for each metric with its corresponding labels.
|
### Anomaly scores for each metric with its corresponding labels.
|
||||||
|
|
||||||
Query: `anomaly_score`
|
Query: `anomaly_score`
|
||||||
![Anomaly score graph](guide-vmanomaly-anomaly-score.png "Anomaly score graph1")
|
<img alt="Anomaly score graph" src="guide-vmanomaly-anomaly-score.png">
|
||||||
|
|
||||||
<br>Check out if the anomaly score is high for datapoints you think are anomalies. If not, you can try other parameters in the config file or try other model type.
|
<br>Check out if the anomaly score is high for datapoints you think are anomalies. If not, you can try other parameters in the config file or try other model type.
|
||||||
|
|
||||||
As you may notice a lot of data shows anomaly score greater than 1. It is expected as we just started to scrape and store data and there are not enough datapoints to train on. Just wait for some more time for gathering more data to see how well this particular model can find anomalies. In our configs we put 2 days of data required.
|
As you may notice a lot of data shows anomaly score greater than 1. It is expected as we just started to scrape and store data and there are not enough datapoints to train on. Just wait for some more time for gathering more data to see how well this particular model can find anomalies. In our configs we put 2 days of data required.
|
||||||
### Actual value from input query with predicted `yhat` metric.
|
### Actual value from input query with predicted `yhat` metric.
|
||||||
Query: `yhat`
|
Query: `yhat`
|
||||||
![Yhat](guide-vmanomaly-yhat.png "yhat")
|
<img alt="yhat" src="guide-vmanomaly-yhat.png">
|
||||||
|
|
||||||
<br>Here we are using one particular set of metrics for visualization. Check out the difference between model prediction and actual values. If values are very different from prediction, it can be considered as anomalous.
|
<br>Here we are using one particular set of metrics for visualization. Check out the difference between model prediction and actual values. If values are very different from prediction, it can be considered as anomalous.
|
||||||
|
|
||||||
### Lower and upper boundaries that model predicted.
|
### Lower and upper boundaries that model predicted.
|
||||||
Queries: `yhat_lower` and `yhat_upper`
|
Queries: `yhat_lower` and `yhat_upper`
|
||||||
![Yhat_lower and upper](guide-vmanomaly-yhat-lower-upper.png "_lower and _upper")
|
<img alt="yhat lower and yhat upper" src="guide-vmanomaly-yhat-lower-upper.png">
|
||||||
Boundaries of 'normal' metric values according to model inference.
|
Boundaries of 'normal' metric values according to model inference.
|
||||||
|
|
||||||
### Alerting
|
### Alerting
|
||||||
On the page `http://localhost:8880/vmalert/groups` you can find our configured Alerting rule:
|
On the page `http://localhost:8880/vmalert/groups` you can find our configured Alerting rule:
|
||||||
|
|
||||||
![alerting_rule](guide-vmanomaly-alert-rule.png "alert rule")
|
<img alt="alert rule" src="guide-vmanomaly-alert-rule.png">
|
||||||
|
|
||||||
According to the rule configured for vmalert we will see Alert when anomaly score exceeds 1. You will see an alert on Alert tab. `http://localhost:8880/vmalert/alerts`
|
According to the rule configured for vmalert we will see Alert when anomaly score exceeds 1. You will see an alert on Alert tab. `http://localhost:8880/vmalert/alerts`
|
||||||
![alerts](guide-vmanomaly-alerts-firing.png "alerts firing")
|
<img alt="alerts firing" src="guide-vmanomaly-alerts-firing.png">
|
||||||
|
|
||||||
## 10. Conclusion
|
## 10. Conclusion
|
||||||
Now we know how to set up the VictoriaMetrics Anomaly Detection tool and use it together with vmalert. We also discovered core vmanomaly generated metrics and behaviour.
|
Now we know how to set up the VictoriaMetrics Anomaly Detection tool and use it together with vmalert. We also discovered core vmanomaly generated metrics and behaviour.
|
||||||
|
|
|
@ -45,10 +45,12 @@ with similarities and differences:
|
||||||
or [fields](https://docs.influxdata.com/influxdb/v2.2/reference/key-concepts/data-elements/#field-key) in
|
or [fields](https://docs.influxdata.com/influxdb/v2.2/reference/key-concepts/data-elements/#field-key) in
|
||||||
VictoriaMetrics, metric name contains it all. If measurement contains more than 1 field, then for VictoriaMetrics
|
VictoriaMetrics, metric name contains it all. If measurement contains more than 1 field, then for VictoriaMetrics
|
||||||
it will be multiple metrics;
|
it will be multiple metrics;
|
||||||
* there are no [buckets](https://docs.influxdata.com/influxdb/v2.2/reference/key-concepts/data-elements/#bucket)
|
* there are no [databases](https://docs.influxdata.com/influxdb/v1.8/concepts/glossary/#database),
|
||||||
or [organizations](https://docs.influxdata.com/influxdb/v2.2/reference/key-concepts/data-elements/#organization), all
|
[buckets](https://docs.influxdata.com/influxdb/v2.2/reference/key-concepts/data-elements/#bucket)
|
||||||
|
or [organizations](https://docs.influxdata.com/influxdb/v2.2/reference/key-concepts/data-elements/#organization). All
|
||||||
data in VictoriaMetrics is stored in a global namespace or within
|
data in VictoriaMetrics is stored in a global namespace or within
|
||||||
a [tenant](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy).
|
a [tenant](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy).
|
||||||
|
See more about multi-tenancy [here](https://docs.victoriametrics.com/keyConcepts.html#multi-tenancy).
|
||||||
|
|
||||||
Let's consider the
|
Let's consider the
|
||||||
following [sample data](https://docs.influxdata.com/influxdb/v2.2/reference/key-concepts/data-elements/#sample-data)
|
following [sample data](https://docs.influxdata.com/influxdb/v2.2/reference/key-concepts/data-elements/#sample-data)
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
weight: 11
|
weight: 11
|
||||||
title: Multi-regional setup with VictoriaMetrics: Dedicated regions for monitoring
|
title: "Multi-regional setup with VictoriaMetrics: Dedicated regions for monitoring"
|
||||||
menu:
|
menu:
|
||||||
docs:
|
docs:
|
||||||
parent: "guides"
|
parent: "guides"
|
||||||
|
@ -94,3 +94,4 @@ Additional context
|
||||||
### What more can we do?
|
### What more can we do?
|
||||||
|
|
||||||
Setup vmagents in Ground Control regions. That allows it to accept data close to storage and add more reliability if storage is temporarily offline.
|
Setup vmagents in Ground Control regions. That allows it to accept data close to storage and add more reliability if storage is temporarily offline.
|
||||||
|
|
|
@ -34,6 +34,8 @@ You can be more specific here by saying `requests_success_total` (for only succe
|
||||||
or `request_errors_total` (for requests which failed). Choosing a metric name is very important and supposed to clarify
|
or `request_errors_total` (for requests which failed). Choosing a metric name is very important and supposed to clarify
|
||||||
what is actually measured to every person who reads it, just like **variable names** in programming.
|
what is actually measured to every person who reads it, just like **variable names** in programming.
|
||||||
|
|
||||||
|
#### Labels
|
||||||
|
|
||||||
Every metric can contain additional meta-information in the form of label-value pairs:
|
Every metric can contain additional meta-information in the form of label-value pairs:
|
||||||
|
|
||||||
```
|
```
|
||||||
|
@ -53,6 +55,12 @@ requests_total{path="/", code="200"}
|
||||||
{__name__="requests_total", path="/", code="200"}
|
{__name__="requests_total", path="/", code="200"}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Labels can be automatically attached to the [time series](#time-series)
|
||||||
|
written via [vmagent](https://docs.victoriametrics.com/vmagent.html#adding-labels-to-metrics)
|
||||||
|
or [Prometheus](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-setup).
|
||||||
|
VictoriaMetrics supports enforcing of label filters for [query API](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-querying-api-enhancements)
|
||||||
|
to emulate data isolation. However, the real data isolation can be achieved via [multi-tenancy](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy).
|
||||||
|
|
||||||
#### Time series
|
#### Time series
|
||||||
|
|
||||||
A combination of a metric name and its labels defines a `time series`. For example,
|
A combination of a metric name and its labels defines a `time series`. For example,
|
||||||
|
@ -344,6 +352,18 @@ It is very important to keep under control the number of unique label values, si
|
||||||
leads to a new [time series](#time-series). Try to avoid using volatile label values such as session ID or query ID in order to
|
leads to a new [time series](#time-series). Try to avoid using volatile label values such as session ID or query ID in order to
|
||||||
avoid excessive resource usage and database slowdown.
|
avoid excessive resource usage and database slowdown.
|
||||||
|
|
||||||
|
### Multi-tenancy
|
||||||
|
|
||||||
|
[Cluster version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) of VictoriaMetrics
|
||||||
|
supports [multi-tenancy](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy)
|
||||||
|
for data isolation.
|
||||||
|
|
||||||
|
Multi-tenancy can be emulated for [single-server](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html)
|
||||||
|
version of VictoriaMetrics by adding [labels](#labels) on [write path](#write-data)
|
||||||
|
and enforcing [labels filtering](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-querying-api-enhancements)
|
||||||
|
on [read path](#query-data).
|
||||||
|
|
||||||
|
|
||||||
## Write data
|
## Write data
|
||||||
|
|
||||||
VictoriaMetrics supports both models used in modern monitoring applications: [push](#push-model) and [pull](#pull-model).
|
VictoriaMetrics supports both models used in modern monitoring applications: [push](#push-model) and [pull](#pull-model).
|
||||||
|
|
|
@ -27,7 +27,7 @@ Managed VictoriaMetrics provides different levels of user access. It defines wha
|
||||||
You assign the role to the user during the user creation procedure. You can change the role after the creation
|
You assign the role to the user during the user creation procedure. You can change the role after the creation
|
||||||
|
|
||||||
|
|
||||||
#### Roles definition
|
### Roles definition
|
||||||
|
|
||||||
<table>
|
<table>
|
||||||
<tr>
|
<tr>
|
||||||
|
@ -82,7 +82,7 @@ You assign the role to the user during the user creation procedure. You can chan
|
||||||
</tr>
|
</tr>
|
||||||
</table>
|
</table>
|
||||||
|
|
||||||
#### User statuses
|
### User statuses
|
||||||
|
|
||||||
|
|
||||||
<table>
|
<table>
|
||||||
|
|
|
@ -1,5 +1,7 @@
|
||||||
---
|
---
|
||||||
sort: 27
|
sort: 27
|
||||||
|
title: VictoriaMetrics Operator
|
||||||
|
disableToc: true
|
||||||
---
|
---
|
||||||
|
|
||||||
# VictoriaMetrics Operator
|
# VictoriaMetrics Operator
|
||||||
|
|
|
@ -7,7 +7,7 @@ menu:
|
||||||
parent: "operator"
|
parent: "operator"
|
||||||
weight: 2
|
weight: 2
|
||||||
aliases:
|
aliases:
|
||||||
- operator/additional-scrape.html
|
- /operator/additional-scrape.html
|
||||||
---
|
---
|
||||||
|
|
||||||
# Additional Scrape Configuration
|
# Additional Scrape Configuration
|
||||||
|
|
|
@ -1,7 +1,14 @@
|
||||||
---
|
---
|
||||||
sort: 3
|
sort: 3
|
||||||
|
weight: 3
|
||||||
|
menu:
|
||||||
|
docs:
|
||||||
|
parent: 'victoriametrics'
|
||||||
|
weight: 3
|
||||||
|
title: vmagent
|
||||||
|
aliases:
|
||||||
|
- /vmagent.html
|
||||||
---
|
---
|
||||||
|
|
||||||
# vmagent
|
# vmagent
|
||||||
|
|
||||||
`vmagent` is a tiny agent which helps you collect metrics from various sources,
|
`vmagent` is a tiny agent which helps you collect metrics from various sources,
|
||||||
|
@ -756,14 +763,18 @@ See [these docs](https://docs.victoriametrics.com/#deduplication) for details.
|
||||||
|
|
||||||
## High availability
|
## High availability
|
||||||
|
|
||||||
It is possible to run multiple identically configured `vmagent` instances or `vmagent` [clusters](#scraping-big-number-of-targets),
|
It is possible to run multiple **identically configured** `vmagent` instances or `vmagent`
|
||||||
so they [scrape](#how-to-collect-metrics-in-prometheus-format) the same set of targets and push the collected data to the same set of VictoriaMetrics remote storage systems.
|
[clusters](#scraping-big-number-of-targets), so they [scrape](#how-to-collect-metrics-in-prometheus-format)
|
||||||
|
the same set of targets and push the collected data to the same set of VictoriaMetrics remote storage systems.
|
||||||
|
Two **identically configured** vmagent instances or clusters is usually called an HA pair.
|
||||||
|
|
||||||
In this case the deduplication must be configured at VictoriaMetrics in order to de-duplicate samples received from multiple identically configured `vmagent` instances or clusters.
|
When running HA pairs, [deduplication](https://docs.victoriametrics.com/#deduplication) must be configured
|
||||||
|
at VictoriaMetrics side in order to de-duplicate received samples.
|
||||||
See [these docs](https://docs.victoriametrics.com/#deduplication) for details.
|
See [these docs](https://docs.victoriametrics.com/#deduplication) for details.
|
||||||
|
|
||||||
It is also recommended passing different values to `-promscrape.cluster.name` command-line flag per each `vmagent` instance or per each `vmagent` cluster in HA setup.
|
It is also recommended passing different values to `-promscrape.cluster.name` command-line flag per each `vmagent`
|
||||||
This is needed for proper data de-duplication. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679) for details.
|
instance or per each `vmagent` cluster in HA setup. This is needed for proper data de-duplication.
|
||||||
|
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679) for details.
|
||||||
|
|
||||||
## Scraping targets via a proxy
|
## Scraping targets via a proxy
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,14 @@
|
||||||
---
|
---
|
||||||
sort: 4
|
sort: 4
|
||||||
|
weight: 4
|
||||||
|
menu:
|
||||||
|
docs:
|
||||||
|
parent: 'victoriametrics'
|
||||||
|
weight: 4
|
||||||
|
title: vmalert
|
||||||
|
aliases:
|
||||||
|
- /vmalert.html
|
||||||
---
|
---
|
||||||
|
|
||||||
# vmalert
|
# vmalert
|
||||||
|
|
||||||
`vmalert` executes a list of the given [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)
|
`vmalert` executes a list of the given [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)
|
||||||
|
@ -951,7 +958,7 @@ The shortlist of configuration flags is the following:
|
||||||
Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
|
Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
|
||||||
Supports an array of values separated by comma or specified via multiple flags.
|
Supports an array of values separated by comma or specified via multiple flags.
|
||||||
-external.url string
|
-external.url string
|
||||||
External URL is used as alert's source for sent alerts to the notifier
|
External URL is used as alert's source for sent alerts to the notifier. By default, hostname is used as address.
|
||||||
-flagsAuthKey string
|
-flagsAuthKey string
|
||||||
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
|
||||||
-fs.disableMmap
|
-fs.disableMmap
|
||||||
|
|
|
@ -98,13 +98,15 @@ Currently, vmanomaly ships with a few common models:
|
||||||
### Examples
|
### Examples
|
||||||
For example, here’s how Prophet predictions could look like on a real-data example
|
For example, here’s how Prophet predictions could look like on a real-data example
|
||||||
(Prophet auto-detected seasonality interval):
|
(Prophet auto-detected seasonality interval):
|
||||||
![prophet](vmanomaly-prophet-example.png)
|
|
||||||
|
<img alt="prophet-example" src="vmanomaly-prophet-example.png">
|
||||||
|
|
||||||
And here’s what Holt-Winters predictions real-world data could look like (seasonality manually
|
And here’s what Holt-Winters predictions real-world data could look like (seasonality manually
|
||||||
set to 1 week). Notice that it predicts anomalies in
|
set to 1 week). Notice that it predicts anomalies in
|
||||||
different places than Prophet because the model noticed there are usually spikes on Friday
|
different places than Prophet because the model noticed there are usually spikes on Friday
|
||||||
morning, so it accounted for that:
|
morning, so it accounted for that:
|
||||||
![holt-winters](vmanomaly-holtwinters-example.png)
|
|
||||||
|
<img alt="holtwinters-example" src="vmanomaly-holtwinters-example.png">
|
||||||
|
|
||||||
## Process
|
## Process
|
||||||
Upon starting, vmanomaly queries the initial range of data, and trains its model (“fit” by convention).
|
Upon starting, vmanomaly queries the initial range of data, and trains its model (“fit” by convention).
|
||||||
|
|
|
@ -1,7 +1,14 @@
|
||||||
---
|
---
|
||||||
sort: 5
|
sort: 5
|
||||||
|
weight: 5
|
||||||
|
menu:
|
||||||
|
docs:
|
||||||
|
parent: 'victoriametrics'
|
||||||
|
weight: 5
|
||||||
|
title: vmauth
|
||||||
|
aliases:
|
||||||
|
- /vmauth.html
|
||||||
---
|
---
|
||||||
|
|
||||||
# vmauth
|
# vmauth
|
||||||
|
|
||||||
`vmauth` is a simple auth proxy, router and [load balancer](#load-balancing) for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
`vmauth` is a simple auth proxy, router and [load balancer](#load-balancing) for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
|
|
|
@ -1,7 +1,14 @@
|
||||||
---
|
---
|
||||||
sort: 6
|
sort: 6
|
||||||
|
weight: 6
|
||||||
|
menu:
|
||||||
|
docs:
|
||||||
|
parent: 'victoriametrics'
|
||||||
|
weight: 6
|
||||||
|
title: vmbackup
|
||||||
|
aliases:
|
||||||
|
- /vmbackup.html
|
||||||
---
|
---
|
||||||
|
|
||||||
# vmbackup
|
# vmbackup
|
||||||
|
|
||||||
`vmbackup` creates VictoriaMetrics data backups from [instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots).
|
`vmbackup` creates VictoriaMetrics data backups from [instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots).
|
||||||
|
|
|
@ -1,8 +1,15 @@
|
||||||
---
|
---
|
||||||
sort: 10
|
sort: 10
|
||||||
|
weight: 10
|
||||||
|
menu:
|
||||||
|
docs:
|
||||||
|
parent: 'victoriametrics'
|
||||||
|
weight: 10
|
||||||
|
title: vmbackupmanager
|
||||||
|
aliases:
|
||||||
|
- /vmbackupmanager.html
|
||||||
---
|
---
|
||||||
|
# vmbackupmanager
|
||||||
## vmbackupmanager
|
|
||||||
|
|
||||||
***vmbackupmanager is a part of [enterprise package](https://docs.victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
***vmbackupmanager is a part of [enterprise package](https://docs.victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
||||||
|
|
||||||
|
@ -108,11 +115,11 @@ The result on the GCS bucket
|
||||||
|
|
||||||
* The root folder
|
* The root folder
|
||||||
|
|
||||||
![root](vmbackupmanager_root_folder.png)
|
<img alt="root folder" src="vmbackupmanager_root_folder.png">
|
||||||
|
|
||||||
* The latest folder
|
* The latest folder
|
||||||
|
|
||||||
![latest](vmbackupmanager_latest_folder.png)
|
<img alt="latest folder" src="vmbackupmanager_latest_folder.png">
|
||||||
|
|
||||||
## Backup Retention Policy
|
## Backup Retention Policy
|
||||||
|
|
||||||
|
@ -127,7 +134,7 @@ Backup retention policy is controlled by:
|
||||||
|
|
||||||
Let’s assume we have a backup manager collecting daily backups for the past 10 days.
|
Let’s assume we have a backup manager collecting daily backups for the past 10 days.
|
||||||
|
|
||||||
![daily](vmbackupmanager_rp_daily_1.png)
|
<img alt="retention policy daily before retention cycle" src="vmbackupmanager_rp_daily_1.png">
|
||||||
|
|
||||||
We enable backup retention policy for backup manager by using following configuration:
|
We enable backup retention policy for backup manager by using following configuration:
|
||||||
|
|
||||||
|
@ -152,7 +159,7 @@ info app/vmbackupmanager/retention.go:106 daily backups to delete [daily/2
|
||||||
|
|
||||||
The result on the GCS bucket. We see only 3 daily backups:
|
The result on the GCS bucket. We see only 3 daily backups:
|
||||||
|
|
||||||
![daily](vmbackupmanager_rp_daily_2.png)
|
<img alt="retention policy daily after retention cycle" src="vmbackupmanager_rp_daily_2.png">
|
||||||
|
|
||||||
### Protection backups against deletion by retention policy
|
### Protection backups against deletion by retention policy
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,14 @@
|
||||||
---
|
---
|
||||||
sort: 8
|
sort: 8
|
||||||
|
weight: 8
|
||||||
|
menu:
|
||||||
|
docs:
|
||||||
|
parent: 'victoriametrics'
|
||||||
|
weight: 8
|
||||||
|
title: vmctl
|
||||||
|
aliases:
|
||||||
|
- /vmctl.html
|
||||||
---
|
---
|
||||||
|
|
||||||
# vmctl
|
# vmctl
|
||||||
|
|
||||||
VictoriaMetrics command-line tool
|
VictoriaMetrics command-line tool
|
||||||
|
|
|
@ -1,7 +1,14 @@
|
||||||
---
|
---
|
||||||
sort: 9
|
sort: 9
|
||||||
|
weight: 9
|
||||||
|
menu:
|
||||||
|
docs:
|
||||||
|
parent: 'victoriametrics'
|
||||||
|
weight: 9
|
||||||
|
title: vmgateway
|
||||||
|
aliases:
|
||||||
|
- /vmgateway.html
|
||||||
---
|
---
|
||||||
|
|
||||||
# vmgateway
|
# vmgateway
|
||||||
|
|
||||||
***vmgateway is a part of [enterprise package](https://docs.victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
***vmgateway is a part of [enterprise package](https://docs.victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
||||||
|
|
|
@ -1,7 +1,14 @@
|
||||||
---
|
---
|
||||||
sort: 7
|
sort: 7
|
||||||
|
weight: 7
|
||||||
|
menu:
|
||||||
|
docs:
|
||||||
|
parent: 'victoriametrics'
|
||||||
|
weight: 7
|
||||||
|
title: vmrestore
|
||||||
|
aliases:
|
||||||
|
- /vmrestore.html
|
||||||
---
|
---
|
||||||
|
|
||||||
# vmrestore
|
# vmrestore
|
||||||
|
|
||||||
`vmrestore` restores data from backups created by [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
|
`vmrestore` restores data from backups created by [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
|
||||||
|
|
|
@ -71,9 +71,14 @@ func readProxyProto(r io.Reader) (net.Addr, error) {
|
||||||
if version != 2 {
|
if version != 2 {
|
||||||
return nil, fmt.Errorf("unsupported proxy protocol version, only v2 protocol version is supported, got: %d", version)
|
return nil, fmt.Errorf("unsupported proxy protocol version, only v2 protocol version is supported, got: %d", version)
|
||||||
}
|
}
|
||||||
if proto != 1 {
|
// check for supported proto:
|
||||||
// Only TCP is supported (aka STREAM).
|
switch {
|
||||||
return nil, fmt.Errorf("the proxy protocol implementation doesn't support proto %d; expecting 1", proto)
|
case proto == 0 && command == 0:
|
||||||
|
// 0 - UNSPEC with LOCAL command 0. Common use case for load balancer health checks.
|
||||||
|
case proto == 1:
|
||||||
|
// 1 - TCP (aka STREAM).
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("the proxy protocol implementation doesn't support proto %d and command: %d; expecting proto 1 or proto 0 with command 0", proto, command)
|
||||||
}
|
}
|
||||||
// The length of the remainder of the header including any TLVs in network byte order
|
// The length of the remainder of the header including any TLVs in network byte order
|
||||||
// 0, 1, 2
|
// 0, 1, 2
|
||||||
|
|
|
@ -123,6 +123,9 @@ type HTTPClientConfig struct {
|
||||||
|
|
||||||
// Headers contains optional HTTP headers, which must be sent in the request to the server
|
// Headers contains optional HTTP headers, which must be sent in the request to the server
|
||||||
Headers []string `yaml:"headers,omitempty"`
|
Headers []string `yaml:"headers,omitempty"`
|
||||||
|
|
||||||
|
// FollowRedirects specifies whether the client should follow HTTP 3xx redirects.
|
||||||
|
FollowRedirects *bool `yaml:"follow_redirects,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProxyClientConfig represents proxy client config.
|
// ProxyClientConfig represents proxy client config.
|
||||||
|
|
|
@ -73,12 +73,12 @@ function submitRelabelDebugForm(e) {
|
||||||
{% func relabelDebugFormInputs(metric, relabelConfigs string) %}
|
{% func relabelDebugFormInputs(metric, relabelConfigs string) %}
|
||||||
<div>
|
<div>
|
||||||
Relabel configs:<br/>
|
Relabel configs:<br/>
|
||||||
<textarea name="relabel_configs" style="width: 100%; height: 15em" class="m-1">{%s relabelConfigs %}</textarea>
|
<textarea name="relabel_configs" style="width: 100%; height: 15em; font-family: monospace" class="m-1">{%s relabelConfigs %}</textarea>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div>
|
<div>
|
||||||
Labels:<br/>
|
Labels:<br/>
|
||||||
<textarea name="metric" style="width: 100%; height: 5em" class="m-1">{%s metric %}</textarea>
|
<textarea name="metric" style="width: 100%; height: 5em; font-family: monospace" class="m-1">{%s metric %}</textarea>
|
||||||
</div>
|
</div>
|
||||||
{% endfunc %}
|
{% endfunc %}
|
||||||
|
|
||||||
|
|
|
@ -181,11 +181,11 @@ func RelabelDebugStepsHTML(targetURL, targetID string, dss []DebugStep, metric,
|
||||||
//line lib/promrelabel/debug.qtpl:73
|
//line lib/promrelabel/debug.qtpl:73
|
||||||
func streamrelabelDebugFormInputs(qw422016 *qt422016.Writer, metric, relabelConfigs string) {
|
func streamrelabelDebugFormInputs(qw422016 *qt422016.Writer, metric, relabelConfigs string) {
|
||||||
//line lib/promrelabel/debug.qtpl:73
|
//line lib/promrelabel/debug.qtpl:73
|
||||||
qw422016.N().S(`<div>Relabel configs:<br/><textarea name="relabel_configs" style="width: 100%; height: 15em" class="m-1">`)
|
qw422016.N().S(`<div>Relabel configs:<br/><textarea name="relabel_configs" style="width: 100%; height: 15em; font-family: monospace" class="m-1">`)
|
||||||
//line lib/promrelabel/debug.qtpl:76
|
//line lib/promrelabel/debug.qtpl:76
|
||||||
qw422016.E().S(relabelConfigs)
|
qw422016.E().S(relabelConfigs)
|
||||||
//line lib/promrelabel/debug.qtpl:76
|
//line lib/promrelabel/debug.qtpl:76
|
||||||
qw422016.N().S(`</textarea></div><div>Labels:<br/><textarea name="metric" style="width: 100%; height: 5em" class="m-1">`)
|
qw422016.N().S(`</textarea></div><div>Labels:<br/><textarea name="metric" style="width: 100%; height: 5em; font-family: monospace" class="m-1">`)
|
||||||
//line lib/promrelabel/debug.qtpl:81
|
//line lib/promrelabel/debug.qtpl:81
|
||||||
qw422016.E().S(metric)
|
qw422016.E().S(metric)
|
||||||
//line lib/promrelabel/debug.qtpl:81
|
//line lib/promrelabel/debug.qtpl:81
|
||||||
|
|
|
@ -245,7 +245,6 @@ type ScrapeConfig struct {
|
||||||
MetricsPath string `yaml:"metrics_path,omitempty"`
|
MetricsPath string `yaml:"metrics_path,omitempty"`
|
||||||
HonorLabels bool `yaml:"honor_labels,omitempty"`
|
HonorLabels bool `yaml:"honor_labels,omitempty"`
|
||||||
HonorTimestamps *bool `yaml:"honor_timestamps,omitempty"`
|
HonorTimestamps *bool `yaml:"honor_timestamps,omitempty"`
|
||||||
FollowRedirects *bool `yaml:"follow_redirects,omitempty"`
|
|
||||||
Scheme string `yaml:"scheme,omitempty"`
|
Scheme string `yaml:"scheme,omitempty"`
|
||||||
Params map[string][]string `yaml:"params,omitempty"`
|
Params map[string][]string `yaml:"params,omitempty"`
|
||||||
HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
|
HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
|
||||||
|
@ -990,8 +989,8 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
|
||||||
honorTimestamps = *sc.HonorTimestamps
|
honorTimestamps = *sc.HonorTimestamps
|
||||||
}
|
}
|
||||||
denyRedirects := false
|
denyRedirects := false
|
||||||
if sc.FollowRedirects != nil {
|
if sc.HTTPClientConfig.FollowRedirects != nil {
|
||||||
denyRedirects = !*sc.FollowRedirects
|
denyRedirects = !*sc.HTTPClientConfig.FollowRedirects
|
||||||
}
|
}
|
||||||
metricsPath := sc.MetricsPath
|
metricsPath := sc.MetricsPath
|
||||||
if metricsPath == "" {
|
if metricsPath == "" {
|
||||||
|
|
|
@ -110,7 +110,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
c, err := discoveryutils.NewClient(env.ResourceManagerEndpoint, ac, sdc.ProxyURL, proxyAC)
|
c, err := discoveryutils.NewClient(env.ResourceManagerEndpoint, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create client for %q: %w", env.ResourceManagerEndpoint, err)
|
return nil, fmt.Errorf("cannot create client for %q: %w", env.ResourceManagerEndpoint, err)
|
||||||
}
|
}
|
||||||
|
@ -230,7 +230,7 @@ func getRefreshTokenFunc(sdc *SDConfig, ac, proxyAC *promauth.Config, env *cloud
|
||||||
return nil, fmt.Errorf("unsupported `authentication_method: %q` only `OAuth` and `ManagedIdentity` are supported", authenticationMethod)
|
return nil, fmt.Errorf("unsupported `authentication_method: %q` only `OAuth` and `ManagedIdentity` are supported", authenticationMethod)
|
||||||
}
|
}
|
||||||
|
|
||||||
authClient, err := discoveryutils.NewClient(tokenEndpoint, ac, sdc.ProxyURL, proxyAC)
|
authClient, err := discoveryutils.NewClient(tokenEndpoint, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot build auth client: %w", err)
|
return nil, fmt.Errorf("cannot build auth client: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -66,7 +66,7 @@ func TestGetVirtualMachinesSuccess(t *testing.T) {
|
||||||
}
|
}
|
||||||
}))
|
}))
|
||||||
defer testServer.Close()
|
defer testServer.Close()
|
||||||
c, err := discoveryutils.NewClient(testServer.URL, nil, nil, nil)
|
c, err := discoveryutils.NewClient(testServer.URL, nil, nil, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unexpected error at client create: %s", err)
|
t.Fatalf("unexpected error at client create: %s", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -80,7 +80,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC)
|
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -74,7 +74,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC)
|
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -36,7 +36,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC)
|
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
||||||
}
|
}
|
||||||
|
@ -56,7 +56,6 @@ func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return v.(*apiConfig), nil
|
return v.(*apiConfig), nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const dropletsAPIPath = "/v2/droplets"
|
const dropletsAPIPath = "/v2/droplets"
|
||||||
|
|
|
@ -50,7 +50,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(sdc.Host, ac, sdc.ProxyURL, proxyAC)
|
client, err := discoveryutils.NewClient(sdc.Host, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", sdc.Host, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", sdc.Host, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -49,7 +49,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(sdc.Host, ac, sdc.ProxyURL, proxyAC)
|
client, err := discoveryutils.NewClient(sdc.Host, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", sdc.Host, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", sdc.Host, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -34,7 +34,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC)
|
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
||||||
}
|
}
|
||||||
|
@ -50,7 +50,6 @@ func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return v.(*apiConfig), nil
|
return v.(*apiConfig), nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func getAPIResponse(cfg *apiConfig, path string) ([]byte, error) {
|
func getAPIResponse(cfg *apiConfig, path string) ([]byte, error) {
|
||||||
|
|
|
@ -44,7 +44,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC)
|
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -60,7 +60,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC)
|
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -68,7 +68,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC)
|
client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC, sdc.HTTPClientConfig.FollowRedirects)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -83,7 +83,7 @@ type HTTPClient struct {
|
||||||
var defaultDialer = &net.Dialer{}
|
var defaultDialer = &net.Dialer{}
|
||||||
|
|
||||||
// NewClient returns new Client for the given args.
|
// NewClient returns new Client for the given args.
|
||||||
func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxyAC *promauth.Config) (*Client, error) {
|
func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxyAC *promauth.Config, followRedirects *bool) (*Client, error) {
|
||||||
u, err := url.Parse(apiServer)
|
u, err := url.Parse(apiServer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse apiServer=%q: %w", apiServer, err)
|
return nil, fmt.Errorf("cannot parse apiServer=%q: %w", apiServer, err)
|
||||||
|
@ -139,6 +139,14 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
|
||||||
ac.SetHeaders(req, true)
|
ac.SetHeaders(req, true)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if followRedirects != nil && !*followRedirects {
|
||||||
|
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||||
|
return http.ErrUseLastResponse
|
||||||
|
}
|
||||||
|
blockingClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||||
|
return http.ErrUseLastResponse
|
||||||
|
}
|
||||||
|
}
|
||||||
setHTTPProxyHeaders := func(req *http.Request) {}
|
setHTTPProxyHeaders := func(req *http.Request) {}
|
||||||
if proxyAC != nil {
|
if proxyAC != nil {
|
||||||
setHTTPProxyHeaders = func(req *http.Request) {
|
setHTTPProxyHeaders = func(req *http.Request) {
|
||||||
|
@ -186,7 +194,8 @@ func (c *Client) GetAPIResponse(path string) ([]byte, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) getAPIResponseWithConcurrencyLimit(ctx context.Context, client *HTTPClient, path string,
|
func (c *Client) getAPIResponseWithConcurrencyLimit(ctx context.Context, client *HTTPClient, path string,
|
||||||
modifyRequest RequestCallback, inspectResponse ResponseCallback) ([]byte, error) {
|
modifyRequest RequestCallback, inspectResponse ResponseCallback,
|
||||||
|
) ([]byte, error) {
|
||||||
// Limit the number of concurrent API requests.
|
// Limit the number of concurrent API requests.
|
||||||
concurrencyLimitChOnce.Do(concurrencyLimitChInit)
|
concurrencyLimitChOnce.Do(concurrencyLimitChInit)
|
||||||
t := timerpool.Get(*maxWaitTime)
|
t := timerpool.Get(*maxWaitTime)
|
||||||
|
|
26
lib/promscrape/testdata/prometheus.yml
vendored
26
lib/promscrape/testdata/prometheus.yml
vendored
|
@ -1,5 +1,29 @@
|
||||||
scrape_configs:
|
scrape_configs:
|
||||||
- job_name: foo
|
- job_name: foo
|
||||||
|
scrape_interval: 54s
|
||||||
|
scrape_timeout: 12s
|
||||||
|
metrics_path: /foo/bar
|
||||||
|
scheme: https
|
||||||
|
honor_labels: true
|
||||||
|
honor_timestamps: false
|
||||||
|
follow_redirects: false
|
||||||
|
static_configs:
|
||||||
|
- targets: ["foo.bar", "aaa"]
|
||||||
|
labels:
|
||||||
|
x: y
|
||||||
|
__scrape_timeout__: "5s"
|
||||||
|
- job_name: file-job
|
||||||
file_sd_configs:
|
file_sd_configs:
|
||||||
- files: ["file_sd_*.yml"]
|
- files: ["file_sd_*.yml"]
|
||||||
- files: ["file_sd.json"]
|
- files: ["file_sd.json"]
|
||||||
|
- job_name: service-kubernetes
|
||||||
|
kubernetes_sd_configs:
|
||||||
|
- role: endpoints
|
||||||
|
api_server: "https://localhost:1234"
|
||||||
|
follow_redirects: true
|
||||||
|
tls_config:
|
||||||
|
cert_file: valid_cert_file
|
||||||
|
key_file: valid_key_file
|
||||||
|
basic_auth:
|
||||||
|
username: "myusername"
|
||||||
|
password: "mysecret"
|
||||||
|
|
Loading…
Reference in a new issue