diff --git a/Makefile b/Makefile index 2766db0f0..25fdee19d 100644 --- a/Makefile +++ b/Makefile @@ -389,7 +389,7 @@ golangci-lint: install-golangci-lint golangci-lint run --exclude '(SA4003|SA1019|SA5011):' -D errcheck -D structcheck --timeout 2m install-golangci-lint: - which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.46.2 + which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.47.1 install-wwhrd: which wwhrd || GO111MODULE=off go get github.com/frapposelli/wwhrd diff --git a/README.md b/README.md index 6211c543a..1e7dd0975 100644 --- a/README.md +++ b/README.md @@ -248,7 +248,7 @@ It is also safe downgrading to older versions unless [release notes](https://git The following steps must be performed during the upgrade / downgrade procedure: -* Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it. +* Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it. See [how to send signals to processes](https://stackoverflow.com/questions/33239959/send-signal-to-process-from-command-line). * Wait until the process stops. This can take a few seconds. * Start the upgraded VictoriaMetrics. @@ -411,7 +411,7 @@ and stream plain InfluxDB line protocol data to the configured TCP and/or UDP ad VictoriaMetrics performs the following transformations to the ingested InfluxDB data: * [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value - unless `db` tag exists in the InfluxDB line. The `db` label name can be overriden via `-influxDBLabel` command-line flag. + unless `db` tag exists in the InfluxDB line. The `db` label name can be overridden via `-influxDBLabel` command-line flag. * Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names. * Field values are mapped to time series values. * Tags are mapped to Prometheus labels as-is. @@ -704,7 +704,7 @@ VictoriaMetrics supports the following handlers from [Graphite Metrics API](http VictoriaMetrics accepts the following additional query args at `/metrics/find` and `/metrics/expand`: * `label` - for selecting arbitrary label values. By default `label=__name__`, i.e. metric names are selected. -* `delimiter` - for using different delimiters in metric name hierachy. For example, `/metrics/find?delimiter=_&query=node_*` would return all the metric name prefixes +* `delimiter` - for using different delimiters in metric name hierarchy. For example, `/metrics/find?delimiter=_&query=node_*` would return all the metric name prefixes that start with `node_`. By default `delimiter=.`. ### Graphite Tags API usage @@ -823,6 +823,7 @@ Send a request to `http://:8428/api/v1/admin/tsdb/delete_s where `` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) for metrics to delete. After that all the time series matching the given selector are deleted. 
Storage space for the deleted time series isn't freed instantly - it is freed during subsequent [background merges of data files](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282). + Note that background merges may never occur for data from previous months, so storage space won't be freed for historical data. In this case [forced merge](#forced-merge) may help free up storage space. @@ -845,6 +846,8 @@ Using the delete API is not recommended in the following cases, since it brings * Reducing disk space usage by deleting unneeded time series. This doesn't work as expected, since the deleted time series occupy disk space until the next merge operation, which can never occur when deleting too old data. [Forced merge](#forced-merge) may be used for freeing up disk space occupied by old data. + Note that VictoriaMetrics doesn't delete entries from inverted index (aka `indexdb`) for the deleted time series. + Inverted index is cleaned up once per the configured [retention](#retention). It's better to use the `-retentionPeriod` command-line flag for efficient pruning of old data. @@ -1136,7 +1139,7 @@ Extra labels may be added to all the imported metrics by passing `extra_label=na For example, `/api/v1/import/prometheus?extra_label=foo=bar` would add `{foo="bar"}` label to all the imported metrics. If timestamp is missing in ` ` Prometheus exposition format line, then the current timestamp is used during data ingestion. -It can be overriden by passing unix timestamp in *milliseconds* via `timestamp` query arg. For example, `/api/v1/import/prometheus?timestamp=1594370496905`. +It can be overridden by passing unix timestamp in *milliseconds* via `timestamp` query arg. For example, `/api/v1/import/prometheus?timestamp=1594370496905`. VictoriaMetrics accepts arbitrary number of lines in a single request to `/api/v1/import/prometheus`, i.e. it supports data streaming. @@ -1629,6 +1632,20 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet See also [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html). +## Push metrics + +All the VictoriaMetrics apps support pushing their metrics exposed at `/metrics` page to remote storage in Prometheus text exposition format. This can be done by specifying the following command-line flags: + +* `-pushmetrics.url` - the url to push metrics to. For example, `-pushmetrics.url=http://victoria-metrics:8428/api/v1/import/prometheus` instructs to push internal metrics to `/api/v1/import/prometheus` endpoint according to [these docs](#how-to-import-data-in-prometheus-exposition-format). The `-pushmetrics.url` can be specified multiple times. In this case metrics are pushed to all the specified urls. The url can contain basic auth params in the form http://user:pass@hostname/api/v1/import/prometheus . +* `-pushmetrics.interval` - the interval between pushes. By default it is set to 10 seconds. +* `-pushmetrics.extraLabel` - label to add to all the metrics before sending them to `-pushmetrics.url`. The label must be specified in the format `label="value"`. It is OK to specify multiple `-pushmetrics.extraLabel` command-line flags. In this case all the specified labels are added to all the metrics before sending them to `-pushmetrics.url`. 
+ +For example, the following command instructs VictoriaMetrics to push metrics from `/metrics` page to `https://maas.victoriametrics.com/api/v1/import/prometheus` with `user:pass` [Basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication). The `instance="foobar"` and `job="vm"` labels are added to all the metrics before sending them to the remote storage: + +```console +/path/to/victoria-metrics -pushmetrics.url=https://user:pass@maas.victoriametrics.com/api/v1/import/prometheus -pushmetrics.extraLabel='instance="foobar",job="vm"' +``` + ## Cache removal VictoriaMetrics uses various internal caches. These caches are stored to `<-storageDataPath>/cache` directory during graceful shutdown (e.g. when VictoriaMetrics is stopped by sending `SIGINT` signal). The caches are read on the next VictoriaMetrics startup. Sometimes it is needed to remove such caches on the next startup. This can be performed by placing `reset_cache_on_startup` file inside the `<-storageDataPath>/cache` directory before the restart of VictoriaMetrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1447) for details. @@ -1745,7 +1762,7 @@ For accessing vmalert's UI through single-node VictoriaMetrics configure `-vmale Note, that vendors (including VictoriaMetrics) are often biased when doing such tests. E.g. they try highlighting the best parts of their product, while highlighting the worst parts of competing products. -So we encourage users and all independent third parties to conduct their becnhmarks for various products +So we encourage users and all independent third parties to conduct their benchmarks for various products they are evaluating in production and publish the results. As a reference, please see [benchmarks](https://docs.victoriametrics.com/Articles.html#benchmarks) conducted by @@ -2083,6 +2100,14 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed. See also -promscrape.suppressScrapeErrorsDelay -promscrape.suppressScrapeErrorsDelay duration The delay for suppressing repeated scrape errors logging per each scrape targets. This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -relabelConfig string Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. 
The config is reloaded on SIGHUP signal -relabelDebug @@ -2101,7 +2126,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li -search.graphiteMaxPointsPerSeries int The maximum number of points per series Graphite render API can return (default 1000000) -search.graphiteStorageStep duration - The interval between datapoints stored in the database. It is used at Graphite Render API handler for normalizing the interval between datapoints in case it isn't normalized. It can be overriden by sending 'storage_step' query arg to /render API or by sending the desired interval via 'Storage-Step' http header during querying /render API (default 10s) + The interval between datapoints stored in the database. It is used at Graphite Render API handler for normalizing the interval between datapoints in case it isn't normalized. It can be overridden by sending 'storage_step' query arg to /render API or by sending the desired interval via 'Storage-Step' http header during querying /render API (default 10s) -search.latencyOffset duration The time when data points become visible in query results after the collection. Too small value can result in incomplete last points for query results (default 30s) -search.logSlowQueryDuration duration diff --git a/app/victoria-metrics/main.go b/app/victoria-metrics/main.go index a6c56fbbd..741cc29b1 100644 --- a/app/victoria-metrics/main.go +++ b/app/victoria-metrics/main.go @@ -19,6 +19,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" ) @@ -43,6 +44,7 @@ func main() { flag.CommandLine.SetOutput(os.Stdout) flag.Usage = usage envflag.Parse() + pushmetrics.Init() buildinfo.Init() logger.Init() diff --git a/app/victoria-metrics/self_scraper.go b/app/victoria-metrics/self_scraper.go index ed12efeb4..fb544a009 100644 --- a/app/victoria-metrics/self_scraper.go +++ b/app/victoria-metrics/self_scraper.go @@ -6,8 +6,8 @@ import ( "time" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/appmetrics" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus" @@ -60,7 +60,7 @@ func selfScraper(scrapeInterval time.Duration) { currentTimestamp = currentTime.UnixNano() / 1e6 } bb.Reset() - httpserver.WritePrometheusMetrics(&bb) + appmetrics.WritePrometheusMetrics(&bb) s := bytesutil.ToUnsafeString(bb.B) rows.Reset() rows.Unmarshal(s) diff --git a/app/vmagent/README.md b/app/vmagent/README.md index ee69ce6a7..2256f38a6 100644 --- a/app/vmagent/README.md +++ b/app/vmagent/README.md @@ -122,7 +122,7 @@ Please see [these docs](#relabeling) for details. ### Splitting data streams among multiple systems -`vmagent` supports splitting the collected data between muliple destinations with the help of `-remoteWrite.urlRelabelConfig`, +`vmagent` supports splitting the collected data between multiple destinations with the help of `-remoteWrite.urlRelabelConfig`, which is applied independently for each configured `-remoteWrite.url` destination. 
For example, it is possible to replicate or split data among long-term remote storage, short-term remote storage and a real-time analytical system [built on top of Kafka](https://github.com/Telefonica/prometheus-kafka-adapter). Note that each destination can receive it's own subset of the collected data due to per-destination relabeling via `-remoteWrite.urlRelabelConfig`. @@ -136,7 +136,7 @@ Also, Basic Auth can be enabled for the incoming `remote_write` requests with `- ### remote_write for clustered version -While `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets, writes are always peformed in Promethes remote_write protocol. Therefore for the [clustered version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html), `-remoteWrite.url` the command-line flag should be configured as `://:8480/insert//prometheus/api/v1/write` according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). There is also support for multitenant writes. See [these docs](#multitenancy). +While `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets, writes are always performed in Prometheus remote_write protocol. Therefore for the [clustered version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html), the `-remoteWrite.url` command-line flag should be configured as `://:8480/insert//prometheus/api/v1/write` according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). There is also support for multitenant writes. See [these docs](#multitenancy). ## Multitenancy @@ -332,7 +332,7 @@ The following articles contain useful information about Prometheus relabeling: VictoriaMetrics provides the following additional relabeling actions on top of standard actions from the [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config): -* `replace_all` replaces all of the occurences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurences of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`): +* `replace_all` replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurrences of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`): ```yaml - action: replace_all @@ -342,7 +342,7 @@ VictoriaMetrics provides the following additional relabeling actions on top of s replacement: "_" ``` -* `labelmap_all` replaces all of the occurences of `regex` in all the label names with the `replacement`. For example, the following relabeling config replaces all the occurences of `-` char in all the label names with `_` char (e.g. `foo-bar-baz` label name is transformed into `foo_bar_baz`): +* `labelmap_all` replaces all of the occurrences of `regex` in all the label names with the `replacement`. For example, the following relabeling config replaces all the occurrences of `-` char in all the label names with `_` char (e.g.
`foo-bar-baz` label name is transformed into `foo_bar_baz`): ```yaml - action: labelmap_all @@ -1079,6 +1079,14 @@ See the docs at https://docs.victoriametrics.com/vmagent.html . Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed. See also -promscrape.suppressScrapeErrorsDelay -promscrape.suppressScrapeErrorsDelay duration The delay for suppressing repeated scrape errors logging per each scrape targets. This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -remoteWrite.aws.accessKey array Optional AWS AccessKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set Supports an array of values separated by comma or specified via multiple flags. diff --git a/app/vmagent/main.go b/app/vmagent/main.go index a7e242173..d44d1b716 100644 --- a/app/vmagent/main.go +++ b/app/vmagent/main.go @@ -37,6 +37,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape" "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics" "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" "github.com/VictoriaMetrics/metrics" ) @@ -76,6 +77,7 @@ func main() { flag.CommandLine.SetOutput(os.Stdout) flag.Usage = usage envflag.Parse() + pushmetrics.Init() remotewrite.InitSecretFlags() buildinfo.Init() logger.Init() diff --git a/app/vmagent/remotewrite/client.go b/app/vmagent/remotewrite/client.go index 54cb61ea8..d50eeea31 100644 --- a/app/vmagent/remotewrite/client.go +++ b/app/vmagent/remotewrite/client.go @@ -154,6 +154,9 @@ func (c *client) init(argIdx, concurrency int, sanitizedURL string) { c.packetsDropped = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_packets_dropped_total{url=%q}`, c.sanitizedURL)) c.retriesCount = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_retries_count_total{url=%q}`, c.sanitizedURL)) c.sendDuration = metrics.GetOrCreateFloatCounter(fmt.Sprintf(`vmagent_remotewrite_send_duration_seconds_total{url=%q}`, c.sanitizedURL)) + metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_queues{url=%q}`, c.sanitizedURL), func() float64 { + return float64(*queues) + }) for i := 0; i < concurrency; i++ { c.wg.Add(1) go func() { diff --git a/app/vmalert/README.md b/app/vmalert/README.md index 9172d877e..6694f43d4 100644 --- a/app/vmalert/README.md +++ b/app/vmalert/README.md @@ -787,6 +787,14 @@ The shortlist of configuration flags is the following: The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s) 
-promscrape.dnsSDCheckInterval duration Interval for checking for changes in dns. This works only if dns_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config for details (default 30s) + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -remoteRead.basicAuth.password string Optional basic auth password for -remoteRead.url -remoteRead.basicAuth.passwordFile string diff --git a/app/vmalert/group_test.go b/app/vmalert/group_test.go index 046acd2b0..fc01b0e1b 100644 --- a/app/vmalert/group_test.go +++ b/app/vmalert/group_test.go @@ -148,7 +148,7 @@ func TestUpdateWith(t *testing.T) { t.Fatalf("expected to have rule %q; got %q", want, got) } if err := compareRules(t, got, want); err != nil { - t.Fatalf("comparsion error: %s", err) + t.Fatalf("comparison error: %s", err) } } }) diff --git a/app/vmalert/helpers_test.go b/app/vmalert/helpers_test.go index 96bd2b1e2..1a7978cc1 100644 --- a/app/vmalert/helpers_test.go +++ b/app/vmalert/helpers_test.go @@ -164,7 +164,7 @@ func compareGroups(t *testing.T, a, b *Group) { t.Fatalf("expected to have rule %q; got %q", want.ID(), got.ID()) } if err := compareRules(t, want, got); err != nil { - t.Fatalf("comparsion error: %s", err) + t.Fatalf("comparison error: %s", err) } } } diff --git a/app/vmalert/main.go b/app/vmalert/main.go index feef1691c..35bbac02f 100644 --- a/app/vmalert/main.go +++ b/app/vmalert/main.go @@ -23,6 +23,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics" "github.com/VictoriaMetrics/metrics" ) @@ -79,6 +80,7 @@ func main() { flag.CommandLine.SetOutput(os.Stdout) flag.Usage = usage envflag.Parse() + pushmetrics.Init() buildinfo.Init() logger.Init() err := templates.Load(*ruleTemplatesPath, true) diff --git a/app/vmalert/main_test.go b/app/vmalert/main_test.go index 30595d2f0..43e42b93b 100644 --- a/app/vmalert/main_test.go +++ b/app/vmalert/main_test.go @@ -47,7 +47,7 @@ func TestGetAlertURLGenerator(t *testing.T) { } _, err = getAlertURLGenerator(nil, "foo?{{invalid}}", true) if err == nil { - t.Errorf("expected tempalte validation error got nil") + t.Errorf("expected template validation error got nil") } fn, err = getAlertURLGenerator(u, "foo?query={{$value}}", true) if err != nil { diff --git a/app/vmalert/manager_test.go b/app/vmalert/manager_test.go index 2ac3574ca..e5ecb73af 100644 --- a/app/vmalert/manager_test.go +++ b/app/vmalert/manager_test.go @@ -30,7 +30,7 @@ func TestManagerEmptyRulesDir(t *testing.T) { m := &manager{groups: make(map[uint64]*Group)} cfg := loadCfg(t, []string{"foo/bar"}, true, true) if err := m.update(context.Background(), cfg, false); err != nil { - t.Fatalf("expected to load 
succesfully with empty rules dir; got err instead: %v", err) + t.Fatalf("expected to load successfully with empty rules dir; got err instead: %v", err) } } diff --git a/app/vmalert/templates/template_test.go b/app/vmalert/templates/template_test.go index 16e4b5c25..67b450594 100644 --- a/app/vmalert/templates/template_test.go +++ b/app/vmalert/templates/template_test.go @@ -185,7 +185,7 @@ func TestTemplates_Load(t *testing.T) { } if tc.expErr != "" && err == nil { t.Error("%+w", err) - t.Error("expected error that didn't happend") + t.Error("expected error that didn't happen") } if err != nil && !strings.Contains(err.Error(), tc.expErr) { t.Error("%+w", err) diff --git a/app/vmauth/README.md b/app/vmauth/README.md index e0128ae43..1f102bb8f 100644 --- a/app/vmauth/README.md +++ b/app/vmauth/README.md @@ -36,7 +36,8 @@ Each `url_prefix` in the [-auth.config](#auth-config) may contain either a singl ```yml # Arbitrary number of usernames may be put here. -# Username and bearer_token values must be unique. +# It is possible to set multiple identical usernames with different passwords. +# Such usernames can be differentiated by `name` option. users: # Requests with the 'Authorization: Bearer XXXX' and 'Authorization: Token XXXX' @@ -287,6 +288,14 @@ See the docs at https://docs.victoriametrics.com/vmauth.html . Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -reloadAuthKey string Auth key for /-/reload http endpoint. It must be passed as authKey=...
-tls diff --git a/app/vmauth/auth_config.go b/app/vmauth/auth_config.go index c49a21c24..e19086069 100644 --- a/app/vmauth/auth_config.go +++ b/app/vmauth/auth_config.go @@ -260,8 +260,6 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) { return nil, fmt.Errorf("`users` section cannot be empty in AuthConfig") } byAuthToken := make(map[string]*UserInfo, len(uis)) - byUsername := make(map[string]bool, len(uis)) - byBearerToken := make(map[string]bool, len(uis)) for i := range uis { ui := &uis[i] if ui.BearerToken == "" && ui.Username == "" { @@ -270,12 +268,6 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) { if ui.BearerToken != "" && ui.Username != "" { return nil, fmt.Errorf("bearer_token=%q and username=%q cannot be set simultaneously", ui.BearerToken, ui.Username) } - if byBearerToken[ui.BearerToken] { - return nil, fmt.Errorf("duplicate bearer_token found; bearer_token: %q", ui.BearerToken) - } - if byUsername[ui.Username] { - return nil, fmt.Errorf("duplicate username found; username: %q", ui.Username) - } at1, at2 := getAuthTokens(ui.BearerToken, ui.Username, ui.Password) if byAuthToken[at1] != nil { return nil, fmt.Errorf("duplicate auth token found for bearer_token=%q, username=%q: %q", ui.BearerToken, ui.Username, at1) @@ -311,7 +303,6 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) { return nil, fmt.Errorf("password shouldn't be set for bearer_token %q", ui.BearerToken) } ui.requests = metrics.GetOrCreateCounter(fmt.Sprintf(`vmauth_user_requests_total{username=%q}`, name)) - byBearerToken[ui.BearerToken] = true } if ui.Username != "" { name := ui.Username @@ -319,7 +310,6 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) { name = ui.Name } ui.requests = metrics.GetOrCreateCounter(fmt.Sprintf(`vmauth_user_requests_total{username=%q}`, name)) - byUsername[ui.Username] = true } byAuthToken[at1] = ui byAuthToken[at2] = ui diff --git a/app/vmauth/auth_config_test.go b/app/vmauth/auth_config_test.go index a95a3a43a..a18fffd10 100644 --- a/app/vmauth/auth_config_test.go +++ b/app/vmauth/auth_config_test.go @@ -110,6 +110,18 @@ users: - username: foo url_prefix: https://sss.sss `) + // Duplicate users + f(` +users: +- username: foo + password: bar + url_prefix: http://foo.bar +- username: bar + url_prefix: http://xxx.yyy +- username: foo + password: bar + url_prefix: https://sss.sss +`) // Duplicate bearer_tokens f(` @@ -317,6 +329,28 @@ users: }, }, }) + // Multiple users with the same name + f(` +users: +- username: foo-same + password: baz + url_prefix: http://foo +- username: foo-same + password: bar + url_prefix: https://bar/x/// +`, map[string]*UserInfo{ + getAuthToken("", "foo-same", "baz"): { + Username: "foo-same", + Password: "baz", + URLPrefix: mustParseURL("http://foo"), + }, + getAuthToken("", "foo-same", "bar"): { + Username: "foo-same", + Password: "bar", + URLPrefix: mustParseURL("https://bar/x"), + }, + }) + } func getSrcPaths(paths []string) []*SrcPath { diff --git a/app/vmauth/example_config.yml b/app/vmauth/example_config.yml index f77c07067..a505c4854 100644 --- a/app/vmauth/example_config.yml +++ b/app/vmauth/example_config.yml @@ -1,5 +1,6 @@ # Arbitrary number of usernames may be put here. -# Username and bearer_token values must be unique. +# It is possible to set multiple identical usernames with different passwords. +# Such usernames can be differentiated by `name` option. 
users: # Requests with the 'Authorization: Bearer XXXX' and 'Authorization: Token XXXX' diff --git a/app/vmauth/main.go b/app/vmauth/main.go index 06af2528c..684161800 100644 --- a/app/vmauth/main.go +++ b/app/vmauth/main.go @@ -17,6 +17,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics" "github.com/VictoriaMetrics/metrics" ) @@ -33,6 +34,7 @@ func main() { flag.CommandLine.SetOutput(os.Stdout) flag.Usage = usage envflag.Parse() + pushmetrics.Init() buildinfo.Init() logger.Init() logger.Infof("starting vmauth at %q...", *httpListenAddr) diff --git a/app/vmbackup/README.md b/app/vmbackup/README.md index 51fa65fa9..41b5fb5b9 100644 --- a/app/vmbackup/README.md +++ b/app/vmbackup/README.md @@ -239,6 +239,14 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time- Optional origin directory on the remote storage with old backup for server-side copying when performing full backup. This speeds up full backups -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -s3ForcePathStyle Prefixing endpoint with bucket name when set false, true by default. (default true) -snapshot.createURL string diff --git a/app/vmbackup/main.go b/app/vmbackup/main.go index d527157c6..7280b76a3 100644 --- a/app/vmbackup/main.go +++ b/app/vmbackup/main.go @@ -17,6 +17,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics" "github.com/VictoriaMetrics/VictoriaMetrics/lib/snapshot" ) @@ -41,6 +42,7 @@ func main() { flag.CommandLine.SetOutput(os.Stdout) flag.Usage = usage envflag.Parse() + pushmetrics.Init() buildinfo.Init() logger.Init() diff --git a/app/vmbackupmanager/README.md b/app/vmbackupmanager/README.md index 1b3f075d4..0645145fe 100644 --- a/app/vmbackupmanager/README.md +++ b/app/vmbackupmanager/README.md @@ -238,6 +238,14 @@ vmbackupmanager performs regular backups according to the provided configs. Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . 
For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -runOnStart Upload backups immediately after start of the service. Otherwise the backup starts on new hour -s3ForcePathStyle diff --git a/app/vmgateway/README.md b/app/vmgateway/README.md index 52924d771..2bebda6ba 100644 --- a/app/vmgateway/README.md +++ b/app/vmgateway/README.md @@ -281,6 +281,14 @@ The shortlist of configuration flags include the following: Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -ratelimit.config string path for configuration file. Accepts url address -ratelimit.configCheckInterval duration diff --git a/app/vmrestore/README.md b/app/vmrestore/README.md index 73985e440..1551d2549 100644 --- a/app/vmrestore/README.md +++ b/app/vmrestore/README.md @@ -141,6 +141,14 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -s3ForcePathStyle Prefixing endpoint with bucket name when set false, true by default. 
(default true) -skipBackupCompleteCheck diff --git a/app/vmrestore/main.go b/app/vmrestore/main.go index 1c74b3d89..295229648 100644 --- a/app/vmrestore/main.go +++ b/app/vmrestore/main.go @@ -14,6 +14,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics" ) var ( @@ -33,6 +34,7 @@ func main() { flag.CommandLine.SetOutput(os.Stdout) flag.Usage = usage envflag.Parse() + pushmetrics.Init() buildinfo.Init() logger.Init() diff --git a/app/vmselect/promql/binary_op.go b/app/vmselect/promql/binary_op.go index 699f54d18..8e78664ff 100644 --- a/app/vmselect/promql/binary_op.go +++ b/app/vmselect/promql/binary_op.go @@ -36,9 +36,9 @@ var binaryOpFuncs = map[string]binaryOpFunc{ "unless": binaryOpUnless, // New ops - "if": newBinaryOpArithFunc(binaryop.If), - "ifnot": newBinaryOpArithFunc(binaryop.Ifnot), - "default": newBinaryOpArithFunc(binaryop.Default), + "if": binaryOpIf, + "ifnot": binaryOpIfnot, + "default": binaryOpDefault, } func getBinaryOpFunc(op string) binaryOpFunc { @@ -86,17 +86,6 @@ func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOp right := bfa.right op := bfa.be.Op switch true { - case op == "ifnot": - left = removeEmptySeries(left) - // Do not remove empty series on the right side, - // so the left-side series could be matched against them. - case op == "default": - // Do not remove empty series on the left and the right side, - // since this may lead to missing result: - // - if empty time series are removed on the left side, - // then they won't be substituted by time series from the right side. - // - if empty time series are removed on the right side, - // then this may result in missing time series from the left side. case metricsql.IsBinaryOpCmp(op): // Do not remove empty series for comparison operations, // since this may lead to missing result. @@ -136,7 +125,7 @@ func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOp func adjustBinaryOpTags(be *metricsql.BinaryOpExpr, left, right []*timeseries) ([]*timeseries, []*timeseries, []*timeseries, error) { if len(be.GroupModifier.Op) == 0 && len(be.JoinModifier.Op) == 0 { - if isScalar(left) && be.Op != "default" && be.Op != "if" && be.Op != "ifnot" { + if isScalar(left) { // Fast path: `scalar op vector` rvsLeft := make([]*timeseries, len(right)) tsLeft := left[0] @@ -324,14 +313,23 @@ func resetMetricGroupIfRequired(be *metricsql.BinaryOpExpr, ts *timeseries) { // Do not reset MetricGroup for non-boolean `compare` binary ops like Prometheus does. return } - switch be.Op { - case "default", "if", "ifnot": - // Do not reset MetricGroup for these ops. - return - } ts.MetricName.ResetMetricGroup() } +func binaryOpIf(bfa *binaryOpFuncArg) ([]*timeseries, error) { + mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right) + var rvs []*timeseries + for k, tssLeft := range mLeft { + tssRight := seriesByKey(mRight, k) + if tssRight == nil { + continue + } + tssLeft = addRightNaNsToLeft(tssLeft, tssRight) + rvs = append(rvs, tssLeft...) 
+ } + return rvs, nil +} + func binaryOpAnd(bfa *binaryOpFuncArg) ([]*timeseries, error) { mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right) var rvs []*timeseries @@ -340,24 +338,47 @@ func binaryOpAnd(bfa *binaryOpFuncArg) ([]*timeseries, error) { if tssLeft == nil { continue } - // Add gaps to tssLeft if there are gaps at tssRight. - for _, tsLeft := range tssLeft { - valuesLeft := tsLeft.Values - for i := range valuesLeft { - hasValue := false - for _, tsRight := range tssRight { - if !math.IsNaN(tsRight.Values[i]) { - hasValue = true - break - } - } - if !hasValue { - valuesLeft[i] = nan + tssLeft = addRightNaNsToLeft(tssLeft, tssRight) + rvs = append(rvs, tssLeft...) + } + return rvs, nil +} + +func addRightNaNsToLeft(tssLeft, tssRight []*timeseries) []*timeseries { + for _, tsLeft := range tssLeft { + valuesLeft := tsLeft.Values + for i := range valuesLeft { + hasValue := false + for _, tsRight := range tssRight { + if !math.IsNaN(tsRight.Values[i]) { + hasValue = true + break } } + if !hasValue { + valuesLeft[i] = nan + } } - tssLeft = removeEmptySeries(tssLeft) + } + return removeEmptySeries(tssLeft) +} + +func binaryOpDefault(bfa *binaryOpFuncArg) ([]*timeseries, error) { + mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right) + var rvs []*timeseries + if len(mLeft) == 0 { + for _, tss := range mRight { + rvs = append(rvs, tss...) + } + return rvs, nil + } + for k, tssLeft := range mLeft { rvs = append(rvs, tssLeft...) + tssRight := seriesByKey(mRight, k) + if tssRight == nil { + continue + } + fillLeftNaNsWithRightValues(tssLeft, tssRight) } return rvs, nil } @@ -374,24 +395,43 @@ func binaryOpOr(bfa *binaryOpFuncArg) ([]*timeseries, error) { rvs = append(rvs, tssRight...) continue } - // Fill gaps in tssLeft with values from tssRight as Prometheus does. - // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/552 - for _, tsLeft := range tssLeft { - valuesLeft := tsLeft.Values - for i, v := range valuesLeft { - if !math.IsNaN(v) { - continue - } - for _, tsRight := range tssRight { - vRight := tsRight.Values[i] - if !math.IsNaN(vRight) { - valuesLeft[i] = vRight - break - } + fillLeftNaNsWithRightValues(tssLeft, tssRight) + } + return rvs, nil +} + +func fillLeftNaNsWithRightValues(tssLeft, tssRight []*timeseries) { + // Fill gaps in tssLeft with values from tssRight as Prometheus does. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/552 + for _, tsLeft := range tssLeft { + valuesLeft := tsLeft.Values + for i, v := range valuesLeft { + if !math.IsNaN(v) { + continue + } + for _, tsRight := range tssRight { + vRight := tsRight.Values[i] + if !math.IsNaN(vRight) { + valuesLeft[i] = vRight + break } } } } +} + +func binaryOpIfnot(bfa *binaryOpFuncArg) ([]*timeseries, error) { + mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right) + var rvs []*timeseries + for k, tssLeft := range mLeft { + tssRight := seriesByKey(mRight, k) + if tssRight == nil { + rvs = append(rvs, tssLeft...) + continue + } + tssLeft = addLeftNaNsIfNoRightNaNs(tssLeft, tssRight) + rvs = append(rvs, tssLeft...) + } return rvs, nil } @@ -404,24 +444,44 @@ func binaryOpUnless(bfa *binaryOpFuncArg) ([]*timeseries, error) { rvs = append(rvs, tssLeft...) continue } - // Add gaps to tssLeft if the are no gaps at tssRight. 
- for _, tsLeft := range tssLeft { - valuesLeft := tsLeft.Values - for i := range valuesLeft { - for _, tsRight := range tssRight { - if !math.IsNaN(tsRight.Values[i]) { - valuesLeft[i] = nan - break - } - } - } - } - tssLeft = removeEmptySeries(tssLeft) + tssLeft = addLeftNaNsIfNoRightNaNs(tssLeft, tssRight) rvs = append(rvs, tssLeft...) } return rvs, nil } +func addLeftNaNsIfNoRightNaNs(tssLeft, tssRight []*timeseries) []*timeseries { + for _, tsLeft := range tssLeft { + valuesLeft := tsLeft.Values + for i := range valuesLeft { + for _, tsRight := range tssRight { + if !math.IsNaN(tsRight.Values[i]) { + valuesLeft[i] = nan + break + } + } + } + } + return removeEmptySeries(tssLeft) +} + +func seriesByKey(m map[string][]*timeseries, key string) []*timeseries { + tss := m[key] + if tss != nil { + return tss + } + if len(m) != 1 { + return nil + } + for _, tss := range m { + if isScalar(tss) { + return tss + } + return nil + } + return nil +} + func createTimeseriesMapByTagSet(be *metricsql.BinaryOpExpr, left, right []*timeseries) (map[string][]*timeseries, map[string][]*timeseries) { groupTags := be.GroupModifier.Args groupOp := strings.ToLower(be.GroupModifier.Op) diff --git a/app/vmselect/promql/eval.go b/app/vmselect/promql/eval.go index 400ebdcdb..e0b2d9a6e 100644 --- a/app/vmselect/promql/eval.go +++ b/app/vmselect/promql/eval.go @@ -382,7 +382,64 @@ func evalBinaryOp(qt *querytracer.Tracer, ec *EvalConfig, be *metricsql.BinaryOp return rv, nil } +func canPushdownCommonFilters(be *metricsql.BinaryOpExpr) bool { + switch strings.ToLower(be.Op) { + case "or", "default": + return false + } + if isAggrFuncWithoutGrouping(be.Left) || isAggrFuncWithoutGrouping(be.Right) { + return false + } + return true +} + +func isAggrFuncWithoutGrouping(e metricsql.Expr) bool { + afe, ok := e.(*metricsql.AggrFuncExpr) + if !ok { + return false + } + return len(afe.Modifier.Args) == 0 +} + func execBinaryOpArgs(qt *querytracer.Tracer, ec *EvalConfig, exprFirst, exprSecond metricsql.Expr, be *metricsql.BinaryOpExpr) ([]*timeseries, []*timeseries, error) { + if !canPushdownCommonFilters(be) { + // Execute exprFirst and exprSecond in parallel, since it is impossible to pushdown common filters + // from exprFirst to exprSecond. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2886 + qt = qt.NewChild("execute left and right sides of %q in parallel", be.Op) + defer qt.Done() + var wg sync.WaitGroup + + var tssFirst []*timeseries + var errFirst error + qtFirst := qt.NewChild("expr1") + wg.Add(1) + go func() { + defer wg.Done() + tssFirst, errFirst = evalExpr(qtFirst, ec, exprFirst) + qtFirst.Done() + }() + + var tssSecond []*timeseries + var errSecond error + qtSecond := qt.NewChild("expr2") + wg.Add(1) + go func() { + defer wg.Done() + tssSecond, errSecond = evalExpr(qtSecond, ec, exprSecond) + qtSecond.Done() + }() + + wg.Wait() + if errFirst != nil { + return nil, nil, errFirst + } + if errSecond != nil { + return nil, nil, errSecond + } + return tssFirst, tssSecond, nil + } + // Execute binary operation in the following way: // // 1) execute the exprFirst @@ -410,15 +467,9 @@ func execBinaryOpArgs(qt *querytracer.Tracer, ec *EvalConfig, exprFirst, exprSec if err != nil { return nil, nil, err } - switch strings.ToLower(be.Op) { - case "or": - // Do not pushdown common label filters from tssFirst for `or` operation, since this can filter out the needed time series from tssSecond.
- // See https://prometheus.io/docs/prometheus/latest/querying/operators/#logical-set-binary-operators for details. - default: - lfs := getCommonLabelFilters(tssFirst) - lfs = metricsql.TrimFiltersByGroupModifier(lfs, be) - exprSecond = metricsql.PushdownBinaryOpFilters(exprSecond, lfs) - } + lfs := getCommonLabelFilters(tssFirst) + lfs = metricsql.TrimFiltersByGroupModifier(lfs, be) + exprSecond = metricsql.PushdownBinaryOpFilters(exprSecond, lfs) tssSecond, err := evalExpr(qt, ec, exprSecond) if err != nil { return nil, nil, err diff --git a/app/vmselect/promql/exec_test.go b/app/vmselect/promql/exec_test.go index 350c1e35f..4727d98cb 100644 --- a/app/vmselect/promql/exec_test.go +++ b/app/vmselect/promql/exec_test.go @@ -2803,7 +2803,12 @@ func TestExecSuccess(t *testing.T) { t.Run(`scalar default vector1`, func(t *testing.T) { t.Parallel() q := `time() > 1400 default label_set(123, "foo", "bar")` - resultExpected := []netstorage.Result{} + r := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{nan, nan, nan, 1600, 1800, 2000}, + Timestamps: timestampsExpected, + } + resultExpected := []netstorage.Result{r} f(q, resultExpected) }) t.Run(`scalar default vector2`, func(t *testing.T) { @@ -6092,7 +6097,18 @@ func TestExecSuccess(t *testing.T) { t.Run(`ifnot-no-matching-timeseries`, func(t *testing.T) { t.Parallel() q := `label_set(time(), "foo", "bar") ifnot label_set(time() > 1400, "x", "y")` - resultExpected := []netstorage.Result{} + r := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{1000, 1200, 1400, 1600, 1800, 2000}, + Timestamps: timestampsExpected, + } + r.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + } + resultExpected := []netstorage.Result{r} f(q, resultExpected) }) t.Run(`quantile(-2)`, func(t *testing.T) { diff --git a/app/vmselect/promql/transform.go b/app/vmselect/promql/transform.go index a893e217e..c83616dd1 100644 --- a/app/vmselect/promql/transform.go +++ b/app/vmselect/promql/transform.go @@ -2183,17 +2183,13 @@ func transformTimezoneOffset(tfa *transformFuncArg) ([]*timeseries, error) { return nil, fmt.Errorf("cannot load timezone %q: %w", tzString, err) } - var ts timeseries - ts.denyReuse = true - timestamps := tfa.ec.getSharedTimestamps() - values := make([]float64, len(timestamps)) - for i, v := range timestamps { - _, offset := time.Unix(v/1000, 0).In(loc).Zone() - values[i] = float64(offset) + tss := evalNumber(tfa.ec, nan) + ts := tss[0] + for i, timestamp := range ts.Timestamps { + _, offset := time.Unix(timestamp/1000, 0).In(loc).Zone() + ts.Values[i] = float64(offset) } - ts.Values = values - ts.Timestamps = timestamps - return []*timeseries{&ts}, nil + return tss, nil } func transformTime(tfa *transformFuncArg) ([]*timeseries, error) { diff --git a/app/vmui/Dockerfile-web b/app/vmui/Dockerfile-web index 2575514d0..4daf7aa0f 100644 --- a/app/vmui/Dockerfile-web +++ b/app/vmui/Dockerfile-web @@ -6,7 +6,7 @@ COPY web/ /build/ RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \ GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/ -FROM alpine:3.16.0 +FROM alpine:3.16.1 USER root COPY --from=build-web-stage /build/web-amd64 /app/web diff --git a/dashboards/vmagent.json b/dashboards/vmagent.json index 0c4b8a610..c292ede53 100644 --- a/dashboards/vmagent.json +++ b/dashboards/vmagent.json @@ -6,7 +6,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "8.5.3" + 
"version": "8.4.4" }, { "type": "panel", @@ -61,12 +61,12 @@ } ] }, - "description": "Overview for VictoriaMetrics vmagent v1.73.0 or higher", + "description": "Overview for VictoriaMetrics vmagent v1.80.0 or higher", "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 1, "id": null, - "iteration": 1656943336787, + "iteration": 1657810604530, "links": [ { "icon": "doc", @@ -154,7 +154,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "targets": [ { "expr": "sum(vm_promscrape_targets{job=~\"$job\", instance=~\"$instance\", status=\"up\"})", @@ -218,7 +218,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "targets": [ { "expr": "sum(vm_promscrape_targets{job=~\"$job\", instance=~\"$instance\", status=\"down\"})", @@ -285,7 +285,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "targets": [ { "expr": "sum(increase(vm_log_messages_total{job=~\"$job\", instance=~\"$instance\", level!=\"info\"}[30m]))", @@ -344,7 +344,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "targets": [ { "expr": "sum(vm_persistentqueue_bytes_pending{job=~\"$job\", instance=~\"$instance\"})", @@ -490,7 +490,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -589,7 +589,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -702,7 +702,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -805,7 +805,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -946,7 +946,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -1039,7 +1039,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -1138,7 +1138,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -1237,7 +1237,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -1344,7 +1344,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2457,7 +2457,7 @@ "h": 8, "w": 12, "x": 0, - "y": 4 + "y": 43 }, "hiddenSeries": false, "id": 60, @@ -2480,7 +2480,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2555,7 +2555,7 @@ "h": 8, "w": 12, "x": 12, - "y": 4 + "y": 43 }, "hiddenSeries": false, "id": 66, @@ -2578,7 +2578,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2652,7 +2652,7 @@ "h": 8, "w": 12, "x": 0, - "y": 12 + "y": 51 }, "hiddenSeries": false, "id": 61, @@ 
-2675,7 +2675,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2748,7 +2748,7 @@ "h": 8, "w": 12, "x": 12, - "y": 12 + "y": 51 }, "hiddenSeries": false, "id": 65, @@ -2771,7 +2771,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2837,7 +2837,7 @@ "h": 8, "w": 12, "x": 0, - "y": 20 + "y": 59 }, "heatmap": {}, "hideZeroBuckets": false, @@ -2881,9 +2881,10 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$ds" }, - "description": "Shows saturation of every connection to remote storage. If the threshold of 0.9sec is reached, then the connection is saturated by more than 90% and vmagent won't be able to keep up. This usually means that `-remoteWrite.queues` command-line flag must be increased in order to increase the number of connections per each remote storage.\n", + "description": "Shows saturation of every connection to remote storage. If the threshold of 90% is reached, then the connection is saturated (busy or slow) by more than 90%, so vmagent won't be able to keep up and can start buffering data. \n\nThis usually means that `-remoteWrite.queues` command-line flag must be increased in order to increase the number of connections per each remote storage.\n", "fieldConfig": { "defaults": { "links": [] @@ -2896,7 +2897,7 @@ "h": 8, "w": 12, "x": 12, - "y": 20 + "y": 59 }, "hiddenSeries": false, "id": 84, @@ -2919,7 +2920,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2930,7 +2931,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(rate(vmagent_remotewrite_send_duration_seconds_total{job=~\"$job\", instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) by (instance, url)", + "expr": "sum(rate(vmagent_remotewrite_send_duration_seconds_total{job=~\"$job\", instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) by (instance, url)\n/\nmax(vmagent_remotewrite_queues{job=~\"$job\", instance=~\"$instance\", url=~\"$url\"}) by(instance, url)", "interval": "", "legendFormat": "", "refId": "A" @@ -2943,7 +2944,7 @@ "fill": true, "line": true, "op": "gt", - "value": 0.9, + "value": 90, "yaxis": "left" } ], @@ -2963,7 +2964,7 @@ "yaxes": [ { "$$hashKey": "object:662", - "format": "s", + "format": "percentunit", "logBase": 1, "min": "0", "show": true @@ -2997,7 +2998,7 @@ "h": 8, "w": 12, "x": 0, - "y": 28 + "y": 67 }, "heatmap": {}, "hideZeroBuckets": false, @@ -3053,7 +3054,7 @@ "h": 8, "w": 12, "x": 12, - "y": 28 + "y": 67 }, "heatmap": {}, "hideZeroBuckets": false, @@ -3104,7 +3105,7 @@ "h": 8, "w": 12, "x": 0, - "y": 36 + "y": 75 }, "hiddenSeries": false, "id": 88, @@ -3124,7 +3125,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -3207,7 +3208,7 @@ "h": 8, "w": 12, "x": 12, - "y": 36 + "y": 75 }, "hiddenSeries": false, "id": 90, @@ -3227,7 +3228,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.5.3", + "pluginVersion": "8.4.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -4567,7 +4568,7 @@ } ], "refresh": "", - "schemaVersion": 36, + "schemaVersion": 35, "style": "dark", "tags": [ "vmagent", @@ -4577,7 +4578,9 @@ "list": [ { "current": { - "selected": 
false + "selected": true, + "text": "VM", + "value": "VM" }, "hide": 0, "includeAll": false, diff --git a/deployment/docker/Makefile b/deployment/docker/Makefile index 96c9aca1d..0479c8be4 100644 --- a/deployment/docker/Makefile +++ b/deployment/docker/Makefile @@ -2,8 +2,8 @@ DOCKER_NAMESPACE := victoriametrics -ROOT_IMAGE ?= alpine:3.16.0 -CERTS_IMAGE := alpine:3.16.0 +ROOT_IMAGE ?= alpine:3.16.1 +CERTS_IMAGE := alpine:3.16.1 GO_BUILDER_IMAGE := golang:1.18.4-alpine BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1 BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __) diff --git a/deployment/docker/alerts.yml b/deployment/docker/alerts.yml index 0feaf4590..6f34571c3 100644 --- a/deployment/docker/alerts.yml +++ b/deployment/docker/alerts.yml @@ -270,7 +270,9 @@ groups: Ensure that destination is up and reachable." - alert: RemoteWriteConnectionIsSaturated - expr: rate(vmagent_remotewrite_send_duration_seconds_total[5m]) > 0.9 + expr: | + sum(rate(vmagent_remotewrite_send_duration_seconds_total[5m])) by(job, instance, url) + > 0.9 * max(vmagent_remotewrite_queues) by(job, instance, url) for: 15m labels: severity: warning diff --git a/deployment/docker/docker-compose.yml b/deployment/docker/docker-compose.yml index e3a0c4bd5..b2bdcf991 100644 --- a/deployment/docker/docker-compose.yml +++ b/deployment/docker/docker-compose.yml @@ -40,7 +40,7 @@ services: restart: always grafana: container_name: grafana - image: grafana/grafana:9.0.2 + image: grafana/grafana:9.0.3 depends_on: - "victoriametrics" ports: diff --git a/docs/Articles.md b/docs/Articles.md index 056c23c47..7ac3dc6e5 100644 --- a/docs/Articles.md +++ b/docs/Articles.md @@ -9,6 +9,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html). ## Third-party articles and slides about VictoriaMetrics * [Optimizing the Storage of Large Volumes of Metrics for a Long Time in VictoriaMetrics](https://percona.community/blog/2022/06/02/long-time-keeping-metrics-victoriametrics/) +* [How do We Keep Metrics for a Long Time in VictoriaMetrics](https://www.youtube.com/watch?v=SGZjY7xgDwE) * [Announcing Asserts](https://www.asserts.ai/blog/announcing-asserts/) * [Choosing a Time Series Database for High Cardinality Aggregations](https://abiosgaming.com/press/high-cardinality-aggregations/) * [Scaling to trillions of metric data points](https://engineering.razorpay.com/scaling-to-trillions-of-metric-data-points-f569a5b654f2) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index c16d15623..c347be171 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -15,6 +15,15 @@ The following tip changes can be tested by building VictoriaMetrics components f ## tip +* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): execute left and right sides of certain operations in parallel. For example, `q1 or q2`, `aggr_func(q1) <op> q2`, `q1 <op> aggr_func(q1)`. This may improve query performance if VictoriaMetrics has enough free resources for parallel processing of both sides of the operation. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2886). +* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): allow duplicate username records with different passwords in the configuration file. This allows rotating passwords without changing the username. +* FEATURE: add ability to push internal metrics (e.g.
metrics exposed at `/metrics` page) to the configured remote storage from all the VictoriaMetrics components. See [these docs](https://docs.victoriametrics.com/#push-metrics). + +* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): restart all the scrape jobs during [config reload](https://docs.victoriametrics.com/vmagent.html#configuration-update) after `global` section is changed inside `-promscrape.config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884). +* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly assume role with AWS ECS credentials. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2875). Thanks to @transacid for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2876). +* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): return series from `q1` if `q2` doesn't return matching time series in the query `q1 ifnot q2`. Previously series from `q1` weren't returned in this case. + + ## [v1.79.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.0) Released at 14-07-2022 @@ -25,7 +34,9 @@ Released at 14-07-2022 **Update note 3:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics because of added ability to query `vmselect` data from other `vmselect` nodes - see [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup), so read requests to `vmselect` will fail until the upgrade is complete. These errors will stop after all the `vmselect` and `vmstorage` nodes are updated to the new release. It is safe to downgrade to previous releases at any time. -**Update note 4:** this release removes support of deprecated in [1.70.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.70.0) param `extra_filter_labels` from [vmalert's](https://docs.victoriametrics.com/vmalert.html) groups definition. This deprecated param was replaced with [params](https://docs.victoriametrics.com/vmalert.html#url-params). +**Update note 4:** this release removes support of deprecated in [1.70.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.70.0) param `extra_filter_labels` from [vmalert's](https://docs.victoriametrics.com/vmalert.html) groups definition. This deprecated param was replaced with [params](https://docs.victoriametrics.com/vmalert.html#url-params). + +**Update note 5:** this release changes naming for published linux binaries at [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). Now names for binaries for all the supported platforms match the following template - `$(APP_NAME)-$(GOOS)-$(GOARCH)-$(VERSION).tar.gz`. For example, `victoria-metrics-linux-amd64-v1.79.0.tar.gz`. Previously linux binaries didn't have `$(GOOS)` part, e.g. they had the name `victoria-metrics-amd64-v1.79.0.tar.gz`. Please update automation scripts for upgrading VictoriaMetrics releases according to this change. * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add [azure_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#azure_sd_config) service discovery mechanism. It allows discovering Virtual Machines at [Azure Cloud](https://azure.microsoft.com/en-us/). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1364). 
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): deprecate alert's status link `/api/v1/<groupID>/<alertID>/status` in favour of `api/v1/alert?group_id=<group_id>&alert_id=<alert_id>`. The old alert's status link is still supported, but will be removed in future releases. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2825). diff --git a/docs/Cluster-VictoriaMetrics.md b/docs/Cluster-VictoriaMetrics.md index eb209e55e..86d237abc 100644 --- a/docs/Cluster-VictoriaMetrics.md +++ b/docs/Cluster-VictoriaMetrics.md @@ -648,6 +648,14 @@ Below is the output for `/path/to/vminsert -help`: Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms) -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -relabelConfig string Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal -relabelDebug @@ -778,6 +786,14 @@ Below is the output for `/path/to/vmselect -help`: Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -replicationFactor int How many copies of every time series is available on vmstorage nodes. See -replicationFactor command-line flag for vminsert nodes (default 1) -search.cacheTimestampOffset duration @@ -789,7 +805,7 @@ Below is the output for `/path/to/vmselect -help`: -search.graphiteMaxPointsPerSeries int The maximum number of points per series Graphite render API can return (default 1000000) -search.graphiteStorageStep duration - The interval between datapoints stored in the database. It is used at Graphite Render API handler for normalizing the interval between datapoints in case it isn't normalized.
It can be overriden by sending 'storage_step' query arg to /render API or by sending the desired interval via 'Storage-Step' http header during querying /render API (default 10s) + The interval between datapoints stored in the database. It is used at Graphite Render API handler for normalizing the interval between datapoints in case it isn't normalized. It can be overridden by sending 'storage_step' query arg to /render API or by sending the desired interval via 'Storage-Step' http header during querying /render API (default 10s) -search.latencyOffset duration The time when data points become visible in query results after the collection. Too small value can result in incomplete last points for query results (default 30s) -search.logSlowQueryDuration duration @@ -962,6 +978,14 @@ Below is the output for `/path/to/vmstorage -help`: Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings -precisionBits int The number of precision bits to store per each value. Lower precision bits improves data compression at the cost of precision loss (default 64) + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -retentionPeriod value Data with timestamps outside the retentionPeriod is automatically deleted The following optional suffixes are supported: h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 1) diff --git a/docs/FAQ.md b/docs/FAQ.md index b73c17ae9..3d748993c 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -255,9 +255,9 @@ Memory usage for VictoriaMetrics components can be tuned according to the follow * [Troubleshooting for vmagent](https://docs.victoriametrics.com/vmagent.html#troubleshooting) * [Troubleshooting for single-node VictoriaMetrics](https://docs.victoriametrics.com/#troubleshooting) -## How can I run VictoriaMetrics on FreeBSD? +## How can I run VictoriaMetrics on FreeBSD/OpenBSD? -VictoriaMetrics is included in FreeBSD ports, so just install it from there. See [this link](https://www.freebsd.org/cgi/ports.cgi?query=victoria&stype=all). +VictoriaMetrics is included in [OpenBSD](https://github.com/openbsd/ports/blob/c1bfea520bbb30d6e5f8d0f09115ace341f820d6/infrastructure/db/user.list#L383) and [FreeBSD](https://www.freebsd.org/cgi/ports.cgi?query=victoria&stype=all) ports so just install it from there or use pre-built binaries from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). ## Does VictoriaMetrics support the Graphite query language? diff --git a/docs/MetricsQL.md b/docs/MetricsQL.md index 41126fa27..2fb5f7cdb 100644 --- a/docs/MetricsQL.md +++ b/docs/MetricsQL.md @@ -782,7 +782,7 @@ sum by (__name__) ( #### label_transform -`label_transform(q, "label", "regexp", "replacement")` substitutes all the `regexp` occurences by the given `replacement` in the given `label`. 
+`label_transform(q, "label", "regexp", "replacement")` substitutes all the `regexp` occurrences by the given `replacement` in the given `label`. #### label_uppercase diff --git a/docs/README.md b/docs/README.md index 6211c543a..1e7dd0975 100644 --- a/docs/README.md +++ b/docs/README.md @@ -248,7 +248,7 @@ It is also safe downgrading to older versions unless [release notes](https://git The following steps must be performed during the upgrade / downgrade procedure: -* Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it. +* Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it. See [how to send signals to processes](https://stackoverflow.com/questions/33239959/send-signal-to-process-from-command-line). * Wait until the process stops. This can take a few seconds. * Start the upgraded VictoriaMetrics. @@ -411,7 +411,7 @@ and stream plain InfluxDB line protocol data to the configured TCP and/or UDP ad VictoriaMetrics performs the following transformations to the ingested InfluxDB data: * [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value - unless `db` tag exists in the InfluxDB line. The `db` label name can be overriden via `-influxDBLabel` command-line flag. + unless `db` tag exists in the InfluxDB line. The `db` label name can be overridden via `-influxDBLabel` command-line flag. * Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names. * Field values are mapped to time series values. * Tags are mapped to Prometheus labels as-is. @@ -704,7 +704,7 @@ VictoriaMetrics supports the following handlers from [Graphite Metrics API](http VictoriaMetrics accepts the following additional query args at `/metrics/find` and `/metrics/expand`: * `label` - for selecting arbitrary label values. By default `label=__name__`, i.e. metric names are selected. -* `delimiter` - for using different delimiters in metric name hierachy. For example, `/metrics/find?delimiter=_&query=node_*` would return all the metric name prefixes +* `delimiter` - for using different delimiters in metric name hierarchy. For example, `/metrics/find?delimiter=_&query=node_*` would return all the metric name prefixes that start with `node_`. By default `delimiter=.`. ### Graphite Tags API usage @@ -823,6 +823,7 @@ Send a request to `http://:8428/api/v1/admin/tsdb/delete_s where `` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) for metrics to delete. After that all the time series matching the given selector are deleted. Storage space for the deleted time series isn't freed instantly - it is freed during subsequent [background merges of data files](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282). + Note that background merges may never occur for data from previous months, so storage space won't be freed for historical data. In this case [forced merge](#forced-merge) may help freeing up storage space. 
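For reference, the forced merge mentioned above is triggered via the `/internal/force_merge` handler described in the [forced merge docs](https://docs.victoriametrics.com/#forced-merge). A minimal sketch, assuming a single-node instance listening on `localhost:8428` and a `2022_07` partition (both hypothetical values):

```console
# Free up space occupied by deleted series in the 2022_07 partition.
# Omit partition_prefix to run the forced merge over all partitions.
curl -v 'http://localhost:8428/internal/force_merge?partition_prefix=2022_07'
```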
@@ -845,6 +846,8 @@ Using the delete API is not recommended in the following cases, since it brings * Reducing disk space usage by deleting unneeded time series. This doesn't work as expected, since the deleted time series occupy disk space until the next merge operation, which can never occur when deleting too old data. [Forced merge](#forced-merge) may be used for freeing up disk space occupied by old data. + Note that VictoriaMetrics doesn't delete entries from inverted index (aka `indexdb`) for the deleted time series. + Inverted index is cleaned up once per the configured [retention](#retention). It's better to use the `-retentionPeriod` command-line flag for efficient pruning of old data. @@ -1136,7 +1139,7 @@ Extra labels may be added to all the imported metrics by passing `extra_label=na For example, `/api/v1/import/prometheus?extra_label=foo=bar` would add `{foo="bar"}` label to all the imported metrics. If timestamp is missing in ` ` Prometheus exposition format line, then the current timestamp is used during data ingestion. -It can be overriden by passing unix timestamp in *milliseconds* via `timestamp` query arg. For example, `/api/v1/import/prometheus?timestamp=1594370496905`. +It can be overridden by passing unix timestamp in *milliseconds* via `timestamp` query arg. For example, `/api/v1/import/prometheus?timestamp=1594370496905`. VictoriaMetrics accepts arbitrary number of lines in a single request to `/api/v1/import/prometheus`, i.e. it supports data streaming. @@ -1629,6 +1632,20 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet See also [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html). +## Push metrics + +All the VictoriaMetrics apps support pushing their metrics exposed at `/metrics` page to remote storage in Prometheus text exposition format. This can be done by specifying the following command-line flags: + +* `-pushmetrics.url` - the url to push metrics to. For example, `-pushmetrics.url=http://victoria-metrics:8428/api/v1/import/prometheus` instructs to push internal metrics to `/api/v1/import/prometheus` endpoint according to [these docs](#how-to-import-data-in-prometheus-exposition-format). The `-pushmetrics.url` can be specified multiple times. In this case metrics are pushed to all the specified urls. The url can contain basic auth params in the form http://user:pass@hostname/api/v1/import/prometheus . +* `-pushmetrics.interval` - the interval between pushes. By default it is set to 10 seconds. +* `-pushmetrics.extraLabel` - label to add to all the metrics before sending them to `-pushmetrics.url`. The label must be specified in the format `label="value"`. It is OK to specify multiple `-pushmetrics.extraLabel` command-line flags. In this case all the specified labels are added to all the metrics sending them to `-pushmetrics.url`. + +For example, the following command instructs VictoriaMetrics to push metrics from `/metrics` page to `https://maas.victoriametrics.com/api/v1/import/prometheus` with `user:pass` [Basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication). The `instance="foobar"` and `job="vm"` labels are added to all the metrics before sending them to the remote storage: + +```console +/path/to/victoria-metrics -pushmetrics.url=https://user:pass@maas.victoriametrics.com/api/v1/import/prometheus -pushmetrics.extraLabel='instance="foobar",job="vm"' +``` + ## Cache removal VictoriaMetrics uses various internal caches. 
These caches are stored to `<-storageDataPath>/cache` directory during graceful shutdown (e.g. when VictoriaMetrics is stopped by sending `SIGINT` signal). The caches are read on the next VictoriaMetrics startup. Sometimes it is needed to remove such caches on the next startup. This can be performed by placing `reset_cache_on_startup` file inside the `<-storageDataPath>/cache` directory before the restart of VictoriaMetrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1447) for details. @@ -1745,7 +1762,7 @@ For accessing vmalert's UI through single-node VictoriaMetrics configure `-vmale Note, that vendors (including VictoriaMetrics) are often biased when doing such tests. E.g. they try highlighting the best parts of their product, while highlighting the worst parts of competing products. -So we encourage users and all independent third parties to conduct their becnhmarks for various products +So we encourage users and all independent third parties to conduct their benchmarks for various products they are evaluating in production and publish the results. As a reference, please see [benchmarks](https://docs.victoriametrics.com/Articles.html#benchmarks) conducted by @@ -2083,6 +2100,14 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed. See also -promscrape.suppressScrapeErrorsDelay -promscrape.suppressScrapeErrorsDelay duration The delay for suppressing repeated scrape errors logging per each scrape targets. This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -relabelConfig string Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal -relabelDebug @@ -2101,7 +2126,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li -search.graphiteMaxPointsPerSeries int The maximum number of points per series Graphite render API can return (default 1000000) -search.graphiteStorageStep duration - The interval between datapoints stored in the database. It is used at Graphite Render API handler for normalizing the interval between datapoints in case it isn't normalized. It can be overriden by sending 'storage_step' query arg to /render API or by sending the desired interval via 'Storage-Step' http header during querying /render API (default 10s) + The interval between datapoints stored in the database. 
It is used at Graphite Render API handler for normalizing the interval between datapoints in case it isn't normalized. It can be overridden by sending 'storage_step' query arg to /render API or by sending the desired interval via 'Storage-Step' http header during querying /render API (default 10s) -search.latencyOffset duration The time when data points become visible in query results after the collection. Too small value can result in incomplete last points for query results (default 30s) -search.logSlowQueryDuration duration diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index 772adeafb..1ab5670b6 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -252,7 +252,7 @@ It is also safe downgrading to older versions unless [release notes](https://git The following steps must be performed during the upgrade / downgrade procedure: -* Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it. +* Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it. See [how to send signals to processes](https://stackoverflow.com/questions/33239959/send-signal-to-process-from-command-line). * Wait until the process stops. This can take a few seconds. * Start the upgraded VictoriaMetrics. @@ -415,7 +415,7 @@ and stream plain InfluxDB line protocol data to the configured TCP and/or UDP ad VictoriaMetrics performs the following transformations to the ingested InfluxDB data: * [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value - unless `db` tag exists in the InfluxDB line. The `db` label name can be overriden via `-influxDBLabel` command-line flag. + unless `db` tag exists in the InfluxDB line. The `db` label name can be overridden via `-influxDBLabel` command-line flag. * Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names. * Field values are mapped to time series values. * Tags are mapped to Prometheus labels as-is. @@ -708,7 +708,7 @@ VictoriaMetrics supports the following handlers from [Graphite Metrics API](http VictoriaMetrics accepts the following additional query args at `/metrics/find` and `/metrics/expand`: * `label` - for selecting arbitrary label values. By default `label=__name__`, i.e. metric names are selected. -* `delimiter` - for using different delimiters in metric name hierachy. For example, `/metrics/find?delimiter=_&query=node_*` would return all the metric name prefixes +* `delimiter` - for using different delimiters in metric name hierarchy. For example, `/metrics/find?delimiter=_&query=node_*` would return all the metric name prefixes that start with `node_`. By default `delimiter=.`. ### Graphite Tags API usage @@ -827,6 +827,7 @@ Send a request to `http://:8428/api/v1/admin/tsdb/delete_s where `` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) for metrics to delete. After that all the time series matching the given selector are deleted. 
Storage space for the deleted time series isn't freed instantly - it is freed during subsequent [background merges of data files](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282). + Note that background merges may never occur for data from previous months, so storage space won't be freed for historical data. In this case [forced merge](#forced-merge) may help freeing up storage space. @@ -849,6 +850,8 @@ Using the delete API is not recommended in the following cases, since it brings * Reducing disk space usage by deleting unneeded time series. This doesn't work as expected, since the deleted time series occupy disk space until the next merge operation, which can never occur when deleting too old data. [Forced merge](#forced-merge) may be used for freeing up disk space occupied by old data. + Note that VictoriaMetrics doesn't delete entries from inverted index (aka `indexdb`) for the deleted time series. + Inverted index is cleaned up once per the configured [retention](#retention). It's better to use the `-retentionPeriod` command-line flag for efficient pruning of old data. @@ -1140,7 +1143,7 @@ Extra labels may be added to all the imported metrics by passing `extra_label=na For example, `/api/v1/import/prometheus?extra_label=foo=bar` would add `{foo="bar"}` label to all the imported metrics. If timestamp is missing in ` ` Prometheus exposition format line, then the current timestamp is used during data ingestion. -It can be overriden by passing unix timestamp in *milliseconds* via `timestamp` query arg. For example, `/api/v1/import/prometheus?timestamp=1594370496905`. +It can be overridden by passing unix timestamp in *milliseconds* via `timestamp` query arg. For example, `/api/v1/import/prometheus?timestamp=1594370496905`. VictoriaMetrics accepts arbitrary number of lines in a single request to `/api/v1/import/prometheus`, i.e. it supports data streaming. @@ -1633,6 +1636,20 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet See also [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html). +## Push metrics + +All the VictoriaMetrics apps support pushing their metrics exposed at `/metrics` page to remote storage in Prometheus text exposition format. This can be done by specifying the following command-line flags: + +* `-pushmetrics.url` - the url to push metrics to. For example, `-pushmetrics.url=http://victoria-metrics:8428/api/v1/import/prometheus` instructs to push internal metrics to `/api/v1/import/prometheus` endpoint according to [these docs](#how-to-import-data-in-prometheus-exposition-format). The `-pushmetrics.url` can be specified multiple times. In this case metrics are pushed to all the specified urls. The url can contain basic auth params in the form http://user:pass@hostname/api/v1/import/prometheus . +* `-pushmetrics.interval` - the interval between pushes. By default it is set to 10 seconds. +* `-pushmetrics.extraLabel` - label to add to all the metrics before sending them to `-pushmetrics.url`. The label must be specified in the format `label="value"`. It is OK to specify multiple `-pushmetrics.extraLabel` command-line flags. In this case all the specified labels are added to all the metrics sending them to `-pushmetrics.url`. 
+ +For example, the following command instructs VictoriaMetrics to push metrics from `/metrics` page to `https://maas.victoriametrics.com/api/v1/import/prometheus` with `user:pass` [Basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication). The `instance="foobar"` and `job="vm"` labels are added to all the metrics before sending them to the remote storage: + +```console +/path/to/victoria-metrics -pushmetrics.url=https://user:pass@maas.victoriametrics.com/api/v1/import/prometheus -pushmetrics.extraLabel='instance="foobar",job="vm"' +``` + ## Cache removal VictoriaMetrics uses various internal caches. These caches are stored to `<-storageDataPath>/cache` directory during graceful shutdown (e.g. when VictoriaMetrics is stopped by sending `SIGINT` signal). The caches are read on the next VictoriaMetrics startup. Sometimes it is needed to remove such caches on the next startup. This can be performed by placing `reset_cache_on_startup` file inside the `<-storageDataPath>/cache` directory before the restart of VictoriaMetrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1447) for details. @@ -1749,7 +1766,7 @@ For accessing vmalert's UI through single-node VictoriaMetrics configure `-vmale Note, that vendors (including VictoriaMetrics) are often biased when doing such tests. E.g. they try highlighting the best parts of their product, while highlighting the worst parts of competing products. -So we encourage users and all independent third parties to conduct their becnhmarks for various products +So we encourage users and all independent third parties to conduct their benchmarks for various products they are evaluating in production and publish the results. As a reference, please see [benchmarks](https://docs.victoriametrics.com/Articles.html#benchmarks) conducted by @@ -2087,6 +2104,14 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed. See also -promscrape.suppressScrapeErrorsDelay -promscrape.suppressScrapeErrorsDelay duration The delay for suppressing repeated scrape errors logging per each scrape targets. This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -relabelConfig string Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. 
The config is reloaded on SIGHUP signal -relabelDebug @@ -2105,7 +2130,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li -search.graphiteMaxPointsPerSeries int The maximum number of points per series Graphite render API can return (default 1000000) -search.graphiteStorageStep duration - The interval between datapoints stored in the database. It is used at Graphite Render API handler for normalizing the interval between datapoints in case it isn't normalized. It can be overriden by sending 'storage_step' query arg to /render API or by sending the desired interval via 'Storage-Step' http header during querying /render API (default 10s) + The interval between datapoints stored in the database. It is used at Graphite Render API handler for normalizing the interval between datapoints in case it isn't normalized. It can be overridden by sending 'storage_step' query arg to /render API or by sending the desired interval via 'Storage-Step' http header during querying /render API (default 10s) -search.latencyOffset duration The time when data points become visible in query results after the collection. Too small value can result in incomplete last points for query results (default 30s) -search.logSlowQueryDuration duration diff --git a/docs/Troubleshooting.md b/docs/Troubleshooting.md index e8e907db5..2e05fa4b4 100644 --- a/docs/Troubleshooting.md +++ b/docs/Troubleshooting.md @@ -50,6 +50,9 @@ If you see unexpected or unreliable query results from VictoriaMetrics, then try instead of raw samples stored in VictoriaMetrics. See [these docs](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness) for details. + If you migrate from InfluxDB, then pass `-search.setLookbackToStep` command-line flag to single-node VictoriaMetrics + or to `vmselect` in VictoriaMetrics cluster. See also [how to migrate from InfluxDB to VictoriaMetrics](https://docs.victoriametrics.com/guides/migrate-from-influx.html). + 3. Sometimes response caching may lead to unexpected results when samples with older timestamps are ingested into VictoriaMetrics (aka [backfilling](https://docs.victoriametrics.com/#backfilling)). Try disabling response cache and see whether this helps. This can be done in the following ways: @@ -151,7 +154,7 @@ There are the following most commons reasons for slow data ingestion in Victoria - If the percentage of free CPU is close to 0, then VictoriaMetrics may experience arbitrary long delays during data ingestion when it cannot keep up - with the data ingestion rate. + with slightly increased data ingestion rate. - If the percentage of free memory reaches 0, then the Operating System where VictoriaMetrics components run, may have no enough memory for [page cache](https://en.wikipedia.org/wiki/Page_cache). @@ -165,8 +168,8 @@ There are the following most commons reasons for slow data ingestion in Victoria which, in turn, slows down both data ingestion and querying. See [these docs](https://docs.victoriametrics.com/#storage) for details. 4. If you run cluster version of VictoriaMetrics, then make sure `vminsert` and `vmstorage` components - are located in the same network with short network latency between them. - `vminsert` packs incoming data into in-memory packets and sends them to `vmstorage` on-by-one. + are located in the same network with small network latency between them. + `vminsert` packs incoming data into batch packets and sends them to `vmstorage` on-by-one. 
It waits until `vmstorage` returns back `ack` response before sending the next packet. If the network latency between `vminsert` and `vmstorage` is high (for example, if they run in different datacenters), then this may become limiting factor for data ingestion speed. @@ -196,11 +199,11 @@ There are the following solutions exist for slow queries: - Adding more CPU and memory to VictoriaMetrics, so it may perform the slow query faster. If you use cluster version of VictoriaMetrics, then migration of `vmselect` nodes to machines - with more CPU and RAM should help improving speed for slow queries. Remember that query performance - is always limited by resources of one vmselect which processes the query. For example, if 2vCPU on vmselect - isn't enough to process query fast enough, then prefer vertical scaling to horizontal for vmselects. - If on [ffficial Grafana dashboard for cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring) - panel `Concurrent selects` is close to the limit, then prefer horizontal scaling for vmselects. + with more CPU and RAM should help improving speed for slow queries. Query performance + is always limited by resources of one vmselect which processes the query. For example, if 2vCPU cores on `vmselect` + isn't enough to process query fast enough, then migrating `vmselect` to a machine with 4vCPU cores should increase heavy query performance by up to 2x. + If the line on `Concurrent select` graph form the [official Grafana dashboard for VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring) + is close to the limit, then prefer adding more `vmselect` nodes to the cluster. Sometimes adding more `vmstorage` nodes also can help improving the speed for slow queries. - Rewriting slow queries, so they become faster. Unfortunately it is hard determining @@ -211,6 +214,21 @@ There are the following solutions exist for slow queries: which explains how to determine and optimize slow queries. In practice many slow queries are generated because of improper use of [subqueries](https://docs.victoriametrics.com/MetricsQL.html#subqueries). + It is recommended avoiding subqueries if you don't understand clearly how they work. + It is easy to create a subquery without knowing about it. + For example, `rate(sum(some_metric))` is implicitly transformed into the following subquery + according to [implicit conversion rules for MetricsQL queries](https://docs.victoriametrics.com/MetricsQL.html#implicit-query-conversions): + + ```metricsql + rate( + sum( + default_rollup(some_metric[1i]) + )[1i:1i] + ) + ``` + + It is likely this query won't return the expected results. Instead, `sum(rate(some_metric))` must be used instead. + See [this article](https://www.robustperception.io/rate-then-sum-never-sum-then-rate/) for more details. ## Out of memory errors diff --git a/docs/vmagent.md b/docs/vmagent.md index 96f202c9e..13233d0dc 100644 --- a/docs/vmagent.md +++ b/docs/vmagent.md @@ -126,7 +126,7 @@ Please see [these docs](#relabeling) for details. ### Splitting data streams among multiple systems -`vmagent` supports splitting the collected data between muliple destinations with the help of `-remoteWrite.urlRelabelConfig`, +`vmagent` supports splitting the collected data between multiple destinations with the help of `-remoteWrite.urlRelabelConfig`, which is applied independently for each configured `-remoteWrite.url` destination. 
For example, it is possible to replicate or split data among long-term remote storage, short-term remote storage and a real-time analytical system [built on top of Kafka](https://github.com/Telefonica/prometheus-kafka-adapter). Note that each destination can receive it's own subset of the collected data due to per-destination relabeling via `-remoteWrite.urlRelabelConfig`. @@ -140,7 +140,7 @@ Also, Basic Auth can be enabled for the incoming `remote_write` requests with `- ### remote_write for clustered version -While `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets, writes are always peformed in Promethes remote_write protocol. Therefore for the [clustered version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html), `-remoteWrite.url` the command-line flag should be configured as `://:8480/insert//prometheus/api/v1/write` according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). There is also support for multitenant writes. See [these docs](#multitenancy). +While `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets, writes are always performed in Promethes remote_write protocol. Therefore for the [clustered version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html), `-remoteWrite.url` the command-line flag should be configured as `://:8480/insert//prometheus/api/v1/write` according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). There is also support for multitenant writes. See [these docs](#multitenancy). ## Multitenancy @@ -336,7 +336,7 @@ The following articles contain useful information about Prometheus relabeling: VictoriaMetrics provides the following additional relabeling actions on top of standard actions from the [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config): -* `replace_all` replaces all of the occurences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurences of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`): +* `replace_all` replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurrences of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`): ```yaml - action: replace_all @@ -346,7 +346,7 @@ VictoriaMetrics provides the following additional relabeling actions on top of s replacement: "_" ``` -* `labelmap_all` replaces all of the occurences of `regex` in all the label names with the `replacement`. For example, the following relabeling config replaces all the occurences of `-` char in all the label names with `_` char (e.g. `foo-bar-baz` label name is transformed into `foo_bar_baz`): +* `labelmap_all` replaces all of the occurrences of `regex` in all the label names with the `replacement`. For example, the following relabeling config replaces all the occurrences of `-` char in all the label names with `_` char (e.g. 
`foo-bar-baz` label name is transformed into `foo_bar_baz`): ```yaml - action: labelmap_all @@ -1083,6 +1083,14 @@ See the docs at https://docs.victoriametrics.com/vmagent.html . Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed. See also -promscrape.suppressScrapeErrorsDelay -promscrape.suppressScrapeErrorsDelay duration The delay for suppressing repeated scrape errors logging per each scrape targets. This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -remoteWrite.aws.accessKey array Optional AWS AccessKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set Supports an array of values separated by comma or specified via multiple flags. diff --git a/docs/vmalert.md b/docs/vmalert.md index 6e3ca4fc1..7c5eb39a7 100644 --- a/docs/vmalert.md +++ b/docs/vmalert.md @@ -791,6 +791,14 @@ The shortlist of configuration flags is the following: The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s) -promscrape.dnsSDCheckInterval duration Interval for checking for changes in dns. This works only if dns_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config for details (default 30s) + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -remoteRead.basicAuth.password string Optional basic auth password for -remoteRead.url -remoteRead.basicAuth.passwordFile string diff --git a/docs/vmauth.md b/docs/vmauth.md index 86bf68b69..5a7601d11 100644 --- a/docs/vmauth.md +++ b/docs/vmauth.md @@ -40,7 +40,8 @@ Each `url_prefix` in the [-auth.config](#auth-config) may contain either a singl ```yml # Arbitrary number of usernames may be put here. -# Username and bearer_token values must be unique. +# It is possible to set multiple identical usernames with different passwords. +# Such usernames can be differentiated by `name` option. 
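+# For example, the following sketch (hypothetical values, not taken from this config)
+# shows two records with the same username but different passwords,
+# differentiated by the `name` option:
+#
+#   users:
+#   - username: "foo"
+#     password: "old-password"
+#     name: "foo-old"
+#     url_prefix: "http://victoriametrics:8428"
+#   - username: "foo"
+#     password: "new-password"
+#     name: "foo-new"
+#     url_prefix: "http://victoriametrics:8428"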
users: # Requests with the 'Authorization: Bearer XXXX' and 'Authorization: Token XXXX' @@ -291,6 +292,14 @@ See the docs at https://docs.victoriametrics.com/vmauth.html . Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -reloadAuthKey string Auth key for /-/reload http endpoint. It must be passed as authKey=... -tls diff --git a/docs/vmbackup.md b/docs/vmbackup.md index a4573a807..76956a0a5 100644 --- a/docs/vmbackup.md +++ b/docs/vmbackup.md @@ -243,6 +243,14 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time- Optional origin directory on the remote storage with old backup for server-side copying when performing full backup. This speeds up full backups -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -s3ForcePathStyle Prefixing endpoint with bucket name when set false, true by default. (default true) -snapshot.createURL string diff --git a/docs/vmbackupmanager.md b/docs/vmbackupmanager.md index d1686c426..91f27ed36 100644 --- a/docs/vmbackupmanager.md +++ b/docs/vmbackupmanager.md @@ -242,6 +242,14 @@ vmbackupmanager performs regular backups according to the provided configs. Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. 
See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -runOnStart Upload backups immediately after start of the service. Otherwise the backup starts on new hour -s3ForcePathStyle diff --git a/docs/vmgateway.md b/docs/vmgateway.md index f697c46f2..0657bffe4 100644 --- a/docs/vmgateway.md +++ b/docs/vmgateway.md @@ -285,6 +285,14 @@ The shortlist of configuration flags include the following: Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -ratelimit.config string path for configuration file. Accepts url address -ratelimit.configCheckInterval duration diff --git a/docs/vmrestore.md b/docs/vmrestore.md index 009bc4be7..bd87fa364 100644 --- a/docs/vmrestore.md +++ b/docs/vmrestore.md @@ -145,6 +145,14 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings -pprofAuthKey string Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings + -pushmetrics.extraLabels array + Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url + Supports an array of values separated by comma or specified via multiple flags. + -pushmetrics.interval duration + Interval for pushing metrics to -pushmetrics.url (default 10s) + -pushmetrics.url array + Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage + Supports an array of values separated by comma or specified via multiple flags. -s3ForcePathStyle Prefixing endpoint with bucket name when set false, true by default. 
(default true) -skipBackupCompleteCheck diff --git a/go.mod b/go.mod index 485768b8a..db8eaf7cd 100644 --- a/go.mod +++ b/go.mod @@ -9,9 +9,9 @@ require ( // Do not use the original github.com/valyala/fasthttp because of issues // like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b github.com/VictoriaMetrics/fasthttp v1.1.0 - github.com/VictoriaMetrics/metrics v1.18.1 + github.com/VictoriaMetrics/metrics v1.19.3 github.com/VictoriaMetrics/metricsql v0.44.1 - github.com/aws/aws-sdk-go v1.44.53 + github.com/aws/aws-sdk-go v1.44.56 github.com/cespare/xxhash/v2 v2.1.2 github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect @@ -37,7 +37,7 @@ require ( github.com/valyala/quicktemplate v1.7.0 golang.org/x/net v0.0.0-20220708220712-1185a9018129 golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 - golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 google.golang.org/api v0.87.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -76,7 +76,7 @@ require ( golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d // indirect + google.golang.org/genproto v0.0.0-20220715211116-798f69b842b9 // indirect google.golang.org/grpc v1.48.0 // indirect google.golang.org/protobuf v1.28.0 // indirect ) diff --git a/go.sum b/go.sum index aaae38606..2d1f1a074 100644 --- a/go.sum +++ b/go.sum @@ -107,8 +107,9 @@ github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40wo github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a/ueoLdLL0= github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ= -github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0= github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA= +github.com/VictoriaMetrics/metrics v1.19.3 h1:cr7yyS6fHSzjvwCAYsJbvh8qaRfFzilkcqgHgO97e6Y= +github.com/VictoriaMetrics/metrics v1.19.3/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA= github.com/VictoriaMetrics/metricsql v0.44.1 h1:qGoRt0g84uMUscVjS7P3uDZKmjJubWKaIx9v0iHKgck= github.com/VictoriaMetrics/metricsql v0.44.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= @@ -146,8 +147,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.53 h1:2MErE8gRyBLuE1fuH2Sqlj1xoN3S6/jXb0aO/A1jGfk= -github.com/aws/aws-sdk-go v1.44.53/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.56 h1:bT+lExwagH7djxb6InKUVkEKGPAj5aAPnV85/m1fKro= +github.com/aws/aws-sdk-go v1.44.56/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod 
h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -1137,8 +1138,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e h1:NHvCuwuS43lGnYhten69ZWqi2QOj/CiDNcKbVqwVoew= -golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1381,8 +1382,8 @@ google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljW google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d h1:YbuF5+kdiC516xIP60RvlHeFbY9sRDR73QsAGHpkeVw= -google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220715211116-798f69b842b9 h1:1aEQRgZ4Gks2SRAkLzIPpIszRazwVfjSFe1cKc+e0Jg= +google.golang.org/genproto v0.0.0-20220715211116-798f69b842b9/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= diff --git a/lib/httpserver/metrics.go b/lib/appmetrics/appmetrics.go similarity index 99% rename from lib/httpserver/metrics.go rename to lib/appmetrics/appmetrics.go index 4ad6781a9..ba6885e29 100644 --- a/lib/httpserver/metrics.go +++ b/lib/appmetrics/appmetrics.go @@ -1,4 +1,4 @@ -package httpserver +package appmetrics import ( "flag" diff --git a/lib/awsapi/config.go b/lib/awsapi/config.go index f183bbe4d..31b472ddc 100644 --- a/lib/awsapi/config.go +++ b/lib/awsapi/config.go @@ -204,7 +204,11 @@ func (cfg *Config) getAPICredentials() (*credentials, error) { } if ecsMetaURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); len(ecsMetaURI) > 0 { path := "http://169.254.170.2" + ecsMetaURI - return getECSRoleCredentialsByPath(cfg.client, path) + ac, err := getECSRoleCredentialsByPath(cfg.client, path) + if err != nil { + return nil, fmt.Errorf("cannot obtain ECS role credentials: %w", err) + } + acNew = ac } // we need instance credentials if dont have access keys diff --git a/lib/httpserver/httpserver.go b/lib/httpserver/httpserver.go index ddd3352e9..eaa4bc645 100644 --- a/lib/httpserver/httpserver.go +++ b/lib/httpserver/httpserver.go @@ 
-20,6 +20,7 @@ import ( "sync/atomic" "time" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/appmetrics" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" @@ -281,7 +282,7 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques } startTime := time.Now() w.Header().Set("Content-Type", "text/plain; charset=utf-8") - WritePrometheusMetrics(w) + appmetrics.WritePrometheusMetrics(w) metricsHandlerDuration.UpdateDuration(startTime) return case "/flags": diff --git a/lib/mergeset/encoding.go b/lib/mergeset/encoding.go index 07de82286..991d65f32 100644 --- a/lib/mergeset/encoding.go +++ b/lib/mergeset/encoding.go @@ -56,7 +56,7 @@ func (ib *inmemoryBlock) Less(i, j int) bool { a.Start += cpLen b.Start += cpLen data := ib.data - return string(items[i].Bytes(data)) < string(items[j].Bytes(data)) + return string(a.Bytes(data)) < string(b.Bytes(data)) } func (ib *inmemoryBlock) Swap(i, j int) { diff --git a/lib/promrelabel/relabel.go b/lib/promrelabel/relabel.go index d9c2852d7..9c0516639 100644 --- a/lib/promrelabel/relabel.go +++ b/lib/promrelabel/relabel.go @@ -297,7 +297,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset } return labels case "labelmap_all": - // Replace all the occurences of `regex` at label names with `replacement` + // Replace all the occurrences of `regex` at label names with `replacement` for i := range src { label := &src[i] label.Name, _ = prc.replaceStringSubmatches(label.Name, prc.Replacement, prc.hasCaptureGroupInReplacement) diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go index 86c85b76b..3a9df2fc1 100644 --- a/lib/promscrape/config.go +++ b/lib/promscrape/config.go @@ -130,6 +130,10 @@ func (cfg *Config) mustRestart(prevCfg *Config) { prevScrapeCfgByName[scPrev.JobName] = scPrev } + // Restart all the scrape jobs on Global config change. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884 + needGlobalRestart := !areEqualGlobalConfigs(&cfg.Global, &prevCfg.Global) + // Loop over the the new jobs, start new ones and restart updated ones. var started, stopped, restarted int currentJobNames := make(map[string]struct{}, len(cfg.ScrapeConfigs)) @@ -142,7 +146,7 @@ func (cfg *Config) mustRestart(prevCfg *Config) { started++ continue } - if areEqualScrapeConfigs(scPrev, sc) { + if !needGlobalRestart && areEqualScrapeConfigs(scPrev, sc) { // The scrape config didn't change, so no need to restart it. // Use the reference to the previous job, so it could be stopped properly later. 
cfg.ScrapeConfigs[i] = scPrev @@ -165,6 +169,12 @@ func (cfg *Config) mustRestart(prevCfg *Config) { logger.Infof("restarted service discovery routines in %.3f seconds, stopped=%d, started=%d, restarted=%d", time.Since(startTime).Seconds(), stopped, started, restarted) } +func areEqualGlobalConfigs(a, b *GlobalConfig) bool { + sa := a.marshalJSON() + sb := b.marshalJSON() + return string(sa) == string(sb) +} + func areEqualScrapeConfigs(a, b *ScrapeConfig) bool { sa := a.marshalJSON() sb := b.marshalJSON() @@ -183,6 +193,14 @@ func (sc *ScrapeConfig) marshalJSON() []byte { return data } +func (gc *GlobalConfig) marshalJSON() []byte { + data, err := json.Marshal(gc) + if err != nil { + logger.Panicf("BUG: cannot marshal GlobalConfig: %s", err) + } + return data +} + func (cfg *Config) mustStop() { startTime := time.Now() logger.Infof("stopping service discovery routines...") diff --git a/lib/pushmetrics/pushmetrics.go b/lib/pushmetrics/pushmetrics.go new file mode 100644 index 000000000..05cf22007 --- /dev/null +++ b/lib/pushmetrics/pushmetrics.go @@ -0,0 +1,32 @@ +package pushmetrics + +import ( + "flag" + "strings" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/appmetrics" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" + "github.com/VictoriaMetrics/metrics" +) + +var ( + pushURL = flagutil.NewArray("pushmetrics.url", "Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . "+ + "By default metrics exposed at /metrics page aren't pushed to any remote storage") + pushInterval = flag.Duration("pushmetrics.interval", 10*time.Second, "Interval for pushing metrics to -pushmetrics.url") + pushExtraLabels = flagutil.NewArray("pushmetrics.extraLabels", "Optional labels to add to metrics pushed to -pushmetrics.url . "+ + `For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url`) +) + +func init() { + // The -pushmetrics.url flag can contain basic auth creds, so it mustn't be visible when exposing the flags. + flagutil.RegisterSecretFlag("pushmetrics.url") +} + +// Init must be called after flag.Parse. +func Init() { + extraLabels := strings.Join(*pushExtraLabels, ",") + for _, pu := range *pushURL { + metrics.InitPushExt(pu, *pushInterval, extraLabels, appmetrics.WritePrometheusMetrics) + } +} diff --git a/vendor/github.com/VictoriaMetrics/metrics/README.md b/vendor/github.com/VictoriaMetrics/metrics/README.md index 5eef96a66..e1a2537cb 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/README.md +++ b/vendor/github.com/VictoriaMetrics/metrics/README.md @@ -16,6 +16,9 @@ * Allows exporting distinct metric sets via distinct endpoints. See [Set](http://godoc.org/github.com/VictoriaMetrics/metrics#Set). * Supports [easy-to-use histograms](http://godoc.org/github.com/VictoriaMetrics/metrics#Histogram), which just work without any tuning. Read more about VictoriaMetrics histograms at [this article](https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). +* Can push metrics to VictoriaMetrics or to any other remote storage, which accepts metrics + in [Prometheus text exposition format](https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format). + See [these docs](http://godoc.org/github.com/VictoriaMetrics/metrics#InitPush). ### Limitations @@ -28,8 +31,8 @@ ```go import "github.com/VictoriaMetrics/metrics" -// Register various time series. 
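// Illustrative sketch, not part of this diff: how a binary might wire up the new
// lib/pushmetrics package shown above. Per the comment on pushmetrics.Init, it must
// run only after flag.Parse; this main() and its call site are hypothetical, since
// the per-binary wiring is not included in this hunk.
package main

import (
	"flag"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
)

func main() {
	flag.Parse()
	// Starts one background pusher per -pushmetrics.url value; it is a no-op when
	// the flag isn't set. The package registers -pushmetrics.url as a secret flag,
	// so basic auth credentials embedded in the URL aren't exposed.
	pushmetrics.Init()
	// ... start the rest of the application here ...
}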
-// Time series name may contain labels in Prometheus format - see below. +// Register various metrics. +// Metric name may contain labels in Prometheus format - see below. var ( // Register counter without labels. requestsTotal = metrics.NewCounter("requests_total") @@ -64,6 +67,10 @@ func requestHandler() { http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) { metrics.WritePrometheus(w, true) }) + +// ... or push registered metrics every 10 seconds to http://victoria-metrics:8428/api/v1/import/prometheus +// with the added `instance="foobar"` label to all the pushed metrics. +metrics.InitPush("http://victoria-metrics:8428/api/v1/import/prometheus", 10*time.Second, `instance="foobar"`, true) ``` See [docs](http://godoc.org/github.com/VictoriaMetrics/metrics) for more info. @@ -86,8 +93,8 @@ Because the `github.com/prometheus/client_golang` is too complex and is hard to #### Why the `metrics.WritePrometheus` doesn't expose documentation for each metric? Because this documentation is ignored by Prometheus. The documentation is for users. -Just give meaningful names to the exported metrics or add comments in the source code -or in other suitable place explaining each metric exposed from your application. +Just give [meaningful names to the exported metrics](https://prometheus.io/docs/practices/naming/#metric-names) +or add comments in the source code or in other suitable place explaining each metric exposed from your application. #### How to implement [CounterVec](https://godoc.org/github.com/prometheus/client_golang/prometheus#CounterVec) in `metrics`? diff --git a/vendor/github.com/VictoriaMetrics/metrics/metrics.go b/vendor/github.com/VictoriaMetrics/metrics/metrics.go index c28c03613..57dcd3f0a 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/metrics.go +++ b/vendor/github.com/VictoriaMetrics/metrics/metrics.go @@ -110,3 +110,8 @@ func WriteFDMetrics(w io.Writer) { func UnregisterMetric(name string) bool { return defaultSet.UnregisterMetric(name) } + +// ListMetricNames returns a list of all the metric names from default set. +func ListMetricNames() []string { + return defaultSet.ListMetricNames() +} diff --git a/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go index 12b5de8e3..005af82f3 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go +++ b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go @@ -45,13 +45,13 @@ func writeProcessMetrics(w io.Writer) { statFilepath := "/proc/self/stat" data, err := ioutil.ReadFile(statFilepath) if err != nil { - log.Printf("ERROR: cannot open %s: %s", statFilepath, err) + log.Printf("ERROR: metrics: cannot open %s: %s", statFilepath, err) return } // Search for the end of command. 
n := bytes.LastIndex(data, []byte(") ")) if n < 0 { - log.Printf("ERROR: cannot find command in parentheses in %q read from %s", data, statFilepath) + log.Printf("ERROR: metrics: cannot find command in parentheses in %q read from %s", data, statFilepath) return } data = data[n+2:] @@ -62,7 +62,7 @@ func writeProcessMetrics(w io.Writer) { &p.State, &p.Ppid, &p.Pgrp, &p.Session, &p.TtyNr, &p.Tpgid, &p.Flags, &p.Minflt, &p.Cminflt, &p.Majflt, &p.Cmajflt, &p.Utime, &p.Stime, &p.Cutime, &p.Cstime, &p.Priority, &p.Nice, &p.NumThreads, &p.ItrealValue, &p.Starttime, &p.Vsize, &p.Rss) if err != nil { - log.Printf("ERROR: cannot parse %q read from %s: %s", data, statFilepath, err) + log.Printf("ERROR: metrics: cannot parse %q read from %s: %s", data, statFilepath, err) return } @@ -89,17 +89,17 @@ func writeIOMetrics(w io.Writer) { ioFilepath := "/proc/self/io" data, err := ioutil.ReadFile(ioFilepath) if err != nil { - log.Printf("ERROR: cannot open %q: %s", ioFilepath, err) + log.Printf("ERROR: metrics: cannot open %q: %s", ioFilepath, err) } getInt := func(s string) int64 { n := strings.IndexByte(s, ' ') if n < 0 { - log.Printf("ERROR: cannot find whitespace in %q at %q", s, ioFilepath) + log.Printf("ERROR: metrics: cannot find whitespace in %q at %q", s, ioFilepath) return 0 } v, err := strconv.ParseInt(s[n+1:], 10, 64) if err != nil { - log.Printf("ERROR: cannot parse %q at %q: %s", s, ioFilepath, err) + log.Printf("ERROR: metrics: cannot parse %q at %q: %s", s, ioFilepath, err) return 0 } return v @@ -137,12 +137,12 @@ var startTimeSeconds = time.Now().Unix() func writeFDMetrics(w io.Writer) { totalOpenFDs, err := getOpenFDsCount("/proc/self/fd") if err != nil { - log.Printf("ERROR: cannot determine open file descriptors count: %s", err) + log.Printf("ERROR: metrics: cannot determine open file descriptors count: %s", err) return } maxOpenFDs, err := getMaxFilesLimit("/proc/self/limits") if err != nil { - log.Printf("ERROR: cannot determine the limit on open file descritors: %s", err) + log.Printf("ERROR: metrics: cannot determine the limit on open file descritors: %s", err) return } fmt.Fprintf(w, "process_max_fds %d\n", maxOpenFDs) @@ -211,7 +211,7 @@ type memStats struct { func writeProcessMemMetrics(w io.Writer) { ms, err := getMemStats("/proc/self/status") if err != nil { - log.Printf("ERROR: cannot determine memory status: %s", err) + log.Printf("ERROR: metrics: cannot determine memory status: %s", err) return } fmt.Fprintf(w, "process_virtual_memory_peak_bytes %d\n", ms.vmPeak) diff --git a/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go index 5e6ac935d..ca7167f80 100644 --- a/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go +++ b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package metrics diff --git a/vendor/github.com/VictoriaMetrics/metrics/push.go b/vendor/github.com/VictoriaMetrics/metrics/push.go new file mode 100644 index 000000000..a63c93693 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/push.go @@ -0,0 +1,184 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "time" +) + +// InitPushProcessMetrics sets up periodic push for 'process_*' metrics to the given pushURL with the given interval. 
+// +// extraLabels may contain comma-separated list of `label="value"` labels, which will be added +// to all the metrics before pushing them to pushURL. +// +// The metrics are pushed to pushURL in Prometheus text exposition format. +// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format +// +// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to +// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format +// +// It is OK calling InitPushProcessMetrics multiple times with different pushURL - +// in this case metrics are pushed to all the provided pushURL urls. +func InitPushProcessMetrics(pushURL string, interval time.Duration, extraLabels string) error { + writeMetrics := func(w io.Writer) { + WriteProcessMetrics(w) + } + return InitPushExt(pushURL, interval, extraLabels, writeMetrics) +} + +// InitPush sets up periodic push for globally registered metrics to the given pushURL with the given interval. +// +// extraLabels may contain comma-separated list of `label="value"` labels, which will be added +// to all the metrics before pushing them to pushURL. +// +// If pushProcessMetrics is set to true, then 'process_*' metrics are also pushed to pushURL. +// +// The metrics are pushed to pushURL in Prometheus text exposition format. +// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format +// +// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to +// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format +// +// It is OK calling InitPush multiple times with different pushURL - +// in this case metrics are pushed to all the provided pushURL urls. +func InitPush(pushURL string, interval time.Duration, extraLabels string, pushProcessMetrics bool) error { + writeMetrics := func(w io.Writer) { + WritePrometheus(w, pushProcessMetrics) + } + return InitPushExt(pushURL, interval, extraLabels, writeMetrics) +} + +// InitPush sets up periodic push for metrics from s to the given pushURL with the given interval. +// +// extraLabels may contain comma-separated list of `label="value"` labels, which will be added +// to all the metrics before pushing them to pushURL. +// +/// The metrics are pushed to pushURL in Prometheus text exposition format. +// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format +// +// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to +// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format +// +// It is OK calling InitPush multiple times with different pushURL - +// in this case metrics are pushed to all the provided pushURL urls. +func (s *Set) InitPush(pushURL string, interval time.Duration, extraLabels string) error { + writeMetrics := func(w io.Writer) { + s.WritePrometheus(w) + } + return InitPushExt(pushURL, interval, extraLabels, writeMetrics) +} + +// InitPushExt sets up periodic push for metrics obtained by calling writeMetrics with the given interval. +// +// extraLabels may contain comma-separated list of `label="value"` labels, which will be added +// to all the metrics before pushing them to pushURL. +// +// The writeMetrics callback must write metrics to w in Prometheus text exposition format without timestamps and trailing comments. 
+// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format +// +// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to +// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format +// +// It is OK calling InitPushExt multiple times with different pushURL - +// in this case metrics are pushed to all the provided pushURL urls. +func InitPushExt(pushURL string, interval time.Duration, extraLabels string, writeMetrics func(w io.Writer)) error { + if interval <= 0 { + return fmt.Errorf("interval must be positive; got %s", interval) + } + if err := validateTags(extraLabels); err != nil { + return fmt.Errorf("invalid extraLabels=%q: %w", extraLabels, err) + } + pu, err := url.Parse(pushURL) + if err != nil { + return fmt.Errorf("cannot parse pushURL=%q: %w", pushURL, err) + } + if pu.Scheme != "http" && pu.Scheme != "https" { + return fmt.Errorf("unsupported scheme in pushURL=%q; expecting 'http' or 'https'", pushURL) + } + if pu.Host == "" { + return fmt.Errorf("missing host in pushURL=%q", pushURL) + } + pushURLRedacted := pu.Redacted() + c := &http.Client{ + Timeout: interval, + } + go func() { + ticker := time.NewTicker(interval) + var bb bytes.Buffer + var tmpBuf []byte + for range ticker.C { + bb.Reset() + writeMetrics(&bb) + if len(extraLabels) > 0 { + tmpBuf = addExtraLabels(tmpBuf[:0], bb.Bytes(), extraLabels) + bb.Reset() + bb.Write(tmpBuf) + } + resp, err := c.Post(pushURL, "text/plain", &bb) + if err != nil { + log.Printf("ERROR: metrics.push: cannot push metrics to %q: %s", pushURLRedacted, err) + continue + } + if resp.StatusCode/100 != 2 { + body, _ := ioutil.ReadAll(resp.Body) + _ = resp.Body.Close() + log.Printf("ERROR: metrics.push: unexpected status code in response from %q: %d; expecting 2xx; response body: %q", + pushURLRedacted, resp.StatusCode, body) + continue + } + _ = resp.Body.Close() + } + }() + return nil +} + +func addExtraLabels(dst, src []byte, extraLabels string) []byte { + for len(src) > 0 { + var line []byte + n := bytes.IndexByte(src, '\n') + if n >= 0 { + line = src[:n] + src = src[n+1:] + } else { + line = src + src = nil + } + line = bytes.TrimSpace(line) + if len(line) == 0 { + // Skip empy lines + continue + } + if bytes.HasPrefix(line, bashBytes) { + // Copy comments as is + dst = append(dst, line...) + dst = append(dst, '\n') + continue + } + n = bytes.IndexByte(line, '{') + if n >= 0 { + dst = append(dst, line[:n+1]...) + dst = append(dst, extraLabels...) + dst = append(dst, ',') + dst = append(dst, line[n+1:]...) + } else { + n = bytes.LastIndexByte(line, ' ') + if n < 0 { + panic(fmt.Errorf("BUG: missing whitespace between metric name and metric value in Prometheus text exposition line %q", line)) + } + dst = append(dst, line[:n]...) + dst = append(dst, '{') + dst = append(dst, extraLabels...) + dst = append(dst, '}') + dst = append(dst, line[n:]...) 
+ } + dst = append(dst, '\n') + } + return dst +} + +var bashBytes = []byte("#") diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index a5b051ff7..f10e5d541 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -6191,6 +6191,87 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "devops-guru": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + }, + }, + }, "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -21058,6 +21139,67 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30192,6 +30334,26 @@ var 
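The addExtraLabels helper that closes above rewrites every pushed line: the configured labels are spliced into an existing {...} label set, or appended as a new label set before the value, while comment lines starting with # pass through untouched. The following hypothetical test inside the metrics package (addExtraLabels is unexported, so it cannot be called from outside) illustrates the resulting exposition text; the metric names and values are made up:

package metrics

import "testing"

func TestAddExtraLabelsIllustration(t *testing.T) {
	src := []byte("foo_total 123\nbar_count{path=\"/\"} 34\n# a comment\n")
	got := string(addExtraLabels(nil, src, `instance="foo"`))
	// Lines without labels get a new {...} set; lines with labels get the extra
	// labels prepended; comments are copied as-is.
	want := "foo_total{instance=\"foo\"} 123\n" +
		"bar_count{instance=\"foo\",path=\"/\"} 34\n" +
		"# a comment\n"
	if got != want {
		t.Fatalf("unexpected result:\ngot:  %q\nwant: %q", got, want)
	}
}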
awsusgovPartition = partition{ }, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index f6676b08e..e8931ee1c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.53" +const SDKVersion = "1.44.56" diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c860bd635..ca50e4e14 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -203,6 +203,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -554,6 +555,7 @@ ccflags="$@" $2 ~ /^CLONE_[A-Z_]+/ || $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && $2 ~ /^(BPF|DLT)_/ || + $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || @@ -576,7 +578,6 @@ ccflags="$@" $2 ~ /^SEEK_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || - $2 !~ /^AUDIT_RECORD_MAGIC/ && $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index dfa9bd938..b0d6c2738 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -140,6 +140,306 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUDIT_ADD = 0x3eb + AUDIT_ADD_RULE = 0x3f3 + AUDIT_ALWAYS = 0x2 + AUDIT_ANOM_ABEND = 0x6a5 + AUDIT_ANOM_CREAT = 0x6a7 + AUDIT_ANOM_LINK = 0x6a6 + AUDIT_ANOM_PROMISCUOUS = 0x6a4 + AUDIT_ARCH = 0xb + AUDIT_ARCH_AARCH64 = 0xc00000b7 + AUDIT_ARCH_ALPHA = 0xc0009026 + AUDIT_ARCH_ARCOMPACT = 0x4000005d + AUDIT_ARCH_ARCOMPACTBE = 0x5d + AUDIT_ARCH_ARCV2 = 0x400000c3 + AUDIT_ARCH_ARCV2BE = 0xc3 + AUDIT_ARCH_ARM = 0x40000028 + AUDIT_ARCH_ARMEB = 0x28 + AUDIT_ARCH_C6X = 0x4000008c + AUDIT_ARCH_C6XBE = 0x8c + AUDIT_ARCH_CRIS = 0x4000004c + AUDIT_ARCH_CSKY = 0x400000fc + AUDIT_ARCH_FRV = 0x5441 + AUDIT_ARCH_H8300 = 0x2e + AUDIT_ARCH_HEXAGON = 0xa4 + AUDIT_ARCH_I386 = 0x40000003 + AUDIT_ARCH_IA64 = 0xc0000032 + AUDIT_ARCH_LOONGARCH32 = 0x40000102 + AUDIT_ARCH_LOONGARCH64 = 0xc0000102 + AUDIT_ARCH_M32R = 0x58 + AUDIT_ARCH_M68K = 0x4 + AUDIT_ARCH_MICROBLAZE = 0xbd + AUDIT_ARCH_MIPS = 0x8 + AUDIT_ARCH_MIPS64 = 0x80000008 + AUDIT_ARCH_MIPS64N32 = 0xa0000008 + AUDIT_ARCH_MIPSEL = 0x40000008 + AUDIT_ARCH_MIPSEL64 = 0xc0000008 + AUDIT_ARCH_MIPSEL64N32 = 0xe0000008 + AUDIT_ARCH_NDS32 = 0x400000a7 + AUDIT_ARCH_NDS32BE = 0xa7 + AUDIT_ARCH_NIOS2 = 0x40000071 + AUDIT_ARCH_OPENRISC = 0x5c + AUDIT_ARCH_PARISC = 0xf + AUDIT_ARCH_PARISC64 = 0x8000000f + AUDIT_ARCH_PPC = 0x14 + AUDIT_ARCH_PPC64 = 0x80000015 + AUDIT_ARCH_PPC64LE = 0xc0000015 + AUDIT_ARCH_RISCV32 = 0x400000f3 + AUDIT_ARCH_RISCV64 = 0xc00000f3 + AUDIT_ARCH_S390 = 0x16 + AUDIT_ARCH_S390X = 0x80000016 + AUDIT_ARCH_SH = 0x2a + AUDIT_ARCH_SH64 = 0x8000002a + AUDIT_ARCH_SHEL = 0x4000002a + 
AUDIT_ARCH_SHEL64 = 0xc000002a + AUDIT_ARCH_SPARC = 0x2 + AUDIT_ARCH_SPARC64 = 0x8000002b + AUDIT_ARCH_TILEGX = 0xc00000bf + AUDIT_ARCH_TILEGX32 = 0x400000bf + AUDIT_ARCH_TILEPRO = 0x400000bc + AUDIT_ARCH_UNICORE = 0x4000006e + AUDIT_ARCH_X86_64 = 0xc000003e + AUDIT_ARCH_XTENSA = 0x5e + AUDIT_ARG0 = 0xc8 + AUDIT_ARG1 = 0xc9 + AUDIT_ARG2 = 0xca + AUDIT_ARG3 = 0xcb + AUDIT_AVC = 0x578 + AUDIT_AVC_PATH = 0x57a + AUDIT_BITMASK_SIZE = 0x40 + AUDIT_BIT_MASK = 0x8000000 + AUDIT_BIT_TEST = 0x48000000 + AUDIT_BPF = 0x536 + AUDIT_BPRM_FCAPS = 0x529 + AUDIT_CAPSET = 0x52a + AUDIT_CLASS_CHATTR = 0x2 + AUDIT_CLASS_CHATTR_32 = 0x3 + AUDIT_CLASS_DIR_WRITE = 0x0 + AUDIT_CLASS_DIR_WRITE_32 = 0x1 + AUDIT_CLASS_READ = 0x4 + AUDIT_CLASS_READ_32 = 0x5 + AUDIT_CLASS_SIGNAL = 0x8 + AUDIT_CLASS_SIGNAL_32 = 0x9 + AUDIT_CLASS_WRITE = 0x6 + AUDIT_CLASS_WRITE_32 = 0x7 + AUDIT_COMPARE_AUID_TO_EUID = 0x10 + AUDIT_COMPARE_AUID_TO_FSUID = 0xe + AUDIT_COMPARE_AUID_TO_OBJ_UID = 0x5 + AUDIT_COMPARE_AUID_TO_SUID = 0xf + AUDIT_COMPARE_EGID_TO_FSGID = 0x17 + AUDIT_COMPARE_EGID_TO_OBJ_GID = 0x4 + AUDIT_COMPARE_EGID_TO_SGID = 0x18 + AUDIT_COMPARE_EUID_TO_FSUID = 0x12 + AUDIT_COMPARE_EUID_TO_OBJ_UID = 0x3 + AUDIT_COMPARE_EUID_TO_SUID = 0x11 + AUDIT_COMPARE_FSGID_TO_OBJ_GID = 0x9 + AUDIT_COMPARE_FSUID_TO_OBJ_UID = 0x8 + AUDIT_COMPARE_GID_TO_EGID = 0x14 + AUDIT_COMPARE_GID_TO_FSGID = 0x15 + AUDIT_COMPARE_GID_TO_OBJ_GID = 0x2 + AUDIT_COMPARE_GID_TO_SGID = 0x16 + AUDIT_COMPARE_SGID_TO_FSGID = 0x19 + AUDIT_COMPARE_SGID_TO_OBJ_GID = 0x7 + AUDIT_COMPARE_SUID_TO_FSUID = 0x13 + AUDIT_COMPARE_SUID_TO_OBJ_UID = 0x6 + AUDIT_COMPARE_UID_TO_AUID = 0xa + AUDIT_COMPARE_UID_TO_EUID = 0xb + AUDIT_COMPARE_UID_TO_FSUID = 0xc + AUDIT_COMPARE_UID_TO_OBJ_UID = 0x1 + AUDIT_COMPARE_UID_TO_SUID = 0xd + AUDIT_CONFIG_CHANGE = 0x519 + AUDIT_CWD = 0x51b + AUDIT_DAEMON_ABORT = 0x4b2 + AUDIT_DAEMON_CONFIG = 0x4b3 + AUDIT_DAEMON_END = 0x4b1 + AUDIT_DAEMON_START = 0x4b0 + AUDIT_DEL = 0x3ec + AUDIT_DEL_RULE = 0x3f4 + AUDIT_DEVMAJOR = 0x64 + AUDIT_DEVMINOR = 0x65 + AUDIT_DIR = 0x6b + AUDIT_DM_CTRL = 0x53a + AUDIT_DM_EVENT = 0x53b + AUDIT_EGID = 0x6 + AUDIT_EOE = 0x528 + AUDIT_EQUAL = 0x40000000 + AUDIT_EUID = 0x2 + AUDIT_EVENT_LISTENER = 0x537 + AUDIT_EXE = 0x70 + AUDIT_EXECVE = 0x51d + AUDIT_EXIT = 0x67 + AUDIT_FAIL_PANIC = 0x2 + AUDIT_FAIL_PRINTK = 0x1 + AUDIT_FAIL_SILENT = 0x0 + AUDIT_FANOTIFY = 0x533 + AUDIT_FD_PAIR = 0x525 + AUDIT_FEATURE_BITMAP_ALL = 0x7f + AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT = 0x1 + AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME = 0x2 + AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND = 0x8 + AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH = 0x4 + AUDIT_FEATURE_BITMAP_FILTER_FS = 0x40 + AUDIT_FEATURE_BITMAP_LOST_RESET = 0x20 + AUDIT_FEATURE_BITMAP_SESSIONID_FILTER = 0x10 + AUDIT_FEATURE_CHANGE = 0x530 + AUDIT_FEATURE_LOGINUID_IMMUTABLE = 0x1 + AUDIT_FEATURE_ONLY_UNSET_LOGINUID = 0x0 + AUDIT_FEATURE_VERSION = 0x1 + AUDIT_FIELD_COMPARE = 0x6f + AUDIT_FILETYPE = 0x6c + AUDIT_FILTERKEY = 0xd2 + AUDIT_FILTER_ENTRY = 0x2 + AUDIT_FILTER_EXCLUDE = 0x5 + AUDIT_FILTER_EXIT = 0x4 + AUDIT_FILTER_FS = 0x6 + AUDIT_FILTER_PREPEND = 0x10 + AUDIT_FILTER_TASK = 0x1 + AUDIT_FILTER_TYPE = 0x5 + AUDIT_FILTER_URING_EXIT = 0x7 + AUDIT_FILTER_USER = 0x0 + AUDIT_FILTER_WATCH = 0x3 + AUDIT_FIRST_KERN_ANOM_MSG = 0x6a4 + AUDIT_FIRST_USER_MSG = 0x44c + AUDIT_FIRST_USER_MSG2 = 0x834 + AUDIT_FSGID = 0x8 + AUDIT_FSTYPE = 0x1a + AUDIT_FSUID = 0x4 + AUDIT_GET = 0x3e8 + AUDIT_GET_FEATURE = 0x3fb + AUDIT_GID = 0x5 + AUDIT_GREATER_THAN = 0x20000000 + AUDIT_GREATER_THAN_OR_EQUAL = 0x60000000 + AUDIT_INODE = 
0x66 + AUDIT_INTEGRITY_DATA = 0x708 + AUDIT_INTEGRITY_EVM_XATTR = 0x70e + AUDIT_INTEGRITY_HASH = 0x70b + AUDIT_INTEGRITY_METADATA = 0x709 + AUDIT_INTEGRITY_PCR = 0x70c + AUDIT_INTEGRITY_POLICY_RULE = 0x70f + AUDIT_INTEGRITY_RULE = 0x70d + AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_IPC = 0x517 + AUDIT_IPC_SET_PERM = 0x51f + AUDIT_KERNEL = 0x7d0 + AUDIT_KERNEL_OTHER = 0x524 + AUDIT_KERN_MODULE = 0x532 + AUDIT_LAST_FEATURE = 0x1 + AUDIT_LAST_KERN_ANOM_MSG = 0x707 + AUDIT_LAST_USER_MSG = 0x4af + AUDIT_LAST_USER_MSG2 = 0xbb7 + AUDIT_LESS_THAN = 0x10000000 + AUDIT_LESS_THAN_OR_EQUAL = 0x50000000 + AUDIT_LIST = 0x3ea + AUDIT_LIST_RULES = 0x3f5 + AUDIT_LOGIN = 0x3ee + AUDIT_LOGINUID = 0x9 + AUDIT_LOGINUID_SET = 0x18 + AUDIT_MAC_CALIPSO_ADD = 0x58a + AUDIT_MAC_CALIPSO_DEL = 0x58b + AUDIT_MAC_CIPSOV4_ADD = 0x57f + AUDIT_MAC_CIPSOV4_DEL = 0x580 + AUDIT_MAC_CONFIG_CHANGE = 0x57d + AUDIT_MAC_IPSEC_ADDSA = 0x583 + AUDIT_MAC_IPSEC_ADDSPD = 0x585 + AUDIT_MAC_IPSEC_DELSA = 0x584 + AUDIT_MAC_IPSEC_DELSPD = 0x586 + AUDIT_MAC_IPSEC_EVENT = 0x587 + AUDIT_MAC_MAP_ADD = 0x581 + AUDIT_MAC_MAP_DEL = 0x582 + AUDIT_MAC_POLICY_LOAD = 0x57b + AUDIT_MAC_STATUS = 0x57c + AUDIT_MAC_UNLBL_ALLOW = 0x57e + AUDIT_MAC_UNLBL_STCADD = 0x588 + AUDIT_MAC_UNLBL_STCDEL = 0x589 + AUDIT_MAKE_EQUIV = 0x3f7 + AUDIT_MAX_FIELDS = 0x40 + AUDIT_MAX_FIELD_COMPARE = 0x19 + AUDIT_MAX_KEY_LEN = 0x100 + AUDIT_MESSAGE_TEXT_MAX = 0x2170 + AUDIT_MMAP = 0x52b + AUDIT_MQ_GETSETATTR = 0x523 + AUDIT_MQ_NOTIFY = 0x522 + AUDIT_MQ_OPEN = 0x520 + AUDIT_MQ_SENDRECV = 0x521 + AUDIT_MSGTYPE = 0xc + AUDIT_NEGATE = 0x80000000 + AUDIT_NETFILTER_CFG = 0x52d + AUDIT_NETFILTER_PKT = 0x52c + AUDIT_NEVER = 0x0 + AUDIT_NLGRP_MAX = 0x1 + AUDIT_NOT_EQUAL = 0x30000000 + AUDIT_NR_FILTERS = 0x8 + AUDIT_OBJ_GID = 0x6e + AUDIT_OBJ_LEV_HIGH = 0x17 + AUDIT_OBJ_LEV_LOW = 0x16 + AUDIT_OBJ_PID = 0x526 + AUDIT_OBJ_ROLE = 0x14 + AUDIT_OBJ_TYPE = 0x15 + AUDIT_OBJ_UID = 0x6d + AUDIT_OBJ_USER = 0x13 + AUDIT_OPENAT2 = 0x539 + AUDIT_OPERATORS = 0x78000000 + AUDIT_PATH = 0x516 + AUDIT_PERM = 0x6a + AUDIT_PERM_ATTR = 0x8 + AUDIT_PERM_EXEC = 0x1 + AUDIT_PERM_READ = 0x4 + AUDIT_PERM_WRITE = 0x2 + AUDIT_PERS = 0xa + AUDIT_PID = 0x0 + AUDIT_POSSIBLE = 0x1 + AUDIT_PPID = 0x12 + AUDIT_PROCTITLE = 0x52f + AUDIT_REPLACE = 0x531 + AUDIT_SADDR_FAM = 0x71 + AUDIT_SECCOMP = 0x52e + AUDIT_SELINUX_ERR = 0x579 + AUDIT_SESSIONID = 0x19 + AUDIT_SET = 0x3e9 + AUDIT_SET_FEATURE = 0x3fa + AUDIT_SGID = 0x7 + AUDIT_SID_UNSET = 0xffffffff + AUDIT_SIGNAL_INFO = 0x3f2 + AUDIT_SOCKADDR = 0x51a + AUDIT_SOCKETCALL = 0x518 + AUDIT_STATUS_BACKLOG_LIMIT = 0x10 + AUDIT_STATUS_BACKLOG_WAIT_TIME = 0x20 + AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL = 0x80 + AUDIT_STATUS_ENABLED = 0x1 + AUDIT_STATUS_FAILURE = 0x2 + AUDIT_STATUS_LOST = 0x40 + AUDIT_STATUS_PID = 0x4 + AUDIT_STATUS_RATE_LIMIT = 0x8 + AUDIT_SUBJ_CLR = 0x11 + AUDIT_SUBJ_ROLE = 0xe + AUDIT_SUBJ_SEN = 0x10 + AUDIT_SUBJ_TYPE = 0xf + AUDIT_SUBJ_USER = 0xd + AUDIT_SUCCESS = 0x68 + AUDIT_SUID = 0x3 + AUDIT_SYSCALL = 0x514 + AUDIT_SYSCALL_CLASSES = 0x10 + AUDIT_TIME_ADJNTPVAL = 0x535 + AUDIT_TIME_INJOFFSET = 0x534 + AUDIT_TRIM = 0x3f6 + AUDIT_TTY = 0x527 + AUDIT_TTY_GET = 0x3f8 + AUDIT_TTY_SET = 0x3f9 + AUDIT_UID = 0x1 + AUDIT_UID_UNSET = 0xffffffff + AUDIT_UNUSED_BITS = 0x7fffc00 + AUDIT_URINGOP = 0x538 + AUDIT_USER = 0x3ed + AUDIT_USER_AVC = 0x453 + AUDIT_USER_TTY = 0x464 + AUDIT_VERSION_BACKLOG_LIMIT = 0x1 + AUDIT_VERSION_BACKLOG_WAIT_TIME = 0x2 + AUDIT_VERSION_LATEST = 0x7f + AUDIT_WATCH = 0x69 + AUDIT_WATCH_INS = 0x3ef + AUDIT_WATCH_LIST = 0x3f1 + AUDIT_WATCH_REM = 0x3f0 
AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B110 = 0x3 @@ -538,6 +838,55 @@ const ( EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EM_386 = 0x3 + EM_486 = 0x6 + EM_68K = 0x4 + EM_860 = 0x7 + EM_88K = 0x5 + EM_AARCH64 = 0xb7 + EM_ALPHA = 0x9026 + EM_ALTERA_NIOS2 = 0x71 + EM_ARCOMPACT = 0x5d + EM_ARCV2 = 0xc3 + EM_ARM = 0x28 + EM_BLACKFIN = 0x6a + EM_BPF = 0xf7 + EM_CRIS = 0x4c + EM_CSKY = 0xfc + EM_CYGNUS_M32R = 0x9041 + EM_CYGNUS_MN10300 = 0xbeef + EM_FRV = 0x5441 + EM_H8_300 = 0x2e + EM_HEXAGON = 0xa4 + EM_IA_64 = 0x32 + EM_LOONGARCH = 0x102 + EM_M32 = 0x1 + EM_M32R = 0x58 + EM_MICROBLAZE = 0xbd + EM_MIPS = 0x8 + EM_MIPS_RS3_LE = 0xa + EM_MIPS_RS4_BE = 0xa + EM_MN10300 = 0x59 + EM_NDS32 = 0xa7 + EM_NONE = 0x0 + EM_OPENRISC = 0x5c + EM_PARISC = 0xf + EM_PPC = 0x14 + EM_PPC64 = 0x15 + EM_RISCV = 0xf3 + EM_S390 = 0x16 + EM_S390_OLD = 0xa390 + EM_SH = 0x2a + EM_SPARC = 0x2 + EM_SPARC32PLUS = 0x12 + EM_SPARCV9 = 0x2b + EM_SPU = 0x17 + EM_TILEGX = 0xbf + EM_TILEPRO = 0xbc + EM_TI_C6000 = 0x8c + EM_UNICORE = 0x6e + EM_X86_64 = 0x3e + EM_XTENSA = 0x5e ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index e62611e53..869847987 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -5594,3 +5594,8 @@ const ( FR_ACT_UNREACHABLE = 0x7 FR_ACT_PROHIBIT = 0x8 ) + +const ( + AUDIT_NLGRP_NONE = 0x0 + AUDIT_NLGRP_READLOG = 0x1 +) diff --git a/vendor/modules.txt b/vendor/modules.txt index 8d9c2daaa..0396f991c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -24,7 +24,7 @@ github.com/VictoriaMetrics/fastcache github.com/VictoriaMetrics/fasthttp github.com/VictoriaMetrics/fasthttp/fasthttputil github.com/VictoriaMetrics/fasthttp/stackless -# github.com/VictoriaMetrics/metrics v1.18.1 +# github.com/VictoriaMetrics/metrics v1.19.3 ## explicit; go 1.12 github.com/VictoriaMetrics/metrics # github.com/VictoriaMetrics/metricsql v0.44.1 @@ -34,7 +34,7 @@ github.com/VictoriaMetrics/metricsql/binaryop # github.com/VividCortex/ewma v1.2.0 ## explicit; go 1.12 github.com/VividCortex/ewma -# github.com/aws/aws-sdk-go v1.44.53 +# github.com/aws/aws-sdk-go v1.44.56 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -306,7 +306,7 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f ## explicit golang.org/x/sync/errgroup -# golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e +# golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 ## explicit; go 1.17 golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix @@ -354,8 +354,8 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d -## explicit; go 1.15 +# google.golang.org/genproto v0.0.0-20220715211116-798f69b842b9 +## explicit; go 1.17 google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/iam/v1 google.golang.org/genproto/googleapis/rpc/code
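Taken together, the changes above add the -pushmetrics.url, -pushmetrics.interval and -pushmetrics.extraLabels flags (documented for vmgateway and vmrestore earlier in this diff) and route them through lib/pushmetrics into the vendored metrics package. For a program that uses github.com/VictoriaMetrics/metrics directly, the rough equivalent of -pushmetrics.url=http://victoria-metrics:8428/api/v1/import/prometheus -pushmetrics.interval=15s -pushmetrics.extraLabels='instance="foo"' is sketched below; the metric name and flag values are illustrative only:

package main

import (
	"log"
	"time"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	// Push all globally registered metrics plus process_* metrics every 15 seconds,
	// adding instance="foo" to each exposed line, mirroring the -pushmetrics.* flags.
	err := metrics.InitPush(
		"http://victoria-metrics:8428/api/v1/import/prometheus",
		15*time.Second,
		`instance="foo"`,
		true,
	)
	if err != nil {
		log.Fatalf("cannot initialize metrics push: %s", err)
	}
	metrics.GetOrCreateCounter("example_requests_total").Inc()
	// Keep the process alive so the background pusher keeps running.
	select {}
}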