diff --git a/Makefile b/Makefile index a92100797..6daefb453 100644 --- a/Makefile +++ b/Makefile @@ -53,6 +53,14 @@ vmutils: \ vmrestore \ vmctl +vmutils-pure: \ + vmagent-pure \ + vmalert-pure \ + vmauth-pure \ + vmbackup-pure \ + vmrestore-pure \ + vmctl-pure + vmutils-arm64: \ vmagent-arm64 \ vmalert-arm64 \ @@ -103,7 +111,7 @@ release-victoria-metrics-generic: victoria-metrics-$(GOARCH)-prod victoria-metrics-$(GOARCH)-prod \ && sha256sum victoria-metrics-$(GOARCH)-$(PKG_TAG).tar.gz \ victoria-metrics-$(GOARCH)-prod \ - | sed s/-$(GOARCH)// > victoria-metrics-$(GOARCH)-$(PKG_TAG)_checksums.txt + | sed s/-$(GOARCH)-prod/-prod/ > victoria-metrics-$(GOARCH)-$(PKG_TAG)_checksums.txt release-vmutils: \ release-vmutils-amd64 \ @@ -145,7 +153,7 @@ release-vmutils-generic: \ vmbackup-$(GOARCH)-prod \ vmrestore-$(GOARCH)-prod \ vmctl-$(GOARCH)-prod \ - | sed s/-$(GOARCH)// > vmutils-$(GOARCH)-$(PKG_TAG)_checksums.txt + | sed s/-$(GOARCH)-prod/-prod/ > vmutils-$(GOARCH)-$(PKG_TAG)_checksums.txt release-vmutils-windows-generic: \ vmagent-windows-$(GOARCH)-prod \ @@ -254,11 +262,21 @@ golangci-lint: install-golangci-lint install-golangci-lint: which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.29.0 +copy-docs: + echo "---\nsort: ${ORDER}\n---\n" > ${DST} + cat ${SRC} >> ${DST} + +# Copies docs for all components and adds the order tag. +# Cluster docs are supposed to be ordered as 9th. 
+# The rest of the docs are ordered manually. docs-sync: - cp app/vmagent/README.md docs/vmagent.md - cp app/vmalert/README.md docs/vmalert.md - cp app/vmauth/README.md docs/vmauth.md - cp app/vmbackup/README.md docs/vmbackup.md - cp app/vmrestore/README.md docs/vmrestore.md - cp app/vmctl/README.md docs/vmctl.md - cp README.md docs/Single-server-VictoriaMetrics.md + SRC=README.md DST=docs/Single-server-VictoriaMetrics.md ORDER=1 $(MAKE) copy-docs + SRC=app/vmagent/README.md DST=docs/vmagent.md ORDER=2 $(MAKE) copy-docs + SRC=app/vmalert/README.md DST=docs/vmalert.md ORDER=3 $(MAKE) copy-docs + SRC=app/vmauth/README.md DST=docs/vmauth.md ORDER=4 $(MAKE) copy-docs + SRC=app/vmbackup/README.md DST=docs/vmbackup.md ORDER=5 $(MAKE) copy-docs + SRC=app/vmrestore/README.md DST=docs/vmrestore.md ORDER=6 $(MAKE) copy-docs + SRC=app/vmctl/README.md DST=docs/vmctl.md ORDER=7 $(MAKE) copy-docs + SRC=app/vmgateway/README.md DST=docs/vmgateway.md ORDER=8 $(MAKE) copy-docs + + diff --git a/README.md b/README.md index 90a9355a7..07f12a508 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +# VictoriaMetrics + [![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) [![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics.svg?maxAge=604800)](https://hub.docker.com/r/victoriametrics/victoria-metrics) [![Slack](https://img.shields.io/badge/join%20slack-%23victoriametrics-brightgreen.svg)](http://slack.victoriametrics.com/) @@ -6,9 +8,7 @@ [![Build Status](https://github.com/VictoriaMetrics/VictoriaMetrics/workflows/main/badge.svg)](https://github.com/VictoriaMetrics/VictoriaMetrics/actions) [![codecov](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics/branch/master/graph/badge.svg)](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics) -![Victoria Metrics logo](logo.png "Victoria Metrics") - -## VictoriaMetrics +Victoria 
Metrics logo VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. @@ -117,6 +117,7 @@ Alphabetically sorted links to case studies: * [Prometheus querying API usage](#prometheus-querying-api-usage) * [Prometheus querying API enhancements](#prometheus-querying-api-enhancements) * [Graphite API usage](#graphite-api-usage) + * [Graphite Render API usage](#graphite-render-api-usage) * [Graphite Metrics API usage](#graphite-metrics-api-usage) * [Graphite Tags API usage](#graphite-tags-api-usage) * [How to build from sources](#how-to-build-from-sources) @@ -1324,6 +1325,8 @@ See the example of alerting rules for VM components [here](https://github.com/Vi * It is recommended to use default command-line flag values (i.e. don't set them explicitly) until the need of tweaking these flag values arises. +* It is recommended inspecting logs during troubleshooting, since they may contain useful information. + * It is recommended upgrading to the latest available release from [this page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), since the encountered issue could be already fixed there. @@ -1338,8 +1341,6 @@ See the example of alerting rules for VM components [here](https://github.com/Vi if background merge cannot be initiated due to free disk space shortage. The value shows the number of per-month partitions, which would start background merge if they had more free disk space. -* It is recommended inspecting logs during troubleshooting, since they may contain useful information. - * VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage. This may lead to the following "issues": * Data becomes available for querying in a few seconds after inserting. 
It is possible to flush in-memory buffers to persistent storage @@ -1349,10 +1350,13 @@ See the example of alerting rules for VM components [here](https://github.com/Vi * If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second, then it is likely you have too many active time series for the current amount of RAM. - VictoriaMetrics [exposes](#monitoring) `vm_slow_*` metrics, which could be used as an indicator of low amounts of RAM. - It is recommended increasing the amount of RAM on the node with VictoriaMetrics in order to improve + VictoriaMetrics [exposes](#monitoring) `vm_slow_*` metrics such as `vm_slow_row_inserts_total` and `vm_slow_metric_name_loads_total`, which could be used + as an indicator of low amounts of RAM. It is recommended increasing the amount of RAM on the node with VictoriaMetrics in order to improve ingestion and query performance in this case. +* If the order of labels for the same metrics can change over time (e.g. if `metric{k1="v1",k2="v2"}` may become `metric{k2="v2",k1="v1"}`), + then it is recommended running VictoriaMetrics with `-sortLabels` command-line flag in order to reduce memory usage and CPU usage. + * VictoriaMetrics prioritizes data ingestion over data querying. So if it has no enough resources for data ingestion, then data querying may slow down significantly. @@ -1758,6 +1762,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li The maximum time the request waits for execution when -search.maxConcurrentRequests limit is reached; see also -search.maxQueryDuration (default 10s) -search.maxStalenessInterval duration The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. 
See also '-search.maxLookback' flag, which has the same meaning due to historical reasons + -search.maxStatusRequestDuration duration + The maximum duration for /api/v1/status/* requests (default 5m0s) -search.maxStepForPointsAdjustment duration The maximum step when /api/v1/query_range handler adjusts points with timestamps closer than -search.latencyOffset to the current time. The adjustment is needed because such points may contain incomplete data (default 1m0s) -search.maxTagKeys int @@ -1788,6 +1794,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li The maximum number of CPU cores to use for small merges. Default value is used if set to 0 -snapshotAuthKey string authKey, which must be passed in query string to /snapshot* pages + -sortLabels + Whether to sort labels for incoming samples before writing them to storage. This may be needed for reducing memory usage at storage when the order of labels in incoming samples is random. For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}. Enabled sorting for labels can slow down ingestion performance a bit -storageDataPath string Path to storage data (default "victoria-metrics-data") -tls diff --git a/app/victoria-metrics/main.go b/app/victoria-metrics/main.go index 79d7f0d10..81d9b24d2 100644 --- a/app/victoria-metrics/main.go +++ b/app/victoria-metrics/main.go @@ -92,6 +92,9 @@ func main() { func requestHandler(w http.ResponseWriter, r *http.Request) bool { if r.URL.Path == "/" { + if r.Method != "GET" { + return false + } fmt.Fprintf(w, "

Single-node VictoriaMetrics.


") fmt.Fprintf(w, "See docs at https://victoriametrics.github.io/
") fmt.Fprintf(w, "Useful endpoints:
") diff --git a/app/vmagent/README.md b/app/vmagent/README.md index cf52fb800..cfc77eda2 100644 --- a/app/vmagent/README.md +++ b/app/vmagent/README.md @@ -1,4 +1,4 @@ -## vmagent +# vmagent `vmagent` is a tiny but mighty agent which helps you collect metrics from various sources and store them in [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) @@ -178,7 +178,7 @@ The following scrape types in [scrape_config](https://prometheus.io/docs/prometh Please file feature requests to [our issue tracker](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you need other service discovery mechanisms to be supported by `vmagent`. -`vmagent` also support the following additional options in `scrape_config` section: +`vmagent` also support the following additional options in `scrape_configs` section: * `disable_compression: true` - to disable response compression on a per-job basis. By default `vmagent` requests compressed responses from scrape targets to save network bandwidth. @@ -262,7 +262,7 @@ See [these docs](https://victoriametrics.github.io/#deduplication) for details. ## Scraping targets via a proxy -`vmagent` supports scraping targets via http and https proxies. Proxy address must be specified in `proxy_url` option. For example, the following scrape config instructs +`vmagent` supports scraping targets via http, https and socks5 proxies. Proxy address must be specified in `proxy_url` option. For example, the following scrape config instructs target scraping via https proxy at `https://proxy-addr:1234`: ```yml @@ -273,6 +273,7 @@ scrape_configs: Proxy can be configured with the following optional settings: +* `proxy_authorization` for generic token authorization. See [Prometheus docs for details on authorization section](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) * `proxy_bearer_token` and `proxy_bearer_token_file` for Bearer token authorization * `proxy_basic_auth` for Basic authorization. 
See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config). * `proxy_tls_config` for TLS config. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config). @@ -702,6 +703,8 @@ See the docs at https://victoriametrics.github.io/vmagent.html . -remoteWrite.urlRelabelConfig array Optional path to relabel config for the corresponding -remoteWrite.url Supports array of values separated by comma or specified via multiple flags. + -sortLabels + Whether to sort labels for incoming samples before writing them to all the configured remote storage systems. This may be needed for reducing memory usage at remote storage when the order of labels in incoming samples is random. For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}. Enabled sorting for labels can slow down ingestion performance a bit -tls Whether to enable TLS (aka HTTPS) for incoming requests. -tlsCertFile and -tlsKeyFile must be set if -tls is set -tlsCertFile string diff --git a/app/vmagent/main.go b/app/vmagent/main.go index 7d7635a10..43552cf94 100644 --- a/app/vmagent/main.go +++ b/app/vmagent/main.go @@ -145,6 +145,9 @@ func main() { func requestHandler(w http.ResponseWriter, r *http.Request) bool { if r.URL.Path == "/" { + if r.Method != "GET" { + return false + } fmt.Fprintf(w, "vmagent - see docs at https://victoriametrics.github.io/vmagent.html") return true } diff --git a/app/vmagent/remotewrite/client.go b/app/vmagent/remotewrite/client.go index f56ba3a8a..580d2625e 100644 --- a/app/vmagent/remotewrite/client.go +++ b/app/vmagent/remotewrite/client.go @@ -160,7 +160,7 @@ func getTLSConfig(argIdx int) (*tls.Config, error) { if c.CAFile == "" && c.CertFile == "" && c.KeyFile == "" && c.ServerName == "" && !c.InsecureSkipVerify { return nil, nil } - cfg, err := promauth.NewConfig(".", nil, "", "", c) + cfg, err := promauth.NewConfig(".", nil, nil, "", "", c) if err != nil { return nil, 
fmt.Errorf("cannot populate TLS config: %w", err) } diff --git a/app/vmagent/remotewrite/pendingseries.go b/app/vmagent/remotewrite/pendingseries.go index bcab93fb1..2d6cf12c0 100644 --- a/app/vmagent/remotewrite/pendingseries.go +++ b/app/vmagent/remotewrite/pendingseries.go @@ -27,6 +27,9 @@ var ( // the maximum number of rows to send per each block. const maxRowsPerBlock = 10000 +// the maximum number of labels to send per each block. +const maxLabelsPerBlock = 40000 + type pendingSeries struct { mu sync.Mutex wr writeRequest @@ -125,6 +128,7 @@ func (wr *writeRequest) reset() { } func (wr *writeRequest) flush() { + sortLabelsIfNeeded(wr.tss) wr.wr.Timeseries = wr.tss wr.adjustSampleValues() atomic.StoreUint64(&wr.lastFlushTime, fasttime.UnixTimestamp()) @@ -153,7 +157,7 @@ func (wr *writeRequest) push(src []prompbmarshal.TimeSeries) { for i := range src { tssDst = append(tssDst, prompbmarshal.TimeSeries{}) wr.copyTimeSeries(&tssDst[len(tssDst)-1], &src[i]) - if len(wr.samples) >= maxRowsPerBlock { + if len(wr.samples) >= maxRowsPerBlock || len(wr.labels) >= maxLabelsPerBlock { wr.tss = tssDst wr.flush() tssDst = wr.tss diff --git a/app/vmagent/remotewrite/remotewrite.go b/app/vmagent/remotewrite/remotewrite.go index 3a551ed0e..7961cb560 100644 --- a/app/vmagent/remotewrite/remotewrite.go +++ b/app/vmagent/remotewrite/remotewrite.go @@ -151,11 +151,13 @@ func Push(wr *prompbmarshal.WriteRequest) { for len(tss) > 0 { // Process big tss in smaller blocks in order to reduce the maximum memory usage samplesCount := 0 + labelsCount := 0 i := 0 for i < len(tss) { samplesCount += len(tss[i].Samples) + labelsCount += len(tss[i].Labels) i++ - if samplesCount > maxRowsPerBlock { + if samplesCount >= maxRowsPerBlock || labelsCount >= maxLabelsPerBlock { break } } @@ -208,7 +210,13 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL string, maxInmemoryBlocks int, c := newClient(argIdx, remoteWriteURL, sanitizedURL, fq, *queues) sf := 
significantFigures.GetOptionalArgOrDefault(argIdx, 0) rd := roundDigits.GetOptionalArgOrDefault(argIdx, 100) - pss := make([]*pendingSeries, *queues) + pssLen := *queues + if n := cgroup.AvailableCPUs(); pssLen > n { + // There is no sense in running more than availableCPUs concurrent pendingSeries, + // since every pendingSeries can saturate up to a single CPU. + pssLen = n + } + pss := make([]*pendingSeries, pssLen) for i := range pss { pss[i] = newPendingSeries(fq.MustWriteBlock, sf, rd) } diff --git a/app/vmagent/remotewrite/sort_labels.go b/app/vmagent/remotewrite/sort_labels.go new file mode 100644 index 000000000..e9ec252bd --- /dev/null +++ b/app/vmagent/remotewrite/sort_labels.go @@ -0,0 +1,51 @@ +package remotewrite + +import ( + "flag" + "sort" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" +) + +var sortLabels = flag.Bool("sortLabels", false, `Whether to sort labels for incoming samples before writing them to all the configured remote storage systems. `+ + `This may be needed for reducing memory usage at remote storage when the order of labels in incoming samples is random. `+ + `For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}. `+ + `Enabled sorting for labels can slow down ingestion performance a bit`) + +// sortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set. +func sortLabelsIfNeeded(tss []prompbmarshal.TimeSeries) { + if !*sortLabels { + return + } + // The slc is used for avoiding memory allocation when passing labels to sort.Sort. 
+ slc := sortLabelsCtxPool.Get().(*sortLabelsCtx) + for i := range tss { + slc.labels = tss[i].Labels + sort.Sort(&slc.labels) + } + slc.labels = nil + sortLabelsCtxPool.Put(slc) +} + +type sortLabelsCtx struct { + labels sortedLabels +} + +var sortLabelsCtxPool = &sync.Pool{ + New: func() interface{} { + return &sortLabelsCtx{} + }, +} + +type sortedLabels []prompbmarshal.Label + +func (sl *sortedLabels) Len() int { return len(*sl) } +func (sl *sortedLabels) Less(i, j int) bool { + a := *sl + return a[i].Name < a[j].Name +} +func (sl *sortedLabels) Swap(i, j int) { + a := *sl + a[i], a[j] = a[j], a[i] +} diff --git a/app/vmalert/README.md b/app/vmalert/README.md index e075f27db..2f3daa046 100644 --- a/app/vmalert/README.md +++ b/app/vmalert/README.md @@ -1,10 +1,10 @@ -## vmalert +# vmalert `vmalert` executes a list of given [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) or [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) rules against configured address. -### Features: +## Features * Integration with [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) TSDB; * VictoriaMetrics [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) support and expressions validation; @@ -15,7 +15,7 @@ rules against configured address. * Graphite datasource can be used for alerting and recording rules. See [these docs](#graphite) for details. * Lightweight without extra dependencies. -### Limitations: +## Limitations * `vmalert` execute queries against remote datasource which has reliability risks because of network. It is recommended to configure alerts thresholds and rules expressions with understanding that network request may fail; @@ -24,7 +24,7 @@ storage is asynchronous. Hence, user shouldn't rely on recording rules chaining recording rule is reused in next one; * `vmalert` has no UI, just an API for getting groups and rules statuses. 
-### QuickStart +## QuickStart To build `vmalert` from sources: ``` @@ -67,7 +67,7 @@ groups: [ - ] ``` -#### Groups +### Groups Each group has following attributes: ```yaml @@ -89,7 +89,7 @@ rules: [ - ... ] ``` -#### Rules +### Rules There are two types of Rules: * [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) - @@ -102,7 +102,7 @@ and save their result as a new set of time series. `vmalert` forbids to define duplicates - rules with the same combination of name, expression and labels within one group. -##### Alerting rules +#### Alerting rules The syntax for alerting rule is following: ```yaml @@ -131,7 +131,7 @@ annotations: [ : ] ``` -##### Recording rules +#### Recording rules The syntax for recording rules is following: ```yaml @@ -155,7 +155,7 @@ labels: For recording rules to work `-remoteWrite.url` must specified. -#### Alerts state on restarts +### Alerts state on restarts `vmalert` has no local storage, so alerts state is stored in the process memory. Hence, after reloading of `vmalert` the process alerts state will be lost. To avoid this situation, `vmalert` should be configured via the following flags: @@ -171,7 +171,7 @@ in configured `-remoteRead.url`, weren't updated in the last `1h` or received st rules configuration. -#### WEB +### WEB `vmalert` runs a web-server (`-httpListenAddr`) for serving metrics and alerts endpoints: * `http:///api/v1/groups` - list of all loaded groups and rules; @@ -182,7 +182,7 @@ Used as alert source in AlertManager. * `http:///-/reload` - hot configuration reload. -### Graphite +## Graphite vmalert sends requests to `<-datasource.url>/render?format=json` during evaluation of alerting and recording rules if the corresponding group or rule contains `type: "graphite"` config option. 
It is expected that the `<-datasource.url>/render` @@ -191,7 +191,7 @@ When using vmalert with both `graphite` and `prometheus` rules configured agains to set `-datasource.appendTypePrefix` flag to `true`, so vmalert can adjust URL prefix automatically based on query type. -### Configuration +## Configuration The shortlist of configuration flags is the following: ``` @@ -375,43 +375,43 @@ command-line flags with their descriptions. To reload configuration without `vmalert` restart send SIGHUP signal or send GET request to `/-/reload` endpoint. -### Contributing +## Contributing `vmalert` is mostly designed and built by VictoriaMetrics community. Feel free to share your experience and ideas for improving this software. Please keep simplicity as the main priority. -### How to build from sources +## How to build from sources It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmalert` is located in `vmutils-*` archives there. -#### Development build +### Development build 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.15. 2. Run `make vmalert` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). It builds `vmalert` binary and puts it into the `bin` folder. -#### Production build +### Production build 1. [Install docker](https://docs.docker.com/install/). 2. Run `make vmalert-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). It builds `vmalert-prod` binary and puts it into the `bin` folder. -#### ARM build +### ARM build ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/). -#### Development ARM build +### Development ARM build 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.15. 2. 
Run `make vmalert-arm` or `make vmalert-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). It builds `vmalert-arm` or `vmalert-arm64` binary respectively and puts it into the `bin` folder. -#### Production ARM build +### Production ARM build 1. [Install docker](https://docs.docker.com/install/). 2. Run `make vmalert-arm-prod` or `make vmalert-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). diff --git a/app/vmalert/web.go b/app/vmalert/web.go index a095356d9..8fc71359e 100644 --- a/app/vmalert/web.go +++ b/app/vmalert/web.go @@ -29,6 +29,9 @@ var pathList = [][]string{ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool { switch r.URL.Path { case "/": + if r.Method != "GET" { + return false + } for _, path := range pathList { p, doc := path[0], path[1] fmt.Fprintf(w, "%q - %s
", p, p, doc) diff --git a/app/vmauth/README.md b/app/vmauth/README.md index 6cb3fc6bf..a241af857 100644 --- a/app/vmauth/README.md +++ b/app/vmauth/README.md @@ -1,4 +1,4 @@ -## vmauth +# vmauth `vmauth` is a simple auth proxy and router for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics). It reads username and password from [Basic Auth headers](https://en.wikipedia.org/wiki/Basic_access_authentication), @@ -23,7 +23,8 @@ Docker images for `vmauth` are available [here](https://hub.docker.com/r/victori Pass `-help` to `vmauth` in order to see all the supported command-line flags with their descriptions. -Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML, accounting, limits, etc. +Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML, +accounting and rate limiting such as [vmgateway](https://victoriametrics.github.io/vmgateway.html). ## Auth config @@ -36,11 +37,15 @@ Auth config is represented in the following simple `yml` format: # Usernames must be unique. users: + # Requests with the 'Authorization: Bearer XXXX' header are proxied to http://localhost:8428 . + # For example, http://vmauth:8427/api/v1/query is proxied to http://localhost:8428/api/v1/query +- bearer_token: "XXXX" + url_prefix: "http://localhost:8428" # The user for querying local single-node VictoriaMetrics. # All the requests to http://vmauth:8427 with the given Basic Auth (username:password) - # will be routed to http://localhost:8428 . - # For example, http://vmauth:8427/api/v1/query is routed to http://localhost:8428/api/v1/query + # will be proxied to http://localhost:8428 . 
+ # For example, http://vmauth:8427/api/v1/query is proxied to http://localhost:8428/api/v1/query - username: "local-single-node" password: "***" url_prefix: "http://localhost:8428" @@ -48,8 +53,8 @@ users: # The user for querying account 123 in VictoriaMetrics cluster # See https://victoriametrics.github.io/Cluster-VictoriaMetrics.html#url-format # All the requests to http://vmauth:8427 with the given Basic Auth (username:password) - # will be routed to http://vmselect:8481/select/123/prometheus . - # For example, http://vmauth:8427/api/v1/query is routed to http://vmselect:8481/select/123/prometheus/api/v1/select + # will be proxied to http://vmselect:8481/select/123/prometheus . + # For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect:8481/select/123/prometheus/api/v1/select - username: "cluster-select-account-123" password: "***" url_prefix: "http://vmselect:8481/select/123/prometheus" @@ -57,8 +62,8 @@ users: # The user for inserting Prometheus data into VictoriaMetrics cluster under account 42 # See https://victoriametrics.github.io/Cluster-VictoriaMetrics.html#url-format # All the requests to http://vmauth:8427 with the given Basic Auth (username:password) - # will be routed to http://vminsert:8480/insert/42/prometheus . - # For example, http://vmauth:8427/api/v1/write is routed to http://vminsert:8480/insert/42/prometheus/api/v1/write + # will be proxied to http://vminsert:8480/insert/42/prometheus . + # For example, http://vmauth:8427/api/v1/write is proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write - username: "cluster-insert-account-42" password: "***" url_prefix: "http://vminsert:8480/insert/42/prometheus" @@ -66,9 +71,9 @@ users: # A single user for querying and inserting data: # - Requests to http://vmauth:8427/api/v1/query, http://vmauth:8427/api/v1/query_range - # and http://vmauth:8427/api/v1/label//values are routed to http://vmselect:8481/select/42/prometheus. 
- # For example, http://vmauth:8427/api/v1/query is routed to http://vmselect:8480/select/42/prometheus/api/v1/query - # - Requests to http://vmauth:8427/api/v1/write are routed to http://vminsert:8480/insert/42/prometheus/api/v1/write + # and http://vmauth:8427/api/v1/label//values are proxied to http://vmselect:8481/select/42/prometheus. + # For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect:8480/select/42/prometheus/api/v1/query + # - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write - username: "foobar" url_map: - src_paths: ["/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^/]+/values"] diff --git a/app/vmauth/auth_config.go b/app/vmauth/auth_config.go index dc8c1da2f..139056ffd 100644 --- a/app/vmauth/auth_config.go +++ b/app/vmauth/auth_config.go @@ -1,6 +1,7 @@ package main import ( + "encoding/base64" "flag" "fmt" "io/ioutil" @@ -29,10 +30,11 @@ type AuthConfig struct { // UserInfo is user information read from authConfigPath type UserInfo struct { - Username string `yaml:"username"` - Password string `yaml:"password"` - URLPrefix string `yaml:"url_prefix"` - URLMap []URLMap `yaml:"url_map"` + BearerToken string `yaml:"bearer_token"` + Username string `yaml:"username"` + Password string `yaml:"password"` + URLPrefix string `yaml:"url_prefix"` + URLMap []URLMap `yaml:"url_map"` requests *metrics.Counter } @@ -150,12 +152,27 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) { if len(uis) == 0 { return nil, fmt.Errorf("`users` section cannot be empty in AuthConfig") } - m := make(map[string]*UserInfo, len(uis)) + byAuthToken := make(map[string]*UserInfo, len(uis)) + byUsername := make(map[string]bool, len(uis)) + byBearerToken := make(map[string]bool, len(uis)) for i := range uis { ui := &uis[i] - if m[ui.Username] != nil { + if ui.BearerToken == "" && ui.Username == "" { + return nil, fmt.Errorf("either bearer_token or username must be set") + } + 
if ui.BearerToken != "" && ui.Username != "" { + return nil, fmt.Errorf("bearer_token=%q and username=%q cannot be set simultaneously", ui.BearerToken, ui.Username) + } + if byBearerToken[ui.BearerToken] { + return nil, fmt.Errorf("duplicate bearer_token found; bearer_token: %q", ui.BearerToken) + } + if byUsername[ui.Username] { return nil, fmt.Errorf("duplicate username found; username: %q", ui.Username) } + authToken := getAuthToken(ui.BearerToken, ui.Username, ui.Password) + if byAuthToken[authToken] != nil { + return nil, fmt.Errorf("duplicate auth token found for bearer_token=%q, username=%q: %q", authToken, ui.BearerToken, ui.Username) + } if len(ui.URLPrefix) > 0 { urlPrefix, err := sanitizeURLPrefix(ui.URLPrefix) if err != nil { @@ -176,10 +193,29 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) { if len(ui.URLMap) == 0 && len(ui.URLPrefix) == 0 { return nil, fmt.Errorf("missing `url_prefix`") } - ui.requests = metrics.GetOrCreateCounter(fmt.Sprintf(`vmauth_user_requests_total{username=%q}`, ui.Username)) - m[ui.Username] = ui + if ui.BearerToken != "" { + if ui.Password != "" { + return nil, fmt.Errorf("password shouldn't be set for bearer_token %q", ui.BearerToken) + } + ui.requests = metrics.GetOrCreateCounter(`vmauth_user_requests_total{username="bearer_token"}`) + byBearerToken[ui.BearerToken] = true + } + if ui.Username != "" { + ui.requests = metrics.GetOrCreateCounter(fmt.Sprintf(`vmauth_user_requests_total{username=%q}`, ui.Username)) + byUsername[ui.Username] = true + } + byAuthToken[authToken] = ui } - return m, nil + return byAuthToken, nil +} + +func getAuthToken(bearerToken, username, password string) string { + if bearerToken != "" { + return "Bearer " + bearerToken + } + token := username + ":" + password + token64 := base64.StdEncoding.EncodeToString([]byte(token)) + return "Basic " + token64 } func sanitizeURLPrefix(urlPrefix string) (string, error) { diff --git a/app/vmauth/auth_config_test.go 
b/app/vmauth/auth_config_test.go index 0033b2b42..759136e5f 100644 --- a/app/vmauth/auth_config_test.go +++ b/app/vmauth/auth_config_test.go @@ -56,6 +56,22 @@ users: url_prefix: http:///bar `) + // Username and bearer_token in a single config + f(` +users: +- username: foo + bearer_token: bbb + url_prefix: http://foo.bar +`) + + // Bearer_token and password in a single config + f(` +users: +- password: foo + bearer_token: bbb + url_prefix: http://foo.bar +`) + // Duplicate users f(` users: @@ -67,6 +83,17 @@ users: url_prefix: https://sss.sss `) + // Duplicate bearer_tokens + f(` +users: +- bearer_token: foo + url_prefix: http://foo.bar +- username: bar + url_prefix: http://xxx.yyy +- bearer_token: foo + url_prefix: https://sss.sss +`) + // Missing url_prefix in url_map f(` users: @@ -113,7 +140,7 @@ users: password: bar url_prefix: http://aaa:343/bbb `, map[string]*UserInfo{ - "foo": { + getAuthToken("", "foo", "bar"): { Username: "foo", Password: "bar", URLPrefix: "http://aaa:343/bbb", @@ -128,11 +155,11 @@ users: - username: bar url_prefix: https://bar/x/// `, map[string]*UserInfo{ - "foo": { + getAuthToken("", "foo", ""): { Username: "foo", URLPrefix: "http://foo", }, - "bar": { + getAuthToken("", "bar", ""): { Username: "bar", URLPrefix: "https://bar/x", }, @@ -141,15 +168,15 @@ users: // non-empty URLMap f(` users: -- username: foo +- bearer_token: foo url_map: - src_paths: ["/api/v1/query","/api/v1/query_range","/api/v1/label/[^./]+/.+"] url_prefix: http://vmselect/select/0/prometheus - src_paths: ["/api/v1/write"] url_prefix: http://vminsert/insert/0/prometheus `, map[string]*UserInfo{ - "foo": { - Username: "foo", + getAuthToken("foo", "", ""): { + BearerToken: "foo", URLMap: []URLMap{ { SrcPaths: getSrcPaths([]string{"/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^./]+/.+"}), diff --git a/app/vmauth/main.go b/app/vmauth/main.go index 282467347..23c22d282 100644 --- a/app/vmauth/main.go +++ b/app/vmauth/main.go @@ -47,16 +47,16 @@ func main() { } 
func requestHandler(w http.ResponseWriter, r *http.Request) bool { - username, password, ok := r.BasicAuth() - if !ok { + authToken := r.Header.Get("Authorization") + if authToken == "" { w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) - http.Error(w, "missing `Authorization: Basic *` header", http.StatusUnauthorized) + http.Error(w, "missing `Authorization` request header", http.StatusUnauthorized) return true } ac := authConfig.Load().(map[string]*UserInfo) - ui := ac[username] - if ui == nil || ui.Password != password { - httpserver.Errorf(w, r, "cannot find the provided username %q or password in config", username) + ui := ac[authToken] + if ui == nil { + httpserver.Errorf(w, r, "cannot find the provided auth token %q in config", authToken) return true } ui.requests.Inc() diff --git a/app/vmbackup/README.md b/app/vmbackup/README.md index 811d71fae..6f9e4a95f 100644 --- a/app/vmbackup/README.md +++ b/app/vmbackup/README.md @@ -1,4 +1,4 @@ -## vmbackup +# vmbackup `vmbackup` creates VictoriaMetrics data backups from [instant snapshots](https://victoriametrics.github.io/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots). 
diff --git a/app/vmctl/README.md b/app/vmctl/README.md index 61813ab52..f26acba85 100644 --- a/app/vmctl/README.md +++ b/app/vmctl/README.md @@ -9,33 +9,6 @@ Features: - [x] InfluxDB: migrate data from InfluxDB to VictoriaMetrics - [ ] Storage Management: data re-balancing between nodes -# Table of contents - -* [Articles](#articles) -* [How to build](#how-to-build) -* [Migrating data from InfluxDB 1.x](#migrating-data-from-influxdb-1x) - * [Data mapping](#data-mapping) - * [Configuration](#configuration) - * [Filtering](#filtering) -* [Migrating data from InfluxDB 2.x](#migrating-data-from-influxdb-2x) -* [Migrating data from Prometheus](#migrating-data-from-prometheus) - * [Data mapping](#data-mapping-1) - * [Configuration](#configuration-1) - * [Filtering](#filtering-1) -* [Migrating data from Thanos](#migrating-data-from-thanos) - * [Current data](#current-data) - * [Historical data](#historical-data) -* [Migrating data from VictoriaMetrics](#migrating-data-from-victoriametrics) - * [Native protocol](#native-protocol) -* [Tuning](#tuning) - * [Influx mode](#influx-mode) - * [Prometheus mode](#prometheus-mode) - * [VictoriaMetrics importer](#victoriametrics-importer) - * [Importer stats](#importer-stats) -* [Significant figures](#significant-figures) -* [Adding extra labels](#adding-extra-labels) - - ## Articles * [How to migrate data from Prometheus](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043) diff --git a/app/vmgateway/README.md b/app/vmgateway/README.md new file mode 100644 index 000000000..1a599230a --- /dev/null +++ b/app/vmgateway/README.md @@ -0,0 +1,287 @@ +# vmgateway + + +vmgateway + +`vmgateway` is a proxy for Victoria Metrics TSDB. 
It provides the following features: + +* Rate Limiter + * Based on cluster tenants' utilization supports multiple time interval limits for ingestion/retrieving metrics +* Token Access Control + * Supports additional per-label access control for Single and Cluster versions of Victoria Metrics TSDB + * Provides access by tenantID at Cluster version + * Allows to separate write/read/admin access to data + +`vmgateway` is included in an [enterprise package](https://victoriametrics.com/enterprise.html). + + +## Access Control + +vmgateway-ac + +`vmgateway` supports jwt based authentication. With jwt payload can be configured access to specific tenant, labels, read/write. + +jwt token must be in following format: +```json +{ + "exp": 1617304574, + "vm_access": { + "tenant_id": { + "account_id": 1, + "project_id": 5 + }, + "extra_labels": { + "team": "dev", + "project": "mobile" + }, + "mode": 1 + } +} +``` +Where: +- `exp` - required, expire time in unix_timestamp. If token expires, `vmgateway` rejects request. +- `vm_access` - required, dict with claim info, minimum form: `{"vm_access": {"tenand_id": {}}` +- `tenant_id` - optional, make sense only for cluster mode, routes request to corresponding tenant. +- `extra_labels` - optional, key-value pairs for label filters - added to ingested or selected metrics. +- `mode` - optional, access mode for api - read, write, full. supported values: 0 - full (default value), 1 - read, 2 - write. 
+ +## QuickStart + +Start single version of Victoria Metrics + +```bash +# single +# start node +./bin/victoria-metrics --selfScrapeInterval=10s +``` + +Start vmgateway + +```bash +./bin/vmgateway -eula -enable.auth -read.url http://localhost:8428 --write.url http://localhost:8428 +``` + +Retrieve data from database +```bash +curl 'http://localhost:8431/api/v1/series/count' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ2bV9hY2Nlc3MiOnsidGVuYW50X2lkIjp7fSwicm9sZSI6MX0sImV4cCI6MTkzOTM0NjIxMH0.5WUxEfdcV9hKo4CtQdtuZYOGpGXWwaqM9VuVivMMrVg' +``` + + Request with incorrect token or with out token will be rejected: +```bash +curl 'http://localhost:8431/api/v1/series/count' + +curl 'http://localhost:8431/api/v1/series/count' -H 'Authorization: Bearer incorrect-token' +``` + + +## Rate Limiter + +vmgateway-rl + + Limits incoming requests by given pre-configured limits. It supports read and write limiting by a tenant. + + `vmgateway` needs datasource for rate limits queries. It can be single-node or cluster version of `victoria-metrics`. +It must have metrics scrapped from cluster, that you want to rate limit. + +List of supported limit types: +- `queries` - count of api requests made at tenant to read api, such as `/api/v1/query`, `/api/v1/series` and others. +- `active_series` - count of current active series at given tenant. +- `new_series` - count of created series aka churn rate +- `rows_inserted` - count of inserted rows per tenant. + +List of supported time windows: +- `minute` +- `hour` + +Limits can be specified per tenant or at global level, if you omit `project_id` and `account_id`. + +Example of configuration file: + +```yaml +limits: + - type: queries + value: 1000 + resolution: minute + - type: queries + value: 10000 + resolution: hour + - type: queries + value: 10 + resolution: minute + project_id: 5 + account_id: 1 +``` + +## QuickStart + + cluster version required for rate limiting. 
+```bash +# start datasource for cluster metrics + +cat << EOF > cluster.yaml +scrape_configs: + - job_name: cluster + scrape_interval: 5s + static_configs: + - targets: ['127.0.0.1:8481','127.0.0.1:8482','127.0.0.1:8480'] +EOF + +./bin/victoria-metrics --promscrape.config cluster.yaml + +# start cluster + +# start vmstorage, vmselect and vminsert +./bin/vmstorage -eula +./bin/vmselect -eula -storageNode 127.0.0.1:8401 +./bin/vminsert -eula -storageNode 127.0.0.1:8400 + +# create base rate limitng config: +cat << EOF > limit.yaml +limits: + - type: queries + value: 100 + - type: rows_inserted + value: 100000 + - type: new_series + value: 1000 + - type: active_series + value: 100000 + - type: queries + value: 1 + account_id: 15 +EOF + +# start gateway with clusterMoe +./bin/vmgateway -eula -enable.rateLimit -ratelimit.config limit.yaml -datasource.url http://localhost:8428 -enable.auth -clusterMode -write.url=http://localhost:8480 --read.url=http://localhost:8481 + +# ingest simple metric to tenant 1:5 +curl 'http://localhost:8431/api/v1/import/prometheus' -X POST -d 'foo{bar="baz1"} 123' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MjAxNjIwMDAwMDAsInZtX2FjY2VzcyI6eyJ0ZW5hbnRfaWQiOnsiYWNjb3VudF9pZCI6MTV9fX0.PB1_KXDKPUp-40pxOGk6lt_jt9Yq80PIMpWVJqSForQ' +# read metric from tenant 1:5 +curl 'http://localhost:8431/api/v1/labels' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MjAxNjIwMDAwMDAsInZtX2FjY2VzcyI6eyJ0ZW5hbnRfaWQiOnsiYWNjb3VudF9pZCI6MTV9fX0.PB1_KXDKPUp-40pxOGk6lt_jt9Yq80PIMpWVJqSForQ' + +# check rate limit +``` + +## Configuration + +The shortlist of configuration flags is the following: +```bash + -clusterMode + enable it for cluster version + -datasource.appendTypePrefix + Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to VMSelect URL. 
+ -datasource.basicAuth.password string + Optional basic auth password for -datasource.url + -datasource.basicAuth.username string + Optional basic auth username for -datasource.url + -datasource.lookback duration + Lookback defines how far to look into past when evaluating queries. For example, if datasource.lookback=5m then param "time" with value now()-5m will be added to every query. + -datasource.maxIdleConnections int + Defines the number of idle (keep-alive connections) to configured datasource.Consider to set this value equal to the value: groups_total * group.concurrency. Too low value may result into high number of sockets in TIME_WAIT state. (default 100) + -datasource.queryStep duration + queryStep defines how far a value can fallback to when evaluating queries. For example, if datasource.queryStep=15s then param "step" with value "15s" will be added to every query. + -datasource.tlsCAFile string + Optional path to TLS CA file to use for verifying connections to -datasource.url. By default system CA is used + -datasource.tlsCertFile string + Optional path to client-side TLS certificate file to use when connecting to -datasource.url + -datasource.tlsInsecureSkipVerify + Whether to skip tls verification when connecting to -datasource.url + -datasource.tlsKeyFile string + Optional path to client-side TLS certificate key to use when connecting to -datasource.url + -datasource.tlsServerName string + Optional TLS server name to use for connections to -datasource.url. By default the server name from -datasource.url is used + -datasource.url string + Victoria Metrics or VMSelect url. Required parameter. E.g. http://127.0.0.1:8428 + -enable.auth + enables auth with jwt token + -enable.rateLimit + enables rate limiter + -enableTCP6 + Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used + -envflag.enable + Whether to enable reading flags from environment variables additionally to command line. 
Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isnt set + -envflag.prefix string + Prefix for environment variables if -envflag.enable is set + -eula + By specifying this flag you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf + -fs.disableMmap + Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread() + -http.connTimeout duration + Incoming http connections are closed after the configured timeout. This may help spreading incoming load among a cluster of services behind load balancer. Note that the real timeout may be bigger by up to 10% as a protection from Thundering herd problem (default 2m0s) + -http.disableResponseCompression + Disable compression of HTTP responses for saving CPU resources. By default compression is enabled to save network bandwidth + -http.idleConnTimeout duration + Timeout for incoming idle http connections (default 1m0s) + -http.maxGracefulShutdownDuration duration + The maximum duration for graceful shutdown of HTTP server. Highly loaded server may require increased value for graceful shutdown (default 7s) + -http.pathPrefix string + An optional prefix to add to all the paths handled by http server. For example, if '-http.pathPrefix=/foo/bar' is set, then all the http requests will be handled on '/foo/bar/*' paths. This may be useful for proxied requests. See https://www.robustperception.io/using-external-urls-and-proxies-with-prometheus + -http.shutdownDelay duration + Optional delay before http server shutdown. 
During this dealy the servier returns non-OK responses from /health page, so load balancers can route new requests to other servers + -httpAuth.password string + Password for HTTP Basic Auth. The authentication is disabled if -httpAuth.username is empty + -httpAuth.username string + Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password + -httpListenAddr string + TCP address to listen for http connections (default ":8431") + -loggerDisableTimestamps + Whether to disable writing timestamps in logs + -loggerErrorsPerSecondLimit int + Per-second limit on the number of ERROR messages. If more than the given number of errors are emitted per second, then the remaining errors are suppressed. Zero value disables the rate limit + -loggerFormat string + Format for logs. Possible values: default, json (default "default") + -loggerLevel string + Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "INFO") + -loggerOutput string + Output for the logs. Supported values: stderr, stdout (default "stderr") + -loggerTimezone string + Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") + -loggerWarnsPerSecondLimit int + Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit + -memory.allowedBytes size + Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. 
Too high value may evict too much data from OS page cache, which will result in higher disk IO usage + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) + -memory.allowedPercent float + Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) + -metricsAuthKey string + Auth key for /metrics. It overrides httpAuth settings + -pprofAuthKey string + Auth key for /debug/pprof. It overrides httpAuth settings + -ratelimit.config string + path for configuration file + -ratelimit.extraLabels array + additional labels, that will be applied to fetchdata from datasource + Supports array of values separated by comma or specified via multiple flags. + -ratelimit.refreshInterval duration + (default 5s) + -read.url string + read access url address, example: http://vmselect:8481 + -tls + Whether to enable TLS (aka HTTPS) for incoming requests. -tlsCertFile and -tlsKeyFile must be set if -tls is set + -tlsCertFile string + Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs, since RSA certs are slow + -tlsKeyFile string + Path to file with TLS key. Used only if -tls is set + -version + Show VictoriaMetrics version + -write.url string + write access url address, example: http://vminsert:8480 + +``` + +## TroubleShooting + +* Access control: + * incorrect `jwt` format, try https://jwt.io/#debugger-io with our tokens + * expired token, check `exp` field. +* Rate Limiting: + * `scrape_interval` at datasource, reduce it to apply limits faster. + + +## Limitations + +* Access Control: + * `jwt` token must be validated by external system, currently `vmgateway` can't validate the signature. 
+* RateLimiting: + * limits applied based on queries to `datasource.url` + * only cluster version can be rate-limited. diff --git a/app/vmgateway/vmgateway-access-control.jpg b/app/vmgateway/vmgateway-access-control.jpg new file mode 100644 index 000000000..91988329a Binary files /dev/null and b/app/vmgateway/vmgateway-access-control.jpg differ diff --git a/app/vmgateway/vmgateway-overview.jpeg b/app/vmgateway/vmgateway-overview.jpeg new file mode 100644 index 000000000..adb30aa59 Binary files /dev/null and b/app/vmgateway/vmgateway-overview.jpeg differ diff --git a/app/vmgateway/vmgateway-rate-limiting.jpg b/app/vmgateway/vmgateway-rate-limiting.jpg new file mode 100644 index 000000000..2849a0094 Binary files /dev/null and b/app/vmgateway/vmgateway-rate-limiting.jpg differ diff --git a/app/vmgateway/vmgateway.png b/app/vmgateway/vmgateway.png new file mode 100644 index 000000000..79b8579b7 Binary files /dev/null and b/app/vmgateway/vmgateway.png differ diff --git a/app/vminsert/common/insert_ctx.go b/app/vminsert/common/insert_ctx.go index ce6ff849f..e62cf267c 100644 --- a/app/vminsert/common/insert_ctx.go +++ b/app/vminsert/common/insert_ctx.go @@ -14,7 +14,7 @@ import ( // InsertCtx contains common bits for data points insertion. type InsertCtx struct { - Labels []prompb.Label + Labels sortedLabels mrs []storage.MetricRow metricNamesBuf []byte diff --git a/app/vminsert/common/sort_labels.go b/app/vminsert/common/sort_labels.go new file mode 100644 index 000000000..16fa88cc0 --- /dev/null +++ b/app/vminsert/common/sort_labels.go @@ -0,0 +1,32 @@ +package common + +import ( + "flag" + "sort" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" +) + +var sortLabels = flag.Bool("sortLabels", false, `Whether to sort labels for incoming samples before writing them to storage. `+ + `This may be needed for reducing memory usage at storage when the order of labels in incoming samples is random. 
`+ + `For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}. `+ + `Enabled sorting for labels can slow down ingestion performance a bit`) + +// SortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set +func (ctx *InsertCtx) SortLabelsIfNeeded() { + if *sortLabels { + sort.Sort(&ctx.Labels) + } +} + +type sortedLabels []prompb.Label + +func (sl *sortedLabels) Len() int { return len(*sl) } +func (sl *sortedLabels) Less(i, j int) bool { + a := *sl + return string(a[i].Name) < string(a[j].Name) +} +func (sl *sortedLabels) Swap(i, j int) { + a := *sl + a[i], a[j] = a[j], a[i] +} diff --git a/app/vminsert/csvimport/request_handler.go b/app/vminsert/csvimport/request_handler.go index fc858936e..6997dd4cb 100644 --- a/app/vminsert/csvimport/request_handler.go +++ b/app/vminsert/csvimport/request_handler.go @@ -55,6 +55,7 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error { // Skip metric without labels. continue } + ctx.SortLabelsIfNeeded() if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil { return err } diff --git a/app/vminsert/graphite/request_handler.go b/app/vminsert/graphite/request_handler.go index 84532485c..5aa24f929 100644 --- a/app/vminsert/graphite/request_handler.go +++ b/app/vminsert/graphite/request_handler.go @@ -45,6 +45,7 @@ func insertRows(rows []parser.Row) error { // Skip metric without labels. continue } + ctx.SortLabelsIfNeeded() if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil { return err } diff --git a/app/vminsert/influx/request_handler.go b/app/vminsert/influx/request_handler.go index 784c2c3ec..f1c81bf80 100644 --- a/app/vminsert/influx/request_handler.go +++ b/app/vminsert/influx/request_handler.go @@ -117,11 +117,13 @@ func insertRows(db string, rows []parser.Row, extraLabels []prompbmarshal.Label) // Skip metric without labels. 
continue } + ic.SortLabelsIfNeeded() if err := ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, f.Value); err != nil { return err } } } else { + ic.SortLabelsIfNeeded() ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels) labelsLen := len(ic.Labels) for j := range r.Fields { diff --git a/app/vminsert/native/request_handler.go b/app/vminsert/native/request_handler.go index cb36025dc..78cbe2d8e 100644 --- a/app/vminsert/native/request_handler.go +++ b/app/vminsert/native/request_handler.go @@ -65,6 +65,7 @@ func insertRows(block *parser.Block, extraLabels []prompbmarshal.Label) error { // Skip metric without labels. return nil } + ic.SortLabelsIfNeeded() ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels) values := block.Values timestamps := block.Timestamps diff --git a/app/vminsert/opentsdb/request_handler.go b/app/vminsert/opentsdb/request_handler.go index 852708862..49a6157a1 100644 --- a/app/vminsert/opentsdb/request_handler.go +++ b/app/vminsert/opentsdb/request_handler.go @@ -45,6 +45,7 @@ func insertRows(rows []parser.Row) error { // Skip metric without labels. continue } + ctx.SortLabelsIfNeeded() if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil { return err } diff --git a/app/vminsert/opentsdbhttp/request_handler.go b/app/vminsert/opentsdbhttp/request_handler.go index 83fc33729..b5927ecc7 100644 --- a/app/vminsert/opentsdbhttp/request_handler.go +++ b/app/vminsert/opentsdbhttp/request_handler.go @@ -63,6 +63,7 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error { // Skip metric without labels. 
continue } + ctx.SortLabelsIfNeeded() if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil { return err } diff --git a/app/vminsert/prometheusimport/request_handler.go b/app/vminsert/prometheusimport/request_handler.go index cc916e8b5..aa65b7da5 100644 --- a/app/vminsert/prometheusimport/request_handler.go +++ b/app/vminsert/prometheusimport/request_handler.go @@ -60,6 +60,7 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error { // Skip metric without labels. continue } + ctx.SortLabelsIfNeeded() if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil { return err } diff --git a/app/vminsert/prompush/push.go b/app/vminsert/prompush/push.go index 1c6ebe0d7..40d495971 100644 --- a/app/vminsert/prompush/push.go +++ b/app/vminsert/prompush/push.go @@ -62,6 +62,7 @@ func push(ctx *common.InsertCtx, tss []prompbmarshal.TimeSeries) { // Skip metric without labels. continue } + ctx.SortLabelsIfNeeded() var metricNameRaw []byte var err error for i := range ts.Samples { diff --git a/app/vminsert/promremotewrite/request_handler.go b/app/vminsert/promremotewrite/request_handler.go index 15f3895ed..e56f12e25 100644 --- a/app/vminsert/promremotewrite/request_handler.go +++ b/app/vminsert/promremotewrite/request_handler.go @@ -61,6 +61,7 @@ func insertRows(timeseries []prompb.TimeSeries, extraLabels []prompbmarshal.Labe // Skip metric without labels. continue } + ctx.SortLabelsIfNeeded() var metricNameRaw []byte var err error samples := ts.Samples diff --git a/app/vminsert/vmimport/request_handler.go b/app/vminsert/vmimport/request_handler.go index 64002b4d4..5b4332ac9 100644 --- a/app/vminsert/vmimport/request_handler.go +++ b/app/vminsert/vmimport/request_handler.go @@ -67,6 +67,7 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error { // Skip metric without labels. 
continue } + ic.SortLabelsIfNeeded() ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels) values := r.Values timestamps := r.Timestamps diff --git a/app/vmrestore/README.md b/app/vmrestore/README.md index 4abd1bc9c..2f8f713cb 100644 --- a/app/vmrestore/README.md +++ b/app/vmrestore/README.md @@ -1,4 +1,4 @@ -## vmrestore +# vmrestore `vmrestore` restores data from backups created by [vmbackup](https://victoriametrics.github.io/vbackup.html). VictoriaMetrics `v1.29.0` and newer versions must be used for working with the restored data. diff --git a/app/vmselect/main.go b/app/vmselect/main.go index b8c26c872..cdf21b952 100644 --- a/app/vmselect/main.go +++ b/app/vmselect/main.go @@ -351,23 +351,29 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { } return true case "/api/v1/rules": - // Return dumb placeholder + // Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#rules rulesRequests.Inc() w.Header().Set("Content-Type", "application/json; charset=utf-8") fmt.Fprintf(w, "%s", `{"status":"success","data":{"groups":[]}}`) return true case "/api/v1/alerts": - // Return dumb placehloder + // Return dumb placehloder for https://prometheus.io/docs/prometheus/latest/querying/api/#alerts alertsRequests.Inc() w.Header().Set("Content-Type", "application/json; charset=utf-8") fmt.Fprintf(w, "%s", `{"status":"success","data":{"alerts":[]}}`) return true case "/api/v1/metadata": - // Return dumb placeholder + // Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#querying-metric-metadata metadataRequests.Inc() w.Header().Set("Content-Type", "application/json; charset=utf-8") fmt.Fprintf(w, "%s", `{"status":"success","data":{}}`) return true + case "/api/v1/query_exemplars": + // Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#querying-exemplars + queryExemplarsRequests.Inc() + w.Header().Set("Content-Type", 
"application/json; charset=utf-8") + fmt.Fprintf(w, "%s", `{"status":"success","data":null}`) + return true case "/api/v1/admin/tsdb/delete_series": deleteRequests.Inc() authKey := r.FormValue("authKey") @@ -490,7 +496,8 @@ var ( graphiteTagsDelSeriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/delSeries"}`) graphiteTagsDelSeriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/delSeries"}`) - rulesRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/rules"}`) - alertsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/alerts"}`) - metadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/metadata"}`) + rulesRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/rules"}`) + alertsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/alerts"}`) + metadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/metadata"}`) + queryExemplarsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/query_exemplars"}`) ) diff --git a/app/vmselect/prometheus/prometheus.go b/app/vmselect/prometheus/prometheus.go index 8ab587d4f..f89c5fb5b 100644 --- a/app/vmselect/prometheus/prometheus.go +++ b/app/vmselect/prometheus/prometheus.go @@ -610,7 +610,7 @@ var labelValuesDuration = metrics.NewSummary(`vm_request_duration_seconds{path=" // LabelsCountHandler processes /api/v1/labels/count request. 
func LabelsCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error { - deadline := searchutils.GetDeadlineForQuery(r, startTime) + deadline := searchutils.GetDeadlineForStatusRequest(r, startTime) labelEntries, err := netstorage.GetLabelEntries(deadline) if err != nil { return fmt.Errorf(`cannot obtain label entries: %w`, err) @@ -634,7 +634,7 @@ const secsPerDay = 3600 * 24 // // See https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error { - deadline := searchutils.GetDeadlineForQuery(r, startTime) + deadline := searchutils.GetDeadlineForStatusRequest(r, startTime) if err := r.ParseForm(); err != nil { return fmt.Errorf("cannot parse form values: %w", err) } @@ -810,7 +810,7 @@ var labelsDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/ // SeriesCountHandler processes /api/v1/series/count request. func SeriesCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error { - deadline := searchutils.GetDeadlineForQuery(r, startTime) + deadline := searchutils.GetDeadlineForStatusRequest(r, startTime) n, err := netstorage.GetSeriesCount(deadline) if err != nil { return fmt.Errorf("cannot obtain series count: %w", err) diff --git a/app/vmselect/promql/aggr.go b/app/vmselect/promql/aggr.go index 5d6a71bf9..2555ad75f 100644 --- a/app/vmselect/promql/aggr.go +++ b/app/vmselect/promql/aggr.go @@ -618,7 +618,9 @@ func newAggrFuncTopK(isReverse bool) aggrFunc { }) fillNaNsAtIdx(n, ks[n], tss) } - return removeNaNs(tss) + tss = removeNaNs(tss) + reverseSeries(tss) + return tss } return aggrFuncExt(afe, args[1], &afa.ae.Modifier, afa.ae.Limit, true) } @@ -683,7 +685,17 @@ func getRangeTopKTimeseries(tss []*timeseries, modifier *metricsql.ModifierExpr, if remainingSumTS != nil { tss = append(tss, remainingSumTS) } - return removeNaNs(tss) + tss = removeNaNs(tss) + reverseSeries(tss) + return tss +} + +func 
reverseSeries(tss []*timeseries) { + j := len(tss) + for i := 0; i < len(tss)/2; i++ { + j-- + tss[i], tss[j] = tss[j], tss[i] + } } func getRemainingSumTimeseries(tss []*timeseries, modifier *metricsql.ModifierExpr, ks []float64, remainingSumTagName string) *timeseries { @@ -693,8 +705,14 @@ func getRemainingSumTimeseries(tss []*timeseries, modifier *metricsql.ModifierEx var dst timeseries dst.CopyFromShallowTimestamps(tss[0]) removeGroupTags(&dst.MetricName, modifier) + tagValue := remainingSumTagName + n := strings.IndexByte(remainingSumTagName, '=') + if n >= 0 { + tagValue = remainingSumTagName[n+1:] + remainingSumTagName = remainingSumTagName[:n] + } dst.MetricName.RemoveTag(remainingSumTagName) - dst.MetricName.AddTag(remainingSumTagName, remainingSumTagName) + dst.MetricName.AddTag(remainingSumTagName, tagValue) for i, k := range ks { kn := getIntK(k, len(tss)) var sum float64 diff --git a/app/vmselect/promql/exec.go b/app/vmselect/promql/exec.go index 25e9953d9..5d594ab90 100644 --- a/app/vmselect/promql/exec.go +++ b/app/vmselect/promql/exec.go @@ -85,21 +85,22 @@ func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result, } func maySortResults(e metricsql.Expr, tss []*timeseries) bool { - if len(tss) > 100 { - // There is no sense in sorting a lot of results - return false - } - fe, ok := e.(*metricsql.FuncExpr) - if !ok { - return true - } - switch fe.Name { - case "sort", "sort_desc", - "sort_by_label", "sort_by_label_desc": - return false - default: - return true + switch v := e.(type) { + case *metricsql.FuncExpr: + switch strings.ToLower(v.Name) { + case "sort", "sort_desc", + "sort_by_label", "sort_by_label_desc": + return false + } + case *metricsql.AggrFuncExpr: + switch strings.ToLower(v.Name) { + case "topk", "bottomk", "outliersk", + "topk_max", "topk_min", "topk_avg", "topk_median", + "bottomk_max", "bottomk_min", "bottomk_avg", "bottomk_median": + return false + } } + return true } func timeseriesToResult(tss 
[]*timeseries, maySort bool) ([]netstorage.Result, error) { diff --git a/app/vmselect/promql/exec_test.go b/app/vmselect/promql/exec_test.go index db3389416..61d9318ff 100644 --- a/app/vmselect/promql/exec_test.go +++ b/app/vmselect/promql/exec_test.go @@ -1264,6 +1264,12 @@ func TestExecSuccess(t *testing.T) { Values: []float64{1000, 1200, 1400, 1600, 1800, 2000}, Timestamps: timestampsExpected, } + r.MetricName.Tags = []storage.Tag{ + { + Key: []byte("tagname"), + Value: []byte("foobar"), + }, + } resultExpected := []netstorage.Result{r} f(q, resultExpected) }) @@ -1278,6 +1284,12 @@ func TestExecSuccess(t *testing.T) { Values: []float64{1000, 1200, 1400, 1600, 1800, 2000}, Timestamps: timestampsExpected, } + r.MetricName.Tags = []storage.Tag{ + { + Key: []byte("tagname"), + Value: []byte("foobar"), + }, + } resultExpected := []netstorage.Result{r} f(q, resultExpected) }) @@ -4744,25 +4756,25 @@ func TestExecSuccess(t *testing.T) { }) t.Run(`topk(1)`, func(t *testing.T) { t.Parallel() - q := `sort(topk(1, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss")))` + q := `topk(1, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss"))` r1 := netstorage.Result{ - MetricName: metricNameExpected, - Values: []float64{10, 10, 10, nan, nan, nan}, - Timestamps: timestampsExpected, - } - r1.MetricName.Tags = []storage.Tag{{ - Key: []byte("foo"), - Value: []byte("bar"), - }} - r2 := netstorage.Result{ MetricName: metricNameExpected, Values: []float64{nan, nan, nan, 10.666666666666666, 12, 13.333333333333334}, Timestamps: timestampsExpected, } - r2.MetricName.Tags = []storage.Tag{{ + r1.MetricName.Tags = []storage.Tag{{ Key: []byte("baz"), Value: []byte("sss"), }} + r2 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{10, 10, 10, nan, nan, nan}, + Timestamps: timestampsExpected, + } + r2.MetricName.Tags = []storage.Tag{{ + Key: []byte("foo"), + Value: []byte("bar"), + }} resultExpected := []netstorage.Result{r1, r2} f(q, 
resultExpected) }) @@ -4813,7 +4825,7 @@ func TestExecSuccess(t *testing.T) { }) t.Run(`topk_max(1, remaining_sum)`, func(t *testing.T) { t.Parallel() - q := `sort_desc(topk_max(1, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss"), "remaining_sum"))` + q := `sort_desc(topk_max(1, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss"), "remaining_sum=foo"))` r1 := netstorage.Result{ MetricName: metricNameExpected, Values: []float64{nan, nan, nan, 10.666666666666666, 12, 13.333333333333334}, @@ -4831,7 +4843,7 @@ func TestExecSuccess(t *testing.T) { r2.MetricName.Tags = []storage.Tag{ { Key: []byte("remaining_sum"), - Value: []byte("remaining_sum"), + Value: []byte("foo"), }, } resultExpected := []netstorage.Result{r1, r2} @@ -5035,25 +5047,25 @@ func TestExecSuccess(t *testing.T) { }) t.Run(`bottomk(1)`, func(t *testing.T) { t.Parallel() - q := `sort(bottomk(1, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss")))` + q := `bottomk(1, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss"))` r1 := netstorage.Result{ - MetricName: metricNameExpected, - Values: []float64{6.666666666666667, 8, 9.333333333333334, nan, nan, nan}, - Timestamps: timestampsExpected, - } - r1.MetricName.Tags = []storage.Tag{{ - Key: []byte("baz"), - Value: []byte("sss"), - }} - r2 := netstorage.Result{ MetricName: metricNameExpected, Values: []float64{nan, nan, nan, 10, 10, 10}, Timestamps: timestampsExpected, } - r2.MetricName.Tags = []storage.Tag{{ + r1.MetricName.Tags = []storage.Tag{{ Key: []byte("foo"), Value: []byte("bar"), }} + r2 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{6.666666666666667, 8, 9.333333333333334, nan, nan, nan}, + Timestamps: timestampsExpected, + } + r2.MetricName.Tags = []storage.Tag{{ + Key: []byte("baz"), + Value: []byte("sss"), + }} resultExpected := []netstorage.Result{r1, r2} f(q, resultExpected) }) diff --git a/app/vmselect/promql/transform.go 
b/app/vmselect/promql/transform.go index 8165802c3..4402772b7 100644 --- a/app/vmselect/promql/transform.go +++ b/app/vmselect/promql/transform.go @@ -1446,11 +1446,12 @@ func transformLabelCopyExt(tfa *transformFuncArg, removeSrcLabels bool) ([]*time for i, srcLabel := range srcLabels { dstLabel := dstLabels[i] value := mn.GetTagValue(srcLabel) + if len(value) == 0 { + // Do not remove destination label if the source label doesn't exist. + continue + } dstValue := getDstValue(mn, dstLabel) *dstValue = append((*dstValue)[:0], value...) - if len(value) == 0 { - mn.RemoveTag(dstLabel) - } if removeSrcLabels && srcLabel != dstLabel { mn.RemoveTag(srcLabel) } diff --git a/app/vmselect/searchutils/searchutils.go b/app/vmselect/searchutils/searchutils.go index 1033f6ba1..93dc1fbcf 100644 --- a/app/vmselect/searchutils/searchutils.go +++ b/app/vmselect/searchutils/searchutils.go @@ -16,8 +16,9 @@ import ( ) var ( - maxExportDuration = flag.Duration("search.maxExportDuration", time.Hour*24*30, "The maximum duration for /api/v1/export call") - maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution") + maxExportDuration = flag.Duration("search.maxExportDuration", time.Hour*24*30, "The maximum duration for /api/v1/export call") + maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution") + maxStatusRequestDuration = flag.Duration("search.maxStatusRequestDuration", time.Minute*5, "The maximum duration for /api/v1/status/* requests") ) func roundToSeconds(ms int64) int64 { @@ -125,6 +126,12 @@ func GetDeadlineForQuery(r *http.Request, startTime time.Time) Deadline { return getDeadlineWithMaxDuration(r, startTime, dMax, "-search.maxQueryDuration") } +// GetDeadlineForStatusRequest returns deadline for the given request to /api/v1/status/*. 
+func GetDeadlineForStatusRequest(r *http.Request, startTime time.Time) Deadline { + dMax := maxStatusRequestDuration.Milliseconds() + return getDeadlineWithMaxDuration(r, startTime, dMax, "-search.maxStatusRequestDuration") +} + // GetDeadlineForExport returns deadline for the given request to /api/v1/export. func GetDeadlineForExport(r *http.Request, startTime time.Time) Deadline { dMax := maxExportDuration.Milliseconds() diff --git a/deployment/docker/Makefile b/deployment/docker/Makefile index e8efa986b..7a8059cc8 100644 --- a/deployment/docker/Makefile +++ b/deployment/docker/Makefile @@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics ROOT_IMAGE ?= alpine:3.13.2 CERTS_IMAGE := alpine:3.13.2 -GO_BUILDER_IMAGE := golang:1.16.2 +GO_BUILDER_IMAGE := golang:1.16.3 BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr : _) BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr : _)-$(shell echo $(CERTS_IMAGE) | tr : _) diff --git a/deployment/docker/docker-compose.yml b/deployment/docker/docker-compose.yml index 27208bff1..d20f1e08a 100644 --- a/deployment/docker/docker-compose.yml +++ b/deployment/docker/docker-compose.yml @@ -39,7 +39,7 @@ services: restart: always grafana: container_name: grafana - image: grafana/grafana:7.1.1 + image: grafana/grafana:7.5.2 depends_on: - "victoriametrics" ports: diff --git a/docs/Articles.md b/docs/Articles.md index deab85c77..bd69f2e87 100644 --- a/docs/Articles.md +++ b/docs/Articles.md @@ -1,3 +1,7 @@ +--- +sort: 16 +--- + # Articles ## Third-party articles and slides about VictoriaMetrics @@ -30,6 +34,7 @@ * [Monitoring with Prometheus, Grafana, AlertManager and VictoriaMetrics](https://www.sensedia.com/post/monitoring-with-prometheus-alertmanager) * [Solving Metrics at scale with VictoriaMetrics](https://www.youtube.com/watch?v=QgLMztnj7-8) * [Monitoring Kubernetes clusters with VictoriaMetrics and Grafana](https://blog.cybozu.io/entry/2021/03/18/115743) +* [Multi-tenancy monitoring system for 
Kubernetes cluster using VictoriaMetrics and operators](https://blog.kintone.io/entry/2021/03/31/175256) ## Our articles diff --git a/docs/BestPractices.md b/docs/BestPractices.md index 04cc8bb7b..480ee6c38 100644 --- a/docs/BestPractices.md +++ b/docs/BestPractices.md @@ -1,3 +1,7 @@ +--- +sort: 12 +--- + # VM best practices VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. It can be used as a long-term, remote storage for Prometheus which allows it to gather metrics from different systems and store them in a single location or separate them for different purposes (short-, long-term, responsibility zones etc). diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index d50bf273d..60c2371b7 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -1,9 +1,39 @@ +--- +sort: 13 +--- + # CHANGELOG -# tip +## tip -# [v1.57.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.57.1) +## [v1.58.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.58.0) + +* FEATURE: vminsert and vmagent: add `-sortLabels` command-line flag for sorting metric labels before pushing them to `vmstorage`. This should reduce the size of `MetricName -> internal_series_id` cache (aka `vm_cache_size_bytes{type="storage/tsid"}`) when ingesting samples for the same time series with distinct order of labels. For example, `foo{k1="v1",k2="v2"}` and `foo{k2="v2",k1="v1"}` represent a single time series. Labels sorting is disabled by default, since the majority of established exporters preserve the order of labels for the exported metrics. +* FEATURE: allow specifying label value alongside label name for the `others sum` time series returned from `topk_*` and `bottomk_*` functions from [MetricsQL](https://victoriametrics.github.io/MetricsQL.html). 
For example, `topk_avg(3, max(process_resident_memory_bytes) by (instance), "instance=other_sum")` would return top 3 series from `max(process_resident_memory_bytes) by (instance)` plus a series containing the sum of other series. The `others sum` series will have `{instance="other_sum"}` label. +* FEATURE: do not delete `dst_label` when applying `label_copy(q, "src_label", "dst_label")` and `label_move(q, "src_label", "dst_label")` to series without `src_label` and with non-empty `dst_label`. See more details at [MetricsQL docs](https://victoriametrics.github.io/MetricsQL.html). +* FEATURE: update Go builder from `v1.16.2` to `v1.16.3`. This should fix [these issues](https://github.com/golang/go/issues?q=milestone%3AGo1.16.3+label%3ACherryPickApproved). +* FEATURE: vmagent: add support for `follow_redirects` option to `scrape_configs` section in the same way as [Prometheus 2.26 does](https://github.com/prometheus/prometheus/pull/8546). +* FEATURE: vmagent: add support for `authorization` section in `-promscrape.config` in the same way as [Prometheus 2.26 does](https://github.com/prometheus/prometheus/pull/8512). +* FEATURE: vmagent: add support for socks5 proxy in `proxy_url` config option. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1177). +* FEATURE: vmagent: add support for `socks5 over tls` proxy in `proxy_url` config option. It can be set up with the following config: `proxy_url: "tls+socks5://proxy-addr:port"`. +* FEATURE: vmagent: reduce memory usage when `-remoteWrite.queues` is set to a big value. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1167). +* FEATURE: vmagent: add AWS IAM roles for tasks support for EC2 service discovery according to [these docs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html). 
+* FEATURE: vmagent: add support for `proxy_tls_config`, `proxy_authorization`, `proxy_basic_auth`, `proxy_bearer_token` and `proxy_bearer_token_file` options in `consul_sd_config`, `dockerswarm_sd_config` and `eureka_sd_config` sections. +* FEATURE: vmagent: pass `X-Prometheus-Scrape-Timeout-Seconds` header to scrape targets as Prometheus does. In this case scrape targets can limit the time needed for performing the scrape. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179#issuecomment-813118733) for details. +* FEATURE: vmagent: drop corrupted persistent queue files at `-remoteWrite.tmpDataPath` instead of throwing a fatal error. Corrupted files can appear after unclean shutdown of `vmagent` such as OOM kill or hardware reset. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1030). +* FEATURE: vmauth: add support for authorization via [bearer token](https://swagger.io/docs/specification/authentication/bearer-authentication/). See [the docs](https://victoriametrics.github.io/vmauth.html#auth-config) for details. +* FEATURE: publish `arm64` and `amd64` binaries for cluster version of VictoriaMetrics at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). + +* BUGFIX: properly handle `/api/v1/labels` and `/api/v1/label//values` queries on big `start ... end` time range. This should fix big resource usage when VictoriaMetrics is queried with [Promxy](https://github.com/jacksontj/promxy) v0.0.62 or newer versions. +* BUGFIX: do not break sort order for series returned from `topk*`, `bottomk*` and `outliersk` [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) functions. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1189). +* BUGFIX: vmagent: properly work with simple HTTP proxies which don't support `CONNECT` method. For example, [PushProx](https://github.com/prometheus-community/PushProx). 
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179). +* BUGFIX: vmagent: properly discover targets if multiple namespace selectors are put inside `kubernetes_sd_config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1170). +* BUGFIX: vmagent: properly discover `role: endpoints` and `role: endpointslices` targets in `kubernetes_sd_config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1182). +* BUGFIX: properly generate filename for `*.tar.gz` archive inside `_checksums.txt` file posted at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1171). + + +## [v1.57.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.57.1) * FEATURE: publish vmutils for `GOOS=arm` on [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). @@ -12,7 +42,7 @@ * BUGFIX: vminsert: return back `type` label to per-tenant metric `vm_tenant_inserted_rows_total`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/932). -# [v1.57.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.57.0) +## [v1.57.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.57.0) * FEATURE: optimize query performance by up to 10x on systems with many CPU cores. See [this tweet](https://twitter.com/MetricsVictoria/status/1375064484860067840). * FEATURE: add the following metrics at `/metrics` page for every VictoraMetrics app: @@ -35,7 +65,7 @@ * BUGFIX: properly calculate `summarize` and `*Series` functions in [Graphite Render API](https://victoriametrics.github.io/#graphite-render-api-usage). 
-# [v1.56.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.56.0) +## [v1.56.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.56.0) * FEATURE: add the following functions to [MetricsQL](https://victoriametrics.github.io/MetricsQL.html): - `histogram_avg(buckets)` - returns the average value for the given buckets. @@ -64,13 +94,13 @@ * BUGFIX: do not crash if a query contains `histogram_over_time()` function name with uppercase chars. For example, `Histogram_Over_Time(m[5m])`. -# [v1.55.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.55.1) +## [v1.55.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.55.1) * BUGFIX: vmagent: fix a panic in Kubernetes service discovery when a target is filtered out with relabeling. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1107 * BUGFIX: vmagent: fix Kubernetes service discovery for `role: ingress`. See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1110 -# [v1.55.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.55.0) +## [v1.55.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.55.0) * FEATURE: add `sign(q)` and `clamp(q, min, max)` functions, which are planned to be added in [the upcoming Prometheus release](https://twitter.com/roidelapluie/status/1363428376162295811) . The `last_over_time(m[d])` function is already supported in [MetricsQL](https://victoriametrics.github.io/MetricsQL.html). @@ -106,12 +136,12 @@ * BUGFIX: unescape only `\\`, `\n` and `\"` in label names when parsing Prometheus text exposition format as Prometheus does. Previously other escape sequences could be improperly unescaped. 
-# [v1.54.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.1) +## [v1.54.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.1) * BUGFIX: properly handle queries containing a filter on metric name plus any number of negative filters and zero non-negative filters. For example, `node_cpu_seconds_total{mode!="idle"}`. The bug was introduced in [v1.54.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.0). -# [v1.54.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.0) +## [v1.54.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.54.0) * FEATURE: optimize searching for matching metrics for `metric{}` queries if `` contains at least a single filter. For example, the query `up{job="foobar"}` should find the matching time series much faster than previously. * FEATURE: reduce execution times for `q1 q2` queries by executing `q1` and `q2` in parallel. @@ -132,12 +162,12 @@ * BUGFIX: vmagent: return back unsent block to the queue during graceful shutdown. Previously this block could be dropped if remote storage is unavailable during vmagent shutdown. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1065 . -# [v1.53.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.53.1) +## [v1.53.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.53.1) * BUGFIX: vmselect: fix the bug peventing from proper searching by Graphite filter with wildcards such as `{__graphite__="foo.*.bar"}`. -# [v1.53.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.53.0) +## [v1.53.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.53.0) * FEATURE: added [vmctl tool](https://victoriametrics.github.io/vmctl.html) to VictoriaMetrics release process. Now it is packaged in `vmutils-*.tar.gz` archive on [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). 
Source code for `vmctl` tool has been moved from [github.com/VictoriaMetrics/vmctl](https://github.com/VictoriaMetrics/vmctl) to [github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmctl). * FEATURE: added `-loggerTimezone` command-line flag for adjusting time zone for timestamps in log messages. By default UTC is used. @@ -162,7 +192,7 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y * BUGFIX: vmagent: retry scrape and service discovery requests when the remote server closes HTTP keep-alive connection. Previously `disable_keepalive: true` option could be used under `scrape_configs` section when working with such servers. -# [v1.52.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.52.0) +## [v1.52.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.52.0) * FEATURE: provide a sample list of alerting rules for VictoriaMetrics components. It is available [here](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml). * FEATURE: disable final merge for data for the previous month at the beginning of new month, since it may result in high disk IO and CPU usage. Final merge can be enabled by setting `-finalMergeDelay` command-line flag to positive duration. @@ -182,7 +212,7 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y * BUGFIX: upgrade base image for Docker packages from Alpine 3.12.1 to Alpine 3.12.3 in order to fix potential security issues. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1010 -# [v1.51.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.51.0) +## [v1.51.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.51.0) * FEATURE: add `/api/v1/status/top_queries` handler, which returns the most frequently executed queries and queries that took the most time for execution. 
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/907 * FEATURE: vmagent: add support for `proxy_url` config option in Prometheus scrape configs. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/503 @@ -194,23 +224,23 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y * BUGFIX: do not adjust `offset` value provided in MetricsQL query. Previously it could be modified in order to improve response cache hit ratio. This is unneeded, since cache hit ratio should remain good because the query time range should be already aligned to multiple of `step` values. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/976 -# [v1.50.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.2) +## [v1.50.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.2) * FEATURE: do not publish duplicate Docker images with `-cluster` tag suffix for [vmagent](https://victoriametrics.github.io/vmagent.html), [vmalert](https://victoriametrics.github.io/vmalert.html), [vmauth](https://victoriametrics.github.io/vmauth.html), [vmbackup](https://victoriametrics.github.io/vmbackup.html) and [vmrestore](https://victoriametrics.github.io/vmrestore.html), since they are identical to images without `-cluster` tag suffix. * BUGFIX: vmalert: properly populate template variables. This has been broken in v1.50.0. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/974 * BUGFIX: properly parse negative combined duration in MetricsQL such as `-1h3m4s`. It must be parsed as `-(1h + 3m + 4s)`. Prevsiously it was parsed as `-1h + 3m + 4s`. -* BUGFIX: properly parse lines in [Prometheus exposition format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md) and in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/specification/OpenMetrics.md) with whitespace after the timestamp. For example, `foo 123 456 # some comment here`. 
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/970 +* BUGFIX: properly parse lines in [Prometheus exposition format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md) and in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/specification/OpenMetrics.md) with whitespace after the timestamp. For example, `foo 123 456 ## some comment here`. See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/970 -# [v1.50.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.1) +## [v1.50.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.1) * FEATURE: vmagent: export `vmagent_remotewrite_blocks_sent_total` and `vmagent_remotewrite_blocks_sent_total` metrics for each `-remoteWrite.url`. * BUGFIX: vmagent: properly delete unregistered scrape targets from `/targets` and `/api/v1/targets` pages. They weren't deleted due to the bug in `v1.50.0`. -# [v1.50.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.0) +## [v1.50.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.0) * FEATURE: automatically reset response cache when samples with timestamps older than `now - search.cacheTimestampOffset` are ingested to VictoriaMetrics. This makes unnecessary disabling response cache during data backfilling or resetting it after backfilling is complete as described [in these docs](https://victoriametrics.github.io/#backfilling). This feature applies only to single-node VictoriaMetrics. It doesn't apply to cluster version of VictoriaMetrics because `vminsert` nodes don't know about `vmselect` nodes where the response cache must be reset. * FEATURE: vmalert: add `query`, `first` and `value` functions to alert templates. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/539 @@ -235,7 +265,7 @@ in front of VictoriaMetrics. 
[Contact us](mailto:sales@victoriametrics.com) if y * BUGFIX: assume the previous value is 0 when calculating `increase()` for the first point on the graph if its value doesn't exceed 100 and the delta between two first points equals to 0. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/962 -# [v1.49.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.49.0) +## [v1.49.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.49.0) * FEATURE: optimize Consul service discovery speed when discovering big number of services. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/574 * FEATURE: add `label_uppercase(q, label1, ... labelN)` and `label_lowercase(q, label1, ... labelN)` function to [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) @@ -255,7 +285,7 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y `days_in_month`, `hour`, `month` and `year`. -# [v1.48.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.48.0) +## [v1.48.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.48.0) * FEATURE: added [Snap package for single-node VictoriaMetrics](https://snapcraft.io/victoriametrics). This simplifies installation under Ubuntu to a single command: ```bash @@ -276,12 +306,12 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y * FEATURE: log metric name plus all its labels when the metric timestamp is out of the configured retention. This should simplify detecting the source of metrics with unexpected timestamps. * FEATURE: add `-dryRun` command-line flag to single-node VictoriaMetrics in order to check config file pointed by `-promscrape.config`. -* BUGFIX: properly parse Prometheus metrics with [exemplars](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#exemplars-1) such as `foo 123 # {bar="baz"} 1`. 
+* BUGFIX: properly parse Prometheus metrics with [exemplars](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#exemplars-1) such as `foo 123 ## {bar="baz"} 1`. * BUGFIX: properly parse "infinity" values in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#abnf). See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/924 -# [v1.47.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.47.0) +## [v1.47.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.47.0) * FEATURE: vmselect: return the original error from `vmstorage` node in query response if `-search.denyPartialResponse` is set. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/891 @@ -311,7 +341,7 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y * BUGFIX: vminsert: properly return HTTP 503 status code when all the vmstorage nodes are unavailable. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/896 -# [v1.46.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.46.0) +## [v1.46.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.46.0) * FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label//values` when `start` and `end` args are set. * FEATURE: reduce memory usage when query touches big number of time series. @@ -329,7 +359,7 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/883 -# [v1.45.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.45.0) +## [v1.45.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.45.0) * FEATURE: allow setting `-retentionPeriod` smaller than one month. I.e. `-retentionPeriod=3d`, `-retentionPeriod=2w`, etc. is supported now. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/173 @@ -363,7 +393,7 @@ in front of VictoriaMetrics. 
[Contact us](mailto:sales@victoriametrics.com) if y * BUGFIX: vmagent: properly handle 301 redirects. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/869 -# [v1.44.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.44.0) +## [v1.44.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.44.0) * FEATURE: automatically add missing label filters to binary operands as described at https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization . This should improve performance for queries with missing label filters in binary operands. For example, the following query should work faster now, because it shouldn't @@ -423,7 +453,7 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y * BUGFIX: fix `mode_over_time(m[d])` calculations. Previously the function could return incorrect results. -# [v1.43.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.43.0) +## [v1.43.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.43.0) * FEATURE: reduce CPU usage for repeated queries over sliding time window when no new time series are added to the database. Typical use cases: repeated evaluation of alerting rules in [vmalert](https://victoriametrics.github.io/vmalert.html) or dashboard auto-refresh in Grafana. @@ -442,7 +472,7 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y * BUGFIX: support parsing floating-point timestamp like Graphite Carbon does. Such timestmaps are truncated to seconds. -# [v1.42.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0) +## [v1.42.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0) * FEATURE: use all the available CPU cores when accepting data via a single TCP connection for [all the supported protocols](https://victoriametrics.github.io/#how-to-import-time-series-data). @@ -472,6 +502,6 @@ in front of VictoriaMetrics. 
[Contact us](mailto:sales@victoriametrics.com) if y In this case only the node must be returned with stripped dot in the end of id as carbonapi does. -# Previous releases +## Previous releases See [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). diff --git a/docs/CaseStudies.md b/docs/CaseStudies.md index 6b54ef38a..baccc44a3 100644 --- a/docs/CaseStudies.md +++ b/docs/CaseStudies.md @@ -1,3 +1,7 @@ +--- +sort: 17 +--- + # Case studies and talks Below please find public case studies and talks from VictoriaMetrics users. You can also join our [community Slack channel](http://slack.victoriametrics.com/) diff --git a/docs/Cluster-VictoriaMetrics.md b/docs/Cluster-VictoriaMetrics.md index 88ed91695..013a4c7b5 100644 --- a/docs/Cluster-VictoriaMetrics.md +++ b/docs/Cluster-VictoriaMetrics.md @@ -1,3 +1,7 @@ +--- +sort: 9 +--- + # Cluster version Victoria Metrics diff --git a/docs/FAQ.md b/docs/FAQ.md index 882ee80b8..aa8295c88 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -1,3 +1,7 @@ +--- +sort: 18 +--- + # FAQ ### What is the main purpose of VictoriaMetrics? diff --git a/docs/Home.md b/docs/Home.md index 4cf04ca97..944eea014 100644 --- a/docs/Home.md +++ b/docs/Home.md @@ -1,3 +1,7 @@ +--- +sort: 19 +--- + # Docs * [Quick start](Quick-Start) diff --git a/docs/MetricsQL.md b/docs/MetricsQL.md index b67cb4d3c..cd86c984e 100644 --- a/docs/MetricsQL.md +++ b/docs/MetricsQL.md @@ -1,3 +1,7 @@ +--- +sort: 11 +--- + # MetricsQL VictoriaMetrics implements MetricsQL - query language inspired by [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/). @@ -55,19 +59,19 @@ This functionality can be tried at [an editable Grafana dashboard](http://play-g - `ru(freeResources, maxResources)` function for returning resource utilization percentage in the range `0% - 100%`. 
For instance, `ru(node_memory_MemFree_bytes, node_memory_MemTotal_bytes)` returns memory utilization over [node_exporter](https://github.com/prometheus/node_exporter) metrics. - `ttf(slowlyChangingFreeResources)` function for returning the time in seconds when the given `slowlyChangingFreeResources` expression reaches zero. For instance, `ttf(node_filesystem_avail_byte)` returns the time to storage space exhaustion. This function may be useful for capacity planning. - Functions for label manipulation: - - `alias(q, name)` for setting metric name across all the time series `q`. - - `label_set(q, label1, value1, ... labelN, valueN)` for setting the given values for the given labels on `q`. - - `label_map(q, label, srcValue1, dstValue1, ... srcValueN, dstValueN)` for mapping `label` values from `src*` to `dst*`. - - `label_uppercase(q, label1, ... labelN)` for uppercasing values for the given labels. - - `label_lowercase(q, label2, ... labelN)` for lowercasing value for the given labels. - - `label_del(q, label1, ... labelN)` for deleting the given labels from `q`. - - `label_keep(q, label1, ... labelN)` for deleting all the labels except the given labels from `q`. - - `label_copy(q, src_label1, dst_label1, ... src_labelN, dst_labelN)` for copying label values from `src_*` to `dst_*`. - - `label_move(q, src_label1, dst_label1, ... src_labelN, dst_labelN)` for moving label values from `src_*` to `dst_*`. - - `label_transform(q, label, regexp, replacement)` for replacing all the `regexp` occurences with `replacement` in the `label` values from `q`. - - `label_value(q, label)` - returns numeric values for the given `label` from `q`. + - `alias(q, name)` for setting metric name across all the time series `q`. For example, `alias(foo, "bar")` would give `bar` name to all the `foo` series. + - `label_set(q, label1, value1, ... labelN, valueN)` for setting the given values for the given labels on `q`. 
For example, `label_set(foo, "bar", "baz")` would add `{bar="baz"}` label to all the `foo` series. + - `label_map(q, label, srcValue1, dstValue1, ... srcValueN, dstValueN)` for mapping `label` values from `src*` to `dst*`. For example, `label_map(foo, "instance", "127.0.0.1", "localhost")` would rename `foo{instance="127.0.0.1"}` to `foo{instance="localhost"}`. + - `label_uppercase(q, label1, ... labelN)` for uppercasing values for the given labels. For example, `label_uppercase(foo, "instance")` would transform `foo{instance="bar"}` to `foo{instance="BAR"}`. + - `label_lowercase(q, label2, ... labelN)` for lowercasing values for the given labels. For example, `label_lowercase(foo, "instance")` would transform `foo{instance="BAR"}` to `foo{instance="bar"}`. + - `label_del(q, label1, ... labelN)` for deleting the given labels from `q`. For example, `label_del(foo, "bar")` would delete `bar` label from all the `foo` series. + - `label_keep(q, label1, ... labelN)` for deleting all the labels except the given labels from `q`. For example, `label_keep(foo, "bar")` would delete all the labels except `bar` from `foo` series. + - `label_copy(q, src_label1, dst_label1, ... src_labelN, dst_labelN)` for copying label values from `src_*` to `dst_*`. If `src_label` is empty, then `dst_label` is left untouched. For example, `label_copy(foo, "bar", "baz")` would transform `foo{bar="x"}` to `foo{bar="x",baz="x"}`. + - `label_move(q, src_label1, dst_label1, ... src_labelN, dst_labelN)` for moving label values from `src_*` to `dst_*`. If `src_label` is empty, then `dst_label` is left untouched. For example, `label_move(foo, "bar", "baz")` would transform `foo{bar="x"}` to `foo{baz="x"}`. + - `label_transform(q, label, regexp, replacement)` for replacing all the `regexp` occurrences with `replacement` in the `label` values from `q`. For example, `label_transform(foo, "bar", "-", "_")` would transform `foo{bar="a-b-c"}` to `foo{bar="a_b_c"}`.
+ - `label_value(q, label)` - returns numeric values for the given `label` from `q`. For example, if `label_value(foo, "bar")` is applied to `foo{bar="1.234"}`, then it will return a time series `foo{bar="1.234"}` with `1.234` value. - `label_match(q, label, regexp)` and `label_mismatch(q, label, regexp)` for filtering time series with labels matching (or not matching) the given regexps. -- `sort_by_label(q, label1, ... labelN)` and `sort_by_label_desc(q, label1, ... labelN)` for sorting time series by the given set of labels. +- `sort_by_label(q, label1, ... labelN)` and `sort_by_label_desc(q, label1, ... labelN)` for sorting time series by the given set of labels. For example, `sort_by_label(foo, "bar")` would sort `foo` series by values of the label `bar` in these series. - `step()` function for returning the step in seconds used in the query. - `start()` and `end()` functions for returning the start and end timestamps of the `[start ... end]` range used in the query. - `integrate(m[d])` for returning integral over the given duration `d` for the given metric `m`. @@ -121,7 +125,7 @@ This functionality can be tried at [an editable Grafana dashboard](http://play-g - `bottomk_avg(k, q)` - returns bottom K time series with the min averages on the given time range - `bottomk_median(k, q)` - returns bottom K time series with the min medians on the given time range. - All the `topk_*` and `bottomk_*` functions accept optional third argument - label name for the sum of the remaining time series outside top K or bottom K time series. For example, `topk_max(3, process_resident_memory_bytes, "remaining_sum")` would return up to 3 time series with the maximum value for `process_resident_memory_bytes` plus fourth time series with the sum of the remaining time series if any. The fourth time series will contain `remaining_sum="remaining_sum"` additional label. 
+ All the `topk_*` and `bottomk_*` functions accept optional third argument - label to add to the sum of the remaining time series outside top K or bottom K time series. For example, `topk_max(3, sum(process_resident_memory_bytes) by (job), "job=other")` would return up to 3 time series with the maximum value for `sum(process_resident_memory_bytes) by (job)` plus fourth time series with the sum of the remaining time series if any. The fourth time series will contain `job="other"` label. - `share_le_over_time(m[d], le)` - returns share (in the range 0..1) of values in `m` over `d`, which are smaller or equal to `le`. Useful for calculating SLI and SLO. Example: `share_le_over_time(memory_usage_bytes[24h], 100*1024*1024)` returns the share of time series values for the last 24 hours when memory usage was below or equal to 100MB. - `share_gt_over_time(m[d], gt)` - returns share (in the range 0..1) of values in `m` over `d`, which are bigger than `gt`. Useful for calculating SLI and SLO. diff --git a/docs/Quick-Start.md b/docs/Quick-Start.md index e2f0f085c..33a93eb6f 100644 --- a/docs/Quick-Start.md +++ b/docs/Quick-Start.md @@ -1,3 +1,7 @@ +--- +sort: 10 +--- + # Quick Start 1. If you run Ubuntu please run the `snap install victoriametrics` command to install and start VictoriaMetrics. Then read [these docs](https://snapcraft.io/victoriametrics). diff --git a/docs/Release-Guide.md b/docs/Release-Guide.md index 29e607d5c..1000f0ac2 100644 --- a/docs/Release-Guide.md +++ b/docs/Release-Guide.md @@ -1,4 +1,8 @@ -Release process guidance +--- +sort: 14 +--- + +# Release process guidance ## Release version and Docker images diff --git a/docs/SampleSizeCalculations.md b/docs/SampleSizeCalculations.md index 9881f52d4..75ebb3599 100644 --- a/docs/SampleSizeCalculations.md +++ b/docs/SampleSizeCalculations.md @@ -1,3 +1,7 @@ +--- +sort: 15 +--- + # Sample size calculations These calculations are for the “Lowest sample size” graph at https://victoriametrics.com/ . 
@@ -14,7 +18,7 @@ That means each metric will contain 6307200 points. 2tb disk contains 2 (tb) * 1024 (gb) * 1024 (mb) * 1024 (kb) * 1024 (b) = 2199023255552 bytes -# VictoriaMetrics +## VictoriaMetrics Based on production data from our customers, sample size is 0.4 byte That means one metric with 10 seconds resolution will need 6307200 points * 0.4 bytes/point = 2522880 bytes or 2.4 megabytes. @@ -22,13 +26,13 @@ Calculation for number of metrics can be stored in 2 tb disk: 2199023255552 (disk size) / 2522880 (one metric for 2 year) = 871632 metrics So in 2tb we can store 871 632 metrics -# Graphite +## Graphite Based on https://m30m.github.io/whisper-calculator/ sample size of graphite metrics is 12b + 28b for each metric That means, one metric with 10 second resolution will need 75686428 bytes or 72.18 megabytes Calculation for number of metrics can be stored in 2 tb disk: 2199023255552 / 75686428 = 29 054 metrics -# OpenTSDB +## OpenTSDB Let's check official openTSDB site http://opentsdb.net/faq.html 16 bytes of HBase overhead, 3 bytes for the metric, 4 bytes for the timestamp, 6 bytes per tag, 2 bytes of OpenTSDB overhead, up to 8 bytes for the value. Integers are stored with variable length encoding and can consume 1, 2, 4 or 8 bytes. 
@@ -46,14 +50,15 @@ Also, openTSDB allows to use compression So, let's multiply numbers on 4.2 69 730 * 4,2 = 292 866 metrics for best scenario 29 054 * 4,2 = 122 026 metrics for worst scenario -# m3db + +## M3DB Let's look at official m3db site https://m3db.github.io/m3/m3db/architecture/engine/ They can achieve a sample size of 1.45 bytes/datapoint That means, one metric with 10 second resolution will need 9145440 bytes or 8,72177124 megabytes Calculation for number of metrics can be stored in 2 tb disk: 2199023255552 / 9145440 = 240 450 metrics -# InfluxDB +## InfluxDB Based on official influxDB site https://docs.influxdata.com/influxdb/v1.8/guides/hardware_sizing/#bytes-and-compression "Non-string values require approximately three bytes". That means, one metric with 10 second resolution will need 6307200 * 3 = 18921600 bytes or 18 megabytes @@ -61,7 +66,7 @@ Calculation for number of metrics can be stored in 2 tb disk: 2199023255552 / 18921600 = 116 217 metrics -# Prometheus +## Prometheus Let's check official site: https://prometheus.io/docs/prometheus/latest/storage/ "On average, Prometheus uses only around 1-2 bytes per sample." 
That means, one metric with 10 second resolution will need diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index 90a9355a7..9560e2dc2 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -1,3 +1,9 @@ +--- +sort: 1 +--- + +# VictoriaMetrics + [![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) [![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics.svg?maxAge=604800)](https://hub.docker.com/r/victoriametrics/victoria-metrics) [![Slack](https://img.shields.io/badge/join%20slack-%23victoriametrics-brightgreen.svg)](http://slack.victoriametrics.com/) @@ -6,9 +12,7 @@ [![Build Status](https://github.com/VictoriaMetrics/VictoriaMetrics/workflows/main/badge.svg)](https://github.com/VictoriaMetrics/VictoriaMetrics/actions) [![codecov](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics/branch/master/graph/badge.svg)](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics) -![Victoria Metrics logo](logo.png "Victoria Metrics") - -## VictoriaMetrics +Victoria Metrics logo VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. @@ -117,6 +121,7 @@ Alphabetically sorted links to case studies: * [Prometheus querying API usage](#prometheus-querying-api-usage) * [Prometheus querying API enhancements](#prometheus-querying-api-enhancements) * [Graphite API usage](#graphite-api-usage) + * [Graphite Render API usage](#graphite-render-api-usage) * [Graphite Metrics API usage](#graphite-metrics-api-usage) * [Graphite Tags API usage](#graphite-tags-api-usage) * [How to build from sources](#how-to-build-from-sources) @@ -1324,6 +1329,8 @@ See the example of alerting rules for VM components [here](https://github.com/Vi * It is recommended to use default command-line flag values (i.e. 
don't set them explicitly) until the need of tweaking these flag values arises. +* It is recommended inspecting logs during troubleshooting, since they may contain useful information. + * It is recommended upgrading to the latest available release from [this page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), since the encountered issue could be already fixed there. @@ -1338,8 +1345,6 @@ See the example of alerting rules for VM components [here](https://github.com/Vi if background merge cannot be initiated due to free disk space shortage. The value shows the number of per-month partitions, which would start background merge if they had more free disk space. -* It is recommended inspecting logs during troubleshooting, since they may contain useful information. - * VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage. This may lead to the following "issues": * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to persistent storage @@ -1349,10 +1354,13 @@ See the example of alerting rules for VM components [here](https://github.com/Vi * If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second, then it is likely you have too many active time series for the current amount of RAM. - VictoriaMetrics [exposes](#monitoring) `vm_slow_*` metrics, which could be used as an indicator of low amounts of RAM. - It is recommended increasing the amount of RAM on the node with VictoriaMetrics in order to improve + VictoriaMetrics [exposes](#monitoring) `vm_slow_*` metrics such as `vm_slow_row_inserts_total` and `vm_slow_metric_name_loads_total`, which could be used + as an indicator of low amounts of RAM. It is recommended increasing the amount of RAM on the node with VictoriaMetrics in order to improve ingestion and query performance in this case. 
+* If the order of labels for the same metrics can change over time (e.g. if `metric{k1="v1",k2="v2"}` may become `metric{k2="v2",k1="v1"}`), + then it is recommended running VictoriaMetrics with `-sortLabels` command-line flag in order to reduce memory usage and CPU usage. + * VictoriaMetrics prioritizes data ingestion over data querying. So if it has no enough resources for data ingestion, then data querying may slow down significantly. @@ -1758,6 +1766,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li The maximum time the request waits for execution when -search.maxConcurrentRequests limit is reached; see also -search.maxQueryDuration (default 10s) -search.maxStalenessInterval duration The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.maxLookback' flag, which has the same meaning due to historical reasons + -search.maxStatusRequestDuration duration + The maximum duration for /api/v1/status/* requests (default 5m0s) -search.maxStepForPointsAdjustment duration The maximum step when /api/v1/query_range handler adjusts points with timestamps closer than -search.latencyOffset to the current time. The adjustment is needed because such points may contain incomplete data (default 1m0s) -search.maxTagKeys int @@ -1788,6 +1798,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li The maximum number of CPU cores to use for small merges. Default value is used if set to 0 -snapshotAuthKey string authKey, which must be passed in query string to /snapshot* pages + -sortLabels + Whether to sort labels for incoming samples before writing them to storage. 
This may be needed for reducing memory usage at storage when the order of labels in incoming samples is random. For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}. Enabled sorting for labels can slow down ingestion performance a bit -storageDataPath string Path to storage data (default "victoria-metrics-data") -tls diff --git a/docs/vmagent.md b/docs/vmagent.md index cf52fb800..e610ed05e 100644 --- a/docs/vmagent.md +++ b/docs/vmagent.md @@ -1,4 +1,8 @@ -## vmagent +--- +sort: 2 +--- + +# vmagent `vmagent` is a tiny but mighty agent which helps you collect metrics from various sources and store them in [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) @@ -178,7 +182,7 @@ The following scrape types in [scrape_config](https://prometheus.io/docs/prometh Please file feature requests to [our issue tracker](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you need other service discovery mechanisms to be supported by `vmagent`. -`vmagent` also support the following additional options in `scrape_config` section: +`vmagent` also support the following additional options in `scrape_configs` section: * `disable_compression: true` - to disable response compression on a per-job basis. By default `vmagent` requests compressed responses from scrape targets to save network bandwidth. @@ -262,7 +266,7 @@ See [these docs](https://victoriametrics.github.io/#deduplication) for details. ## Scraping targets via a proxy -`vmagent` supports scraping targets via http and https proxies. Proxy address must be specified in `proxy_url` option. For example, the following scrape config instructs +`vmagent` supports scraping targets via http, https and socks5 proxies. Proxy address must be specified in `proxy_url` option. 
For example, the following scrape config instructs target scraping via https proxy at `https://proxy-addr:1234`: ```yml @@ -273,6 +277,7 @@ scrape_configs: Proxy can be configured with the following optional settings: +* `proxy_authorization` for generic token authorization. See [Prometheus docs for details on authorization section](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) * `proxy_bearer_token` and `proxy_bearer_token_file` for Bearer token authorization * `proxy_basic_auth` for Basic authorization. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config). * `proxy_tls_config` for TLS config. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config). @@ -702,6 +707,8 @@ See the docs at https://victoriametrics.github.io/vmagent.html . -remoteWrite.urlRelabelConfig array Optional path to relabel config for the corresponding -remoteWrite.url Supports array of values separated by comma or specified via multiple flags. + -sortLabels + Whether to sort labels for incoming samples before writing them to all the configured remote storage systems. This may be needed for reducing memory usage at remote storage when the order of labels in incoming samples is random. For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}. Enabled sorting for labels can slow down ingestion performance a bit -tls Whether to enable TLS (aka HTTPS) for incoming requests.
-tlsCertFile and -tlsKeyFile must be set if -tls is set -tlsCertFile string diff --git a/docs/vmalert.md b/docs/vmalert.md index e075f27db..c1e912aa7 100644 --- a/docs/vmalert.md +++ b/docs/vmalert.md @@ -1,10 +1,14 @@ -## vmalert +--- +sort: 3 +--- + +# vmalert `vmalert` executes a list of given [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) or [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) rules against configured address. -### Features: +## Features * Integration with [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) TSDB; * VictoriaMetrics [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) support and expressions validation; @@ -15,7 +19,7 @@ rules against configured address. * Graphite datasource can be used for alerting and recording rules. See [these docs](#graphite) for details. * Lightweight without extra dependencies. -### Limitations: +## Limitations * `vmalert` execute queries against remote datasource which has reliability risks because of network. It is recommended to configure alerts thresholds and rules expressions with understanding that network request may fail; @@ -24,7 +28,7 @@ storage is asynchronous. Hence, user shouldn't rely on recording rules chaining recording rule is reused in next one; * `vmalert` has no UI, just an API for getting groups and rules statuses. -### QuickStart +## QuickStart To build `vmalert` from sources: ``` @@ -67,7 +71,7 @@ groups: [ - ] ``` -#### Groups +### Groups Each group has following attributes: ```yaml @@ -89,7 +93,7 @@ rules: [ - ... ] ``` -#### Rules +### Rules There are two types of Rules: * [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) - @@ -102,7 +106,7 @@ and save their result as a new set of time series. `vmalert` forbids to define duplicates - rules with the same combination of name, expression and labels within one group. 
-##### Alerting rules +#### Alerting rules The syntax for alerting rule is following: ```yaml @@ -131,7 +135,7 @@ annotations: [ : ] ``` -##### Recording rules +#### Recording rules The syntax for recording rules is following: ```yaml @@ -155,7 +159,7 @@ labels: For recording rules to work `-remoteWrite.url` must specified. -#### Alerts state on restarts +### Alerts state on restarts `vmalert` has no local storage, so alerts state is stored in the process memory. Hence, after reloading of `vmalert` the process alerts state will be lost. To avoid this situation, `vmalert` should be configured via the following flags: @@ -171,7 +175,7 @@ in configured `-remoteRead.url`, weren't updated in the last `1h` or received st rules configuration. -#### WEB +### WEB `vmalert` runs a web-server (`-httpListenAddr`) for serving metrics and alerts endpoints: * `http:///api/v1/groups` - list of all loaded groups and rules; @@ -182,7 +186,7 @@ Used as alert source in AlertManager. * `http:///-/reload` - hot configuration reload. -### Graphite +## Graphite vmalert sends requests to `<-datasource.url>/render?format=json` during evaluation of alerting and recording rules if the corresponding group or rule contains `type: "graphite"` config option. It is expected that the `<-datasource.url>/render` @@ -191,7 +195,7 @@ When using vmalert with both `graphite` and `prometheus` rules configured agains to set `-datasource.appendTypePrefix` flag to `true`, so vmalert can adjust URL prefix automatically based on query type. -### Configuration +## Configuration The shortlist of configuration flags is the following: ``` @@ -375,43 +379,43 @@ command-line flags with their descriptions. To reload configuration without `vmalert` restart send SIGHUP signal or send GET request to `/-/reload` endpoint. -### Contributing +## Contributing `vmalert` is mostly designed and built by VictoriaMetrics community. Feel free to share your experience and ideas for improving this software. 
Please keep simplicity as the main priority. -### How to build from sources +## How to build from sources It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmalert` is located in `vmutils-*` archives there. -#### Development build +### Development build 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.15. 2. Run `make vmalert` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). It builds `vmalert` binary and puts it into the `bin` folder. -#### Production build +### Production build 1. [Install docker](https://docs.docker.com/install/). 2. Run `make vmalert-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). It builds `vmalert-prod` binary and puts it into the `bin` folder. -#### ARM build +### ARM build ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/). -#### Development ARM build +### Development ARM build 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.15. 2. Run `make vmalert-arm` or `make vmalert-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). It builds `vmalert-arm` or `vmalert-arm64` binary respectively and puts it into the `bin` folder. -#### Production ARM build +### Production ARM build 1. [Install docker](https://docs.docker.com/install/). 2. Run `make vmalert-arm-prod` or `make vmalert-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics). diff --git a/docs/vmauth.md b/docs/vmauth.md index 6cb3fc6bf..3b9d64f43 100644 --- a/docs/vmauth.md +++ b/docs/vmauth.md @@ -1,4 +1,8 @@ -## vmauth +--- +sort: 4 +--- + +# vmauth `vmauth` is a simple auth proxy and router for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics). 
It reads username and password from [Basic Auth headers](https://en.wikipedia.org/wiki/Basic_access_authentication), @@ -23,7 +27,8 @@ Docker images for `vmauth` are available [here](https://hub.docker.com/r/victori Pass `-help` to `vmauth` in order to see all the supported command-line flags with their descriptions. -Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML, accounting, limits, etc. +Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML, +accounting and rate limiting such as [vmgateway](https://victoriametrics.github.io/vmgateway.html). ## Auth config @@ -36,11 +41,15 @@ Auth config is represented in the following simple `yml` format: # Usernames must be unique. users: + # Requests with the 'Authorization: Bearer XXXX' header are proxied to http://localhost:8428 . + # For example, http://vmauth:8427/api/v1/query is proxied to http://localhost:8428/api/v1/query +- bearer_token: "XXXX" + url_prefix: "http://localhost:8428" # The user for querying local single-node VictoriaMetrics. # All the requests to http://vmauth:8427 with the given Basic Auth (username:password) - # will be routed to http://localhost:8428 . - # For example, http://vmauth:8427/api/v1/query is routed to http://localhost:8428/api/v1/query + # will be proxied to http://localhost:8428 . + # For example, http://vmauth:8427/api/v1/query is proxied to http://localhost:8428/api/v1/query - username: "local-single-node" password: "***" url_prefix: "http://localhost:8428" @@ -48,8 +57,8 @@ users: # The user for querying account 123 in VictoriaMetrics cluster # See https://victoriametrics.github.io/Cluster-VictoriaMetrics.html#url-format # All the requests to http://vmauth:8427 with the given Basic Auth (username:password) - # will be routed to http://vmselect:8481/select/123/prometheus . 
- # For example, http://vmauth:8427/api/v1/query is routed to http://vmselect:8481/select/123/prometheus/api/v1/select + # will be proxied to http://vmselect:8481/select/123/prometheus . + # For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect:8481/select/123/prometheus/api/v1/select - username: "cluster-select-account-123" password: "***" url_prefix: "http://vmselect:8481/select/123/prometheus" @@ -57,8 +66,8 @@ users: # The user for inserting Prometheus data into VictoriaMetrics cluster under account 42 # See https://victoriametrics.github.io/Cluster-VictoriaMetrics.html#url-format # All the requests to http://vmauth:8427 with the given Basic Auth (username:password) - # will be routed to http://vminsert:8480/insert/42/prometheus . - # For example, http://vmauth:8427/api/v1/write is routed to http://vminsert:8480/insert/42/prometheus/api/v1/write + # will be proxied to http://vminsert:8480/insert/42/prometheus . + # For example, http://vmauth:8427/api/v1/write is proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write - username: "cluster-insert-account-42" password: "***" url_prefix: "http://vminsert:8480/insert/42/prometheus" @@ -66,9 +75,9 @@ users: # A single user for querying and inserting data: # - Requests to http://vmauth:8427/api/v1/query, http://vmauth:8427/api/v1/query_range - # and http://vmauth:8427/api/v1/label//values are routed to http://vmselect:8481/select/42/prometheus. - # For example, http://vmauth:8427/api/v1/query is routed to http://vmselect:8480/select/42/prometheus/api/v1/query - # - Requests to http://vmauth:8427/api/v1/write are routed to http://vminsert:8480/insert/42/prometheus/api/v1/write + # and http://vmauth:8427/api/v1/label//values are proxied to http://vmselect:8481/select/42/prometheus. 
+ # For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect:8480/select/42/prometheus/api/v1/query + # - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write - username: "foobar" url_map: - src_paths: ["/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^/]+/values"] diff --git a/docs/vmbackup.md b/docs/vmbackup.md index 811d71fae..9affa6dfd 100644 --- a/docs/vmbackup.md +++ b/docs/vmbackup.md @@ -1,4 +1,8 @@ -## vmbackup +--- +sort: 5 +--- + +# vmbackup `vmbackup` creates VictoriaMetrics data backups from [instant snapshots](https://victoriametrics.github.io/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots). diff --git a/docs/vmctl.md b/docs/vmctl.md index 61813ab52..5fed39e36 100644 --- a/docs/vmctl.md +++ b/docs/vmctl.md @@ -1,3 +1,7 @@ +--- +sort: 7 +--- + # vmctl Victoria metrics command-line tool @@ -9,33 +13,6 @@ Features: - [x] InfluxDB: migrate data from InfluxDB to VictoriaMetrics - [ ] Storage Management: data re-balancing between nodes -# Table of contents - -* [Articles](#articles) -* [How to build](#how-to-build) -* [Migrating data from InfluxDB 1.x](#migrating-data-from-influxdb-1x) - * [Data mapping](#data-mapping) - * [Configuration](#configuration) - * [Filtering](#filtering) -* [Migrating data from InfluxDB 2.x](#migrating-data-from-influxdb-2x) -* [Migrating data from Prometheus](#migrating-data-from-prometheus) - * [Data mapping](#data-mapping-1) - * [Configuration](#configuration-1) - * [Filtering](#filtering-1) -* [Migrating data from Thanos](#migrating-data-from-thanos) - * [Current data](#current-data) - * [Historical data](#historical-data) -* [Migrating data from VictoriaMetrics](#migrating-data-from-victoriametrics) - * [Native protocol](#native-protocol) -* [Tuning](#tuning) - * [Influx mode](#influx-mode) - * [Prometheus mode](#prometheus-mode) - * [VictoriaMetrics importer](#victoriametrics-importer) - * [Importer stats](#importer-stats) 
-* [Significant figures](#significant-figures) -* [Adding extra labels](#adding-extra-labels) - - ## Articles * [How to migrate data from Prometheus](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043) diff --git a/docs/vmgateway-access-control.jpg b/docs/vmgateway-access-control.jpg new file mode 100644 index 000000000..91988329a Binary files /dev/null and b/docs/vmgateway-access-control.jpg differ diff --git a/docs/vmgateway-overview.jpeg b/docs/vmgateway-overview.jpeg new file mode 100644 index 000000000..adb30aa59 Binary files /dev/null and b/docs/vmgateway-overview.jpeg differ diff --git a/docs/vmgateway-rate-limiting.jpg b/docs/vmgateway-rate-limiting.jpg new file mode 100644 index 000000000..2849a0094 Binary files /dev/null and b/docs/vmgateway-rate-limiting.jpg differ diff --git a/docs/vmgateway.md b/docs/vmgateway.md new file mode 100644 index 000000000..f2da4e476 --- /dev/null +++ b/docs/vmgateway.md @@ -0,0 +1,291 @@ +--- +sort: 8 +--- + +# vmgateway + + +vmgateway + +`vmgateway` is a proxy for Victoria Metrics TSDB. It provides the following features: + +* Rate Limiter + * Based on cluster tenants' utilization supports multiple time interval limits for ingestion/retrieving metrics +* Token Access Control + * Supports additional per-label access control for Single and Cluster versions of Victoria Metrics TSDB + * Provides access by tenantID at Cluster version + * Allows to separate write/read/admin access to data + +`vmgateway` is included in an [enterprise package](https://victoriametrics.com/enterprise.html). + + +## Access Control + +vmgateway-ac + +`vmgateway` supports jwt based authentication. With jwt payload can be configured access to specific tenant, labels, read/write. 
+
+The jwt token must be in the following format:
+```json
+{
+  "exp": 1617304574,
+  "vm_access": {
+    "tenant_id": {
+      "account_id": 1,
+      "project_id": 5
+    },
+    "extra_labels": {
+      "team": "dev",
+      "project": "mobile"
+    },
+    "mode": 1
+  }
+}
+```
+Where:
+- `exp` - required, expire time in unix_timestamp. If the token has expired, `vmgateway` rejects the request.
+- `vm_access` - required, dict with claim info, minimum form: `{"vm_access": {"tenant_id": {}}}`
+- `tenant_id` - optional, makes sense only for cluster mode, routes the request to the corresponding tenant.
+- `extra_labels` - optional, key-value pairs for label filters - added to ingested or selected metrics.
+- `mode` - optional, access mode for api - read, write, full. Supported values: 0 - full (default value), 1 - read, 2 - write.
+
+## QuickStart
+
+Start the single-node version of Victoria Metrics
+
+```bash
+# single
+# start node
+./bin/victoria-metrics --selfScrapeInterval=10s
+```
+
+Start vmgateway
+
+```bash
+./bin/vmgateway -eula -enable.auth -read.url http://localhost:8428 --write.url http://localhost:8428
+```
+
+Retrieve data from the database
+```bash
+curl 'http://localhost:8431/api/v1/series/count' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ2bV9hY2Nlc3MiOnsidGVuYW50X2lkIjp7fSwicm9sZSI6MX0sImV4cCI6MTkzOTM0NjIxMH0.5WUxEfdcV9hKo4CtQdtuZYOGpGXWwaqM9VuVivMMrVg'
+```
+
+ A request with an incorrect token or without a token will be rejected:
+```bash
+curl 'http://localhost:8431/api/v1/series/count'
+
+curl 'http://localhost:8431/api/v1/series/count' -H 'Authorization: Bearer incorrect-token'
+```
+
+
+## Rate Limiter
+
+vmgateway-rl
+
+ Limits incoming requests by given pre-configured limits. It supports read and write limiting by a tenant.
+
+ `vmgateway` needs a datasource for rate limit queries. It can be a single-node or cluster version of `victoria-metrics`.
+It must have metrics scraped from the cluster that you want to rate limit.
+
+List of supported limit types:
+- `queries` - count of API requests made by the tenant to the read API, such as `/api/v1/query`, `/api/v1/series` and others.
+- `active_series` - count of current active series at the given tenant.
+- `new_series` - count of created series, aka churn rate.
+- `rows_inserted` - count of inserted rows per tenant.
+
+List of supported time windows:
+- `minute`
+- `hour`
+
+Limits can be specified per tenant or at the global level if you omit `project_id` and `account_id`.
+
+Example of configuration file:
+
+```yaml
+limits:
+  - type: queries
+    value: 1000
+    resolution: minute
+  - type: queries
+    value: 10000
+    resolution: hour
+  - type: queries
+    value: 10
+    resolution: minute
+    project_id: 5
+    account_id: 1
+```
+
+## QuickStart
+
+ The cluster version is required for rate limiting.
+```bash
+# start datasource for cluster metrics
+
+cat << EOF > cluster.yaml
+scrape_configs:
+  - job_name: cluster
+    scrape_interval: 5s
+    static_configs:
+      - targets: ['127.0.0.1:8481','127.0.0.1:8482','127.0.0.1:8480']
+EOF
+
+./bin/victoria-metrics --promscrape.config cluster.yaml
+
+# start cluster
+
+# start vmstorage, vmselect and vminsert
+./bin/vmstorage -eula
+./bin/vmselect -eula -storageNode 127.0.0.1:8401
+./bin/vminsert -eula -storageNode 127.0.0.1:8400
+
+# create base rate limiting config:
+cat << EOF > limit.yaml
+limits:
+  - type: queries
+    value: 100
+  - type: rows_inserted
+    value: 100000
+  - type: new_series
+    value: 1000
+  - type: active_series
+    value: 100000
+  - type: queries
+    value: 1
+    account_id: 15
+EOF
+
+# start gateway with clusterMode
+./bin/vmgateway -eula -enable.rateLimit -ratelimit.config limit.yaml -datasource.url http://localhost:8428 -enable.auth -clusterMode -write.url=http://localhost:8480 --read.url=http://localhost:8481
+
+# ingest simple metric to tenant 1:5
+curl 'http://localhost:8431/api/v1/import/prometheus' -X POST -d 'foo{bar="baz1"} 123' -H 'Authorization: Bearer 
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MjAxNjIwMDAwMDAsInZtX2FjY2VzcyI6eyJ0ZW5hbnRfaWQiOnsiYWNjb3VudF9pZCI6MTV9fX0.PB1_KXDKPUp-40pxOGk6lt_jt9Yq80PIMpWVJqSForQ' +# read metric from tenant 1:5 +curl 'http://localhost:8431/api/v1/labels' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MjAxNjIwMDAwMDAsInZtX2FjY2VzcyI6eyJ0ZW5hbnRfaWQiOnsiYWNjb3VudF9pZCI6MTV9fX0.PB1_KXDKPUp-40pxOGk6lt_jt9Yq80PIMpWVJqSForQ' + +# check rate limit +``` + +## Configuration + +The shortlist of configuration flags is the following: +```bash + -clusterMode + enable it for cluster version + -datasource.appendTypePrefix + Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to VMSelect URL. + -datasource.basicAuth.password string + Optional basic auth password for -datasource.url + -datasource.basicAuth.username string + Optional basic auth username for -datasource.url + -datasource.lookback duration + Lookback defines how far to look into past when evaluating queries. For example, if datasource.lookback=5m then param "time" with value now()-5m will be added to every query. + -datasource.maxIdleConnections int + Defines the number of idle (keep-alive connections) to configured datasource.Consider to set this value equal to the value: groups_total * group.concurrency. Too low value may result into high number of sockets in TIME_WAIT state. (default 100) + -datasource.queryStep duration + queryStep defines how far a value can fallback to when evaluating queries. For example, if datasource.queryStep=15s then param "step" with value "15s" will be added to every query. + -datasource.tlsCAFile string + Optional path to TLS CA file to use for verifying connections to -datasource.url. 
By default system CA is used + -datasource.tlsCertFile string + Optional path to client-side TLS certificate file to use when connecting to -datasource.url + -datasource.tlsInsecureSkipVerify + Whether to skip tls verification when connecting to -datasource.url + -datasource.tlsKeyFile string + Optional path to client-side TLS certificate key to use when connecting to -datasource.url + -datasource.tlsServerName string + Optional TLS server name to use for connections to -datasource.url. By default the server name from -datasource.url is used + -datasource.url string + Victoria Metrics or VMSelect url. Required parameter. E.g. http://127.0.0.1:8428 + -enable.auth + enables auth with jwt token + -enable.rateLimit + enables rate limiter + -enableTCP6 + Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used + -envflag.enable + Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isnt set + -envflag.prefix string + Prefix for environment variables if -envflag.enable is set + -eula + By specifying this flag you confirm that you have an enterprise license and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf + -fs.disableMmap + Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread() + -http.connTimeout duration + Incoming http connections are closed after the configured timeout. This may help spreading incoming load among a cluster of services behind load balancer. 
Note that the real timeout may be bigger by up to 10% as a protection from Thundering herd problem (default 2m0s) + -http.disableResponseCompression + Disable compression of HTTP responses for saving CPU resources. By default compression is enabled to save network bandwidth + -http.idleConnTimeout duration + Timeout for incoming idle http connections (default 1m0s) + -http.maxGracefulShutdownDuration duration + The maximum duration for graceful shutdown of HTTP server. Highly loaded server may require increased value for graceful shutdown (default 7s) + -http.pathPrefix string + An optional prefix to add to all the paths handled by http server. For example, if '-http.pathPrefix=/foo/bar' is set, then all the http requests will be handled on '/foo/bar/*' paths. This may be useful for proxied requests. See https://www.robustperception.io/using-external-urls-and-proxies-with-prometheus + -http.shutdownDelay duration + Optional delay before http server shutdown. During this dealy the servier returns non-OK responses from /health page, so load balancers can route new requests to other servers + -httpAuth.password string + Password for HTTP Basic Auth. The authentication is disabled if -httpAuth.username is empty + -httpAuth.username string + Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password + -httpListenAddr string + TCP address to listen for http connections (default ":8431") + -loggerDisableTimestamps + Whether to disable writing timestamps in logs + -loggerErrorsPerSecondLimit int + Per-second limit on the number of ERROR messages. If more than the given number of errors are emitted per second, then the remaining errors are suppressed. Zero value disables the rate limit + -loggerFormat string + Format for logs. Possible values: default, json (default "default") + -loggerLevel string + Minimum level of errors to log. 
Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "INFO") + -loggerOutput string + Output for the logs. Supported values: stderr, stdout (default "stderr") + -loggerTimezone string + Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") + -loggerWarnsPerSecondLimit int + Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit + -memory.allowedBytes size + Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) + -memory.allowedPercent float + Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) + -metricsAuthKey string + Auth key for /metrics. It overrides httpAuth settings + -pprofAuthKey string + Auth key for /debug/pprof. It overrides httpAuth settings + -ratelimit.config string + path for configuration file + -ratelimit.extraLabels array + additional labels, that will be applied to fetchdata from datasource + Supports array of values separated by comma or specified via multiple flags. + -ratelimit.refreshInterval duration + (default 5s) + -read.url string + read access url address, example: http://vmselect:8481 + -tls + Whether to enable TLS (aka HTTPS) for incoming requests. 
-tlsCertFile and -tlsKeyFile must be set if -tls is set + -tlsCertFile string + Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs, since RSA certs are slow + -tlsKeyFile string + Path to file with TLS key. Used only if -tls is set + -version + Show VictoriaMetrics version + -write.url string + write access url address, example: http://vminsert:8480 + +``` + +## TroubleShooting + +* Access control: + * incorrect `jwt` format, try https://jwt.io/#debugger-io with our tokens + * expired token, check `exp` field. +* Rate Limiting: + * `scrape_interval` at datasource, reduce it to apply limits faster. + + +## Limitations + +* Access Control: + * `jwt` token must be validated by external system, currently `vmgateway` can't validate the signature. +* RateLimiting: + * limits applied based on queries to `datasource.url` + * only cluster version can be rate-limited. diff --git a/docs/vmrestore.md b/docs/vmrestore.md index 4abd1bc9c..4d7a66894 100644 --- a/docs/vmrestore.md +++ b/docs/vmrestore.md @@ -1,4 +1,8 @@ -## vmrestore +--- +sort: 6 +--- + +# vmrestore `vmrestore` restores data from backups created by [vmbackup](https://victoriametrics.github.io/vbackup.html). VictoriaMetrics `v1.29.0` and newer versions must be used for working with the restored data. 
diff --git a/go.mod b/go.mod index eff0baaeb..86ced0b91 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,6 @@ module github.com/VictoriaMetrics/VictoriaMetrics require ( - cloud.google.com/go v0.80.0 // indirect cloud.google.com/go/storage v1.14.0 github.com/VictoriaMetrics/fastcache v1.5.8 @@ -10,16 +9,17 @@ require ( github.com/VictoriaMetrics/fasthttp v1.0.14 github.com/VictoriaMetrics/metrics v1.17.2 github.com/VictoriaMetrics/metricsql v0.14.0 - github.com/aws/aws-sdk-go v1.38.5 + github.com/aws/aws-sdk-go v1.38.15 github.com/cespare/xxhash/v2 v2.1.1 github.com/cheggaaa/pb/v3 v3.0.7 github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/fatih/color v1.10.0 // indirect github.com/go-kit/kit v0.10.0 + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.3 github.com/influxdata/influxdb v1.8.4 github.com/klauspost/compress v1.11.13 - github.com/mattn/go-runewidth v0.0.10 // indirect + github.com/mattn/go-runewidth v0.0.12 // indirect github.com/oklog/ulid v1.3.1 github.com/prometheus/client_golang v1.10.0 // indirect github.com/prometheus/common v0.20.0 // indirect @@ -34,11 +34,13 @@ require ( github.com/valyala/histogram v1.1.2 github.com/valyala/quicktemplate v1.6.3 golang.org/x/mod v0.4.2 // indirect - golang.org/x/net v0.0.0-20210324205630-d1beb07c2056 // indirect - golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558 - golang.org/x/sys v0.0.0-20210324051608-47abb6519492 - google.golang.org/api v0.43.0 - google.golang.org/genproto v0.0.0-20210325141258-5636347f2b14 // indirect + golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 + golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 + golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 + golang.org/x/text v0.3.6 // indirect + google.golang.org/api v0.44.0 + google.golang.org/genproto v0.0.0-20210406143921-e86de6bf7a46 // indirect + google.golang.org/grpc v1.37.0 // indirect gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index 
8369e2138..2db87c3e5 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.80.0 h1:kAdyAMrj9CjqOSGiluseVjIgAyQ3uxADYtUYR6MwYeY= -cloud.google.com/go v0.80.0/go.mod h1:fqpb6QRi1CFGAMXDoE72G+b+Ybv7dMB/T1tbExDHktI= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -125,8 +125,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.38.5 h1:iUc1s0J/5Akc/pM5LNMKUEGTofSUjA3aiXL+hQwMbww= -github.com/aws/aws-sdk-go v1.38.5/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.15 h1:usaPeqoxFUzy0FfBLZLZHya5Kv2cpURjb1jqCa7+odA= +github.com/aws/aws-sdk-go v1.38.15/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod 
h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -196,6 +196,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -342,8 +343,9 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -367,8 +369,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -563,8 +566,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= -github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty 
v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -923,8 +926,8 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210324205630-d1beb07c2056 h1:sANdAef76Ioam9aQUUdcAqricwY/WUaMc4+7LY4eGg8= -golang.org/x/net v0.0.0-20210324205630-d1beb07c2056/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -936,8 +939,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558 h1:D7nTwh4J0i+5mW4Zjzn5omvlr6YBcWywE6KOcatyNxY= -golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1019,11 +1022,11 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1031,8 +1034,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1141,9 +1145,9 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= -google.golang.org/api v0.43.0 h1:4sAyIHT6ZohtAQDoxws+ez7bROYmUlOVvsUscYCDTqA= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1196,11 +1200,10 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210323160006-e668133fea6a/go.mod h1:f2Bd7+2PlaVKmvKQ52aspJZXIDaRQBVdOOBfJ5i8OEs= -google.golang.org/genproto v0.0.0-20210325141258-5636347f2b14 h1:0VNRpy5TroA/6mYt3pPEq+E3oomxLJ+FUit3+oIsUy4= -google.golang.org/genproto v0.0.0-20210325141258-5636347f2b14/go.mod h1:f2Bd7+2PlaVKmvKQ52aspJZXIDaRQBVdOOBfJ5i8OEs= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210406143921-e86de6bf7a46 h1:f4STrQZf8jaowsiUitigvrqMCCM4QJH1A2JCSI7U1ow= +google.golang.org/genproto v0.0.0-20210406143921-e86de6bf7a46/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1223,8 +1226,10 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc 
v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/lib/persistentqueue/fastqueue.go b/lib/persistentqueue/fastqueue.go index 5c25933f6..7dda73a86 100644 --- a/lib/persistentqueue/fastqueue.go +++ b/lib/persistentqueue/fastqueue.go @@ -8,7 +8,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" ) -// FastQueue is a wrapper around Queue, which prefers sending data via memory. +// FastQueue is fast persistent queue, which prefers sending data via memory. // // It falls back to sending data via file when readers don't catch up with writers. type FastQueue struct { @@ -20,7 +20,7 @@ type FastQueue struct { cond sync.Cond // pq is file-based queue - pq *Queue + pq *queue // ch is in-memory queue ch chan *bytesutil.ByteBuffer @@ -40,7 +40,7 @@ type FastQueue struct { // Otherwise its size is limited by maxPendingBytes. The oldest data is dropped when the queue // reaches maxPendingSize. 
func MustOpenFastQueue(path, name string, maxInmemoryBlocks, maxPendingBytes int) *FastQueue { - pq := MustOpen(path, name, maxPendingBytes) + pq := mustOpen(path, name, maxPendingBytes) fq := &FastQueue{ pq: pq, ch: make(chan *bytesutil.ByteBuffer, maxInmemoryBlocks), @@ -174,7 +174,12 @@ func (fq *FastQueue) MustReadBlock(dst []byte) ([]byte, bool) { return dst, true } if n := fq.pq.GetPendingBytes(); n > 0 { - return fq.pq.MustReadBlock(dst) + data, ok := fq.pq.MustReadBlockNonblocking(dst) + if ok { + return data, true + } + dst = data + continue } // There are no blocks. Wait for new block. diff --git a/lib/persistentqueue/persistentqueue.go b/lib/persistentqueue/persistentqueue.go index 916276c3f..4c082a9f8 100644 --- a/lib/persistentqueue/persistentqueue.go +++ b/lib/persistentqueue/persistentqueue.go @@ -8,7 +8,6 @@ import ( "os" "regexp" "strconv" - "sync" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding" @@ -26,8 +25,10 @@ const defaultChunkFileSize = (MaxBlockSize + 8) * 16 var chunkFileNameRegex = regexp.MustCompile("^[0-9A-F]{16}$") -// Queue represents persistent queue. -type Queue struct { +// queue represents persistent queue. +// +// It is unsafe to call queue methods from concurrent goroutines. +type queue struct { chunkFileSize uint64 maxBlockSize uint64 maxPendingBytes uint64 @@ -37,13 +38,6 @@ type Queue struct { flockF *os.File - // mu protects all the fields below. - mu sync.Mutex - - // cond is used for notifying blocked readers when new data has been added - // or when MustClose is called. - cond sync.Cond - reader *filestream.Reader readerPath string readerOffset uint64 @@ -74,10 +68,7 @@ type Queue struct { // ResetIfEmpty resets q if it is empty. // // This is needed in order to remove chunk file associated with empty q. 
-func (q *Queue) ResetIfEmpty() { - q.mu.Lock() - defer q.mu.Unlock() - +func (q *queue) ResetIfEmpty() { if q.readerOffset != q.writerOffset { // The queue isn't empty. return @@ -86,10 +77,13 @@ func (q *Queue) ResetIfEmpty() { // The file is too small to drop. Leave it as is in order to reduce filesystem load. return } + q.mustResetFiles() +} + +func (q *queue) mustResetFiles() { if q.readerPath != q.writerPath { logger.Panicf("BUG: readerPath=%q doesn't match writerPath=%q", q.readerPath, q.writerPath) } - q.reader.MustClose() q.writer.MustClose() fs.MustRemoveAll(q.readerPath) @@ -115,31 +109,29 @@ func (q *Queue) ResetIfEmpty() { } q.reader = r - if err := q.flushMetainfoLocked(); err != nil { + if err := q.flushMetainfo(); err != nil { logger.Panicf("FATAL: cannot flush metainfo: %s", err) } } // GetPendingBytes returns the number of pending bytes in the queue. -func (q *Queue) GetPendingBytes() uint64 { - q.mu.Lock() +func (q *queue) GetPendingBytes() uint64 { n := q.writerOffset - q.readerOffset - q.mu.Unlock() return n } -// MustOpen opens persistent queue from the given path. +// mustOpen opens persistent queue from the given path. // // If maxPendingBytes is greater than 0, then the max queue size is limited by this value. // The oldest data is deleted when queue size exceeds maxPendingBytes. 
-func MustOpen(path, name string, maxPendingBytes int) *Queue { +func mustOpen(path, name string, maxPendingBytes int) *queue { if maxPendingBytes < 0 { maxPendingBytes = 0 } - return mustOpen(path, name, defaultChunkFileSize, MaxBlockSize, uint64(maxPendingBytes)) + return mustOpenInternal(path, name, defaultChunkFileSize, MaxBlockSize, uint64(maxPendingBytes)) } -func mustOpen(path, name string, chunkFileSize, maxBlockSize, maxPendingBytes uint64) *Queue { +func mustOpenInternal(path, name string, chunkFileSize, maxBlockSize, maxPendingBytes uint64) *queue { if chunkFileSize < 8 || chunkFileSize-8 < maxBlockSize { logger.Panicf("BUG: too small chunkFileSize=%d for maxBlockSize=%d; chunkFileSize must fit at least one block", chunkFileSize, maxBlockSize) } @@ -166,15 +158,14 @@ func mustCreateFlockFile(path string) *os.File { return f } -func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingBytes uint64) (*Queue, error) { +func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingBytes uint64) (*queue, error) { // Protect from concurrent opens. - var q Queue + var q queue q.chunkFileSize = chunkFileSize q.maxBlockSize = maxBlockSize q.maxPendingBytes = maxPendingBytes q.dir = path q.name = name - q.cond.L = &q.mu q.blocksDropped = metrics.GetOrCreateCounter(fmt.Sprintf(`vm_persistentqueue_blocks_dropped_total{path=%q}`, path)) q.bytesDropped = metrics.GetOrCreateCounter(fmt.Sprintf(`vm_persistentqueue_bytes_dropped_total{path=%q}`, path)) @@ -346,17 +337,8 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB // MustClose closes q. // -// It unblocks all the MustReadBlock calls. -// // MustWriteBlock mustn't be called during and after the call to MustClose. -func (q *Queue) MustClose() { - q.mu.Lock() - defer q.mu.Unlock() - - // Unblock goroutines blocked on cond in MustReadBlock. - q.mustStop = true - q.cond.Broadcast() - +func (q *queue) MustClose() { // Close writer. 
q.writer.MustClose() q.writer = nil @@ -366,7 +348,7 @@ func (q *Queue) MustClose() { q.reader = nil // Store metainfo - if err := q.flushMetainfoLocked(); err != nil { + if err := q.flushMetainfo(); err != nil { logger.Panicf("FATAL: cannot flush chunked queue metainfo: %s", err) } @@ -377,11 +359,11 @@ func (q *Queue) MustClose() { q.flockF = nil } -func (q *Queue) chunkFilePath(offset uint64) string { +func (q *queue) chunkFilePath(offset uint64) string { return fmt.Sprintf("%s/%016X", q.dir, offset) } -func (q *Queue) metainfoPath() string { +func (q *queue) metainfoPath() string { return q.dir + "/metainfo.json" } @@ -390,14 +372,10 @@ func (q *Queue) metainfoPath() string { // The block size cannot exceed MaxBlockSize. // // It is safe calling this function from concurrent goroutines. -func (q *Queue) MustWriteBlock(block []byte) { +func (q *queue) MustWriteBlock(block []byte) { if uint64(len(block)) > q.maxBlockSize { logger.Panicf("BUG: too big block to send: %d bytes; it mustn't exceed %d bytes", len(block), q.maxBlockSize) } - - q.mu.Lock() - defer q.mu.Unlock() - if q.mustStop { logger.Panicf("BUG: MustWriteBlock cannot be called after MustClose") } @@ -416,7 +394,10 @@ func (q *Queue) MustWriteBlock(block []byte) { bb := blockBufPool.Get() for q.writerOffset-q.readerOffset > maxPendingBytes { var err error - bb.B, err = q.readBlockLocked(bb.B[:0]) + bb.B, err = q.readBlock(bb.B[:0]) + if err == errEmptyQueue { + break + } if err != nil { logger.Panicf("FATAL: cannot read the oldest block %s", err) } @@ -429,38 +410,18 @@ func (q *Queue) MustWriteBlock(block []byte) { return } } - if err := q.writeBlockLocked(block); err != nil { + if err := q.writeBlock(block); err != nil { logger.Panicf("FATAL: %s", err) } - - // Notify blocked reader if any. - // See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/484 for details. 
- q.cond.Signal() } var blockBufPool bytesutil.ByteBufferPool -func (q *Queue) writeBlockLocked(block []byte) error { +func (q *queue) writeBlock(block []byte) error { if q.writerLocalOffset+q.maxBlockSize+8 > q.chunkFileSize { - // Finalize the current chunk and start new one. - q.writer.MustClose() - // There is no need to do fs.MustSyncPath(q.writerPath) here, - // since MustClose already does this. - if n := q.writerOffset % q.chunkFileSize; n > 0 { - q.writerOffset += (q.chunkFileSize - n) + if err := q.nextChunkFileForWrite(); err != nil { + return fmt.Errorf("cannot create next chunk file: %w", err) } - q.writerFlushedOffset = q.writerOffset - q.writerLocalOffset = 0 - q.writerPath = q.chunkFilePath(q.writerOffset) - w, err := filestream.Create(q.writerPath, false) - if err != nil { - return fmt.Errorf("cannot create chunk file %q: %w", q.writerPath, err) - } - q.writer = w - if err := q.flushMetainfoLocked(); err != nil { - return fmt.Errorf("cannot flush metainfo: %w", err) - } - fs.MustSyncPath(q.dir) } // Write block len. @@ -479,62 +440,61 @@ func (q *Queue) writeBlockLocked(block []byte) error { } q.blocksWritten.Inc() q.bytesWritten.Add(len(block)) - return q.flushMetainfoIfNeededLocked(true) + return q.flushWriterMetainfoIfNeeded() } -// MustReadBlock appends the next block from q to dst and returns the result. -// -// false is returned after MustClose call. -// -// It is safe calling this function from concurrent goroutines. -func (q *Queue) MustReadBlock(dst []byte) ([]byte, bool) { - q.mu.Lock() - defer q.mu.Unlock() +func (q *queue) nextChunkFileForWrite() error { + // Finalize the current chunk and start new one. + q.writer.MustClose() + // There is no need to do fs.MustSyncPath(q.writerPath) here, + // since MustClose already does this. 
+ if n := q.writerOffset % q.chunkFileSize; n > 0 { + q.writerOffset += q.chunkFileSize - n + } + q.writerFlushedOffset = q.writerOffset + q.writerLocalOffset = 0 + q.writerPath = q.chunkFilePath(q.writerOffset) + w, err := filestream.Create(q.writerPath, false) + if err != nil { + return fmt.Errorf("cannot create chunk file %q: %w", q.writerPath, err) + } + q.writer = w + if err := q.flushMetainfo(); err != nil { + return fmt.Errorf("cannot flush metainfo: %w", err) + } + fs.MustSyncPath(q.dir) + return nil +} - for { - if q.mustStop { +// MustReadBlockNonblocking appends the next block from q to dst and returns the result. +// +// false is returned if q is empty. +func (q *queue) MustReadBlockNonblocking(dst []byte) ([]byte, bool) { + if q.readerOffset > q.writerOffset { + logger.Panicf("BUG: readerOffset=%d cannot exceed writerOffset=%d", q.readerOffset, q.writerOffset) + } + if q.readerOffset == q.writerOffset { + return dst, false + } + var err error + dst, err = q.readBlock(dst) + if err != nil { + if err == errEmptyQueue { return dst, false } - if q.readerOffset > q.writerOffset { - logger.Panicf("BUG: readerOffset=%d cannot exceed writerOffset=%d", q.readerOffset, q.writerOffset) - } - if q.readerOffset < q.writerOffset { - break - } - q.cond.Wait() - } - - data, err := q.readBlockLocked(dst) - if err != nil { - // Skip the current chunk, since it may be broken. - q.readerOffset += q.chunkFileSize - q.readerOffset%q.chunkFileSize - _ = q.flushMetainfoLocked() logger.Panicf("FATAL: %s", err) } - return data, true + return dst, true } -func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) { +func (q *queue) readBlock(dst []byte) ([]byte, error) { if q.readerLocalOffset+q.maxBlockSize+8 > q.chunkFileSize { - // Remove the current chunk and go to the next chunk. 
- q.reader.MustClose() - fs.MustRemoveAll(q.readerPath) - if n := q.readerOffset % q.chunkFileSize; n > 0 { - q.readerOffset += (q.chunkFileSize - n) + if err := q.nextChunkFileForRead(); err != nil { + return dst, fmt.Errorf("cannot open next chunk file: %w", err) } - q.readerLocalOffset = 0 - q.readerPath = q.chunkFilePath(q.readerOffset) - r, err := filestream.Open(q.readerPath, true) - if err != nil { - return dst, fmt.Errorf("cannot open chunk file %q: %w", q.readerPath, err) - } - q.reader = r - if err := q.flushMetainfoLocked(); err != nil { - return dst, fmt.Errorf("cannot flush metainfo: %w", err) - } - fs.MustSyncPath(q.dir) } +again: // Read block len. header := headerBufPool.Get() header.B = bytesutil.Resize(header.B, 8) @@ -542,27 +502,73 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) { blockLen := encoding.UnmarshalUint64(header.B) headerBufPool.Put(header) if err != nil { - return dst, fmt.Errorf("cannot read header with size 8 bytes from %q: %w", q.readerPath, err) + logger.Errorf("skipping corrupted %q, since header with size 8 bytes cannot be read from it: %s", q.readerPath, err) + if err := q.skipBrokenChunkFile(); err != nil { + return dst, err + } + goto again } if blockLen > q.maxBlockSize { - return dst, fmt.Errorf("too big block size read from %q: %d bytes; cannot exceed %d bytes", q.readerPath, blockLen, q.maxBlockSize) + logger.Errorf("skipping corrupted %q, since too big block size is read from it: %d bytes; cannot exceed %d bytes", q.readerPath, blockLen, q.maxBlockSize) + if err := q.skipBrokenChunkFile(); err != nil { + return dst, err + } + goto again } // Read block contents. 
dstLen := len(dst) dst = bytesutil.Resize(dst, dstLen+int(blockLen)) if err := q.readFull(dst[dstLen:]); err != nil { - return dst, fmt.Errorf("cannot read block contents with size %d bytes from %q: %w", blockLen, q.readerPath, err) + logger.Errorf("skipping corrupted %q, since contents with size %d bytes cannot be read from it: %s", q.readerPath, blockLen, err) + if err := q.skipBrokenChunkFile(); err != nil { + return dst[:dstLen], err + } + goto again } q.blocksRead.Inc() q.bytesRead.Add(int(blockLen)) - if err := q.flushMetainfoIfNeededLocked(false); err != nil { + if err := q.flushReaderMetainfoIfNeeded(); err != nil { return dst, err } return dst, nil } -func (q *Queue) write(buf []byte) error { +func (q *queue) skipBrokenChunkFile() error { + // Try to recover from broken chunk file by skipping it. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1030 + q.readerOffset += q.chunkFileSize - q.readerOffset%q.chunkFileSize + if q.readerOffset >= q.writerOffset { + q.mustResetFiles() + return errEmptyQueue + } + return q.nextChunkFileForRead() +} + +var errEmptyQueue = fmt.Errorf("the queue is empty") + +func (q *queue) nextChunkFileForRead() error { + // Remove the current chunk and go to the next chunk. 
+ q.reader.MustClose() + fs.MustRemoveAll(q.readerPath) + if n := q.readerOffset % q.chunkFileSize; n > 0 { + q.readerOffset += q.chunkFileSize - n + } + q.readerLocalOffset = 0 + q.readerPath = q.chunkFilePath(q.readerOffset) + r, err := filestream.Open(q.readerPath, true) + if err != nil { + return fmt.Errorf("cannot open chunk file %q: %w", q.readerPath, err) + } + q.reader = r + if err := q.flushMetainfo(); err != nil { + return fmt.Errorf("cannot flush metainfo: %w", err) + } + fs.MustSyncPath(q.dir) + return nil +} + +func (q *queue) write(buf []byte) error { bufLen := uint64(len(buf)) n, err := q.writer.Write(buf) if err != nil { @@ -576,7 +582,7 @@ func (q *Queue) write(buf []byte) error { return nil } -func (q *Queue) readFull(buf []byte) error { +func (q *queue) readFull(buf []byte) error { bufLen := uint64(len(buf)) if q.readerOffset+bufLen > q.writerFlushedOffset { q.writer.MustFlush(false) @@ -594,22 +600,32 @@ func (q *Queue) readFull(buf []byte) error { return nil } -func (q *Queue) flushMetainfoIfNeededLocked(flushData bool) error { +func (q *queue) flushReaderMetainfoIfNeeded() error { t := fasttime.UnixTimestamp() if t == q.lastMetainfoFlushTime { return nil } - if flushData { - q.writer.MustFlush(true) - } - if err := q.flushMetainfoLocked(); err != nil { + if err := q.flushMetainfo(); err != nil { return fmt.Errorf("cannot flush metainfo: %w", err) } q.lastMetainfoFlushTime = t return nil } -func (q *Queue) flushMetainfoLocked() error { +func (q *queue) flushWriterMetainfoIfNeeded() error { + t := fasttime.UnixTimestamp() + if t == q.lastMetainfoFlushTime { + return nil + } + q.writer.MustFlush(true) + if err := q.flushMetainfo(); err != nil { + return fmt.Errorf("cannot flush metainfo: %w", err) + } + q.lastMetainfoFlushTime = t + return nil +} + +func (q *queue) flushMetainfo() error { mi := &metainfo{ Name: q.name, ReaderOffset: q.readerOffset, diff --git a/lib/persistentqueue/persistentqueue_test.go 
b/lib/persistentqueue/persistentqueue_test.go index e4b83d540..e135eb415 100644 --- a/lib/persistentqueue/persistentqueue_test.go +++ b/lib/persistentqueue/persistentqueue_test.go @@ -5,16 +5,14 @@ import ( "io/ioutil" "os" "strconv" - "sync" "testing" - "time" ) func TestQueueOpenClose(t *testing.T) { path := "queue-open-close" mustDeleteDir(path) for i := 0; i < 3; i++ { - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) if n := q.GetPendingBytes(); n > 0 { t.Fatalf("pending bytes must be 0; got %d", n) } @@ -28,7 +26,7 @@ func TestQueueOpen(t *testing.T) { path := "queue-open-invalid-metainfo" mustCreateDir(path) mustCreateFile(path+"/metainfo.json", "foobarbaz") - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) q.MustClose() mustDeleteDir(path) }) @@ -38,7 +36,7 @@ func TestQueueOpen(t *testing.T) { mustCreateEmptyMetainfo(path, "foobar") mustCreateFile(path+"/junk-file", "foobar") mustCreateDir(path + "/junk-dir") - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) q.MustClose() mustDeleteDir(path) }) @@ -47,7 +45,7 @@ func TestQueueOpen(t *testing.T) { mustCreateDir(path) mustCreateEmptyMetainfo(path, "foobar") mustCreateFile(fmt.Sprintf("%s/%016X", path, 1234), "qwere") - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) q.MustClose() mustDeleteDir(path) }) @@ -56,7 +54,7 @@ func TestQueueOpen(t *testing.T) { mustCreateDir(path) mustCreateEmptyMetainfo(path, "foobar") mustCreateFile(fmt.Sprintf("%s/%016X", path, 100*uint64(defaultChunkFileSize)), "asdf") - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) q.MustClose() mustDeleteDir(path) }) @@ -72,7 +70,7 @@ func TestQueueOpen(t *testing.T) { t.Fatalf("unexpected error: %s", err) } mustCreateFile(fmt.Sprintf("%s/%016X", path, 0), "adfsfd") - q := MustOpen(path, mi.Name, 0) + q := mustOpen(path, mi.Name, 0) q.MustClose() mustDeleteDir(path) }) @@ -86,7 +84,7 @@ func TestQueueOpen(t *testing.T) { if err := 
mi.WriteToFile(path + "/metainfo.json"); err != nil { t.Fatalf("unexpected error: %s", err) } - q := MustOpen(path, mi.Name, 0) + q := mustOpen(path, mi.Name, 0) q.MustClose() mustDeleteDir(path) }) @@ -94,7 +92,7 @@ func TestQueueOpen(t *testing.T) { path := "queue-open-metainfo-dir" mustCreateDir(path) mustCreateDir(path + "/metainfo.json") - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) q.MustClose() mustDeleteDir(path) }) @@ -110,7 +108,7 @@ func TestQueueOpen(t *testing.T) { t.Fatalf("unexpected error: %s", err) } mustCreateFile(fmt.Sprintf("%s/%016X", path, 0), "sdf") - q := MustOpen(path, mi.Name, 0) + q := mustOpen(path, mi.Name, 0) q.MustClose() mustDeleteDir(path) }) @@ -119,7 +117,7 @@ func TestQueueOpen(t *testing.T) { mustCreateDir(path) mustCreateEmptyMetainfo(path, "foobar") mustCreateFile(fmt.Sprintf("%s/%016X", path, 0), "sdfdsf") - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) q.MustClose() mustDeleteDir(path) }) @@ -133,7 +131,7 @@ func TestQueueOpen(t *testing.T) { t.Fatalf("unexpected error: %s", err) } mustCreateFile(fmt.Sprintf("%s/%016X", path, 0), "sdf") - q := MustOpen(path, "baz", 0) + q := mustOpen(path, "baz", 0) q.MustClose() mustDeleteDir(path) }) @@ -142,7 +140,7 @@ func TestQueueOpen(t *testing.T) { func TestQueueResetIfEmpty(t *testing.T) { path := "queue-reset-if-empty" mustDeleteDir(path) - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) defer func() { q.MustClose() mustDeleteDir(path) @@ -154,14 +152,18 @@ func TestQueueResetIfEmpty(t *testing.T) { for i := 0; i < 10; i++ { q.MustWriteBlock(block) var ok bool - buf, ok = q.MustReadBlock(buf[:0]) + buf, ok = q.MustReadBlockNonblocking(buf[:0]) if !ok { - t.Fatalf("unexpected ok=false returned from MustReadBlock") + t.Fatalf("unexpected ok=false returned from MustReadBlockNonblocking") } } q.ResetIfEmpty() if n := q.GetPendingBytes(); n > 0 { - t.Fatalf("unexpected non-zer pending bytes after queue reset: %d", n) + 
t.Fatalf("unexpected non-zero pending bytes after queue reset: %d", n) + } + q.ResetIfEmpty() + if n := q.GetPendingBytes(); n > 0 { + t.Fatalf("unexpected non-zero pending bytes after queue reset: %d", n) } } } @@ -169,7 +171,7 @@ func TestQueueResetIfEmpty(t *testing.T) { func TestQueueWriteRead(t *testing.T) { path := "queue-write-read" mustDeleteDir(path) - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) defer func() { q.MustClose() mustDeleteDir(path) @@ -188,9 +190,9 @@ func TestQueueWriteRead(t *testing.T) { var buf []byte var ok bool for _, block := range blocks { - buf, ok = q.MustReadBlock(buf[:0]) + buf, ok = q.MustReadBlockNonblocking(buf[:0]) if !ok { - t.Fatalf("unexpected ok=%v returned from MustReadBlock; want true", ok) + t.Fatalf("unexpected ok=%v returned from MustReadBlockNonblocking; want true", ok) } if string(buf) != string(block) { t.Fatalf("unexpected block read; got %q; want %q", buf, block) @@ -205,7 +207,7 @@ func TestQueueWriteRead(t *testing.T) { func TestQueueWriteCloseRead(t *testing.T) { path := "queue-write-close-read" mustDeleteDir(path) - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) defer func() { q.MustClose() mustDeleteDir(path) @@ -222,16 +224,16 @@ func TestQueueWriteCloseRead(t *testing.T) { t.Fatalf("pending bytes must be greater than 0; got %d", n) } q.MustClose() - q = MustOpen(path, "foobar", 0) + q = mustOpen(path, "foobar", 0) if n := q.GetPendingBytes(); n <= 0 { t.Fatalf("pending bytes must be greater than 0; got %d", n) } var buf []byte var ok bool for _, block := range blocks { - buf, ok = q.MustReadBlock(buf[:0]) + buf, ok = q.MustReadBlockNonblocking(buf[:0]) if !ok { - t.Fatalf("unexpected ok=%v returned from MustReadBlock; want true", ok) + t.Fatalf("unexpected ok=%v returned from MustReadBlockNonblocking; want true", ok) } if string(buf) != string(block) { t.Fatalf("unexpected block read; got %q; want %q", buf, block) @@ -243,137 +245,12 @@ func 
TestQueueWriteCloseRead(t *testing.T) { } } -func TestQueueReadEmpty(t *testing.T) { - path := "queue-read-empty" - mustDeleteDir(path) - q := MustOpen(path, "foobar", 0) - defer mustDeleteDir(path) - - resultCh := make(chan error) - go func() { - data, ok := q.MustReadBlock(nil) - var err error - if ok { - err = fmt.Errorf("unexpected ok=%v returned from MustReadBlock; want false", ok) - } else if len(data) > 0 { - err = fmt.Errorf("unexpected non-empty data returned from MustReadBlock: %q", data) - } - resultCh <- err - }() - if n := q.GetPendingBytes(); n > 0 { - t.Fatalf("pending bytes must be 0; got %d", n) - } - q.MustClose() - select { - case err := <-resultCh: - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - case <-time.After(time.Second): - t.Fatalf("timeout") - } -} - -func TestQueueReadWriteConcurrent(t *testing.T) { - path := "queue-read-write-concurrent" - mustDeleteDir(path) - q := MustOpen(path, "foobar", 0) - defer mustDeleteDir(path) - - blocksMap := make(map[string]bool, 1000) - var blocksMapLock sync.Mutex - blocks := make([]string, 1000) - for i := 0; i < 1000; i++ { - block := fmt.Sprintf("block #%d", i) - blocksMap[block] = true - blocks[i] = block - } - - // Start block readers - var readersWG sync.WaitGroup - for workerID := 0; workerID < 10; workerID++ { - readersWG.Add(1) - go func() { - defer readersWG.Done() - for { - block, ok := q.MustReadBlock(nil) - if !ok { - return - } - blocksMapLock.Lock() - if !blocksMap[string(block)] { - panic(fmt.Errorf("unexpected block read: %q", block)) - } - delete(blocksMap, string(block)) - blocksMapLock.Unlock() - } - }() - } - - // Start block writers - blocksCh := make(chan string) - var writersWG sync.WaitGroup - for workerID := 0; workerID < 10; workerID++ { - writersWG.Add(1) - go func(workerID int) { - defer writersWG.Done() - for block := range blocksCh { - q.MustWriteBlock([]byte(block)) - } - }(workerID) - } - for _, block := range blocks { - blocksCh <- block - } - 
close(blocksCh) - - // Wait for block writers to finish - writersWG.Wait() - - // Notify readers that the queue is closed - q.MustClose() - - // Wait for block readers to finish - readersWG.Wait() - - // Read the remaining blocks in q. - q = MustOpen(path, "foobar", 0) - defer q.MustClose() - resultCh := make(chan error) - go func() { - for len(blocksMap) > 0 { - block, ok := q.MustReadBlock(nil) - if !ok { - resultCh <- fmt.Errorf("unexpected ok=false returned from MustReadBlock") - return - } - if !blocksMap[string(block)] { - resultCh <- fmt.Errorf("unexpected block read from the queue: %q", block) - return - } - delete(blocksMap, string(block)) - } - resultCh <- nil - }() - select { - case err := <-resultCh: - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - case <-time.After(5 * time.Second): - t.Fatalf("timeout") - } - if n := q.GetPendingBytes(); n > 0 { - t.Fatalf("pending bytes must be 0; got %d", n) - } -} - func TestQueueChunkManagementSimple(t *testing.T) { path := "queue-chunk-management-simple" mustDeleteDir(path) const chunkFileSize = 100 const maxBlockSize = 20 - q := mustOpen(path, "foobar", chunkFileSize, maxBlockSize, 0) + q := mustOpenInternal(path, "foobar", chunkFileSize, maxBlockSize, 0) defer mustDeleteDir(path) defer q.MustClose() var blocks []string @@ -386,7 +263,7 @@ func TestQueueChunkManagementSimple(t *testing.T) { t.Fatalf("unexpected zero number of bytes pending") } for _, block := range blocks { - data, ok := q.MustReadBlock(nil) + data, ok := q.MustReadBlockNonblocking(nil) if !ok { t.Fatalf("unexpected ok=false") } @@ -404,7 +281,7 @@ func TestQueueChunkManagementPeriodicClose(t *testing.T) { mustDeleteDir(path) const chunkFileSize = 100 const maxBlockSize = 20 - q := mustOpen(path, "foobar", chunkFileSize, maxBlockSize, 0) + q := mustOpenInternal(path, "foobar", chunkFileSize, maxBlockSize, 0) defer func() { q.MustClose() mustDeleteDir(path) @@ -415,13 +292,13 @@ func TestQueueChunkManagementPeriodicClose(t 
*testing.T) { q.MustWriteBlock([]byte(block)) blocks = append(blocks, block) q.MustClose() - q = mustOpen(path, "foobar", chunkFileSize, maxBlockSize, 0) + q = mustOpenInternal(path, "foobar", chunkFileSize, maxBlockSize, 0) } if n := q.GetPendingBytes(); n == 0 { t.Fatalf("unexpected zero number of bytes pending") } for _, block := range blocks { - data, ok := q.MustReadBlock(nil) + data, ok := q.MustReadBlockNonblocking(nil) if !ok { t.Fatalf("unexpected ok=false") } @@ -429,7 +306,7 @@ func TestQueueChunkManagementPeriodicClose(t *testing.T) { t.Fatalf("unexpected block read; got %q; want %q", data, block) } q.MustClose() - q = mustOpen(path, "foobar", chunkFileSize, maxBlockSize, 0) + q = mustOpenInternal(path, "foobar", chunkFileSize, maxBlockSize, 0) } if n := q.GetPendingBytes(); n != 0 { t.Fatalf("unexpected non-zero number of pending bytes: %d", n) @@ -440,7 +317,7 @@ func TestQueueLimitedSize(t *testing.T) { const maxPendingBytes = 1000 path := "queue-limited-size" mustDeleteDir(path) - q := MustOpen(path, "foobar", maxPendingBytes) + q := mustOpen(path, "foobar", maxPendingBytes) defer func() { q.MustClose() mustDeleteDir(path) @@ -456,7 +333,7 @@ func TestQueueLimitedSize(t *testing.T) { var buf []byte var ok bool for _, block := range blocks { - buf, ok = q.MustReadBlock(buf[:0]) + buf, ok = q.MustReadBlockNonblocking(buf[:0]) if !ok { t.Fatalf("unexpected ok=false") } @@ -473,7 +350,7 @@ func TestQueueLimitedSize(t *testing.T) { if n := q.GetPendingBytes(); n > maxPendingBytes { t.Fatalf("too many pending bytes; got %d; mustn't exceed %d", n, maxPendingBytes) } - buf, ok = q.MustReadBlock(buf[:0]) + buf, ok = q.MustReadBlockNonblocking(buf[:0]) if !ok { t.Fatalf("unexpected ok=false") } diff --git a/lib/persistentqueue/persistentqueue_timing_test.go b/lib/persistentqueue/persistentqueue_timing_test.go index 02e87513f..e7a3b7874 100644 --- a/lib/persistentqueue/persistentqueue_timing_test.go +++ b/lib/persistentqueue/persistentqueue_timing_test.go @@ 
-2,13 +2,14 @@ package persistentqueue import ( "fmt" + "sync" "testing" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" ) func BenchmarkQueueThroughputSerial(b *testing.B) { - const iterationsCount = 10 + const iterationsCount = 100 for _, blockSize := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} { block := make([]byte, blockSize) b.Run(fmt.Sprintf("block-size-%d", blockSize), func(b *testing.B) { @@ -16,7 +17,7 @@ func BenchmarkQueueThroughputSerial(b *testing.B) { b.SetBytes(int64(blockSize) * iterationsCount) path := fmt.Sprintf("bench-queue-throughput-serial-%d", blockSize) mustDeleteDir(path) - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) defer func() { q.MustClose() mustDeleteDir(path) @@ -29,7 +30,7 @@ func BenchmarkQueueThroughputSerial(b *testing.B) { } func BenchmarkQueueThroughputConcurrent(b *testing.B) { - const iterationsCount = 10 + const iterationsCount = 100 for _, blockSize := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} { block := make([]byte, blockSize) b.Run(fmt.Sprintf("block-size-%d", blockSize), func(b *testing.B) { @@ -37,28 +38,31 @@ func BenchmarkQueueThroughputConcurrent(b *testing.B) { b.SetBytes(int64(blockSize) * iterationsCount) path := fmt.Sprintf("bench-queue-throughput-concurrent-%d", blockSize) mustDeleteDir(path) - q := MustOpen(path, "foobar", 0) + q := mustOpen(path, "foobar", 0) + var qLock sync.Mutex defer func() { q.MustClose() mustDeleteDir(path) }() b.RunParallel(func(pb *testing.PB) { for pb.Next() { + qLock.Lock() writeReadIteration(q, block, iterationsCount) + qLock.Unlock() } }) }) } } -func writeReadIteration(q *Queue, block []byte, iterationsCount int) { +func writeReadIteration(q *queue, block []byte, iterationsCount int) { for i := 0; i < iterationsCount; i++ { q.MustWriteBlock(block) } var ok bool bb := bbPool.Get() for i := 0; i < iterationsCount; i++ { - bb.B, ok = q.MustReadBlock(bb.B[:0]) + bb.B, ok = q.MustReadBlockNonblocking(bb.B[:0]) if !ok { 
panic(fmt.Errorf("unexpected ok=false")) } diff --git a/lib/promauth/config.go b/lib/promauth/config.go index f80b15ae2..268589f06 100644 --- a/lib/promauth/config.go +++ b/lib/promauth/config.go @@ -20,6 +20,15 @@ type TLSConfig struct { InsecureSkipVerify bool `yaml:"insecure_skip_verify,omitempty"` } +// Authorization represents generic authorization config. +// +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/ +type Authorization struct { + Type string `yaml:"type,omitempty"` + Credentials string `yaml:"credentials,omitempty"` + CredentialsFile string `yaml:"credentials_file,omitempty"` +} + // BasicAuthConfig represents basic auth config. type BasicAuthConfig struct { Username string `yaml:"username"` @@ -27,6 +36,24 @@ type BasicAuthConfig struct { PasswordFile string `yaml:"password_file,omitempty"` } +// HTTPClientConfig represents http client config. +type HTTPClientConfig struct { + Authorization *Authorization `yaml:"authorization,omitempty"` + BasicAuth *BasicAuthConfig `yaml:"basic_auth,omitempty"` + BearerToken string `yaml:"bearer_token,omitempty"` + BearerTokenFile string `yaml:"bearer_token_file,omitempty"` + TLSConfig *TLSConfig `yaml:"tls_config,omitempty"` +} + +// ProxyClientConfig represents proxy client config. +type ProxyClientConfig struct { + Authorization *Authorization `yaml:"proxy_authorization,omitempty"` + BasicAuth *BasicAuthConfig `yaml:"proxy_basic_auth,omitempty"` + BearerToken string `yaml:"proxy_bearer_token,omitempty"` + BearerTokenFile string `yaml:"proxy_bearer_token_file,omitempty"` + TLSConfig *TLSConfig `yaml:"proxy_tls_config,omitempty"` +} + // Config is auth config. type Config struct { // Optional `Authorization` header. @@ -80,10 +107,42 @@ func (ac *Config) NewTLSConfig() *tls.Config { return tlsCfg } +// NewConfig creates auth config for the given hcc. 
+func (hcc *HTTPClientConfig) NewConfig(baseDir string) (*Config, error) { + return NewConfig(baseDir, hcc.Authorization, hcc.BasicAuth, hcc.BearerToken, hcc.BearerTokenFile, hcc.TLSConfig) +} + +// NewConfig creates auth config for the given pcc. +func (pcc *ProxyClientConfig) NewConfig(baseDir string) (*Config, error) { + return NewConfig(baseDir, pcc.Authorization, pcc.BasicAuth, pcc.BearerToken, pcc.BearerTokenFile, pcc.TLSConfig) +} + // NewConfig creates auth config from the given args. -func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTokenFile string, tlsConfig *TLSConfig) (*Config, error) { +func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, bearerToken, bearerTokenFile string, tlsConfig *TLSConfig) (*Config, error) { var authorization string + if az != nil { + azType := "Bearer" + if az.Type != "" { + azType = az.Type + } + azToken := az.Credentials + if az.CredentialsFile != "" { + if az.Credentials != "" { + return nil, fmt.Errorf("both `credentials`=%q and `credentials_file`=%q are set", az.Credentials, az.CredentialsFile) + } + path := getFilepath(baseDir, az.CredentialsFile) + token, err := readPasswordFromFile(path) + if err != nil { + return nil, fmt.Errorf("cannot read credentials from `credentials_file`=%q: %w", az.CredentialsFile, err) + } + azToken = token + } + authorization = azType + " " + azToken + } if basicAuth != nil { + if authorization != "" { + return nil, fmt.Errorf("cannot use both `authorization` and `basic_auth`") + } if basicAuth.Username == "" { return nil, fmt.Errorf("missing `username` in `basic_auth` section") } @@ -106,6 +165,9 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo authorization = "Basic " + token64 } if bearerTokenFile != "" { + if authorization != "" { + return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth` and `bearer_token_file`") + } if bearerToken != "" { return nil, fmt.Errorf("both 
`bearer_token`=%q and `bearer_token_file`=%q are set", bearerToken, bearerTokenFile) } @@ -114,11 +176,11 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo if err != nil { return nil, fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %w", bearerTokenFile, err) } - bearerToken = token + authorization = "Bearer " + token } if bearerToken != "" { if authorization != "" { - return nil, fmt.Errorf("cannot use both `basic_auth` and `bearer_token`") + return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth` and `bearer_token`") } authorization = "Bearer " + bearerToken } diff --git a/lib/promscrape/client.go b/lib/promscrape/client.go index 0f2ddd202..e023a95d3 100644 --- a/lib/promscrape/client.go +++ b/lib/promscrape/client.go @@ -15,6 +15,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy" "github.com/VictoriaMetrics/fasthttp" "github.com/VictoriaMetrics/metrics" ) @@ -42,12 +43,15 @@ type client struct { // It may be useful for scraping targets with millions of metrics per target. sc *http.Client - scrapeURL string - host string - requestURI string - authHeader string - disableCompression bool - disableKeepAlive bool + scrapeURL string + scrapeTimeoutSecondsStr string + host string + requestURI string + authHeader string + proxyAuthHeader string + denyRedirects bool + disableCompression bool + disableKeepAlive bool } func newClient(sw *ScrapeWork) *client { @@ -60,6 +64,22 @@ func newClient(sw *ScrapeWork) *client { if isTLS { tlsCfg = sw.AuthConfig.NewTLSConfig() } + proxyAuthHeader := "" + proxyURL := sw.ProxyURL + if !isTLS && proxyURL.IsHTTPOrHTTPS() { + // Send full sw.ScrapeURL in requests to a proxy host for non-TLS scrape targets + // like net/http package from Go does. 
+ // See https://en.wikipedia.org/wiki/Proxy_server#Web_proxy_servers + pu := proxyURL.URL() + host = pu.Host + requestURI = sw.ScrapeURL + isTLS = pu.Scheme == "https" + if isTLS { + tlsCfg = sw.ProxyAuthConfig.NewTLSConfig() + } + proxyAuthHeader = proxyURL.GetAuthHeader(sw.ProxyAuthConfig) + proxyURL = proxy.URL{} + } if !strings.Contains(host, ":") { if !isTLS { host += ":80" @@ -67,7 +87,7 @@ func newClient(sw *ScrapeWork) *client { host += ":443" } } - dialFunc, err := newStatDialFunc(sw.ProxyURL, sw.ProxyAuthConfig) + dialFunc, err := newStatDialFunc(proxyURL, sw.ProxyAuthConfig) if err != nil { logger.Fatalf("cannot create dial func: %s", err) } @@ -85,14 +105,14 @@ func newClient(sw *ScrapeWork) *client { } var sc *http.Client if *streamParse || sw.StreamParse { - var proxy func(*http.Request) (*url.URL, error) + var proxyURLFunc func(*http.Request) (*url.URL, error) if proxyURL := sw.ProxyURL.URL(); proxyURL != nil { - proxy = http.ProxyURL(proxyURL) + proxyURLFunc = http.ProxyURL(proxyURL) } sc = &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsCfg, - Proxy: proxy, + Proxy: proxyURLFunc, TLSHandshakeTimeout: 10 * time.Second, IdleConnTimeout: 2 * sw.ScrapeInterval, DisableCompression: *disableCompression || sw.DisableCompression, @@ -101,16 +121,24 @@ func newClient(sw *ScrapeWork) *client { }, Timeout: sw.ScrapeTimeout, } + if sw.DenyRedirects { + sc.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + } } return &client{ - hc: hc, - sc: sc, - scrapeURL: sw.ScrapeURL, - host: host, - requestURI: requestURI, - authHeader: sw.AuthConfig.Authorization, - disableCompression: sw.DisableCompression, - disableKeepAlive: sw.DisableKeepAlive, + hc: hc, + sc: sc, + scrapeURL: sw.ScrapeURL, + scrapeTimeoutSecondsStr: fmt.Sprintf("%.3f", sw.ScrapeTimeout.Seconds()), + host: host, + requestURI: requestURI, + authHeader: sw.AuthConfig.Authorization, + proxyAuthHeader: proxyAuthHeader, + 
denyRedirects: sw.DenyRedirects, + disableCompression: sw.DisableCompression, + disableKeepAlive: sw.DisableKeepAlive, } } @@ -128,9 +156,15 @@ func (c *client) GetStreamReader() (*streamReader, error) { // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/608 for details. // Do not bloat the `Accept` header with OpenMetrics shit, since it looks like dead standard now. req.Header.Set("Accept", "text/plain;version=0.0.4;q=1,*/*;q=0.1") + // Set X-Prometheus-Scrape-Timeout-Seconds like Prometheus does, since it is used by some exporters such as PushProx. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179#issuecomment-813117162 + req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", c.scrapeTimeoutSecondsStr) if c.authHeader != "" { req.Header.Set("Authorization", c.authHeader) } + if c.proxyAuthHeader != "" { + req.Header.Set("Proxy-Authorization", c.proxyAuthHeader) + } resp, err := c.sc.Do(req) if err != nil { cancel() @@ -155,22 +189,28 @@ func (c *client) ReadData(dst []byte) ([]byte, error) { deadline := time.Now().Add(c.hc.ReadTimeout) req := fasthttp.AcquireRequest() req.SetRequestURI(c.requestURI) - req.SetHost(c.host) + req.Header.SetHost(c.host) // The following `Accept` header has been copied from Prometheus sources. // See https://github.com/prometheus/prometheus/blob/f9d21f10ecd2a343a381044f131ea4e46381ce09/scrape/scrape.go#L532 . // This is needed as a workaround for scraping stupid Java-based servers such as Spring Boot. // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/608 for details. // Do not bloat the `Accept` header with OpenMetrics shit, since it looks like dead standard now. req.Header.Set("Accept", "text/plain;version=0.0.4;q=1,*/*;q=0.1") + // Set X-Prometheus-Scrape-Timeout-Seconds like Prometheus does, since it is used by some exporters such as PushProx. 
+ // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179#issuecomment-813117162 + req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", c.scrapeTimeoutSecondsStr) + if c.authHeader != "" { + req.Header.Set("Authorization", c.authHeader) + } + if c.proxyAuthHeader != "" { + req.Header.Set("Proxy-Authorization", c.proxyAuthHeader) + } if !*disableCompression && !c.disableCompression { req.Header.Set("Accept-Encoding", "gzip") } if *disableKeepAlive || c.disableKeepAlive { req.SetConnectionClose() } - if c.authHeader != "" { - req.Header.Set("Authorization", c.authHeader) - } resp := fasthttp.AcquireResponse() swapResponseBodies := len(dst) == 0 if swapResponseBodies { @@ -181,13 +221,17 @@ func (c *client) ReadData(dst []byte) ([]byte, error) { err := doRequestWithPossibleRetry(c.hc, req, resp, deadline) statusCode := resp.StatusCode() if err == nil && (statusCode == fasthttp.StatusMovedPermanently || statusCode == fasthttp.StatusFound) { - // Allow a single redirect. - // It is expected that the redirect is made on the same host. - // Otherwise it won't work. - if location := resp.Header.Peek("Location"); len(location) > 0 { - req.URI().UpdateBytes(location) - err = c.hc.DoDeadline(req, resp, deadline) - statusCode = resp.StatusCode() + if c.denyRedirects { + err = fmt.Errorf("cannot follow redirects if `follow_redirects: false` is set") + } else { + // Allow a single redirect. + // It is expected that the redirect is made on the same host. + // Otherwise it won't work. 
+ if location := resp.Header.Peek("Location"); len(location) > 0 { + req.URI().UpdateBytes(location) + err = c.hc.DoDeadline(req, resp, deadline) + statusCode = resp.StatusCode() + } } } if swapResponseBodies { diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go index a65f651c3..059291277 100644 --- a/lib/promscrape/config.go +++ b/lib/promscrape/config.go @@ -60,6 +60,15 @@ type Config struct { baseDir string } +func (cfg *Config) mustStart() { + startTime := time.Now() + logger.Infof("starting service discovery routines...") + for i := range cfg.ScrapeConfigs { + cfg.ScrapeConfigs[i].mustStart(cfg.baseDir) + } + logger.Infof("started service discovery routines in %.3f seconds", time.Since(startTime).Seconds()) +} + func (cfg *Config) mustStop() { startTime := time.Now() logger.Infof("stopping service discovery routines...") @@ -88,42 +97,53 @@ type ScrapeConfig struct { MetricsPath string `yaml:"metrics_path,omitempty"` HonorLabels bool `yaml:"honor_labels,omitempty"` HonorTimestamps bool `yaml:"honor_timestamps,omitempty"` + FollowRedirects *bool `yaml:"follow_redirects"` // omitempty isn't set, since the default value for this flag is true. 
Scheme string `yaml:"scheme,omitempty"` Params map[string][]string `yaml:"params,omitempty"` - BasicAuth *promauth.BasicAuthConfig `yaml:"basic_auth,omitempty"` - BearerToken string `yaml:"bearer_token,omitempty"` - BearerTokenFile string `yaml:"bearer_token_file,omitempty"` + HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"` ProxyURL proxy.URL `yaml:"proxy_url,omitempty"` - TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` - StaticConfigs []StaticConfig `yaml:"static_configs,omitempty"` - FileSDConfigs []FileSDConfig `yaml:"file_sd_configs,omitempty"` - KubernetesSDConfigs []kubernetes.SDConfig `yaml:"kubernetes_sd_configs,omitempty"` - OpenStackSDConfigs []openstack.SDConfig `yaml:"openstack_sd_configs,omitempty"` - ConsulSDConfigs []consul.SDConfig `yaml:"consul_sd_configs,omitempty"` - EurekaSDConfigs []eureka.SDConfig `yaml:"eureka_sd_configs,omitempty"` - DockerSwarmSDConfigs []dockerswarm.SDConfig `yaml:"dockerswarm_sd_configs,omitempty"` - DNSSDConfigs []dns.SDConfig `yaml:"dns_sd_configs,omitempty"` - EC2SDConfigs []ec2.SDConfig `yaml:"ec2_sd_configs,omitempty"` - GCESDConfigs []gce.SDConfig `yaml:"gce_sd_configs,omitempty"` RelabelConfigs []promrelabel.RelabelConfig `yaml:"relabel_configs,omitempty"` MetricRelabelConfigs []promrelabel.RelabelConfig `yaml:"metric_relabel_configs,omitempty"` SampleLimit int `yaml:"sample_limit,omitempty"` + StaticConfigs []StaticConfig `yaml:"static_configs,omitempty"` + FileSDConfigs []FileSDConfig `yaml:"file_sd_configs,omitempty"` + KubernetesSDConfigs []kubernetes.SDConfig `yaml:"kubernetes_sd_configs,omitempty"` + OpenStackSDConfigs []openstack.SDConfig `yaml:"openstack_sd_configs,omitempty"` + ConsulSDConfigs []consul.SDConfig `yaml:"consul_sd_configs,omitempty"` + EurekaSDConfigs []eureka.SDConfig `yaml:"eureka_sd_configs,omitempty"` + DockerSwarmSDConfigs []dockerswarm.SDConfig `yaml:"dockerswarm_sd_configs,omitempty"` + DNSSDConfigs []dns.SDConfig `yaml:"dns_sd_configs,omitempty"` + EC2SDConfigs 
[]ec2.SDConfig `yaml:"ec2_sd_configs,omitempty"` + GCESDConfigs []gce.SDConfig `yaml:"gce_sd_configs,omitempty"` + // These options are supported only by lib/promscrape. - DisableCompression bool `yaml:"disable_compression,omitempty"` - DisableKeepAlive bool `yaml:"disable_keepalive,omitempty"` - StreamParse bool `yaml:"stream_parse,omitempty"` - ScrapeAlignInterval time.Duration `yaml:"scrape_align_interval,omitempty"` - ScrapeOffset time.Duration `yaml:"scrape_offset,omitempty"` - ProxyTLSConfig *promauth.TLSConfig `yaml:"proxy_tls_config,omitempty"` - ProxyBasicAuth *promauth.BasicAuthConfig `yaml:"proxy_basic_auth,omitempty"` - ProxyBearerToken string `yaml:"proxy_bearer_token,omitempty"` - ProxyBearerTokenFile string `yaml:"proxy_bearer_token_file,omitempty"` + DisableCompression bool `yaml:"disable_compression,omitempty"` + DisableKeepAlive bool `yaml:"disable_keepalive,omitempty"` + StreamParse bool `yaml:"stream_parse,omitempty"` + ScrapeAlignInterval time.Duration `yaml:"scrape_align_interval,omitempty"` + ScrapeOffset time.Duration `yaml:"scrape_offset,omitempty"` + ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"` // This is set in loadConfig swc *scrapeWorkConfig } +func (sc *ScrapeConfig) mustStart(baseDir string) { + for i := range sc.KubernetesSDConfigs { + swosFunc := func(metaLabels map[string]string) interface{} { + target := metaLabels["__address__"] + sw, err := sc.swc.getScrapeWork(target, nil, metaLabels) + if err != nil { + logger.Errorf("cannot create kubernetes_sd_config target %q for job_name %q: %s", target, sc.swc.jobName, err) + return nil + } + return sw + } + sc.KubernetesSDConfigs[i].MustStart(baseDir, swosFunc) + } +} + func (sc *ScrapeConfig) mustStop() { for i := range sc.KubernetesSDConfigs { sc.KubernetesSDConfigs[i].MustStop() @@ -247,15 +267,7 @@ func (cfg *Config) getKubernetesSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork { ok := true for j := range sc.KubernetesSDConfigs { sdc := &sc.KubernetesSDConfigs[j] - 
swos, err := sdc.GetScrapeWorkObjects(cfg.baseDir, func(metaLabels map[string]string) interface{} { - target := metaLabels["__address__"] - sw, err := sc.swc.getScrapeWork(target, nil, metaLabels) - if err != nil { - logger.Errorf("cannot create kubernetes_sd_config target %q for job_name %q: %s", target, sc.swc.jobName, err) - return nil - } - return sw - }) + swos, err := sdc.GetScrapeWorkObjects() if err != nil { logger.Errorf("skipping kubernetes_sd_config targets for job_name %q because of error: %s", sc.swc.jobName, err) ok = false @@ -531,6 +543,10 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf } honorLabels := sc.HonorLabels honorTimestamps := sc.HonorTimestamps + denyRedirects := false + if sc.FollowRedirects != nil { + denyRedirects = !*sc.FollowRedirects + } metricsPath := sc.MetricsPath if metricsPath == "" { metricsPath = "/metrics" @@ -543,11 +559,11 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf return nil, fmt.Errorf("unexpected `scheme` for `job_name` %q: %q; supported values: http or https", jobName, scheme) } params := sc.Params - ac, err := promauth.NewConfig(baseDir, sc.BasicAuth, sc.BearerToken, sc.BearerTokenFile, sc.TLSConfig) + ac, err := sc.HTTPClientConfig.NewConfig(baseDir) if err != nil { return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %w", jobName, err) } - proxyAC, err := promauth.NewConfig(baseDir, sc.ProxyBasicAuth, sc.ProxyBearerToken, sc.ProxyBearerTokenFile, sc.ProxyTLSConfig) + proxyAC, err := sc.ProxyClientConfig.NewConfig(baseDir) if err != nil { return nil, fmt.Errorf("cannot parse proxy auth config for `job_name` %q: %w", jobName, err) } @@ -571,6 +587,7 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf authConfig: ac, honorLabels: honorLabels, honorTimestamps: honorTimestamps, + denyRedirects: denyRedirects, externalLabels: globalCfg.ExternalLabels, relabelConfigs: relabelConfigs, 
metricRelabelConfigs: metricRelabelConfigs, @@ -596,6 +613,7 @@ type scrapeWorkConfig struct { authConfig *promauth.Config honorLabels bool honorTimestamps bool + denyRedirects bool externalLabels map[string]string relabelConfigs *promrelabel.ParsedConfigs metricRelabelConfigs *promrelabel.ParsedConfigs @@ -777,12 +795,15 @@ func appendSortedKeyValuePairs(dst []byte, m map[string]string) []byte { var scrapeWorkKeyBufPool bytesutil.ByteBufferPool func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabels map[string]string) (*ScrapeWork, error) { - // Verify whether the scrape work must be skipped. - bb := scrapeWorkKeyBufPool.Get() - defer scrapeWorkKeyBufPool.Put(bb) - bb.B = appendScrapeWorkKey(bb.B[:0], target, extraLabels, metaLabels) - if needSkipScrapeWork(bytesutil.ToUnsafeString(bb.B), *clusterMembersCount, *clusterReplicationFactor, *clusterMemberNum) { - return nil, nil + // Verify whether the scrape work must be skipped because of `-promscrape.cluster.*` configs. 
+ if *clusterMembersCount > 1 { + bb := scrapeWorkKeyBufPool.Get() + bb.B = appendScrapeWorkKey(bb.B[:0], target, extraLabels, metaLabels) + needSkip := needSkipScrapeWork(bytesutil.ToUnsafeString(bb.B), *clusterMembersCount, *clusterReplicationFactor, *clusterMemberNum) + scrapeWorkKeyBufPool.Put(bb) + if needSkip { + return nil, nil + } } labels := mergeLabels(swc.jobName, swc.scheme, target, swc.metricsPath, extraLabels, swc.externalLabels, metaLabels, swc.params) @@ -856,6 +877,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel ScrapeTimeout: swc.scrapeTimeout, HonorLabels: swc.honorLabels, HonorTimestamps: swc.honorTimestamps, + DenyRedirects: swc.denyRedirects, OriginalLabels: originalLabels, Labels: labels, ProxyURL: swc.proxyURL, diff --git a/lib/promscrape/config_test.go b/lib/promscrape/config_test.go index ac7ce98fb..8f3cf6a97 100644 --- a/lib/promscrape/config_test.go +++ b/lib/promscrape/config_test.go @@ -328,6 +328,29 @@ scrape_configs: - targets: ["a"] `) + // Both `authorization` and `basic_auth` are set + f(` +scrape_configs: +- job_name: x + authorization: + credentials: foobar + basic_auth: + username: foobar + static_configs: + - targets: ["a"] +`) + + // Both `authorization` and `bearer_token` are set + f(` +scrape_configs: +- job_name: x + authorization: + credentials: foobar + bearer_token: foo + static_configs: + - targets: ["a"] +`) + // Invalid `bearer_token_file` f(` scrape_configs: @@ -751,6 +774,7 @@ scrape_configs: scheme: https honor_labels: true honor_timestamps: true + follow_redirects: false params: p: ["x&y", "="] xaa: @@ -772,6 +796,12 @@ scrape_configs: insecure_skip_verify: true static_configs: - targets: [1.2.3.4] +- job_name: asdf + authorization: + type: xyz + credentials: abc + static_configs: + - targets: [foobar] `, []*ScrapeWork{ { ScrapeURL: "https://foo.bar:443/foo/bar?p=x%26y&p=%3D", @@ -779,6 +809,7 @@ scrape_configs: ScrapeTimeout: 12 * time.Second, HonorLabels: true, 
HonorTimestamps: true, + DenyRedirects: true, Labels: []prompbmarshal.Label{ { Name: "__address__", @@ -824,6 +855,7 @@ scrape_configs: ScrapeTimeout: 12 * time.Second, HonorLabels: true, HonorTimestamps: true, + DenyRedirects: true, Labels: []prompbmarshal.Label{ { Name: "__address__", @@ -864,11 +896,9 @@ scrape_configs: jobNameOriginal: "foo", }, { - ScrapeURL: "http://1.2.3.4:80/metrics", - ScrapeInterval: 8 * time.Second, - ScrapeTimeout: 34 * time.Second, - HonorLabels: false, - HonorTimestamps: false, + ScrapeURL: "http://1.2.3.4:80/metrics", + ScrapeInterval: 8 * time.Second, + ScrapeTimeout: 34 * time.Second, Labels: []prompbmarshal.Label{ { Name: "__address__", @@ -899,6 +929,38 @@ scrape_configs: ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "qwer", }, + { + ScrapeURL: "http://foobar:80/metrics", + ScrapeInterval: 8 * time.Second, + ScrapeTimeout: 34 * time.Second, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foobar", + }, + { + Name: "__metrics_path__", + Value: "/metrics", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "instance", + Value: "foobar:80", + }, + { + Name: "job", + Value: "asdf", + }, + }, + AuthConfig: &promauth.Config{ + Authorization: "xyz abc", + }, + ProxyAuthConfig: &promauth.Config{}, + jobNameOriginal: "asdf", + }, }) f(` scrape_configs: diff --git a/lib/promscrape/discovery/consul/api.go b/lib/promscrape/discovery/consul/api.go index 583af0e75..9ffac6945 100644 --- a/lib/promscrape/discovery/consul/api.go +++ b/lib/promscrape/discovery/consul/api.go @@ -50,7 +50,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { } token = "" } - ac, err := promauth.NewConfig(baseDir, ba, token, "", sdc.TLSConfig) + ac, err := promauth.NewConfig(baseDir, nil, ba, token, "", sdc.TLSConfig) if err != nil { return nil, fmt.Errorf("cannot parse auth config: %w", err) } @@ -65,7 +65,11 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { } apiServer = scheme 
+ "://" + apiServer } - client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL) + proxyAC, err := sdc.ProxyClientConfig.NewConfig(baseDir) + if err != nil { + return nil, fmt.Errorf("cannot parse proxy auth config: %w", err) + } + client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC) if err != nil { return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err) } diff --git a/lib/promscrape/discovery/consul/consul.go b/lib/promscrape/discovery/consul/consul.go index bc949641a..5d4e84656 100644 --- a/lib/promscrape/discovery/consul/consul.go +++ b/lib/promscrape/discovery/consul/consul.go @@ -11,19 +11,20 @@ import ( // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config type SDConfig struct { - Server string `yaml:"server,omitempty"` - Token *string `yaml:"token"` - Datacenter string `yaml:"datacenter"` - Scheme string `yaml:"scheme,omitempty"` - Username string `yaml:"username"` - Password string `yaml:"password"` - ProxyURL proxy.URL `yaml:"proxy_url,omitempty"` - TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` - Services []string `yaml:"services,omitempty"` - Tags []string `yaml:"tags,omitempty"` - NodeMeta map[string]string `yaml:"node_meta,omitempty"` - TagSeparator *string `yaml:"tag_separator,omitempty"` - AllowStale bool `yaml:"allow_stale,omitempty"` + Server string `yaml:"server,omitempty"` + Token *string `yaml:"token"` + Datacenter string `yaml:"datacenter"` + Scheme string `yaml:"scheme,omitempty"` + Username string `yaml:"username"` + Password string `yaml:"password"` + ProxyURL proxy.URL `yaml:"proxy_url,omitempty"` + ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"` + TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` + Services []string `yaml:"services,omitempty"` + Tags []string `yaml:"tags,omitempty"` + NodeMeta map[string]string `yaml:"node_meta,omitempty"` + TagSeparator *string `yaml:"tag_separator,omitempty"` + 
AllowStale bool `yaml:"allow_stale,omitempty"` // RefreshInterval time.Duration `yaml:"refresh_interval"` // refresh_interval is obtained from `-promscrape.consulSDCheckInterval` command-line option. } diff --git a/lib/promscrape/discovery/dockerswarm/api.go b/lib/promscrape/discovery/dockerswarm/api.go index c79d12fc7..472a07736 100644 --- a/lib/promscrape/discovery/dockerswarm/api.go +++ b/lib/promscrape/discovery/dockerswarm/api.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils" ) @@ -34,12 +33,15 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { port: sdc.Port, filtersQueryArg: getFiltersQueryArg(sdc.Filters), } - - ac, err := promauth.NewConfig(baseDir, sdc.BasicAuth, sdc.BearerToken, sdc.BearerTokenFile, sdc.TLSConfig) + ac, err := sdc.HTTPClientConfig.NewConfig(baseDir) if err != nil { - return nil, err + return nil, fmt.Errorf("cannot parse auth config: %w", err) } - client, err := discoveryutils.NewClient(sdc.Host, ac, sdc.ProxyURL) + proxyAC, err := sdc.ProxyClientConfig.NewConfig(baseDir) + if err != nil { + return nil, fmt.Errorf("cannot parse proxy auth config: %w", err) + } + client, err := discoveryutils.NewClient(sdc.Host, ac, sdc.ProxyURL, proxyAC) if err != nil { return nil, fmt.Errorf("cannot create HTTP client for %q: %w", sdc.Host, err) } diff --git a/lib/promscrape/discovery/dockerswarm/dockerswarm.go b/lib/promscrape/discovery/dockerswarm/dockerswarm.go index a3cd256ea..5d9bd73d1 100644 --- a/lib/promscrape/discovery/dockerswarm/dockerswarm.go +++ b/lib/promscrape/discovery/dockerswarm/dockerswarm.go @@ -16,12 +16,10 @@ type SDConfig struct { Port int `yaml:"port,omitempty"` Filters []Filter `yaml:"filters,omitempty"` - ProxyURL proxy.URL `yaml:"proxy_url,omitempty"` - TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` + 
HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"` + ProxyURL proxy.URL `yaml:"proxy_url,omitempty"` + ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"` // refresh_interval is obtained from `-promscrape.dockerswarmSDCheckInterval` command-line option - BasicAuth *promauth.BasicAuthConfig `yaml:"basic_auth,omitempty"` - BearerToken string `yaml:"bearer_token,omitempty"` - BearerTokenFile string `yaml:"bearer_token_file,omitempty"` } // Filter is a filter, which can be passed to SDConfig. diff --git a/lib/promscrape/discovery/ec2/api.go b/lib/promscrape/discovery/ec2/api.go index e0e6b30a3..9a13dcb9d 100644 --- a/lib/promscrape/discovery/ec2/api.go +++ b/lib/promscrape/discovery/ec2/api.go @@ -172,6 +172,11 @@ func getAPICredentials(cfg *apiConfig) (*apiCredentials, error) { return getRoleWebIdentityCredentials(cfg.stsEndpoint, cfg.roleARN, string(token)) } + if ecsMetaURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); len(ecsMetaURI) > 0 { + path := "http://169.254.170.2" + ecsMetaURI + return getECSRoleCredentialsByPath(path) + } + // we need instance credentials if dont have access keys if len(acNew.AccessKeyID) == 0 && len(acNew.SecretAccessKey) == 0 { ac, err := getInstanceRoleCredentials() @@ -200,6 +205,22 @@ func getAPICredentials(cfg *apiConfig) (*apiCredentials, error) { return acNew, nil } +// getECSRoleCredentialsByPath makes request to ecs metadata service +// and retrieves instance credentials +// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html +func getECSRoleCredentialsByPath(path string) (*apiCredentials, error) { + client := discoveryutils.GetHTTPClient() + resp, err := client.Get(path) + if err != nil { + return nil, fmt.Errorf("cannot get ECS instance role credentials: %w", err) + } + data, err := readResponseBody(resp, path) + if err != nil { + return nil, err + } + return parseMetadataSecurityCredentials(data) +} + // getInstanceRoleCredentials makes request to local ec2 instance
metadata service // and tries to retrieve credentials from assigned iam role. // diff --git a/lib/promscrape/discovery/eureka/api.go b/lib/promscrape/discovery/eureka/api.go index 255c9005d..93e5e2eeb 100644 --- a/lib/promscrape/discovery/eureka/api.go +++ b/lib/promscrape/discovery/eureka/api.go @@ -5,7 +5,6 @@ import ( "fmt" "strings" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils" ) @@ -16,19 +15,7 @@ type apiConfig struct { } func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - token := "" - if sdc.Token != nil { - token = *sdc.Token - } - var ba *promauth.BasicAuthConfig - if len(sdc.Username) > 0 { - ba = &promauth.BasicAuthConfig{ - Username: sdc.Username, - Password: sdc.Password, - } - token = "" - } - ac, err := promauth.NewConfig(baseDir, ba, token, "", sdc.TLSConfig) + ac, err := sdc.HTTPClientConfig.NewConfig(baseDir) if err != nil { return nil, fmt.Errorf("cannot parse auth config: %w", err) } @@ -37,13 +24,17 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { apiServer = "localhost:8080/eureka/v2" } if !strings.Contains(apiServer, "://") { - scheme := sdc.Scheme - if scheme == "" { - scheme = "http" + scheme := "http" + if sdc.HTTPClientConfig.TLSConfig != nil { + scheme = "https" } apiServer = scheme + "://" + apiServer } - client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL) + proxyAC, err := sdc.ProxyClientConfig.NewConfig(baseDir) + if err != nil { + return nil, fmt.Errorf("cannot parse proxy auth config: %w", err) + } + client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC) if err != nil { return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err) } diff --git a/lib/promscrape/discovery/eureka/eureka.go b/lib/promscrape/discovery/eureka/eureka.go index 2c2f19f3b..54b7c03f2 100644 --- a/lib/promscrape/discovery/eureka/eureka.go +++ 
b/lib/promscrape/discovery/eureka/eureka.go @@ -16,17 +16,12 @@ const appsAPIPath = "/apps" // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka type SDConfig struct { - Server string `yaml:"server,omitempty"` - Token *string `yaml:"token"` - Datacenter string `yaml:"datacenter"` - Scheme string `yaml:"scheme,omitempty"` - Username string `yaml:"username"` - Password string `yaml:"password"` - ProxyURL proxy.URL `yaml:"proxy_url,omitempty"` - TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` + Server string `yaml:"server,omitempty"` + HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"` + ProxyURL proxy.URL `yaml:"proxy_url,omitempty"` + ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"` // RefreshInterval time.Duration `yaml:"refresh_interval"` // refresh_interval is obtained from `-promscrape.ec2SDCheckInterval` command-line option. - Port *int `yaml:"port,omitempty"` } type applications struct { @@ -95,11 +90,7 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) { if err != nil { return nil, err } - port := 80 - if sdc.Port != nil { - port = *sdc.Port - } - return addInstanceLabels(apps, port), nil + return addInstanceLabels(apps), nil } // MustStop stops further usage for sdc.
@@ -107,11 +98,11 @@ func (sdc *SDConfig) MustStop() { configMap.Delete(sdc) } -func addInstanceLabels(apps *applications, port int) []map[string]string { +func addInstanceLabels(apps *applications) []map[string]string { var ms []map[string]string for _, app := range apps.Applications { for _, instance := range app.Instances { - instancePort := port + instancePort := 80 if instance.Port.Port != 0 { instancePort = instance.Port.Port } diff --git a/lib/promscrape/discovery/eureka/eureka_test.go b/lib/promscrape/discovery/eureka/eureka_test.go index 8b5091648..26bc0988c 100644 --- a/lib/promscrape/discovery/eureka/eureka_test.go +++ b/lib/promscrape/discovery/eureka/eureka_test.go @@ -11,7 +11,6 @@ import ( func Test_addInstanceLabels(t *testing.T) { type args struct { applications *applications - port int } tests := []struct { name string @@ -21,7 +20,6 @@ func Test_addInstanceLabels(t *testing.T) { { name: "1 application", args: args{ - port: 9100, applications: &applications{ Applications: []Application{ { @@ -43,6 +41,9 @@ func Test_addInstanceLabels(t *testing.T) { XMLName: struct{ Space, Local string }{Local: "key-1"}, }, }}, + Port: Port{ + Port: 9100, + }, }, }, }, @@ -64,6 +65,8 @@ func Test_addInstanceLabels(t *testing.T) { "__meta_eureka_app_instance_statuspage_url": "some-status-url", "__meta_eureka_app_instance_id": "some-id", "__meta_eureka_app_instance_metadata_key_1": "value-1", + "__meta_eureka_app_instance_port": "9100", + "__meta_eureka_app_instance_port_enabled": "false", "__meta_eureka_app_instance_status": "Ok", }), }, @@ -71,7 +74,7 @@ func Test_addInstanceLabels(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := addInstanceLabels(tt.args.applications, tt.args.port) + got := addInstanceLabels(tt.args.applications) var sortedLabelss [][]prompbmarshal.Label for _, labels := range got { sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels)) diff --git 
a/lib/promscrape/discovery/kubernetes/api.go b/lib/promscrape/discovery/kubernetes/api.go index 5596c7f5b..fa39666a8 100644 --- a/lib/promscrape/discovery/kubernetes/api.go +++ b/lib/promscrape/discovery/kubernetes/api.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils" ) // apiConfig contains config for API server @@ -15,27 +14,13 @@ type apiConfig struct { aw *apiWatcher } -func (ac *apiConfig) mustStop() { - ac.aw.mustStop() -} - -var configMap = discoveryutils.NewConfigMap() - -func getAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFunc) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir, swcFunc) }) - if err != nil { - return nil, err - } - return v.(*apiConfig), nil -} - func newAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFunc) (*apiConfig, error) { switch sdc.Role { case "node", "pod", "service", "endpoints", "endpointslices", "ingress": default: return nil, fmt.Errorf("unexpected `role`: %q; must be one of `node`, `pod`, `service`, `endpoints`, `endpointslices` or `ingress`", sdc.Role) } - ac, err := promauth.NewConfig(baseDir, sdc.BasicAuth, sdc.BearerToken, sdc.BearerTokenFile, sdc.TLSConfig) + ac, err := sdc.HTTPClientConfig.NewConfig(baseDir) if err != nil { return nil, fmt.Errorf("cannot parse auth config: %w", err) } @@ -58,7 +43,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFu tlsConfig := promauth.TLSConfig{ CAFile: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", } - acNew, err := promauth.NewConfig(".", nil, "", "/var/run/secrets/kubernetes.io/serviceaccount/token", &tlsConfig) + acNew, err := promauth.NewConfig(".", nil, nil, "", "/var/run/secrets/kubernetes.io/serviceaccount/token", &tlsConfig) if err != nil { return nil, fmt.Errorf("cannot initialize service account 
auth: %w; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err) } @@ -66,7 +51,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFu } if !strings.Contains(apiServer, "://") { proto := "http" - if sdc.TLSConfig != nil { + if sdc.HTTPClientConfig.TLSConfig != nil { proto = "https" } apiServer = proto + "://" + apiServer diff --git a/lib/promscrape/discovery/kubernetes/api_watcher.go b/lib/promscrape/discovery/kubernetes/api_watcher.go index 2e1bfe0cc..9e1c80830 100644 --- a/lib/promscrape/discovery/kubernetes/api_watcher.go +++ b/lib/promscrape/discovery/kubernetes/api_watcher.go @@ -51,9 +51,9 @@ type apiWatcher struct { gw *groupWatcher - // swos contains a map of ScrapeWork objects for the given apiWatcher - swosByKey map[string][]interface{} - swosByKeyLock sync.Mutex + // swos contains per-namespace maps of ScrapeWork objects for the given apiWatcher + swosByNamespace map[string]map[string][]interface{} + swosByNamespaceLock sync.Mutex swosCount *metrics.Counter } @@ -64,50 +64,64 @@ func newAPIWatcher(apiServer string, ac *promauth.Config, sdc *SDConfig, swcFunc proxyURL := sdc.ProxyURL.URL() gw := getGroupWatcher(apiServer, ac, namespaces, selectors, proxyURL) return &apiWatcher{ - role: sdc.Role, - swcFunc: swcFunc, - gw: gw, - swosByKey: make(map[string][]interface{}), - swosCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_scrape_works{role=%q}`, sdc.Role)), + role: sdc.Role, + swcFunc: swcFunc, + gw: gw, + swosByNamespace: make(map[string]map[string][]interface{}), + swosCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_scrape_works{role=%q}`, sdc.Role)), } } +func (aw *apiWatcher) mustStart() { + aw.gw.startWatchersForRole(aw.role, aw) +} + func (aw *apiWatcher) mustStop() { aw.gw.unsubscribeAPIWatcher(aw) - aw.reloadScrapeWorks(make(map[string][]interface{})) + aw.swosByNamespaceLock.Lock() + aw.swosByNamespace = 
make(map[string]map[string][]interface{}) + aw.swosByNamespaceLock.Unlock() } -func (aw *apiWatcher) reloadScrapeWorks(swosByKey map[string][]interface{}) { - aw.swosByKeyLock.Lock() - aw.swosCount.Add(len(swosByKey) - len(aw.swosByKey)) - aw.swosByKey = swosByKey - aw.swosByKeyLock.Unlock() +func (aw *apiWatcher) reloadScrapeWorks(namespace string, swosByKey map[string][]interface{}) { + aw.swosByNamespaceLock.Lock() + aw.swosCount.Add(len(swosByKey) - len(aw.swosByNamespace[namespace])) + aw.swosByNamespace[namespace] = swosByKey + aw.swosByNamespaceLock.Unlock() } -func (aw *apiWatcher) setScrapeWorks(key string, labels []map[string]string) { - swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels) - aw.swosByKeyLock.Lock() - if len(swos) > 0 { - aw.swosCount.Add(len(swos) - len(aw.swosByKey[key])) - aw.swosByKey[key] = swos - } else { - aw.swosCount.Add(-len(aw.swosByKey[key])) - delete(aw.swosByKey, key) +func (aw *apiWatcher) setScrapeWorks(namespace, key string, labels []map[string]string) { + swos := aw.getScrapeWorkObjectsForLabels(labels) + aw.swosByNamespaceLock.Lock() + swosByKey := aw.swosByNamespace[namespace] + if swosByKey == nil { + swosByKey = make(map[string][]interface{}) + aw.swosByNamespace[namespace] = swosByKey } - aw.swosByKeyLock.Unlock() + if len(swos) > 0 { + aw.swosCount.Add(len(swos) - len(swosByKey[key])) + swosByKey[key] = swos + } else { + aw.swosCount.Add(-len(swosByKey[key])) + delete(swosByKey, key) + } + aw.swosByNamespaceLock.Unlock() } -func (aw *apiWatcher) removeScrapeWorks(key string) { - aw.swosByKeyLock.Lock() - aw.swosCount.Add(-len(aw.swosByKey[key])) - delete(aw.swosByKey, key) - aw.swosByKeyLock.Unlock() +func (aw *apiWatcher) removeScrapeWorks(namespace, key string) { + aw.swosByNamespaceLock.Lock() + swosByKey := aw.swosByNamespace[namespace] + if len(swosByKey) > 0 { + aw.swosCount.Add(-len(swosByKey[key])) + delete(swosByKey, key) + } + aw.swosByNamespaceLock.Unlock() } -func 
getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss []map[string]string) []interface{} { +func (aw *apiWatcher) getScrapeWorkObjectsForLabels(labelss []map[string]string) []interface{} { swos := make([]interface{}, 0, len(labelss)) for _, labels := range labelss { - swo := swcFunc(labels) + swo := aw.swcFunc(labels) // The reflect check is needed because of https://mangatmodi.medium.com/go-check-nil-interface-the-right-way-d142776edef1 if swo != nil && !reflect.ValueOf(swo).IsNil() { swos = append(swos, swo) @@ -118,17 +132,22 @@ func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss [] // getScrapeWorkObjects returns all the ScrapeWork objects for the given aw. func (aw *apiWatcher) getScrapeWorkObjects() []interface{} { - aw.gw.startWatchersForRole(aw.role, aw) - aw.swosByKeyLock.Lock() - defer aw.swosByKeyLock.Unlock() + aw.gw.registerPendingAPIWatchers() + + aw.swosByNamespaceLock.Lock() + defer aw.swosByNamespaceLock.Unlock() size := 0 - for _, swosLocal := range aw.swosByKey { - size += len(swosLocal) + for _, swosByKey := range aw.swosByNamespace { + for _, swosLocal := range swosByKey { + size += len(swosLocal) + } } swos := make([]interface{}, 0, size) - for _, swosLocal := range aw.swosByKey { - swos = append(swos, swosLocal...) + for _, swosByKey := range aw.swosByNamespace { + for _, swosLocal := range swosByKey { + swos = append(swos, swosLocal...) 
+ } } return swos } @@ -171,8 +190,12 @@ func newGroupWatcher(apiServer string, ac *promauth.Config, namespaces []string, } func getGroupWatcher(apiServer string, ac *promauth.Config, namespaces []string, selectors []Selector, proxyURL *url.URL) *groupWatcher { - key := fmt.Sprintf("apiServer=%s, namespaces=%s, selectors=%s, proxyURL=%v, authConfig=%s", - apiServer, namespaces, selectorsKey(selectors), proxyURL, ac.String()) + proxyURLStr := "" + if proxyURL != nil { + proxyURLStr = proxyURL.String() + } + key := fmt.Sprintf("apiServer=%s, namespaces=%s, selectors=%s, proxyURL=%s, authConfig=%s", + apiServer, namespaces, selectorsKey(selectors), proxyURLStr, ac.String()) groupWatchersLock.Lock() gw := groupWatchers[key] if gw == nil { @@ -209,13 +232,63 @@ func (gw *groupWatcher) getObjectByRole(role, namespace, name string) object { // this is needed for testing return nil } + o := gw.getCachedObjectByRole(role, namespace, name) + if o != nil { + // Fast path: the object has been found in the cache. + return o + } + + // The object wasn't found in the cache. Try querying it directly from API server. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1182#issuecomment-813353359 for details. 
+ metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_direct_object_loads_total{role=%q}`, role)).Inc() + objectType := getObjectTypeByRole(role) + path := getAPIPath(objectType, namespace, "") + path += "/" + name + requestURL := gw.apiServer + path + resp, err := gw.doRequest(requestURL) + if err != nil { + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_direct_object_load_errors_total{role=%q}`, role)).Inc() + logger.Errorf("cannot obtain data for object %s (namespace=%q, name=%q): %s", role, namespace, name, err) + return nil + } + data, err := ioutil.ReadAll(resp.Body) + _ = resp.Body.Close() + if err != nil { + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_direct_object_load_errors_total{role=%q}`, role)).Inc() + logger.Errorf("cannot read response from %q: %s", requestURL, err) + return nil + } + if resp.StatusCode != http.StatusOK { + if resp.StatusCode == http.StatusNotFound { + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_direct_object_load_misses_total{role=%q}`, role)).Inc() + return nil + } + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_direct_object_load_errors_total{role=%q}`, role)).Inc() + logger.Errorf("unexpected status code when reading response from %q; got %d; want %d; response body: %q", requestURL, resp.StatusCode, http.StatusOK, data) + return nil + } + parseObject, _ := getObjectParsersForRole(role) + o, err = parseObject(data) + if err != nil { + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_direct_object_load_errors_total{role=%q}`, role)).Inc() + logger.Errorf("cannot parse object obtained from %q: %s; response body: %q", requestURL, err, data) + return nil + } + // There is no need in storing the object in urlWatcher cache, since it should be eventually populated there by urlWatcher itself. 
+ return o +} + +func (gw *groupWatcher) getCachedObjectByRole(role, namespace, name string) object { key := namespace + "/" + name gw.startWatchersForRole(role, nil) - gw.mu.Lock() - defer gw.mu.Unlock() - - for _, uw := range gw.m { + uws := gw.getURLWatchers() + for _, uw := range uws { if uw.role != role { + // Role mismatch + continue + } + if uw.namespace != "" && uw.namespace != namespace { + // Namespace mismatch continue } uw.mu.Lock() @@ -229,39 +302,24 @@ func (gw *groupWatcher) getObjectByRole(role, namespace, name string) object { } func (gw *groupWatcher) startWatchersForRole(role string, aw *apiWatcher) { - paths := getAPIPaths(role, gw.namespaces, gw.selectors) - for _, path := range paths { + paths, namespaces := getAPIPathsWithNamespaces(role, gw.namespaces, gw.selectors) + for i, path := range paths { apiURL := gw.apiServer + path gw.mu.Lock() uw := gw.m[apiURL] - if uw == nil { - uw = newURLWatcher(role, apiURL, gw) + needStart := uw == nil + if needStart { + uw = newURLWatcher(role, namespaces[i], apiURL, gw) gw.m[apiURL] = uw } gw.mu.Unlock() - uw.subscribeAPIWatcher(aw) - } -} - -func (gw *groupWatcher) reloadScrapeWorksForAPIWatchers(aws []*apiWatcher, objectsByKey map[string]object) { - if len(aws) == 0 { - return - } - swosByKey := make([]map[string][]interface{}, len(aws)) - for i := range aws { - swosByKey[i] = make(map[string][]interface{}) - } - for key, o := range objectsByKey { - labels := o.getTargetLabels(gw) - for i, aw := range aws { - swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels) - if len(swos) > 0 { - swosByKey[i][key] = swos - } + if needStart { + uw.reloadObjects() + go uw.watchForUpdates() + } + if aw != nil { + uw.subscribeAPIWatcher(aw) } - } - for i, aw := range aws { - aw.reloadScrapeWorks(swosByKey[i]) } } @@ -277,19 +335,36 @@ func (gw *groupWatcher) doRequest(requestURL string) (*http.Response, error) { return gw.client.Do(req) } -func (gw *groupWatcher) unsubscribeAPIWatcher(aw *apiWatcher) { +func (gw 
*groupWatcher) registerPendingAPIWatchers() { + uws := gw.getURLWatchers() + for _, uw := range uws { + uw.registerPendingAPIWatchers() + } +} + +func (gw *groupWatcher) getURLWatchers() []*urlWatcher { gw.mu.Lock() + uws := make([]*urlWatcher, 0, len(gw.m)) for _, uw := range gw.m { - uw.unsubscribeAPIWatcher(aw) + uws = append(uws, uw) } gw.mu.Unlock() + return uws +} + +func (gw *groupWatcher) unsubscribeAPIWatcher(aw *apiWatcher) { + uws := gw.getURLWatchers() + for _, uw := range uws { + uw.unsubscribeAPIWatcher(aw) + } } // urlWatcher watches for an apiURL and updates object states in objectsByKey. type urlWatcher struct { - role string - apiURL string - gw *groupWatcher + role string + namespace string + apiURL string + gw *groupWatcher parseObject parseObjectFunc parseObjectList parseObjectListFunc @@ -297,12 +372,16 @@ type urlWatcher struct { // mu protects aws, awsPending, objectsByKey and resourceVersion mu sync.Mutex + // awsPending contains pending apiWatcher objects, which are registered in a batch. + // Batch registering saves CPU time needed for registering big number of Kubernetes objects + // shared among big number of scrape jobs, since per-object labels are generated only once + // for all the scrape jobs (each scrape job is associated with a single apiWatcher). + // See reloadScrapeWorksForAPIWatchers for details. 
+ awsPending map[*apiWatcher]struct{} + // aws contains registered apiWatcher objects aws map[*apiWatcher]struct{} - // awsPending contains pending apiWatcher objects, which must be moved to aws in a batch - awsPending map[*apiWatcher]struct{} - // objectsByKey contains the latest state for objects obtained from apiURL objectsByKey map[string]object @@ -315,19 +394,20 @@ type urlWatcher struct { staleResourceVersions *metrics.Counter } -func newURLWatcher(role, apiURL string, gw *groupWatcher) *urlWatcher { +func newURLWatcher(role, namespace, apiURL string, gw *groupWatcher) *urlWatcher { parseObject, parseObjectList := getObjectParsersForRole(role) metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_url_watchers{role=%q}`, role)).Inc() uw := &urlWatcher{ - role: role, - apiURL: apiURL, - gw: gw, + role: role, + namespace: namespace, + apiURL: apiURL, + gw: gw, parseObject: parseObject, parseObjectList: parseObjectList, - aws: make(map[*apiWatcher]struct{}), awsPending: make(map[*apiWatcher]struct{}), + aws: make(map[*apiWatcher]struct{}), objectsByKey: make(map[string]object), objectsCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects{role=%q}`, role)), @@ -337,66 +417,47 @@ func newURLWatcher(role, apiURL string, gw *groupWatcher) *urlWatcher { staleResourceVersions: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_stale_resource_versions_total{role=%q}`, role)), } logger.Infof("started %s watcher for %q", uw.role, uw.apiURL) - go uw.watchForUpdates() - go uw.processPendingSubscribers() return uw } func (uw *urlWatcher) subscribeAPIWatcher(aw *apiWatcher) { - if aw == nil { - return - } uw.mu.Lock() if _, ok := uw.aws[aw]; !ok { if _, ok := uw.awsPending[aw]; !ok { uw.awsPending[aw] = struct{}{} - metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscibers{role=%q,type="pending"}`, uw.role)).Inc() + 
metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscribers{role=%q,status="pending"}`, uw.role)).Inc() } } uw.mu.Unlock() } +func (uw *urlWatcher) registerPendingAPIWatchers() { + uw.mu.Lock() + awsPending := make([]*apiWatcher, 0, len(uw.awsPending)) + for aw := range uw.awsPending { + awsPending = append(awsPending, aw) + delete(uw.awsPending, aw) + uw.aws[aw] = struct{}{} + } + uw.reloadScrapeWorksForAPIWatchers(awsPending, uw.objectsByKey) + uw.mu.Unlock() + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscribers{role=%q,status="working"}`, uw.role)).Add(len(awsPending)) + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscribers{role=%q,status="pending"}`, uw.role)).Add(-len(awsPending)) +} + func (uw *urlWatcher) unsubscribeAPIWatcher(aw *apiWatcher) { uw.mu.Lock() + if _, ok := uw.awsPending[aw]; ok { + delete(uw.awsPending, aw) + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscribers{role=%q,status="pending"}`, uw.role)).Dec() + } if _, ok := uw.aws[aw]; ok { delete(uw.aws, aw) - metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscibers{role=%q,type="permanent"}`, uw.role)).Dec() - } else if _, ok := uw.awsPending[aw]; ok { - delete(uw.awsPending, aw) - metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscibers{role=%q,type="pending"}`, uw.role)).Dec() + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscribers{role=%q,status="working"}`, uw.role)).Dec() } uw.mu.Unlock() } -func (uw *urlWatcher) processPendingSubscribers() { - t := time.NewTicker(time.Second) - for range t.C { - var awsPending []*apiWatcher - var objectsByKey map[string]object - - uw.mu.Lock() - if len(uw.awsPending) > 0 { - awsPending = getAPIWatchers(uw.awsPending) - for _, aw := range awsPending { - if _, ok := uw.aws[aw]; ok { - logger.Panicf("BUG: aw=%p already exists in uw.aws", aw) - 
} - uw.aws[aw] = struct{}{} - delete(uw.awsPending, aw) - } - objectsByKey = make(map[string]object, len(uw.objectsByKey)) - for key, o := range uw.objectsByKey { - objectsByKey[key] = o - } - } - metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscibers{role=%q,type="pending"}`, uw.role)).Add(-len(awsPending)) - metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscibers{role=%q,type="permanent"}`, uw.role)).Add(len(awsPending)) - uw.mu.Unlock() - - uw.gw.reloadScrapeWorksForAPIWatchers(awsPending, objectsByKey) - } -} - func (uw *urlWatcher) setResourceVersion(resourceVersion string) { uw.mu.Lock() uw.resourceVersion = resourceVersion @@ -457,9 +518,31 @@ func (uw *urlWatcher) reloadObjects() string { aws := getAPIWatchers(uw.aws) uw.mu.Unlock() - uw.gw.reloadScrapeWorksForAPIWatchers(aws, objectsByKey) + uw.reloadScrapeWorksForAPIWatchers(aws, objectsByKey) logger.Infof("reloaded %d objects from %q", len(objectsByKey), requestURL) - return metadata.ResourceVersion + return uw.resourceVersion +} + +func (uw *urlWatcher) reloadScrapeWorksForAPIWatchers(aws []*apiWatcher, objectsByKey map[string]object) { + if len(aws) == 0 { + return + } + swosByKey := make([]map[string][]interface{}, len(aws)) + for i := range aws { + swosByKey[i] = make(map[string][]interface{}) + } + for key, o := range objectsByKey { + labels := o.getTargetLabels(uw.gw) + for i, aw := range aws { + swos := aw.getScrapeWorkObjectsForLabels(labels) + if len(swos) > 0 { + swosByKey[i][key] = swos + } + } + } + for i, aw := range aws { + aw.reloadScrapeWorks(uw.namespace, swosByKey[i]) + } } func getAPIWatchers(awsMap map[*apiWatcher]struct{}) []*apiWatcher { @@ -557,7 +640,7 @@ func (uw *urlWatcher) readObjectUpdateStream(r io.Reader) error { uw.mu.Unlock() labels := o.getTargetLabels(uw.gw) for _, aw := range aws { - aw.setScrapeWorks(key, labels) + aw.setScrapeWorks(uw.namespace, key, labels) } case "DELETED": o, err := 
uw.parseObject(we.Object) @@ -574,7 +657,7 @@ func (uw *urlWatcher) readObjectUpdateStream(r io.Reader) error { aws := getAPIWatchers(uw.aws) uw.mu.Unlock() for _, aw := range aws { - aw.removeScrapeWorks(key) + aw.removeScrapeWorks(uw.namespace, key) } case "BOOKMARK": // See https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks @@ -630,33 +713,33 @@ func parseError(data []byte) (*Error, error) { return &em, nil } -func getAPIPaths(role string, namespaces []string, selectors []Selector) []string { - objectName := getObjectNameByRole(role) - if objectName == "nodes" || len(namespaces) == 0 { +func getAPIPathsWithNamespaces(role string, namespaces []string, selectors []Selector) ([]string, []string) { + objectType := getObjectTypeByRole(role) + if objectType == "nodes" || len(namespaces) == 0 { query := joinSelectors(role, selectors) - path := getAPIPath(objectName, "", query) - return []string{path} + path := getAPIPath(objectType, "", query) + return []string{path}, []string{""} } query := joinSelectors(role, selectors) paths := make([]string, len(namespaces)) for i, namespace := range namespaces { - paths[i] = getAPIPath(objectName, namespace, query) + paths[i] = getAPIPath(objectType, namespace, query) } - return paths + return paths, namespaces } -func getAPIPath(objectName, namespace, query string) string { - suffix := objectName +func getAPIPath(objectType, namespace, query string) string { + suffix := objectType if namespace != "" { - suffix = "namespaces/" + namespace + "/" + objectName + suffix = "namespaces/" + namespace + "/" + objectType } if len(query) > 0 { suffix += "?" 
+ query } - if objectName == "ingresses" { + if objectType == "ingresses" { return "/apis/networking.k8s.io/v1beta1/" + suffix } - if objectName == "endpointslices" { + if objectType == "endpointslices" { return "/apis/discovery.k8s.io/v1beta1/" + suffix } return "/api/v1/" + suffix @@ -685,7 +768,7 @@ func joinSelectors(role string, selectors []Selector) string { return strings.Join(args, "&") } -func getObjectNameByRole(role string) string { +func getObjectTypeByRole(role string) string { switch role { case "node": return "nodes" diff --git a/lib/promscrape/discovery/kubernetes/api_watcher_test.go b/lib/promscrape/discovery/kubernetes/api_watcher_test.go index 7d56ada08..f971512cf 100644 --- a/lib/promscrape/discovery/kubernetes/api_watcher_test.go +++ b/lib/promscrape/discovery/kubernetes/api_watcher_test.go @@ -5,52 +5,55 @@ import ( "testing" ) -func TestGetAPIPaths(t *testing.T) { - f := func(role string, namespaces []string, selectors []Selector, expectedPaths []string) { +func TestGetAPIPathsWithNamespaces(t *testing.T) { + f := func(role string, namespaces []string, selectors []Selector, expectedPaths, expectedNamespaces []string) { t.Helper() - paths := getAPIPaths(role, namespaces, selectors) + paths, resultNamespaces := getAPIPathsWithNamespaces(role, namespaces, selectors) if !reflect.DeepEqual(paths, expectedPaths) { t.Fatalf("unexpected paths; got\n%q\nwant\n%q", paths, expectedPaths) } + if !reflect.DeepEqual(resultNamespaces, expectedNamespaces) { + t.Fatalf("unexpected namespaces; got\n%q\nwant\n%q", resultNamespaces, expectedNamespaces) + } } // role=node - f("node", nil, nil, []string{"/api/v1/nodes"}) - f("node", []string{"foo", "bar"}, nil, []string{"/api/v1/nodes"}) + f("node", nil, nil, []string{"/api/v1/nodes"}, []string{""}) + f("node", []string{"foo", "bar"}, nil, []string{"/api/v1/nodes"}, []string{""}) f("node", nil, []Selector{ { Role: "pod", Label: "foo", Field: "bar", }, - }, []string{"/api/v1/nodes"}) + }, []string{"/api/v1/nodes"}, 
[]string{""}) f("node", nil, []Selector{ { Role: "node", Label: "foo", Field: "bar", }, - }, []string{"/api/v1/nodes?labelSelector=foo&fieldSelector=bar"}) + }, []string{"/api/v1/nodes?labelSelector=foo&fieldSelector=bar"}, []string{""}) f("node", []string{"x", "y"}, []Selector{ { Role: "node", Label: "foo", Field: "bar", }, - }, []string{"/api/v1/nodes?labelSelector=foo&fieldSelector=bar"}) + }, []string{"/api/v1/nodes?labelSelector=foo&fieldSelector=bar"}, []string{""}) // role=pod - f("pod", nil, nil, []string{"/api/v1/pods"}) + f("pod", nil, nil, []string{"/api/v1/pods"}, []string{""}) f("pod", []string{"foo", "bar"}, nil, []string{ "/api/v1/namespaces/foo/pods", "/api/v1/namespaces/bar/pods", - }) + }, []string{"foo", "bar"}) f("pod", nil, []Selector{ { Role: "node", Label: "foo", }, - }, []string{"/api/v1/pods"}) + }, []string{"/api/v1/pods"}, []string{""}) f("pod", nil, []Selector{ { Role: "pod", @@ -61,7 +64,7 @@ func TestGetAPIPaths(t *testing.T) { Label: "x", Field: "y", }, - }, []string{"/api/v1/pods?labelSelector=foo%2Cx&fieldSelector=y"}) + }, []string{"/api/v1/pods?labelSelector=foo%2Cx&fieldSelector=y"}, []string{""}) f("pod", []string{"x", "y"}, []Selector{ { Role: "pod", @@ -75,14 +78,14 @@ func TestGetAPIPaths(t *testing.T) { }, []string{ "/api/v1/namespaces/x/pods?labelSelector=foo%2Cx&fieldSelector=y", "/api/v1/namespaces/y/pods?labelSelector=foo%2Cx&fieldSelector=y", - }) + }, []string{"x", "y"}) // role=service - f("service", nil, nil, []string{"/api/v1/services"}) + f("service", nil, nil, []string{"/api/v1/services"}, []string{""}) f("service", []string{"x", "y"}, nil, []string{ "/api/v1/namespaces/x/services", "/api/v1/namespaces/y/services", - }) + }, []string{"x", "y"}) f("service", nil, []Selector{ { Role: "node", @@ -92,7 +95,7 @@ func TestGetAPIPaths(t *testing.T) { Role: "service", Field: "bar", }, - }, []string{"/api/v1/services?fieldSelector=bar"}) + }, []string{"/api/v1/services?fieldSelector=bar"}, []string{""}) f("service", 
[]string{"x", "y"}, []Selector{ { Role: "service", @@ -101,14 +104,14 @@ func TestGetAPIPaths(t *testing.T) { }, []string{ "/api/v1/namespaces/x/services?labelSelector=abc%3Dde", "/api/v1/namespaces/y/services?labelSelector=abc%3Dde", - }) + }, []string{"x", "y"}) // role=endpoints - f("endpoints", nil, nil, []string{"/api/v1/endpoints"}) + f("endpoints", nil, nil, []string{"/api/v1/endpoints"}, []string{""}) f("endpoints", []string{"x", "y"}, nil, []string{ "/api/v1/namespaces/x/endpoints", "/api/v1/namespaces/y/endpoints", - }) + }, []string{"x", "y"}) f("endpoints", []string{"x", "y"}, []Selector{ { Role: "endpoints", @@ -121,10 +124,10 @@ func TestGetAPIPaths(t *testing.T) { }, []string{ "/api/v1/namespaces/x/endpoints?labelSelector=bbb", "/api/v1/namespaces/y/endpoints?labelSelector=bbb", - }) + }, []string{"x", "y"}) // role=endpointslices - f("endpointslices", nil, nil, []string{"/apis/discovery.k8s.io/v1beta1/endpointslices"}) + f("endpointslices", nil, nil, []string{"/apis/discovery.k8s.io/v1beta1/endpointslices"}, []string{""}) f("endpointslices", []string{"x", "y"}, []Selector{ { Role: "endpointslices", @@ -134,10 +137,10 @@ func TestGetAPIPaths(t *testing.T) { }, []string{ "/apis/discovery.k8s.io/v1beta1/namespaces/x/endpointslices?labelSelector=label&fieldSelector=field", "/apis/discovery.k8s.io/v1beta1/namespaces/y/endpointslices?labelSelector=label&fieldSelector=field", - }) + }, []string{"x", "y"}) // role=ingress - f("ingress", nil, nil, []string{"/apis/networking.k8s.io/v1beta1/ingresses"}) + f("ingress", nil, nil, []string{"/apis/networking.k8s.io/v1beta1/ingresses"}, []string{""}) f("ingress", []string{"x", "y"}, []Selector{ { Role: "node", @@ -158,7 +161,7 @@ func TestGetAPIPaths(t *testing.T) { }, []string{ "/apis/networking.k8s.io/v1beta1/namespaces/x/ingresses?labelSelector=cde%2Cbaaa&fieldSelector=abc", "/apis/networking.k8s.io/v1beta1/namespaces/y/ingresses?labelSelector=cde%2Cbaaa&fieldSelector=abc", - }) + }, []string{"x", "y"}) } func 
TestParseBookmark(t *testing.T) { diff --git a/lib/promscrape/discovery/kubernetes/endpoints.go b/lib/promscrape/discovery/kubernetes/endpoints.go index 805a88b01..af4b16b5e 100644 --- a/lib/promscrape/discovery/kubernetes/endpoints.go +++ b/lib/promscrape/discovery/kubernetes/endpoints.go @@ -139,8 +139,10 @@ func appendEndpointLabelsForAddresses(ms []map[string]string, gw *groupWatcher, eas []EndpointAddress, epp EndpointPort, svc *Service, ready string) []map[string]string { for _, ea := range eas { var p *Pod - if o := gw.getObjectByRole("pod", ea.TargetRef.Namespace, ea.TargetRef.Name); o != nil { - p = o.(*Pod) + if ea.TargetRef.Name != "" { + if o := gw.getObjectByRole("pod", ea.TargetRef.Namespace, ea.TargetRef.Name); o != nil { + p = o.(*Pod) + } } m := getEndpointLabelsForAddressAndPort(podPortsSeen, eps, ea, epp, p, svc, ready) ms = append(ms, m) diff --git a/lib/promscrape/discovery/kubernetes/kubernetes.go b/lib/promscrape/discovery/kubernetes/kubernetes.go index b7dc29b64..ea827b47a 100644 --- a/lib/promscrape/discovery/kubernetes/kubernetes.go +++ b/lib/promscrape/discovery/kubernetes/kubernetes.go @@ -11,15 +11,15 @@ import ( // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config type SDConfig struct { - APIServer string `yaml:"api_server,omitempty"` - Role string `yaml:"role"` - BasicAuth *promauth.BasicAuthConfig `yaml:"basic_auth,omitempty"` - BearerToken string `yaml:"bearer_token,omitempty"` - BearerTokenFile string `yaml:"bearer_token_file,omitempty"` - ProxyURL proxy.URL `yaml:"proxy_url,omitempty"` - TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` - Namespaces Namespaces `yaml:"namespaces,omitempty"` - Selectors []Selector `yaml:"selectors,omitempty"` + APIServer string `yaml:"api_server,omitempty"` + Role string `yaml:"role"` + HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"` + ProxyURL proxy.URL `yaml:"proxy_url,omitempty"` + Namespaces Namespaces 
`yaml:"namespaces,omitempty"` + Selectors []Selector `yaml:"selectors,omitempty"` + + cfg *apiConfig + startErr error } // Namespaces represents namespaces for SDConfig @@ -40,23 +40,33 @@ type Selector struct { // ScrapeWorkConstructorFunc must construct ScrapeWork object for the given metaLabels. type ScrapeWorkConstructorFunc func(metaLabels map[string]string) interface{} -// GetScrapeWorkObjects returns ScrapeWork objects for the given sdc and baseDir. +// GetScrapeWorkObjects returns ScrapeWork objects for the given sdc. +// +// This function must be called after MustStart call. +func (sdc *SDConfig) GetScrapeWorkObjects() ([]interface{}, error) { + if sdc.cfg == nil { + return nil, sdc.startErr + } + return sdc.cfg.aw.getScrapeWorkObjects(), nil +} + +// MustStart initializes sdc before its usage. // // swcFunc is used for constructing such objects. -func (sdc *SDConfig) GetScrapeWorkObjects(baseDir string, swcFunc ScrapeWorkConstructorFunc) ([]interface{}, error) { - cfg, err := getAPIConfig(sdc, baseDir, swcFunc) +func (sdc *SDConfig) MustStart(baseDir string, swcFunc ScrapeWorkConstructorFunc) { + cfg, err := newAPIConfig(sdc, baseDir, swcFunc) if err != nil { - return nil, fmt.Errorf("cannot create API config: %w", err) + sdc.startErr = fmt.Errorf("cannot create API config for kubernetes: %w", err) + return } - return cfg.aw.getScrapeWorkObjects(), nil + cfg.aw.mustStart() + sdc.cfg = cfg } // MustStop stops further usage for sdc. func (sdc *SDConfig) MustStop() { - v := configMap.Delete(sdc) - if v != nil { - // v can be nil if GetLabels wasn't called yet. - cfg := v.(*apiConfig) - cfg.mustStop() + if sdc.cfg != nil { + // sdc.cfg can be nil on MustStart error. 
+ sdc.cfg.aw.mustStop() } } diff --git a/lib/promscrape/discovery/openstack/api.go b/lib/promscrape/discovery/openstack/api.go index 615fe8971..fecfd3158 100644 --- a/lib/promscrape/discovery/openstack/api.go +++ b/lib/promscrape/discovery/openstack/api.go @@ -77,7 +77,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { port: sdc.Port, } if sdc.TLSConfig != nil { - ac, err := promauth.NewConfig(baseDir, nil, "", "", sdc.TLSConfig) + ac, err := promauth.NewConfig(baseDir, nil, nil, "", "", sdc.TLSConfig) if err != nil { return nil, err } diff --git a/lib/promscrape/discoveryutils/client.go b/lib/promscrape/discoveryutils/client.go index ecddf92cc..a26190ee6 100644 --- a/lib/promscrape/discoveryutils/client.go +++ b/lib/promscrape/discoveryutils/client.go @@ -40,22 +40,21 @@ type Client struct { // blockingClient is used for long-polling requests. blockingClient *fasthttp.HostClient - ac *promauth.Config apiServer string - hostPort string + + hostPort string + authHeader string + proxyAuthHeader string + sendFullURL bool } -// NewClient returns new Client for the given apiServer and the given ac. -func NewClient(apiServer string, ac *promauth.Config, proxyURL proxy.URL) (*Client, error) { - var ( - dialFunc fasthttp.DialFunc - tlsCfg *tls.Config - u fasthttp.URI - err error - ) +// NewClient returns new Client for the given args. 
+func NewClient(apiServer string, ac *promauth.Config, proxyURL proxy.URL, proxyAC *promauth.Config) (*Client, error) { + var u fasthttp.URI u.Update(apiServer) // special case for unix socket connection + var dialFunc fasthttp.DialFunc if string(u.Scheme()) == "unix" { dialAddr := string(u.Path()) apiServer = "http://" @@ -66,9 +65,25 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL proxy.URL) (*Clie hostPort := string(u.Host()) isTLS := string(u.Scheme()) == "https" + var tlsCfg *tls.Config if isTLS { tlsCfg = ac.NewTLSConfig() } + sendFullURL := !isTLS && proxyURL.IsHTTPOrHTTPS() + proxyAuthHeader := "" + if sendFullURL { + // Send full urls in requests to a proxy host for non-TLS apiServer + // like net/http package from Go does. + // See https://en.wikipedia.org/wiki/Proxy_server#Web_proxy_servers + pu := proxyURL.URL() + hostPort = pu.Host + isTLS = pu.Scheme == "https" + if isTLS { + tlsCfg = proxyAC.NewTLSConfig() + } + proxyAuthHeader = proxyURL.GetAuthHeader(proxyAC) + proxyURL = proxy.URL{} + } if !strings.Contains(hostPort, ":") { port := "80" if isTLS { @@ -77,7 +92,8 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL proxy.URL) (*Clie hostPort = net.JoinHostPort(hostPort, port) } if dialFunc == nil { - dialFunc, err = proxyURL.NewDialFunc(ac) + var err error + dialFunc, err = proxyURL.NewDialFunc(proxyAC) if err != nil { return nil, err } @@ -104,12 +120,18 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL proxy.URL) (*Clie MaxConns: 64 * 1024, Dial: dialFunc, } + authHeader := "" + if ac != nil { + authHeader = ac.Authorization + } return &Client{ - hc: hc, - blockingClient: blockingClient, - ac: ac, - apiServer: apiServer, - hostPort: hostPort, + hc: hc, + blockingClient: blockingClient, + apiServer: apiServer, + hostPort: hostPort, + authHeader: authHeader, + proxyAuthHeader: proxyAuthHeader, + sendFullURL: sendFullURL, }, nil } @@ -159,11 +181,18 @@ func (c *Client) 
getAPIResponseWithParamsAndClient(client *fasthttp.HostClient, var u fasthttp.URI u.Update(requestURL) var req fasthttp.Request - req.SetRequestURIBytes(u.RequestURI()) - req.SetHost(c.hostPort) + if c.sendFullURL { + req.SetRequestURIBytes(u.FullURI()) + } else { + req.SetRequestURIBytes(u.RequestURI()) + } + req.Header.SetHost(c.hostPort) req.Header.Set("Accept-Encoding", "gzip") - if c.ac != nil && c.ac.Authorization != "" { - req.Header.Set("Authorization", c.ac.Authorization) + if c.authHeader != "" { + req.Header.Set("Authorization", c.authHeader) + } + if c.proxyAuthHeader != "" { + req.Header.Set("Proxy-Authorization", c.proxyAuthHeader) } var resp fasthttp.Response diff --git a/lib/promscrape/scraper.go b/lib/promscrape/scraper.go index d02f1732f..7dd454891 100644 --- a/lib/promscrape/scraper.go +++ b/lib/promscrape/scraper.go @@ -93,6 +93,7 @@ func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest) if err != nil { logger.Fatalf("cannot read %q: %s", configFile, err) } + cfg.mustStart() scs := newScrapeConfigs(pushData) scs.add("static_configs", 0, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getStaticScrapeWork() }) @@ -130,6 +131,7 @@ func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest) goto waitForChans } cfg.mustStop() + cfgNew.mustStart() cfg = cfgNew data = dataNew case <-tickerCh: @@ -143,6 +145,7 @@ func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest) goto waitForChans } cfg.mustStop() + cfgNew.mustStart() cfg = cfgNew data = dataNew case <-globalStopCh: @@ -231,17 +234,11 @@ func (scfg *scrapeConfig) run() { cfg := <-scfg.cfgCh var swsPrev []*ScrapeWork updateScrapeWork := func(cfg *Config) { - for { - startTime := time.Now() - sws := scfg.getScrapeWork(cfg, swsPrev) - retry := sg.update(sws) - swsPrev = sws - scfg.discoveryDuration.UpdateDuration(startTime) - if !retry { - return - } - time.Sleep(2 * time.Second) - } + startTime := time.Now() + 
sws := scfg.getScrapeWork(cfg, swsPrev) + sg.update(sws) + swsPrev = sws + scfg.discoveryDuration.UpdateDuration(startTime) } updateScrapeWork(cfg) atomic.AddInt32(&PendingScrapeConfigs, -1) @@ -301,7 +298,7 @@ func (sg *scraperGroup) stop() { sg.wg.Wait() } -func (sg *scraperGroup) update(sws []*ScrapeWork) (retry bool) { +func (sg *scraperGroup) update(sws []*ScrapeWork) { sg.mLock.Lock() defer sg.mLock.Unlock() @@ -358,7 +355,6 @@ func (sg *scraperGroup) update(sws []*ScrapeWork) (retry bool) { sg.changesCount.Add(additionsCount + deletionsCount) logger.Infof("%s: added targets: %d, removed targets: %d; total targets: %d", sg.name, additionsCount, deletionsCount, len(sg.m)) } - return deletionsCount > 0 && len(sg.m) == 0 } type scraper struct { diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go index b9da864d5..3a0948298 100644 --- a/lib/promscrape/scrapework.go +++ b/lib/promscrape/scrapework.go @@ -48,6 +48,9 @@ type ScrapeWork struct { // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config HonorTimestamps bool + // Whether to deny redirects during requests to scrape config. + DenyRedirects bool + // OriginalLabels contains original labels before relabeling. // // These labels are needed for relabeling troubleshooting at /targets page. @@ -107,10 +110,10 @@ type ScrapeWork struct { // it can be used for comparing for equality for two ScrapeWork objects. func (sw *ScrapeWork) key() string { // Do not take into account OriginalLabels. 
- key := fmt.Sprintf("ScrapeURL=%s, ScrapeInterval=%s, ScrapeTimeout=%s, HonorLabels=%v, HonorTimestamps=%v, Labels=%s, "+ + key := fmt.Sprintf("ScrapeURL=%s, ScrapeInterval=%s, ScrapeTimeout=%s, HonorLabels=%v, HonorTimestamps=%v, DenyRedirects=%v, Labels=%s, "+ "ProxyURL=%s, ProxyAuthConfig=%s, AuthConfig=%s, MetricRelabelConfigs=%s, SampleLimit=%d, DisableCompression=%v, DisableKeepAlive=%v, StreamParse=%v, "+ "ScrapeAlignInterval=%s, ScrapeOffset=%s", - sw.ScrapeURL, sw.ScrapeInterval, sw.ScrapeTimeout, sw.HonorLabels, sw.HonorTimestamps, sw.LabelsString(), + sw.ScrapeURL, sw.ScrapeInterval, sw.ScrapeTimeout, sw.HonorLabels, sw.HonorTimestamps, sw.DenyRedirects, sw.LabelsString(), sw.ProxyURL.String(), sw.ProxyAuthConfig.String(), sw.AuthConfig.String(), sw.MetricRelabelConfigs.String(), sw.SampleLimit, sw.DisableCompression, sw.DisableKeepAlive, sw.StreamParse, sw.ScrapeAlignInterval, sw.ScrapeOffset) diff --git a/lib/proxy/proxy.go b/lib/proxy/proxy.go index 207276cac..c792b6ae2 100644 --- a/lib/proxy/proxy.go +++ b/lib/proxy/proxy.go @@ -14,6 +14,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth" "github.com/VictoriaMetrics/fasthttp" + "golang.org/x/net/proxy" ) // URL implements YAML.Marshaler and yaml.Unmarshaler interfaces for url.URL. @@ -40,6 +41,16 @@ func (u *URL) URL() *url.URL { return u.url } +// IsHTTPOrHTTPS returns true if u is http or https +func (u *URL) IsHTTPOrHTTPS() bool { + pu := u.URL() + if pu == nil { + return false + } + scheme := u.url.Scheme + return scheme == "http" || scheme == "https" +} + // String returns string representation of u. func (u *URL) String() string { pu := u.URL() @@ -49,6 +60,23 @@ func (u *URL) String() string { return pu.String() } +// GetAuthHeader returns Proxy-Authorization auth header for the given u and ac. 
+func (u *URL) GetAuthHeader(ac *promauth.Config) string { + authHeader := "" + if ac != nil { + authHeader = ac.Authorization + } + if u == nil || u.url == nil { + return authHeader + } + pu := u.url + if pu.User != nil && len(pu.User.Username()) > 0 { + userPasswordEncoded := base64.StdEncoding.EncodeToString([]byte(pu.User.String())) + authHeader = "Basic " + userPasswordEncoded + } + return authHeader +} + // MarshalYAML implements yaml.Marshaler interface. func (u *URL) MarshalYAML() (interface{}, error) { if u.url == nil { @@ -77,22 +105,13 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) { return defaultDialFunc, nil } pu := u.url - if pu.Scheme != "http" && pu.Scheme != "https" { - return nil, fmt.Errorf("unknown scheme=%q for proxy_url=%q, must be http or https", pu.Scheme, pu.Redacted()) + switch pu.Scheme { + case "http", "https", "socks5", "tls+socks5": + default: + return nil, fmt.Errorf("unknown scheme=%q for proxy_url=%q, must be http, https, socks5 or tls+socks5", pu.Scheme, pu.Redacted()) } - isTLS := pu.Scheme == "https" + isTLS := (pu.Scheme == "https" || pu.Scheme == "tls+socks5") proxyAddr := addMissingPort(pu.Host, isTLS) - var authHeader string - if ac != nil { - authHeader = ac.Authorization - } - if pu.User != nil && len(pu.User.Username()) > 0 { - userPasswordEncoded := base64.StdEncoding.EncodeToString([]byte(pu.User.String())) - authHeader = "Basic " + userPasswordEncoded - } - if authHeader != "" { - authHeader = "Proxy-Authorization: " + authHeader + "\r\n" - } var tlsCfg *tls.Config if isTLS { tlsCfg = ac.NewTLSConfig() @@ -100,6 +119,13 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) { tlsCfg.ServerName = tlsServerName(proxyAddr) } } + if pu.Scheme == "socks5" || pu.Scheme == "tls+socks5" { + return socks5DialFunc(proxyAddr, pu, tlsCfg) + } + authHeader := u.GetAuthHeader(ac) + if authHeader != "" { + authHeader = "Proxy-Authorization: " + authHeader + "\r\n" + } dialFunc := 
func(addr string) (net.Conn, error) { proxyConn, err := defaultDialFunc(proxyAddr) if err != nil { @@ -118,6 +144,33 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) { return dialFunc, nil } +func socks5DialFunc(proxyAddr string, pu *url.URL, tlsCfg *tls.Config) (fasthttp.DialFunc, error) { + var sac *proxy.Auth + if pu.User != nil { + username := pu.User.Username() + password, _ := pu.User.Password() + sac = &proxy.Auth{ + User: username, + Password: password, + } + } + network := netutil.GetTCPNetwork() + var dialer proxy.Dialer = proxy.Direct + if tlsCfg != nil { + dialer = &tls.Dialer{ + Config: tlsCfg, + } + } + d, err := proxy.SOCKS5(network, proxyAddr, sac, dialer) + if err != nil { + return nil, fmt.Errorf("cannot create socks5 proxy for url: %s, err: %w", pu.Redacted(), err) + } + dialFunc := func(addr string) (net.Conn, error) { + return d.Dial(network, addr) + } + return dialFunc, nil +} + func addMissingPort(addr string, isTLS bool) string { if strings.IndexByte(addr, ':') >= 0 { return addr @@ -159,7 +212,7 @@ func sendConnectRequest(proxyConn net.Conn, proxyAddr, dstAddr, authHeader strin return nil, fmt.Errorf("cannot read CONNECT response for dstAddr=%q: %w", dstAddr, err) } if statusCode := res.Header.StatusCode(); statusCode != 200 { - return nil, fmt.Errorf("unexpected status code received: %d; want: 200", statusCode) + return nil, fmt.Errorf("unexpected status code received: %d; want: 200; response body: %q", statusCode, res.Body()) } return conn, nil } diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go index eca59a20a..3210b4155 100644 --- a/lib/storage/index_db.go +++ b/lib/storage/index_db.go @@ -719,6 +719,9 @@ func (db *indexDB) SearchTagKeysOnTimeRange(tr TimeRange, maxTagKeys int, deadli func (is *indexSearch) searchTagKeysOnTimeRange(tks map[string]struct{}, tr TimeRange, maxTagKeys int) error { minDate := uint64(tr.MinTimestamp) / msecPerDay maxDate := uint64(tr.MaxTimestamp) / msecPerDay + if 
minDate > maxDate || maxDate-minDate > maxDaysForPerDaySearch { + return is.searchTagKeys(tks, maxTagKeys) + } var mu sync.Mutex var wg sync.WaitGroup var errGlobal error @@ -914,6 +917,9 @@ func (db *indexDB) SearchTagValuesOnTimeRange(tagKey []byte, tr TimeRange, maxTa func (is *indexSearch) searchTagValuesOnTimeRange(tvs map[string]struct{}, tagKey []byte, tr TimeRange, maxTagValues int) error { minDate := uint64(tr.MinTimestamp) / msecPerDay maxDate := uint64(tr.MaxTimestamp) / msecPerDay + if minDate > maxDate || maxDate-minDate > maxDaysForPerDaySearch { + return is.searchTagValues(tvs, tagKey, maxTagValues) + } var mu sync.Mutex var wg sync.WaitGroup var errGlobal error @@ -1126,7 +1132,7 @@ func (db *indexDB) SearchTagValueSuffixes(tr TimeRange, tagKey, tagValuePrefix [ func (is *indexSearch) searchTagValueSuffixesForTimeRange(tvss map[string]struct{}, tr TimeRange, tagKey, tagValuePrefix []byte, delimiter byte, maxTagValueSuffixes int) error { minDate := uint64(tr.MinTimestamp) / msecPerDay maxDate := uint64(tr.MaxTimestamp) / msecPerDay - if maxDate-minDate > maxDaysForDateMetricIDs { + if minDate > maxDate || maxDate-minDate > maxDaysForPerDaySearch { return is.searchTagValueSuffixesAll(tvss, tagKey, tagValuePrefix, delimiter, maxTagValueSuffixes) } // Query over multiple days in parallel. @@ -2673,7 +2679,7 @@ func (is *indexSearch) getMetricIDsForTimeRange(tr TimeRange, maxMetrics int) (* atomic.AddUint64(&is.db.dateMetricIDsSearchCalls, 1) minDate := uint64(tr.MinTimestamp) / msecPerDay maxDate := uint64(tr.MaxTimestamp) / msecPerDay - if maxDate-minDate > maxDaysForDateMetricIDs { + if minDate > maxDate || maxDate-minDate > maxDaysForPerDaySearch { // Too much dates must be covered. Give up. 
return nil, errMissingMetricIDsForDate } @@ -2722,17 +2728,13 @@ func (is *indexSearch) getMetricIDsForTimeRange(tr TimeRange, maxMetrics int) (* return metricIDs, nil } -const maxDaysForDateMetricIDs = 40 +const maxDaysForPerDaySearch = 40 func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set, tfs *TagFilters, tr TimeRange, maxMetrics int) error { atomic.AddUint64(&is.db.dateRangeSearchCalls, 1) minDate := uint64(tr.MinTimestamp) / msecPerDay maxDate := uint64(tr.MaxTimestamp) / msecPerDay - if maxDate < minDate { - // Per-day inverted index doesn't cover the selected date range. - return fmt.Errorf("maxDate=%d cannot be smaller than minDate=%d", maxDate, minDate) - } - if maxDate-minDate > maxDaysForDateMetricIDs { + if minDate > maxDate || maxDate-minDate > maxDaysForPerDaySearch { // Too much dates must be covered. Give up, since it may be slow. return errFallbackToGlobalSearch } diff --git a/lib/storage/storage.go b/lib/storage/storage.go index 48af275c1..7c5c82911 100644 --- a/lib/storage/storage.go +++ b/lib/storage/storage.go @@ -1485,7 +1485,7 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra continue } if s.getTSIDFromCache(&r.TSID, mr.MetricNameRaw) { - // Fast path - the TSID for the given MetricName has been found in cache and isn't deleted. + // Fast path - the TSID for the given MetricNameRaw has been found in cache and isn't deleted. // There is no need in checking whether r.TSID.MetricID is deleted, since tsidCache doesn't // contain MetricName->TSID entries for deleted time series. // See Storage.DeleteMetrics code for details. @@ -1533,15 +1533,6 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra r.TSID = prevTSID continue } - if s.getTSIDFromCache(&r.TSID, mr.MetricNameRaw) { - // Fast path - the TSID for the given MetricName has been found in cache and isn't deleted. 
- // There is no need in checking whether r.TSID.MetricID is deleted, since tsidCache doesn't - // contain MetricName->TSID entries for deleted time series. - // See Storage.DeleteMetrics code for details. - prevTSID = r.TSID - prevMetricNameRaw = mr.MetricNameRaw - continue - } slowInsertsCount++ if err := is.GetOrCreateTSIDByName(&r.TSID, pmr.MetricName); err != nil { // Do not stop adding rows on error - just skip invalid row. @@ -1554,6 +1545,8 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra continue } s.putTSIDToCache(&r.TSID, mr.MetricNameRaw) + prevTSID = r.TSID + prevMetricNameRaw = mr.MetricNameRaw } idb.putIndexSearch(is) putPendingMetricRows(pmrs) diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index d38b3a4ea..6fc75f1f1 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,6 +1,22 @@ # Changes +## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02) + + +### Features + +* **datacatalog:** Policy Tag Manager v1 API service feat: new RenameTagTemplateFieldEnumValue API feat: adding fully_qualified_name in lookup and search feat: added DATAPROC_METASTORE integrated system along with new entry types: DATABASE and SERVICE docs: Documentation improvements ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) +* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([a0b1f6f](https://www.github.com/googleapis/google-cloud-go/commit/a0b1f6faae77d014fdee166ab018ddcd6f846ab4)) +* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. 
([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) +* **dialogflow:** expose MP3_64_KBPS and MULAW for output audio encodings. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) +* **secretmanager:** Rotation for Secrets ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) + + +### Bug Fixes + +* **internal/godocfx:** filter out non-Cloud ([#3878](https://www.github.com/googleapis/google-cloud-go/issues/3878)) ([625aef9](https://www.github.com/googleapis/google-cloud-go/commit/625aef9b47181cf627587cc9cde9e400713c6678)) + ## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23) diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod index 24ebd1a85..4fa03cae5 100644 --- a/vendor/cloud.google.com/go/go.mod +++ b/vendor/cloud.google.com/go/go.mod @@ -17,7 +17,7 @@ require ( golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84 golang.org/x/text v0.3.5 golang.org/x/tools v0.1.0 - google.golang.org/api v0.42.0 - google.golang.org/genproto v0.0.0-20210323160006-e668133fea6a - google.golang.org/grpc v1.36.0 + google.golang.org/api v0.43.0 + google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1 + google.golang.org/grpc v1.36.1 ) diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum index 6e00313a1..d0209b286 100644 --- a/vendor/cloud.google.com/go/go.sum +++ b/vendor/cloud.google.com/go/go.sum @@ -284,7 +284,6 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -372,8 +371,8 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.42.0 h1:uqATLkpxiBrhrvFoebXUjvyzE9nQf+pVyy0Z0IHE+fc= -google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= +google.golang.org/api v0.43.0 h1:4sAyIHT6ZohtAQDoxws+ez7bROYmUlOVvsUscYCDTqA= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -419,9 +418,9 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20210323160006-e668133fea6a h1:XVaQ1+BDKvrRcgppHhtAaniHCKyV5xJAvymwsPHHFaE= -google.golang.org/genproto v0.0.0-20210323160006-e668133fea6a/go.mod h1:f2Bd7+2PlaVKmvKQ52aspJZXIDaRQBVdOOBfJ5i8OEs= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1 h1:E7wSQBXkH3T3diucK+9Z1kjn4+/9tNG7lZLr75oOhh8= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -438,8 +437,9 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1 h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json 
b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index ffe045e4e..ecb5f8efd 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -1,7 +1,7 @@ { "cloud.google.com/go/accessapproval/apiv1": { "distribution_name": "cloud.google.com/go/accessapproval/apiv1", - "description": "", + "description": "Access Approval API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/accessapproval/apiv1", @@ -9,7 +9,7 @@ }, "cloud.google.com/go/analytics/admin/apiv1alpha": { "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", - "description": "", + "description": "Google Analytics Admin API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/analytics/admin/apiv1alpha", @@ -17,7 +17,7 @@ }, "cloud.google.com/go/analytics/data/apiv1alpha": { "distribution_name": "cloud.google.com/go/analytics/data/apiv1alpha", - "description": "", + "description": "Google Analytics Data API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/analytics/data/apiv1alpha", @@ -81,7 +81,7 @@ }, "cloud.google.com/go/assuredworkloads/apiv1beta1": { "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1", - "description": "", + "description": "Assured Workloads API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/assuredworkloads/apiv1beta1", @@ -209,7 +209,7 @@ }, "cloud.google.com/go/billing/budgets/apiv1beta1": { "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", - "description": "", + "description": "Cloud Billing Budget API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1beta1", @@ -449,7 +449,7 @@ }, "cloud.google.com/go/functions/apiv1": { 
"distribution_name": "cloud.google.com/go/functions/apiv1", - "description": "", + "description": "Cloud Functions API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/functions/apiv1", @@ -457,7 +457,7 @@ }, "cloud.google.com/go/gaming/apiv1": { "distribution_name": "cloud.google.com/go/gaming/apiv1", - "description": "", + "description": "Game Services API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1", @@ -465,7 +465,7 @@ }, "cloud.google.com/go/gaming/apiv1beta": { "distribution_name": "cloud.google.com/go/gaming/apiv1beta", - "description": "", + "description": "Game Services API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1beta", @@ -609,7 +609,7 @@ }, "cloud.google.com/go/monitoring/dashboard/apiv1": { "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", - "description": "", + "description": "Cloud Monitoring API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/dashboard/apiv1", @@ -729,7 +729,7 @@ }, "cloud.google.com/go/pubsublite/apiv1": { "distribution_name": "cloud.google.com/go/pubsublite/apiv1", - "description": "", + "description": "Pub/Sub Lite API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsublite/apiv1", @@ -857,7 +857,7 @@ }, "cloud.google.com/go/security/privateca/apiv1beta1": { "distribution_name": "cloud.google.com/go/security/privateca/apiv1beta1", - "description": "", + "description": "Certificate Authority API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/security/privateca/apiv1beta1", @@ -897,7 +897,7 @@ }, "cloud.google.com/go/servicecontrol/apiv1": { "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", - 
"description": "", + "description": "Service Control API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicecontrol/apiv1", @@ -921,7 +921,7 @@ }, "cloud.google.com/go/servicemanagement/apiv1": { "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", - "description": "", + "description": "Service Management API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicemanagement/apiv1", @@ -985,7 +985,7 @@ }, "cloud.google.com/go/talent/apiv4": { "distribution_name": "cloud.google.com/go/talent/apiv4", - "description": "", + "description": "Cloud Talent Solution API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/talent/apiv4", @@ -1033,7 +1033,7 @@ }, "cloud.google.com/go/video/transcoder/apiv1beta1": { "distribution_name": "cloud.google.com/go/video/transcoder/apiv1beta1", - "description": "", + "description": "Transcoder API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/video/transcoder/apiv1beta1", @@ -1097,7 +1097,7 @@ }, "cloud.google.com/go/workflows/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/apiv1beta", - "description": "", + "description": "Workflows API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/workflows/apiv1beta", @@ -1105,7 +1105,7 @@ }, "cloud.google.com/go/workflows/executions/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", - "description": "", + "description": "Workflow Executions API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/workflows/executions/apiv1beta", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 
ee958ece4..ab819d0ae 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -744,6 +744,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -983,6 +984,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1777,6 +1779,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -3359,8 +3362,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "af-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "fips-us-east-2": endpoint{ Hostname: "groundstation-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ @@ -3374,6 +3384,7 @@ var awsPartition = partition{ }, }, "me-south-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, @@ -3971,6 +3982,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4126,6 +4138,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": 
endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4462,6 +4475,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5424,6 +5438,90 @@ var awsPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ + "accesspoint-af-south-1": endpoint{ + Hostname: "s3-accesspoint.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-east-1": endpoint{ + Hostname: "s3-accesspoint.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-1": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-2": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-3": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-south-1": endpoint{ + Hostname: "s3-accesspoint.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-southeast-1": endpoint{ + Hostname: "s3-accesspoint.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-southeast-2": endpoint{ + Hostname: "s3-accesspoint.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ca-central-1": endpoint{ + Hostname: "s3-accesspoint.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-central-1": endpoint{ + Hostname: "s3-accesspoint.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-north-1": endpoint{ + Hostname: "s3-accesspoint.eu-north-1.amazonaws.com", + SignatureVersions: 
[]string{"s3v4"}, + }, + "accesspoint-eu-south-1": endpoint{ + Hostname: "s3-accesspoint.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-1": endpoint{ + Hostname: "s3-accesspoint.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-2": endpoint{ + Hostname: "s3-accesspoint.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-3": endpoint{ + Hostname: "s3-accesspoint.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-me-south-1": endpoint{ + Hostname: "s3-accesspoint.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-sa-east-1": endpoint{ + Hostname: "s3-accesspoint.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-east-1": endpoint{ + Hostname: "s3-accesspoint.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-east-2": endpoint{ + Hostname: "s3-accesspoint.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-west-1": endpoint{ + Hostname: "s3-accesspoint.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-west-2": endpoint{ + Hostname: "s3-accesspoint.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{ @@ -5456,8 +5554,28 @@ var awsPartition = partition{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-accesspoint-ca-central-1": endpoint{ + Hostname: "s3-accesspoint-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-east-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, 
+ "fips-accesspoint-us-east-2": endpoint{ + Hostname: "s3-accesspoint-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-west-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-west-2": endpoint{ + Hostname: "s3-accesspoint-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, "me-south-1": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", @@ -5760,6 +5878,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6276,12 +6395,10 @@ var awsPartition = partition{ }, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "ssm": service{ @@ -7824,6 +7941,14 @@ var awscnPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ + "accesspoint-cn-north-1": endpoint{ + Hostname: "s3-accesspoint.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-cn-northwest-1": endpoint{ + Hostname: "s3-accesspoint.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + }, "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, @@ -8117,6 +8242,27 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "api.ecr": service{ Endpoints: endpoints{ @@ -9377,6 +9523,22 @@ var awsusgovPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ + "accesspoint-us-gov-east-1": endpoint{ + Hostname: "s3-accesspoint.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-gov-west-1": endpoint{ + Hostname: "s3-accesspoint.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-gov-east-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-gov-west-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, "fips-us-gov-west-1": endpoint{ Hostname: "s3-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -9510,6 +9672,27 @@ var awsusgovPartition = partition{ }, }, }, + "servicequotas": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "servicequotas.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "servicequotas.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "sms": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 95758b23f..2dd2f7a93 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.38.5" +const SDKVersion = "1.38.15" diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go index f3871a624..3d7fa560b 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -12,8 +12,14 @@ var ( // EastAsianWidth will be set true if the current locale is CJK EastAsianWidth bool + // StrictEmojiNeutral should be set false if handle broken fonts + StrictEmojiNeutral bool = true + // DefaultCondition is a condition in current locale - DefaultCondition = &Condition{} + DefaultCondition = &Condition{ + EastAsianWidth: false, + StrictEmojiNeutral: true, + } ) func init() { @@ -83,26 +89,52 @@ var nonprint = table{ // Condition have flag EastAsianWidth whether the current locale is CJK or not. type Condition struct { - EastAsianWidth bool + EastAsianWidth bool + StrictEmojiNeutral bool } // NewCondition return new instance of Condition which is current locale. func NewCondition() *Condition { return &Condition{ - EastAsianWidth: EastAsianWidth, + EastAsianWidth: EastAsianWidth, + StrictEmojiNeutral: StrictEmojiNeutral, } } // RuneWidth returns the number of cells in r. 
// See http://www.unicode.org/reports/tr11/ func (c *Condition) RuneWidth(r rune) int { - switch { - case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining, notassigned): - return 0 - case (c.EastAsianWidth && IsAmbiguousWidth(r)) || inTables(r, doublewidth): - return 2 - default: - return 1 + // optimized version, verified by TestRuneWidthChecksums() + if !c.EastAsianWidth { + switch { + case r < 0x20 || r > 0x10FFFF: + return 0 + case (r >= 0x7F && r <= 0x9F) || r == 0xAD: // nonprint + return 0 + case r < 0x300: + return 1 + case inTable(r, narrow): + return 1 + case inTables(r, nonprint, combining): + return 0 + case inTable(r, doublewidth): + return 2 + default: + return 1 + } + } else { + switch { + case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining): + return 0 + case inTable(r, narrow): + return 1 + case inTables(r, ambiguous, doublewidth): + return 2 + case !c.StrictEmojiNeutral && inTables(r, ambiguous, emoji, narrow): + return 2 + default: + return 1 + } } } diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go index b27d77d89..e5d890c26 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth_table.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go @@ -124,8 +124,10 @@ var ambiguous = table{ {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, } -var notassigned = table{ - {0x27E6, 0x27ED}, {0x2985, 0x2986}, +var narrow = table{ + {0x0020, 0x007E}, {0x00A2, 0x00A3}, {0x00A5, 0x00A6}, + {0x00AC, 0x00AC}, {0x00AF, 0x00AF}, {0x27E6, 0x27ED}, + {0x2985, 0x2986}, } var neutral = table{ diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go new file mode 100644 index 000000000..3d6f516a5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) 
+ } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) + } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 
000000000..97db2340e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
+const ( + Version5 = 0x05 + + AddrTypeIPv4 = 0x01 + AddrTypeFQDN = 0x03 + AddrTypeIPv6 = 0x04 + + CmdConnect Command = 0x01 // establishes an active-open forward proxy connection + cmdBind Command = 0x02 // establishes a passive-open forward proxy connection + + AuthMethodNotRequired AuthMethod = 0x00 // no authentication required + AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods + + StatusSucceeded Reply = 0x00 +) + +// An Addr represents a SOCKS-specific address. +// Either Name or IP is used exclusively. +type Addr struct { + Name string // fully-qualified domain name + IP net.IP + Port int +} + +func (a *Addr) Network() string { return "socks" } + +func (a *Addr) String() string { + if a == nil { + return "" + } + port := strconv.Itoa(a.Port) + if a.IP == nil { + return net.JoinHostPort(a.Name, port) + } + return net.JoinHostPort(a.IP.String(), port) +} + +// A Conn represents a forward proxy connection. +type Conn struct { + net.Conn + + boundAddr net.Addr +} + +// BoundAddr returns the address assigned by the proxy server for +// connecting to the command target address from the proxy server. +func (c *Conn) BoundAddr() net.Addr { + if c == nil { + return nil + } + return c.boundAddr +} + +// A Dialer holds SOCKS-specific options. +type Dialer struct { + cmd Command // either CmdConnect or cmdBind + proxyNetwork string // network between a proxy server and a client + proxyAddress string // proxy server address + + // ProxyDial specifies the optional dial function for + // establishing the transport connection. + ProxyDial func(context.Context, string, string) (net.Conn, error) + + // AuthMethods specifies the list of request authentication + // methods. + // If empty, SOCKS client requests only AuthMethodNotRequired. + AuthMethods []AuthMethod + + // Authenticate specifies the optional authentication + // function. 
It must be non-nil when AuthMethods is not empty. + // It must return an error when the authentication is failed. + Authenticate func(context.Context, io.ReadWriter, AuthMethod) error +} + +// DialContext connects to the provided address on the provided +// network. +// +// The returned error value may be a net.OpError. When the Op field of +// net.OpError contains "socks", the Source field contains a proxy +// server address and the Addr field contains a command target +// address. +// +// See func Dial of the net package of standard library for a +// description of the network and address parameters. +func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. 
+func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + +// Dial connects to the provided address on the provided network. +// +// Unlike DialContext, it returns a raw transport connection instead +// of a forward proxy connection. +// +// Deprecated: Use DialContext or DialWithConn instead. +func (d *Dialer) Dial(network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not 
implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) 
+ // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go new file mode 100644 index 000000000..811c2e4e9 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/dial.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +// A ContextDialer dials using a context. +type ContextDialer interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. +// +// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. +// +// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer +// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. +// +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. 
+func Dial(ctx context.Context, network, address string) (net.Conn, error) { + d := FromEnvironment() + if xd, ok := d.(ContextDialer); ok { + return xd.DialContext(ctx, network, address) + } + return dialContext(ctx, d, network, address) +} + +// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { + var ( + conn net.Conn + done = make(chan struct{}, 1) + err error + ) + go func() { + conn, err = d.Dial(network, address) + close(done) + if conn != nil && ctx.Err() != nil { + conn.Close() + } + }() + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + } + return conn, err +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 000000000..3d66bdef9 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,31 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +type direct struct{} + +// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. +var Direct = direct{} + +var ( + _ Dialer = Direct + _ ContextDialer = Direct +) + +// Dial directly invokes net.Dial with the supplied parameters. +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. 
+func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 000000000..573fe79e8 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +// DialContext connects to the address addr on the given network through either +// defaultDialer or bypass. 
+func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + d := p.dialerForRequest(host) + if x, ok := d.(ContextDialer); ok { + return x.DialContext(ctx, network, addr) + } + return dialContext(ctx, d, network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. 
A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 000000000..9ff4b9a77 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,149 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" + "sync" +) + +// A Dialer is a means to establish a connection. +// Custom dialers should also implement ContextDialer. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. 
+type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy-related +// variables in the environment and makes underlying connections +// directly. +func FromEnvironment() Dialer { + return FromEnvironmentUsing(Direct) +} + +// FromEnvironmentUsing returns the dialer specify by the proxy-related +// variables in the environment and makes underlying connections +// using the provided forwarding Dialer (for instance, a *net.Dialer +// with desired configuration). +func FromEnvironmentUsing(forward Dialer) Dialer { + allProxy := allProxyEnv.Get() + if len(allProxy) == 0 { + return forward + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return forward + } + proxy, err := FromURL(proxyURL, forward) + if err != nil { + return forward + } + + noProxy := noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, forward) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. 
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5", "socks5h": + addr := u.Hostname() + port := u.Port() + if port == "" { + port = "1080" + } + return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 000000000..c91651f96 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,42 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "context" + "net" + + "golang.org/x/net/internal/socks" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + if f, ok := forward.(ContextDialer); ok { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return f.DialContext(ctx, network, address) + } + } else { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return dialContext(ctx, forward, network, address) + } + } + } + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, + } + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = up.Authenticate + } + return d, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index 2f078f73a..fbcefb474 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -5,6 +5,7 @@ package externalaccount import ( + "bytes" "context" "crypto/hmac" "crypto/sha256" @@ -127,7 +128,7 @@ func canonicalHeaders(req *http.Request) (string, string) { } sort.Strings(headers) - var fullHeaders strings.Builder + var fullHeaders bytes.Buffer for _, header := range headers { headerValue := strings.Join(lowerCaseHeaders[header], ",") fullHeaders.WriteString(header) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 086d69411..007358af8 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -405,10 +405,11 @@ 
includes_SunOS=' #include #include #include +#include #include -#include #include #include +#include ' @@ -499,10 +500,10 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || - $2 ~ /^ICMP(V6)?_FILTER$/ || + $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || $2 == "SOMAXCONN" || $2 == "NAME_MAX" || $2 == "IFNAMSIZ" || diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 0326a6b3a..3df99f285 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1022,6 +1022,15 @@ const ( MAP_RESERVED0100 = 0x100 MAP_SHARED = 0x1 MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ACLS = 0x8000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 3b1b9287b..35de419c6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -974,6 +974,10 @@ const ( HUGETLBFS_MAGIC = 0x958458f6 IBSHIFT = 0x10 ICMPV6_FILTER = 0x1 + ICMPV6_FILTER_BLOCK = 0x1 + ICMPV6_FILTER_BLOCKOTHERS = 0x3 + ICMPV6_FILTER_PASS = 0x2 + ICMPV6_FILTER_PASSONLY = 0x4 ICMP_FILTER = 0x1 ICRNL = 0x100 IFA_F_DADFAILED = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 65fb2c5cd..1afee6a08 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -366,6 +366,7 @@ const ( HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 + ICMP6_FILTER = 0x1 ICRNL = 0x100 IEXTEN = 0x8000 IFF_ADDRCONF = 0x80000 @@ -612,6 +613,7 @@ const ( IP_RECVPKTINFO = 0x1a IP_RECVRETOPTS = 0x6 IP_RECVSLLA = 0xa + IP_RECVTOS = 0xc IP_RECVTTL = 0xb IP_RETOPTS = 0x8 IP_REUSEADDR = 0x104 @@ -704,6 +706,7 @@ const ( O_APPEND = 0x8 O_CLOEXEC = 0x800000 O_CREAT = 0x100 + O_DIRECT = 0x2000000 O_DIRECTORY = 0x1000000 O_DSYNC = 0x40 O_EXCL = 0x400 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index 4117ce08a..c8c790903 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -137,6 +137,7 @@ const ( IP_TTL = 3 IP_UNBLOCK_SOURCE = 11 ICANON = 0x0010 + ICMP6_FILTER = 0x26 ICRNL = 0x0002 IEXTEN = 0x0020 IGNBRK = 0x0004 diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 0e428ecbb..111c10d3a 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -1334,7 +1334,11 @@ func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURIT } func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { - sdLen := (int)(selfRelativeSD.Length()) + sdLen := int(selfRelativeSD.Length()) + const min = int(unsafe.Sizeof(SECURITY_DESCRIPTOR{})) + if sdLen < min { + sdLen = min + } var src []byte h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) @@ -1342,7 +1346,15 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() h.Len = sdLen h.Cap = sdLen - dst := make([]byte, sdLen) + const psize = int(unsafe.Sizeof(uintptr(0))) + + var dst []byte + h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) + alloc := 
make([]uintptr, (sdLen+psize-1)/psize) + h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data + h.Len = sdLen + h.Cap = sdLen + copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index e4c62289f..8a7392c4a 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.10 // +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index 02b9e1e9d..bb0a92001 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.10 // +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index d8c94e1bd..42fa8d72c 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 16b11db53..56a0e1ea2 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index 647f2d427..baacf32b4 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index c937d0976..f248effae 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index 0ca0193eb..f517fdb20 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index 26fbd55a1..f5a078827 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index 2c58f09ba..cb7239c43 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 7e1ae096e..11b273300 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 9ea1b4214..96a130d30 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 942906929..0175eae50 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build !go1.10 // +build !go1.10 package norm diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 50799301f..27e393d94 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -2400,6 +2400,11 @@ type BucketAccessControlsDeleteCall struct { // Delete: Permanently deletes the ACL entry for the specified entity on // the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -2449,7 +2454,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2538,6 +2543,11 @@ type BucketAccessControlsGetCall struct { // Get: Returns the ACL entry for the specified entity on the specified // bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -2597,7 +2607,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header { func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2715,6 +2725,8 @@ type BucketAccessControlsInsertCall struct { } // Insert: Creates a new ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -2764,7 +2776,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2879,6 +2891,8 @@ type BucketAccessControlsListCall struct { } // List: Retrieves ACL entries on the specified bucket. +// +// - bucket: Name of a bucket. 
func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -2937,7 +2951,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header { func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3048,6 +3062,11 @@ type BucketAccessControlsPatchCall struct { } // Patch: Patches an ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3098,7 +3117,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3222,6 +3241,11 @@ type BucketAccessControlsUpdateCall struct { } // Update: Updates an ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3272,7 +3296,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3394,6 +3418,8 @@ type BucketsDeleteCall struct { } // Delete: Permanently deletes an empty bucket. +// +// - bucket: Name of a bucket. func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3458,7 +3484,7 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3550,6 +3576,8 @@ type BucketsGetCall struct { } // Get: Returns metadata for the specified bucket. +// +// - bucket: Name of a bucket. 
func (r *BucketsService) Get(bucket string) *BucketsGetCall { c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3637,7 +3665,7 @@ func (c *BucketsGetCall) Header() http.Header { func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3775,6 +3803,8 @@ type BucketsGetIamPolicyCall struct { } // GetIamPolicy: Returns an IAM policy for the specified bucket. +// +// - bucket: Name of a bucket. func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3843,7 +3873,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header { func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3959,6 +3989,8 @@ type BucketsInsertCall struct { } // Insert: Creates a new bucket. +// +// - project: A valid API project identifier. 
func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.urlParams_.Set("project", projectid) @@ -4060,7 +4092,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4225,6 +4257,8 @@ type BucketsListCall struct { } // List: Retrieves a list of buckets for a given project. +// +// - project: A valid API project identifier. func (r *BucketsService) List(projectid string) *BucketsListCall { c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.urlParams_.Set("project", projectid) @@ -4317,7 +4351,7 @@ func (c *BucketsListCall) Header() http.Header { func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4478,6 +4512,10 @@ type BucketsLockRetentionPolicyCall struct { } // LockRetentionPolicy: Locks retention policy on a bucket. +// +// - bucket: Name of a bucket. +// - ifMetagenerationMatch: Makes the operation conditional on whether +// bucket's current metageneration matches the given value. 
func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -4527,7 +4565,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4645,6 +4683,8 @@ type BucketsPatchCall struct { // Patch: Patches a bucket. Changes to the bucket will be readable // immediately after writing, but configuration changes may take time to // propagate. +// +// - bucket: Name of a bucket. func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -4762,7 +4802,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4942,6 +4982,8 @@ type BucketsSetIamPolicyCall struct { } // SetIamPolicy: Updates an IAM policy for the specified bucket. +// +// - bucket: Name of a bucket. 
func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -4991,7 +5033,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5107,6 +5149,9 @@ type BucketsTestIamPermissionsCall struct { // TestIamPermissions: Tests a set of permissions on the given bucket to // see which, if any, are held by the caller. +// +// - bucket: Name of a bucket. +// - permissions: Permissions to test. func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall { c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -5166,7 +5211,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header { func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5289,6 +5334,8 @@ type BucketsUpdateCall struct { // Update: Updates a bucket. Changes to the bucket will be readable // immediately after writing, but configuration changes may take time to // propagate. +// +// - bucket: Name of a bucket. 
func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -5406,7 +5453,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5618,7 +5665,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5686,6 +5733,11 @@ type DefaultObjectAccessControlsDeleteCall struct { // Delete: Permanently deletes the default object ACL entry for the // specified entity on the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -5735,7 +5787,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5824,6 +5876,11 @@ type DefaultObjectAccessControlsGetCall struct { // Get: Returns the default object ACL entry for the specified entity on // the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -5883,7 +5940,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6002,6 +6059,8 @@ type DefaultObjectAccessControlsInsertCall struct { // Insert: Creates a new default object ACL entry on the specified // bucket. +// +// - bucket: Name of a bucket. 
func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6051,7 +6110,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6166,6 +6225,8 @@ type DefaultObjectAccessControlsListCall struct { } // List: Retrieves default object ACL entries on the specified bucket. +// +// - bucket: Name of a bucket. func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6241,7 +6302,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header { func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6364,6 +6425,11 @@ type DefaultObjectAccessControlsPatchCall struct { } // Patch: Patches a default object ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6414,7 +6480,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6538,6 +6604,11 @@ type DefaultObjectAccessControlsUpdateCall struct { } // Update: Updates a default object ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6588,7 +6659,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6711,6 +6782,9 @@ type NotificationsDeleteCall struct { } // Delete: Permanently deletes a notification subscription. +// +// - bucket: The parent bucket of the notification. 
+// - notification: ID of the notification to delete. func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6760,7 +6834,7 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6849,6 +6923,9 @@ type NotificationsGetCall struct { } // Get: View a notification configuration. +// +// - bucket: The parent bucket of the notification. +// - notification: Notification ID. func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6908,7 +6985,7 @@ func (c *NotificationsGetCall) Header() http.Header { func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7029,6 +7106,8 @@ type NotificationsInsertCall struct { } // Insert: Creates a notification subscription for a given bucket. +// +// - bucket: The parent bucket of the notification. 
func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7078,7 +7157,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7195,6 +7274,8 @@ type NotificationsListCall struct { // List: Retrieves a list of notification subscriptions for a given // bucket. +// +// - bucket: Name of a Google Cloud Storage bucket. func (r *NotificationsService) List(bucket string) *NotificationsListCall { c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7253,7 +7334,7 @@ func (c *NotificationsListCall) Header() http.Header { func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7368,6 +7449,13 @@ type ObjectAccessControlsDeleteCall struct { // Delete: Permanently deletes the ACL entry for the specified entity on // the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. 
func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7426,7 +7514,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7530,6 +7618,13 @@ type ObjectAccessControlsGetCall struct { // Get: Returns the ACL entry for the specified entity on the specified // object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7598,7 +7693,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header { func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7731,6 +7826,10 @@ type ObjectAccessControlsInsertCall struct { } // Insert: Creates a new ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - object: Name of the object. 
For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7789,7 +7888,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7919,6 +8018,10 @@ type ObjectAccessControlsListCall struct { } // List: Retrieves ACL entries on the specified object. +// +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7986,7 +8089,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header { func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8112,6 +8215,13 @@ type ObjectAccessControlsPatchCall struct { } // Patch: Patches an ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. 
Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8171,7 +8281,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8310,6 +8420,13 @@ type ObjectAccessControlsUpdateCall struct { } // Update: Updates an ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. 
func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8369,7 +8486,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8508,6 +8625,12 @@ type ObjectsComposeCall struct { // Compose: Concatenates a list of existing objects into a new object in // the same bucket. +// +// - destinationBucket: Name of the bucket containing the source +// objects. The destination object is stored in this bucket. +// - destinationObject: Name of the new object. For information about +// how to URL encode object names to be path safe, see Encoding URI +// Path Parts. func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.destinationBucket = destinationBucket @@ -8606,7 +8729,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8772,6 +8895,19 @@ type ObjectsCopyCall struct { // Copy: Copies a source object to a destination object. Optionally // overrides metadata. 
+// +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any.For information about how to URL encode object names to be path +// safe, see Encoding URI Path Parts. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts. func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -8949,7 +9085,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9184,6 +9320,10 @@ type ObjectsDeleteCall struct { // Delete: Deletes an object and its metadata. Deletions are permanent // if versioning is not enabled for the bucket, or if the generation // parameter is used. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. 
func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9277,7 +9417,7 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9396,6 +9536,10 @@ type ObjectsGetCall struct { } // Get: Retrieves an object or its metadata. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9510,7 +9654,7 @@ func (c *ObjectsGetCall) Header() http.Header { func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9693,6 +9837,10 @@ type ObjectsGetIamPolicyCall struct { } // GetIamPolicy: Returns an IAM policy for the specified object. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. 
func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9760,7 +9908,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header { func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9888,6 +10036,9 @@ type ObjectsInsertCall struct { } // Insert: Stores a new object and metadata. +// +// - bucket: Name of the bucket in which to store the new object. +// Overrides the provided object metadata's bucket value, if any. func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10077,7 +10228,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10310,6 +10461,8 @@ type ObjectsListCall struct { } // List: Retrieves a list of objects matching the criteria. +// +// - bucket: Name of the bucket in which to look for objects. 
func (r *ObjectsService) List(bucket string) *ObjectsListCall { c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10450,7 +10603,7 @@ func (c *ObjectsListCall) Header() http.Header { func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10642,6 +10795,10 @@ type ObjectsPatchCall struct { } // Patch: Patches an object's metadata. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10767,7 +10924,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10958,6 +11115,19 @@ type ObjectsRewriteCall struct { // Rewrite: Rewrites a source object to a destination object. Optionally // overrides metadata. +// +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. 
For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts. +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts. func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -11159,7 +11329,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11404,6 +11574,10 @@ type ObjectsSetIamPolicyCall struct { } // SetIamPolicy: Updates an IAM policy for the specified object. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. 
func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11462,7 +11636,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11594,6 +11768,11 @@ type ObjectsTestIamPermissionsCall struct { // TestIamPermissions: Tests a set of permissions on the given object to // see which, if any, are held by the caller. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. +// - permissions: Permissions to test. func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11662,7 +11841,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header { func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11798,6 +11977,10 @@ type ObjectsUpdateCall struct { } // Update: Updates an object's metadata. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. 
For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11923,7 +12106,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12110,6 +12293,8 @@ type ObjectsWatchAllCall struct { } // WatchAll: Watch for changes on all objects in a bucket. +// +// - bucket: Name of the bucket in which to look for objects. func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -12241,7 +12426,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12416,6 +12601,9 @@ type ProjectsHmacKeysCreateCall struct { } // Create: Creates a new HMAC key for the specified service account. +// +// - projectId: Project ID owning the service account. +// - serviceAccountEmail: Email address of the service account. 
func (r *ProjectsHmacKeysService) Create(projectId string, serviceAccountEmail string) *ProjectsHmacKeysCreateCall { c := &ProjectsHmacKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12457,7 +12645,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12566,6 +12754,9 @@ type ProjectsHmacKeysDeleteCall struct { } // Delete: Deletes an HMAC key. +// +// - accessId: Name of the HMAC key to be deleted. +// - projectId: Project ID owning the requested key. func (r *ProjectsHmacKeysService) Delete(projectId string, accessId string) *ProjectsHmacKeysDeleteCall { c := &ProjectsHmacKeysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12607,7 +12798,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12691,6 +12882,10 @@ type ProjectsHmacKeysGetCall struct { } // Get: Retrieves an HMAC key's metadata +// +// - accessId: Name of the HMAC key. +// - projectId: Project ID owning the service account of the requested +// key. 
func (r *ProjectsHmacKeysService) Get(projectId string, accessId string) *ProjectsHmacKeysGetCall { c := &ProjectsHmacKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12742,7 +12937,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header { func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12857,6 +13052,8 @@ type ProjectsHmacKeysListCall struct { } // List: Retrieves a list of HMAC keys matching the criteria. +// +// - projectId: Name of the project in which to look for HMAC keys. func (r *ProjectsHmacKeysService) List(projectId string) *ProjectsHmacKeysListCall { c := &ProjectsHmacKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12942,7 +13139,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header { func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13095,6 +13292,10 @@ type ProjectsHmacKeysUpdateCall struct { // Update: Updates the state of an HMAC key. See the HMAC Key resource // descriptor for valid states. +// +// - accessId: Name of the HMAC key being updated. +// - projectId: Project ID owning the service account of the updated +// key. 
func (r *ProjectsHmacKeysService) Update(projectId string, accessId string, hmackeymetadata *HmacKeyMetadata) *ProjectsHmacKeysUpdateCall { c := &ProjectsHmacKeysUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -13137,7 +13338,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13256,6 +13457,8 @@ type ProjectsServiceAccountGetCall struct { // Get: Get the email address of this project's Google Cloud Storage // service account. +// +// - projectId: Project ID. func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall { c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -13314,7 +13517,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 788759bde..ab531f4c0 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -101,6 +101,9 @@ type SubConn interface { // a new connection will be created. // // This will trigger a state transition for the SubConn. 
+ // + // Deprecated: This method is now part of the ClientConn interface and will + // eventually be removed from here. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -143,6 +146,13 @@ type ClientConn interface { // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has // changed. diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index e0d34288c..c883efa0b 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" + "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -41,7 +42,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]balancer.SubConn), + subConns: make(map[resolver.Address]subConnInfo), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -57,6 +58,11 @@ func (bb *baseBuilder) Name() string { return bb.name } +type subConnInfo struct { + subConn balancer.SubConn + attrs *attributes.Attributes +} + type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder @@ -64,7 +70,7 @@ type baseBalancer struct { csEvltr 
*balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]balancer.SubConn // `attributes` is stripped from the keys of this map (the addresses) + subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses) scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -114,7 +120,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { aNoAttrs := a aNoAttrs.Attributes = nil addrsSet[aNoAttrs] = struct{}{} - if sc, ok := b.subConns[aNoAttrs]; !ok { + if scInfo, ok := b.subConns[aNoAttrs]; !ok { // a is a new address (not existing in b.subConns). // // When creating SubConn, the original address with attributes is @@ -125,7 +131,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[aNoAttrs] = sc + b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} b.scStates[sc] = connectivity.Idle sc.Connect() } else { @@ -135,13 +141,15 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // The SubConn does a reflect.DeepEqual of the new and old // addresses. So this is a noop if the current address is the same // as the old one (including attributes). - sc.UpdateAddresses([]resolver.Address{a}) + scInfo.attrs = a.Attributes + b.subConns[aNoAttrs] = scInfo + b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a}) } } - for a, sc := range b.subConns { + for a, scInfo := range b.subConns { // a was removed by resolver. if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(sc) + b.cc.RemoveSubConn(scInfo.subConn) delete(b.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. 
@@ -184,9 +192,10 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[sc] = SubConnInfo{Address: addr} + for addr, scInfo := range b.subConns { + if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready { + addr.Attributes = scInfo.attrs + readySCs[scInfo.subConn] = SubConnInfo{Address: addr} } } b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 11e592aab..41061d6d3 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -163,6 +163,14 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.mu.Lock() defer ccb.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index cab74e557..b177cfa66 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -4,7 +4,7 @@ go 1.11 require ( github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 - github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.4.2 github.com/google/go-cmp v0.5.0 diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 
77ee70b44..bb25cd491 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -12,8 +12,8 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad h1:EmNYJhPYy0pOFjCx2PrgtaBXmee0iUX9hLlxE1xHOJE= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index e69900400..5e7f36703 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -24,6 +24,7 @@ import ( "sync" "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -51,6 +52,74 @@ type RPCConfig struct { Context context.Context MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC OnCommitted 
func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. 
It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is unimplementable; do not use. 
+type ServerInterceptor interface { + notDefined() } type csKeyType string diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 8902b7f90..d5bbe720d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -414,6 +414,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { buf: newRecvBuffer(), headerChan: make(chan struct{}), contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -832,6 +833,9 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) // This will unblock write. close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } } // Close kicks off the shutdown process of the transport. 
This should be called diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 0cf1cc320..7c6c89d4f 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -26,6 +26,7 @@ import ( "io" "math" "net" + "net/http" "strconv" "sync" "sync/atomic" @@ -402,6 +403,20 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return true } t.maxStreamID = streamID + if state.data.httpMethod != http.MethodPost { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", state.data.httpMethod) + } + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + s.cancel() + return false + } t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 7e41d1183..c7dee140c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -111,6 +111,7 @@ type parsedHeaderData struct { timeoutSet bool timeout time.Duration method string + httpMethod string // key-value metadata map from the peer. 
mdata map[string][]string statsTags []byte @@ -363,6 +364,8 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { } d.data.statsTrace = v d.addMetadata(f.Name, string(v)) + case ":method": + d.data.httpMethod = f.Value default: if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { break diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 9c8f79cb4..5cf7c5f80 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -241,6 +241,7 @@ type Stream struct { ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. ctxDone <-chan struct{} // same as done chan but for server side. 
Cache of ctx.Done() (for performance) method string // the associated RPC method of the stream recvCompress string @@ -611,6 +612,8 @@ type CallHdr struct { ContentSubtype string PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished } // ClientTransport is the common interface for all gRPC client-side transport diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 56e33f6c7..b858c2a5e 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -84,7 +84,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) b.sc.Connect() } else { - b.sc.UpdateAddresses(cs.ResolverState.Addresses) + b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) b.sc.Connect() } return nil diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index eda1248d6..77d25742c 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -166,7 +166,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - c := defaultCallInfo() // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. 
if err := cc.waitForResolvedAddrs(ctx); err != nil { @@ -175,18 +174,40 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - rpcConfig, err := cc.safeConfigSelector.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: method}) - if err != nil { - return nil, status.Convert(err).Err() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + return nil, toRPCErr(err) + } + if rpcConfig != nil { if rpcConfig.Context != nil { ctx = rpcConfig.Context } mc = rpcConfig.MethodConfig onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } } + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady } @@ -223,6 +244,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth Host: cc.authority, Method: method, ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, } // Set our outgoing compression according to the UseCompressor CallOption, if diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 51024d6b3..c3b87eb5a 100644 --- a/vendor/google.golang.org/grpc/version.go +++ 
b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.36.0" +const Version = "1.37.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index b41df6dc8..dcd939bb3 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -28,7 +28,8 @@ cleanup() { } trap cleanup EXIT -PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" +PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +go version if [[ "$1" = "-install" ]]; then # Check for module support @@ -107,7 +108,7 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go # - gofmt, goimports, golint (with exceptions for generated code), go vet. gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" -golint ./... 2>&1 | not grep -vE "\.pb\.go:" +golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" go vet -all ./... misspell -error . @@ -141,8 +142,11 @@ not grep -Fv '.CredsBundle .NewAddress .NewServiceConfig .Type is deprecated: use Attributes +BuildVersion is deprecated balancer.ErrTransientFailure balancer.Picker +extDesc.Filename is deprecated +github.com/golang/protobuf/jsonpb is deprecated grpc.CallCustomCodec grpc.Code grpc.Compressor @@ -164,13 +168,7 @@ grpc.WithServiceConfig grpc.WithTimeout http.CloseNotifier info.SecurityVersion -resolver.Backend -resolver.GRPCLB -extDesc.Filename is deprecated -BuildVersion is deprecated -github.com/golang/protobuf/jsonpb is deprecated proto is deprecated -xxx_messageInfo_ proto.InternalMessageInfo is deprecated proto.EnumName is deprecated proto.ErrInternalBadWireType is deprecated @@ -184,7 +182,12 @@ proto.RegisterExtension is deprecated proto.RegisteredExtension is deprecated proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated' "${SC_OUT}" +proto.Unmarshaler is deprecated +resolver.Backend +resolver.GRPCLB +Target is deprecated: Use the Target 
field in the BuildOptions instead. +xxx_messageInfo_ +' "${SC_OUT}" # - special golint on package comments. lint_package_comment_per_package() { diff --git a/vendor/modules.txt b/vendor/modules.txt index 5ec064c12..c94b9cd46 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,5 +1,4 @@ -# cloud.google.com/go v0.80.0 -## explicit +# cloud.google.com/go v0.81.0 cloud.google.com/go cloud.google.com/go/compute/metadata cloud.google.com/go/iam @@ -27,7 +26,7 @@ github.com/VictoriaMetrics/metricsql github.com/VictoriaMetrics/metricsql/binaryop # github.com/VividCortex/ewma v1.1.1 github.com/VividCortex/ewma -# github.com/aws/aws-sdk-go v1.38.5 +# github.com/aws/aws-sdk-go v1.38.15 ## explicit github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -101,9 +100,10 @@ github.com/go-kit/kit/log github.com/go-kit/kit/log/level # github.com/go-logfmt/logfmt v0.5.0 github.com/go-logfmt/logfmt -# github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e +# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da +## explicit github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.5.1 +# github.com/golang/protobuf v1.5.2 github.com/golang/protobuf/internal/gengogrpc github.com/golang/protobuf/proto github.com/golang/protobuf/protoc-gen-go @@ -141,7 +141,7 @@ github.com/klauspost/compress/zstd/internal/xxhash github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.12 github.com/mattn/go-isatty -# github.com/mattn/go-runewidth v0.0.10 +# github.com/mattn/go-runewidth v0.0.12 ## explicit github.com/mattn/go-runewidth # github.com/matttproud/golang_protobuf_extensions v1.0.1 @@ -239,7 +239,7 @@ golang.org/x/lint/golint ## explicit golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20210324205630-d1beb07c2056 +# golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 ## explicit golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -247,9 +247,11 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2 
golang.org/x/net/http2/hpack golang.org/x/net/idna +golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries +golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558 +# golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 ## explicit golang.org/x/oauth2 golang.org/x/oauth2/google @@ -259,13 +261,14 @@ golang.org/x/oauth2/jws golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sync/errgroup -# golang.org/x/sys v0.0.0-20210324051608-47abb6519492 +# golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 ## explicit golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.3.5 +# golang.org/x/text v0.3.6 +## explicit golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi @@ -286,7 +289,7 @@ golang.org/x/tools/internal/imports # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.43.0 +# google.golang.org/api v0.44.0 ## explicit google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -313,14 +316,15 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20210325141258-5636347f2b14 +# google.golang.org/genproto v0.0.0-20210406143921-e86de6bf7a46 ## explicit google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/iam/v1 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr -# google.golang.org/grpc v1.36.0 +# google.golang.org/grpc v1.37.0 +## explicit google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff