diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 812d26f60..000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,172 +0,0 @@ -# CHANGELOG -  -# tip -  -* FEATURE: vmagent: add `-promscrape.dropOriginalLabels` command-line option, which can be used for reducing memory usage when scraping a big number of targets. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-724308361 for details. - - -# [v1.46.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.46.0) - -* FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label//values` when `start` and `end` args are set. -* FEATURE: reduce memory usage when a query touches a big number of time series. -* FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers a big number of scrape targets (e.g. hundreds of thousands) and the majority of these targets (99%) - are dropped during relabeling. Previously labels for all the dropped targets were displayed at the `/api/v1/targets` page. Now only up to `-promscrape.maxDroppedTargets` such - targets are displayed. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/878 for details. -* FEATURE: vmagent: reduce memory usage when scraping a big number of targets with a big number of temporary labels starting with `__`. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 -* FEATURE: vmagent: add `/ready` HTTP endpoint, which returns 200 OK status code when all the service discovery has been initialized. - This may be useful during rolling upgrades. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/875 - -* BUGFIX: vmagent: eliminate data race when the `-promscrape.streamParse` command-line flag is set. Previously this mode could result in scraped metrics with garbage labels. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-723198247 for details. -* BUGFIX: properly calculate `topk_*` and `bottomk_*` functions from [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) for time series with gaps. - See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/883 - - -# [v1.45.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.45.0) - -* FEATURE: allow setting `-retentionPeriod` smaller than one month. I.e. `-retentionPeriod=3d`, `-retentionPeriod=2w`, etc. are supported now. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/173 -* FEATURE: optimize more cases according to https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization . Now the following cases are optimized too: - * `rollup_func(foo{filters}[d]) op bar` -> `rollup_func(foo{filters}[d]) op bar{filters}` - * `transform_func(foo{filters}) op bar` -> `transform_func(foo{filters}) op bar{filters}` - * `num_or_scalar op foo{filters} op bar` -> `num_or_scalar op foo{filters} op bar{filters}` -* FEATURE: improve time series search for queries with multiple label filters. I.e. `foo{label1="value", label2=~"regexp"}`. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/781 -* FEATURE: vmagent: add `stream parse` mode. This mode allows reducing memory usage when individual scrape targets expose tens of millions of metrics. - For example, when scraping Prometheus in [federation](https://prometheus.io/docs/prometheus/latest/federation/) mode. - See `-promscrape.streamParse` command-line option and `stream_parse: true` config option for `scrape_config` section in `-promscrape.config`. 
- See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 and [troubleshooting docs for vmagent](https://victoriametrics.github.io/vmagent.html#troubleshooting). -* FEATURE: vmalert: add `-dryRun` command-line option for validating the provided config files without the need to start the `vmalert` service. -* FEATURE: accept an optional third argument of string type in `topk_*` and `bottomk_*` functions. This is the label name for an additional time series to return with the sum of time series outside the top/bottom K. See [MetricsQL docs](https://victoriametrics.github.io/MetricsQL.html) for more details. -* FEATURE: vmagent: expose `/api/v1/targets` page according to [the corresponding Prometheus API](https://prometheus.io/docs/prometheus/latest/querying/api/#targets). - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/643 - -* BUGFIX: vmagent: properly handle OpenStack endpoint ending with `v3.0` such as `https://ostack.example.com:5000/v3.0` - in the same way as Prometheus does. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/728#issuecomment-709914803 -* BUGFIX: drop trailing data points for time series with a single raw sample. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/748 -* BUGFIX: do not drop trailing data points for instant queries to `/api/v1/query`. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/845 -* BUGFIX: vmbackup: fix panic when `-origin` isn't specified. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/856 -* BUGFIX: vmalert: skip automatically added labels on alerts restore. Label `alertgroup` was introduced in [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/611) - and automatically added to generated time series. By mistake, this new label wasn't correctly purged on the restore event and affected the alert's ID uniqueness. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/870 -* BUGFIX: vmagent: fix panic at scrape error body formatting. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/864 -* BUGFIX: vmagent: add missing leading slash to metrics path like Prometheus does. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/835 -* BUGFIX: vmagent: drop packet if remote storage returns 4xx status code. This makes the behaviour consistent with Prometheus. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/873 -* BUGFIX: vmagent: properly handle 301 redirects. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/869 - - -# [v1.44.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.44.0) - -* FEATURE: automatically add missing label filters to binary operands as described at https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization . - This should improve performance for queries with missing label filters in binary operands. For example, the following query should work faster now, because it shouldn't - fetch and discard time series for `node_filesystem_files_free` metric without matching labels for the left side of the expression: - ``` - node_filesystem_files{ host="$host", mountpoint="/" } - node_filesystem_files_free - ``` -* FEATURE: vmagent: add Docker Swarm service discovery (aka [dockerswarm_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config)). - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/656 -* FEATURE: add ability to export data in CSV format. 
See [these docs](https://victoriametrics.github.io/#how-to-export-csv-data) for details. -* FEATURE: vmagent: add `-promscrape.suppressDuplicateScrapeTargetErrors` command-line flag for suppressing `duplicate scrape target` errors. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/651 and https://victoriametrics.github.io/vmagent.html#troubleshooting . -* FEATURE: vmagent: show original labels before relabeling is applied on `duplicate scrape target` errors. This should simplify debugging for incorrect relabeling. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/651 -* FEATURE: vmagent: `/targets` page now accepts optional `show_original_labels=1` query arg for displaying original labels for each target before relabeling is applied. - This should simplify debugging for target relabeling configs. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/651 -* FEATURE: add `-finalMergeDelay` command-line flag for configuring the delay before final merge for per-month partitions. - The final merge is started after no new data is ingested into the per-month partition during `-finalMergeDelay`. -* FEATURE: add `vm_rows_added_to_storage_total` metric, which shows the total number of rows added to storage since app start. - The `sum(rate(vm_rows_added_to_storage_total))` can be smaller than `sum(rate(vm_rows_inserted_total))` if certain metrics are dropped - due to [relabeling](https://victoriametrics.github.io/#relabeling). The `sum(rate(vm_rows_added_to_storage_total))` can be bigger - than `sum(rate(vm_rows_inserted_total))` if [replication](https://victoriametrics.github.io/Cluster-VictoriaMetrics.html#replication-and-data-safety) is enabled. -* FEATURE: keep metric name after applying [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) functions, which don't change the time series meaning. - The list of such functions: - * `keep_last_value` - * `keep_next_value` - * `interpolate` - * `running_min` - * `running_max` - * `running_avg` - * `range_min` - * `range_max` - * `range_avg` - * `range_first` - * `range_last` - * `range_quantile` - * `smooth_exponential` - * `ceil` - * `floor` - * `round` - * `clamp_min` - * `clamp_max` - * `max_over_time` - * `min_over_time` - * `avg_over_time` - * `quantile_over_time` - * `mode_over_time` - * `geomean_over_time` - * `holt_winters` - * `predict_linear` - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/674 - -* BUGFIX: properly handle stale time series after K8S deployment. Previously such time series could be double-counted. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/748 -* BUGFIX: return at most a single time series from the `absent()` function like Prometheus does. -* BUGFIX: vmalert: accept days, weeks and years in `for: ` part of config like Prometheus does. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/817 -* BUGFIX: fix `mode_over_time(m[d])` calculations. Previously the function could return incorrect results. - - -# [v1.43.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.43.0) - -* FEATURE: reduce CPU usage for repeated queries over a sliding time window when no new time series are added to the database. - Typical use cases: repeated evaluation of alerting rules in [vmalert](https://victoriametrics.github.io/vmalert.html) or dashboard auto-refresh in Grafana. -* FEATURE: vmagent: add OpenStack service discovery aka [openstack_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config). 
- See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/728 . -* FEATURE: vmalert: make `-maxIdleConnections` configurable for datasource HTTP client. This option can be used for minimizing connection churn. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/795 . -* FEATURE: add `-influx.maxLineSize` command-line flag for configuring the maximum size for a single Influx line during parsing. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/807 - -* BUGFIX: properly handle `inf` values during [background merge of LSM parts](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282). - Previously `Inf` values could result in `NaN` values for adjacent samples in time series. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/805 . -* BUGFIX: fill gaps on graphs for `range_*` and `running_*` functions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/806 . -* BUGFIX: make a copy of the label with the new name during relabeling with `action: labelmap` in the same way as Prometheus does. - Previously the original label name was replaced. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/812 . -* BUGFIX: support parsing floating-point timestamps like Graphite Carbon does. Such timestamps are truncated to seconds. - - -# [v1.42.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0) - -* FEATURE: use all the available CPU cores when accepting data via a single TCP connection - for [all the supported protocols](https://victoriametrics.github.io/#how-to-import-time-series-data). - Previously data ingested via a single TCP connection could use only a single CPU core. This could limit data ingestion performance. - The main benefit of this feature is that data can be imported at max speed via a single connection - there is no need to open multiple concurrent - connections to VictoriaMetrics or [vmagent](https://victoriametrics.github.io/vmagent.html) in order to achieve the maximum data ingestion speed. -* FEATURE: cluster: improve performance for data ingestion path from `vminsert` to `vmstorage` nodes. The maximum data ingestion performance - for a single connection between `vminsert` and `vmstorage` node scales with the number of available CPU cores on `vmstorage` side. - This should help with https://github.com/VictoriaMetrics/VictoriaMetrics/issues/791 . -* FEATURE: add ability to export / import data in native format via `/api/v1/export/native` and `/api/v1/import/native`. - This is the most optimized approach for data migration between VictoriaMetrics instances. Both single-node and cluster instances are supported. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/787#issuecomment-700632551 . -* FEATURE: add `reduce_mem_usage` query option to `/api/v1/export` in order to reduce memory usage during data export / import. - See [these docs](https://victoriametrics.github.io/#how-to-export-data-in-json-line-format) for details. -* FEATURE: improve performance for `/api/v1/series` handler when it returns a big number of time series. -* FEATURE: add `vm_merge_need_free_disk_space` metric, which can be used for estimating the number of deferred background data merges due to the lack of free disk space. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/686 . -* FEATURE: add OpenBSD support. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/785 . 
- -* BUGFIX: properly apply `-search.maxStalenessInterval` command-line flag value. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/784 . -* BUGFIX: fix displaying data in Grafana tables. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/720 . -* BUGFIX: do not adjust the number of detected CPU cores found at `/sys/devices/system/cpu/online`. - The adjustment was increasing the resulting GOMAXPROCS by 1, which looked confusing to users. - See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/685#issuecomment-698595309 . -* BUGFIX: vmagent: do not show `-remoteWrite.url` in initial logs if `-remoteWrite.showURL` isn't set. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/773 . -* BUGFIX: properly handle case when [/metrics/find](https://victoriametrics.github.io/#graphite-metrics-api-usage) finds both a leaf and a node for the given `query=prefix.*`. - In this case only the node must be returned, with the trailing dot stripped from the id, as carbonapi does. - - -# Previous releases - -See [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). diff --git a/Makefile b/Makefile index 946631778..3257e7777 100644 --- a/Makefile +++ b/Makefile @@ -122,8 +122,8 @@ benchmark-pure: GO111MODULE=on CGO_ENABLED=0 go test -mod=vendor -bench=. ./app/... vendor-update: - GO111MODULE=on go get -u ./lib/... - GO111MODULE=on go get -u ./app/... + GO111MODULE=on go get -u -d ./lib/... + GO111MODULE=on go get -u -d ./app/... GO111MODULE=on go mod tidy GO111MODULE=on go mod vendor @@ -156,4 +156,3 @@ docs-sync: cp app/vmbackup/README.md docs/vmbackup.md cp app/vmrestore/README.md docs/vmrestore.md cp README.md docs/Single-server-VictoriaMetrics.md - cp CHANGELOG.md docs/ diff --git a/README.md b/README.md index 3bd12a676..f581651eb 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ ## VictoriaMetrics -VictoriaMetrics is fast, cost-effective and scalable time-series database. +VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. It is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), [docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and @@ -21,11 +21,13 @@ Cluster version is available [here](https://github.com/VictoriaMetrics/VictoriaM See our [Wiki](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki) for additional documentation. [Contact us](mailto:info@victoriametrics.com) if you need paid enterprise support for VictoriaMetrics. -See [features available for enterprise customers](https://github.com/VictoriaMetrics/VictoriaMetrics/issues?q=is%3Aissue+label%3Aenterprise). +See [features available for enterprise customers](https://victoriametrics.com/enterprise.html). ## Case studies and talks +Click on a link in order to read the corresponding case study. + * [Adidas](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/CaseStudies#adidas) * [CERN](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/CaseStudies#cern) * [COLOPL](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/CaseStudies#colopl) @@ -46,8 +48,8 @@ See [features available for enterprise customers](https://github.com/VictoriaMet * VictoriaMetrics can be used as long-term storage for Prometheus or for [vmagent](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmagent/README.md). See [these docs](#prometheus-setup) for details. 
* Supports [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/), so it can be used as a Prometheus drop-in replacement in Grafana. - VictoriaMetrics implements [MetricsQL](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/MetricsQL) query language, which is inspired by PromQL. -* Supports global query view. Multiple Prometheus instances may write data into VictoriaMetrics. Later this data may be used in a single query. + VictoriaMetrics implements [MetricsQL](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/MetricsQL) query language, which is inspired by PromQL. MetricsQL is backwards-compatible with PromQL. +* Supports global query view. Multiple Prometheus instances or any other data sources may write data into VictoriaMetrics. Later this data may be queried via a single query. * High performance and good scalability for both [inserts](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and [selects](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4). [Outperforms InfluxDB and TimescaleDB by up to 20x](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae). @@ -104,7 +106,9 @@ * [How to send data from OpenTSDB-compatible agents](#how-to-send-data-from-opentsdb-compatible-agents) * [Prometheus querying API usage](#prometheus-querying-api-usage) * [Prometheus querying API enhancements](#prometheus-querying-api-enhancements) -* [Graphite Metrics API usage](#graphite-metrics-api-usage) +* [Graphite API usage](#graphite-api-usage) + * [Graphite Metrics API usage](#graphite-metrics-api-usage) + * [Graphite Tags API usage](#graphite-tags-api-usage) * [How to build from sources](#how-to-build-from-sources) * [Development build](#development-build) * [Production build](#production-build) @@ -410,6 +414,7 @@ Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via t * [Prometheus querying API](#prometheus-querying-api-usage) * Metric names can be explored via [Graphite metrics API](#graphite-metrics-api-usage) +* Tags can be explored via [Graphite tags API](#graphite-tags-api-usage) * [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/master/cmd/carbonapi/carbonapi.example.prometheus.yaml) ### How to send data from OpenTSDB-compatible agents @@ -495,7 +500,9 @@ VictoriaMetrics supports the following handlers from [Prometheus querying API](h * [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers) * [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names) * [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values) -* [/api/v1/status/tsdb](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats) +* [/api/v1/status/tsdb](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats). VictoriaMetrics accepts optional `topN=N` and `date=YYYY-MM-DD` + query args for this handler, where `N` is the number of top entries to return in the response and `YYYY-MM-DD` is the date for collecting the stats. + By default the top 10 entries are returned and the stats are collected for the current day. 
* [/api/v1/targets](https://prometheus.io/docs/prometheus/latest/querying/api/#targets) - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter) for more details. These handlers can be queried from Prometheus-compatible clients such as Grafana or curl. @@ -522,7 +529,15 @@ Additionally VictoriaMetrics provides the following handlers: * `/api/v1/status/active_queries` - it returns a list of currently running queries. -### Graphite Metrics API usage +### Graphite API usage + +VictoriaMetrics supports the following Graphite APIs: + +* Metrics API - see [these docs](#graphite-metrics-api-usage). +* Tags API - see [these docs](#graphite-tags-api-usage). + + +#### Graphite Metrics API usage VictoriaMetrics supports the following handlers from [Graphite Metrics API](https://graphite-api.readthedocs.io/en/latest/api.html#the-metrics-api): @@ -536,6 +551,19 @@ VictoriaMetrics accepts the following additional query args at `/metrics/find` a that start with `node_`. By default `delimiter=.`. +#### Graphite Tags API usage + +VictoriaMetrics supports the following handlers from [Graphite Tags API](https://graphite.readthedocs.io/en/stable/tags.html): + +* [/tags/tagSeries](https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb) +* [/tags/tagMultiSeries](https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb) +* [/tags](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags) +* [/tags/tag_name](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags) +* [/tags/findSeries](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags) +* [/tags/autoComplete/tags](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support) +* [/tags/autoComplete/values](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support) + + ### How to build from sources We recommend using either [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or @@ -695,7 +723,16 @@ VictoriaMetrics provides the following handlers for exporting data: Send a request to `http://:8428/api/v1/export/native?match[]=`, where `` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) -for metrics to export. Use `{__name__!=""}` selector for fetching all the time series. +for metrics to export. Use `{__name__=~".*"}` selector for fetching all the time series. + +On large databases you may experience problems with the limit on unique time series (the default value is 300000). In this case you may need to adjust the `-search.maxUniqueTimeseries` parameter: + +```bash +# count unique time series in the database +wget -O- -q 'http://your_victoriametrics_instance:8428/api/v1/series/count' | jq '.data[0]' + +# relaunch VictoriaMetrics with -search.maxUniqueTimeseries set to a value bigger than the count from the previous command +``` Optional `start` and `end` args may be added to the request in order to limit the time frame for the exported data. These args may contain either unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) values. @@ -1196,13 +1233,18 @@ VictoriaMetrics also exposes currently running queries with their execution time * It is recommended inspecting logs during troubleshooting, since they may contain useful information. +* VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage. 
+ This may lead to the following "issues": + * Data becomes available for querying a few seconds after it has been inserted. It is possible to flush in-memory buffers to persistent storage + by requesting the `/internal/force_flush` HTTP handler. + * The last few seconds of inserted data may be lost on unclean shutdown (e.g. OOM, `kill -9` or hardware reset). + See [this article for technical details](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704). + * If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second, then it is likely you have too many active time series for the current amount of RAM. VictoriaMetrics [exposes](#monitoring) `vm_slow_*` metrics, which could be used as an indicator of low amounts of RAM. It is recommended increasing the amount of RAM on the node with VictoriaMetrics in order to improve ingestion and query performance in this case. - Another option is to increase `-memory.allowedPercent` command-line flag value. Be careful with this - option, since too big value for `-memory.allowedPercent` may result in high I/O usage. * VictoriaMetrics prioritizes data ingestion over data querying. So if it doesn't have enough resources for data ingestion, then data querying may slow down significantly. @@ -1217,9 +1259,9 @@ VictoriaMetrics also exposes currently running queries with their execution time which would start background merge if they had more free disk space. * If VictoriaMetrics doesn't work because certain parts are corrupted due to disk errors, - then just remove directories with broken parts. This will recover VictoriaMetrics at the cost - of data loss stored in the broken parts. In the future, `vmrecover` tool will be created - for automatic recovering from such errors. + then just remove directories with broken parts. It is safe removing subdirectories under `<-storageDataPath>/data/{big,small}/YYYY_MM` directories + when VictoriaMetrics isn't running. This recovers VictoriaMetrics at the cost of data loss stored in the deleted broken parts. + In the future, `vmrecover` tool will be created for automatic recovering from such errors. * If you see gaps on the graphs, try resetting the cache by sending request to `/internal/resetRollupResultCache`. If this removes gaps on the graphs, then it is likely data with timestamps older than `-search.cacheTimestampOffset` @@ -1241,6 +1283,11 @@ VictoriaMetrics also exposes currently running queries with their execution time This prevents ingestion of metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total` metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload. +* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then `-search.treatDotsAsIsInRegexps` command-line flag could be useful. + By default `.` chars in regexps match any char. If you need to match only dots, then `\\.` must be used in regexp filters. + When `-search.treatDotsAsIsInRegexps` option is enabled, then dots in regexps are automatically escaped in order to match only dots instead of arbitrary chars. + This may significantly increase performance when locating time series for the given label filters. + * VictoriaMetrics ignores `NaN` values during data ingestion. 
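The Graphite Tags API wiring added by this diff can be smoke-tested end-to-end. Below is a minimal Go sketch of the flow documented in the README hunk above: it registers two tagged series via `/tags/tagMultiSeries` and then lists the matching tag names via `/tags`. The instance address, metric names and tag values are hypothetical, and error handling is trimmed to the essentials:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Assumed address of a local single-node VictoriaMetrics instance.
	const vmAddr = "http://localhost:8428"

	// Register two Graphite series via /tags/tagMultiSeries. The handler
	// reads one or more `path` form values and responds with a JSON array
	// of canonical paths (tags sorted by tag name).
	form := url.Values{"path": []string{
		"disk.used;datacenter=east;server=web01",
		"disk.used;datacenter=west;server=web02",
	}}
	resp, err := http.PostForm(vmAddr+"/tags/tagMultiSeries", form)
	if err != nil {
		log.Fatalf("cannot register series: %s", err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Printf("canonical paths: %s\n", body)

	// Explore the registered tags via /tags. The optional `filter` arg
	// narrows the tag names and `limit` caps the number of results.
	resp, err = http.Get(vmAddr + "/tags?filter=datacenter&limit=100")
	if err != nil {
		log.Fatalf("cannot list tags: %s", err)
	}
	body, _ = io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Printf("tags: %s\n", body)
}
```

`/tags/tagSeries` accepts the same `path` form values but responds with plain text instead of JSON, matching the `isJSONResponse` switch in the `registerMetrics` helper added to `app/vminsert/graphite/tags.go` later in this diff.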
diff --git a/app/vmagent/README.md b/app/vmagent/README.md index b21928d40..d9ad2772e 100644 --- a/app/vmagent/README.md +++ b/app/vmagent/README.md @@ -63,6 +63,22 @@ Then send Influx data to `http://vmagent-host:8429`. See [these docs](https://gi Pass `-help` to `vmagent` in order to see the full list of supported command-line flags with their descriptions. +### Configuration update + +`vmagent` should be restarted in order to update config options set via command-line args. + +`vmagent` supports multiple approaches for reloading configs from updated config files such as `-promscrape.config`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`: + +* Sending a `SIGHUP` signal to the `vmagent` process: + ```bash + kill -SIGHUP `pidof vmagent` + ``` + +* Sending an HTTP request to the `http://vmagent:8429/-/reload` endpoint. + +There is also the `-promscrape.configCheckInterval` command-line option, which can be used for automatically reloading configs from the updated `-promscrape.config` file. + + ### Use cases @@ -197,6 +213,7 @@ The relabeling can be defined in the following places: Read more about relabeling in the following articles: +* [How to use Relabeling in Prometheus and VictoriaMetrics](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2) * [Life of a label](https://www.robustperception.io/life-of-a-label) * [Discarding targets and timeseries with relabeling](https://www.robustperception.io/relabelling-can-discard-targets-timeseries-and-alerts) * [Dropping labels at scrape time](https://www.robustperception.io/dropping-metrics-at-scrape-time-with-prometheus) diff --git a/app/vmagent/main.go b/app/vmagent/main.go index 4d2b7acb5..04c6cd8b2 100644 --- a/app/vmagent/main.go +++ b/app/vmagent/main.go @@ -208,13 +208,13 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool { return true case "/targets": promscrapeTargetsRequests.Inc() - w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Type", "text/plain; charset=utf-8") showOriginalLabels, _ := strconv.ParseBool(r.FormValue("show_original_labels")) promscrape.WriteHumanReadableTargetsStatus(w, showOriginalLabels) return true case "/api/v1/targets": promscrapeAPIV1TargetsRequests.Inc() - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") state := r.FormValue("state") promscrape.WriteAPIV1Targets(w, state) return true @@ -228,7 +228,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool { errMsg := fmt.Sprintf("waiting for scrapes to init, left: %d", rdy) http.Error(w, errMsg, http.StatusTooEarly) } else { - w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(http.StatusOK) w.Write([]byte("OK")) } diff --git a/app/vmalert/datasource/vm.go b/app/vmalert/datasource/vm.go index bf57a04be..7e0bc4ce4 100644 --- a/app/vmalert/datasource/vm.go +++ b/app/vmalert/datasource/vm.go @@ -82,7 +82,7 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) { if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Type", "application/json; charset=utf-8") if s.basicAuthPass != "" { req.SetBasicAuth(s.basicAuthUser, s.basicAuthPass) } diff --git a/app/vmalert/notifier/alertmanager.go b/app/vmalert/notifier/alertmanager.go index f7dd03615..a24f5f723 100644 --- a/app/vmalert/notifier/alertmanager.go +++ b/app/vmalert/notifier/alertmanager.go @@ -28,7 +28,7 
@@ func (am *AlertManager) Send(ctx context.Context, alerts []Alert) error { if err != nil { return err } - req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Type", "application/json; charset=utf-8") req = req.WithContext(ctx) if am.basicAuthPass != "" { req.SetBasicAuth(am.basicAuthUser, am.basicAuthPass) diff --git a/app/vmalert/web.go b/app/vmalert/web.go index dbd90fa9a..a095356d9 100644 --- a/app/vmalert/web.go +++ b/app/vmalert/web.go @@ -40,7 +40,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool { httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) return true } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Write(data) return true case "/api/v1/alerts": @@ -49,7 +49,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool { httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) return true } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Write(data) return true case "/-/reload": @@ -67,7 +67,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool { httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) return true } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Write(data) return true } diff --git a/app/vminsert/graphite/tags.go b/app/vminsert/graphite/tags.go new file mode 100644 index 000000000..fe49fcebf --- /dev/null +++ b/app/vminsert/graphite/tags.go @@ -0,0 +1,102 @@ +package graphite + +import ( + "fmt" + "net/http" + "sort" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" + graphiteparser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" + "github.com/VictoriaMetrics/metrics" +) + +// TagsTagSeriesHandler implements /tags/tagSeries handler. +// +// See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb +func TagsTagSeriesHandler(w http.ResponseWriter, r *http.Request) error { + return registerMetrics(w, r, false) +} + +// TagsTagMultiSeriesHandler implements /tags/tagMultiSeries handler. +// +// See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb +func TagsTagMultiSeriesHandler(w http.ResponseWriter, r *http.Request) error { + return registerMetrics(w, r, true) +} + +func registerMetrics(w http.ResponseWriter, r *http.Request, isJSONResponse bool) error { + startTime := time.Now() + if err := r.ParseForm(); err != nil { + return fmt.Errorf("cannot parse form values: %w", err) + } + paths := r.Form["path"] + var row graphiteparser.Row + var labels []prompb.Label + var b []byte + var tagsPool []graphiteparser.Tag + mrs := make([]storage.MetricRow, len(paths)) + ct := time.Now().UnixNano() / 1e6 + canonicalPaths := make([]string, len(paths)) + for i, path := range paths { + var err error + tagsPool, err = row.UnmarshalMetricAndTags(path, tagsPool[:0]) + if err != nil { + return fmt.Errorf("cannot parse path=%q: %w", path, err) + } + + // Construct canonical path according to https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb + sort.Slice(row.Tags, func(i, j int) bool { + return row.Tags[i].Key < row.Tags[j].Key + }) + b = append(b[:0], row.Metric...) 
+ for _, tag := range row.Tags { + b = append(b, ';') + b = append(b, tag.Key...) + b = append(b, '=') + b = append(b, tag.Value...) + } + canonicalPaths[i] = string(b) + + // Convert parsed metric and tags to labels. + labels = append(labels[:0], prompb.Label{ + Name: []byte("__name__"), + Value: []byte(row.Metric), + }) + for _, tag := range row.Tags { + labels = append(labels, prompb.Label{ + Name: []byte(tag.Key), + Value: []byte(tag.Value), + }) + } + + // Put labels with the current timestamp to MetricRow + mr := &mrs[i] + mr.MetricNameRaw = storage.MarshalMetricNameRaw(mr.MetricNameRaw[:0], labels) + mr.Timestamp = ct + } + if err := vmstorage.RegisterMetricNames(mrs); err != nil { + return fmt.Errorf("cannot register paths: %w", err) + } + + // Return response + contentType := "text/plain; charset=utf-8" + if isJSONResponse { + contentType = "application/json; charset=utf-8" + } + w.Header().Set("Content-Type", contentType) + WriteTagsTagMultiSeriesResponse(w, canonicalPaths, isJSONResponse) + if isJSONResponse { + tagsTagMultiSeriesDuration.UpdateDuration(startTime) + } else { + tagsTagSeriesDuration.UpdateDuration(startTime) + } + return nil +} + +var ( + tagsTagSeriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags/tagSeries"}`) + tagsTagMultiSeriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags/tagMultiSeries"}`) +) diff --git a/app/vminsert/graphite/tags_tag_multi_series_response.qtpl b/app/vminsert/graphite/tags_tag_multi_series_response.qtpl new file mode 100644 index 000000000..9491cf1d9 --- /dev/null +++ b/app/vminsert/graphite/tags_tag_multi_series_response.qtpl @@ -0,0 +1,14 @@ +{% stripspace %} + +TagsTagMultiSeriesResponse generates response for /tags/tagMultiSeries . +See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb +{% func TagsTagMultiSeriesResponse(canonicalPaths []string, isJSONResponse bool) %} + {% if isJSONResponse %}[{% endif %} + {% for i, path := range canonicalPaths %} + {%q= path %} + {% if i+1 < len(canonicalPaths) %},{% endif %} + {% endfor %} + {% if isJSONResponse %}]{% endif %} +{% endfunc %} + +{% endstripspace %} diff --git a/app/vminsert/graphite/tags_tag_multi_series_response.qtpl.go b/app/vminsert/graphite/tags_tag_multi_series_response.qtpl.go new file mode 100644 index 000000000..773a6ce4e --- /dev/null +++ b/app/vminsert/graphite/tags_tag_multi_series_response.qtpl.go @@ -0,0 +1,75 @@ +// Code generated by qtc from "tags_tag_multi_series_response.qtpl". DO NOT EDIT. +// See https://github.com/valyala/quicktemplate for details. 
+ +// TagsTagMultiSeriesResponse generates response for /tags/tagMultiSeries .See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb + +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:5 +package graphite + +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:5 +import ( + qtio422016 "io" + + qt422016 "github.com/valyala/quicktemplate" +) + +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:5 +var ( + _ = qtio422016.Copy + _ = qt422016.AcquireByteBuffer +) + +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:5 +func StreamTagsTagMultiSeriesResponse(qw422016 *qt422016.Writer, canonicalPaths []string, isJSONResponse bool) { +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:6 + if isJSONResponse { +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:6 + qw422016.N().S(`[`) +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:6 + } +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:7 + for i, path := range canonicalPaths { +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:8 + qw422016.N().Q(path) +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:9 + if i+1 < len(canonicalPaths) { +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:9 + qw422016.N().S(`,`) +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:9 + } +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:10 + } +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:11 + if isJSONResponse { +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:11 + qw422016.N().S(`]`) +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:11 + } +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 +} + +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 +func WriteTagsTagMultiSeriesResponse(qq422016 qtio422016.Writer, canonicalPaths []string, isJSONResponse bool) { +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 + qw422016 := qt422016.AcquireWriter(qq422016) +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 + StreamTagsTagMultiSeriesResponse(qw422016, canonicalPaths, isJSONResponse) +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 + qt422016.ReleaseWriter(qw422016) +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 +} + +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 +func TagsTagMultiSeriesResponse(canonicalPaths []string, isJSONResponse bool) string { +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 + qb422016 := qt422016.AcquireByteBuffer() +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 + WriteTagsTagMultiSeriesResponse(qb422016, canonicalPaths, isJSONResponse) +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 + qs422016 := string(qb422016.B) +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 + qt422016.ReleaseByteBuffer(qb422016) +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 + return qs422016 +//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 +} diff --git a/app/vminsert/main.go b/app/vminsert/main.go index 1ab37655d..1716b1c4e 100644 --- a/app/vminsert/main.go +++ b/app/vminsert/main.go @@ -153,15 +153,31 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { influxQueryRequests.Inc() fmt.Fprintf(w, 
`{"results":[{"series":[{"values":[]}]}]}`) return true + case "/tags/tagSeries": + graphiteTagsTagSeriesRequests.Inc() + if err := graphite.TagsTagSeriesHandler(w, r); err != nil { + graphiteTagsTagSeriesErrors.Inc() + httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) + return true + } + return true + case "/tags/tagMultiSeries": + graphiteTagsTagMultiSeriesRequests.Inc() + if err := graphite.TagsTagMultiSeriesHandler(w, r); err != nil { + graphiteTagsTagMultiSeriesErrors.Inc() + httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) + return true + } + return true case "/targets": promscrapeTargetsRequests.Inc() - w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Type", "text/plain; charset=utf-8") showOriginalLabels, _ := strconv.ParseBool(r.FormValue("show_original_labels")) promscrape.WriteHumanReadableTargetsStatus(w, showOriginalLabels) return true case "/api/v1/targets": promscrapeAPIV1TargetsRequests.Inc() - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") state := r.FormValue("state") promscrape.WriteAPIV1Targets(w, state) return true @@ -175,7 +191,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { errMsg := fmt.Sprintf("waiting for scrape config to init targets, configs left: %d", rdy) http.Error(w, errMsg, http.StatusTooEarly) } else { - w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(http.StatusOK) w.Write([]byte("OK")) } @@ -207,6 +223,12 @@ var ( influxQueryRequests = metrics.NewCounter(`vm_http_requests_total{path="/query", protocol="influx"}`) + graphiteTagsTagSeriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/tagSeries", protocol="graphite"}`) + graphiteTagsTagSeriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/tagSeries", protocol="graphite"}`) + + graphiteTagsTagMultiSeriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/tagMultiSeries", protocol="graphite"}`) + graphiteTagsTagMultiSeriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/tagMultiSeries", protocol="graphite"}`) + promscrapeTargetsRequests = metrics.NewCounter(`vm_http_requests_total{path="/targets"}`) promscrapeAPIV1TargetsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/targets"}`) diff --git a/app/vmselect/graphite/graphite.go b/app/vmselect/graphite/metrics_api.go similarity index 97% rename from app/vmselect/graphite/graphite.go rename to app/vmselect/graphite/metrics_api.go index ac736fa51..3eb3e7f11 100644 --- a/app/vmselect/graphite/graphite.go +++ b/app/vmselect/graphite/metrics_api.go @@ -84,10 +84,7 @@ func MetricsFindHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ } paths = deduplicatePaths(paths, delimiter) sortPaths(paths, delimiter) - contentType := "application/json" - if jsonp != "" { - contentType = "text/javascript" - } + contentType := getContentType(jsonp) w.Header().Set("Content-Type", contentType) bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) @@ -166,10 +163,7 @@ func MetricsExpandHandler(startTime time.Time, w http.ResponseWriter, r *http.Re } m[query] = paths } - contentType := "application/json" - if jsonp != "" { - contentType = "text/javascript" - } + contentType := getContentType(jsonp) w.Header().Set("Content-Type", contentType) if groupByExpr { for _, paths := range m { @@ -215,10 +209,7 @@ func MetricsIndexHandler(startTime time.Time, w 
http.ResponseWriter, r *http.Req if err != nil { return fmt.Errorf(`cannot obtain metric names: %w`, err) } - contentType := "application/json" - if jsonp != "" { - contentType = "text/javascript" - } + contentType := getContentType(jsonp) w.Header().Set("Content-Type", contentType) bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) @@ -417,3 +408,10 @@ var regexpCache = make(map[regexpCacheKey]*regexpCacheEntry) var regexpCacheLock sync.Mutex const maxRegexpCacheSize = 10000 + +func getContentType(jsonp string) string { + if jsonp == "" { + return "application/json; charset=utf-8" + } + return "text/javascript; charset=utf-8" +} diff --git a/app/vmselect/graphite/graphite_test.go b/app/vmselect/graphite/metrics_api_test.go similarity index 100% rename from app/vmselect/graphite/graphite_test.go rename to app/vmselect/graphite/metrics_api_test.go diff --git a/app/vmselect/graphite/tag_values_response.qtpl b/app/vmselect/graphite/tag_values_response.qtpl new file mode 100644 index 000000000..9899df7ff --- /dev/null +++ b/app/vmselect/graphite/tag_values_response.qtpl @@ -0,0 +1,20 @@ +{% stripspace %} + +Tags generates response for /tags/ handler +See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags +{% func TagValuesResponse(tagName string, tagValues []string) %} +{ + "tag":{%q= tagName %}, + "values":[ + {% for i, value := range tagValues %} + { + "count":1, + "value":{%q= value %} + } + {% if i+1 < len(tagValues) %},{% endif %} + {% endfor %} + ] +} +{% endfunc %} + +{% endstripspace %} diff --git a/app/vmselect/graphite/tag_values_response.qtpl.go b/app/vmselect/graphite/tag_values_response.qtpl.go new file mode 100644 index 000000000..6fd33b7e7 --- /dev/null +++ b/app/vmselect/graphite/tag_values_response.qtpl.go @@ -0,0 +1,75 @@ +// Code generated by qtc from "tag_values_response.qtpl". DO NOT EDIT. +// See https://github.com/valyala/quicktemplate for details. 
+ +// Tags generates response for /tags/ handlerSee https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags + +//line app/vmselect/graphite/tag_values_response.qtpl:5 +package graphite + +//line app/vmselect/graphite/tag_values_response.qtpl:5 +import ( + qtio422016 "io" + + qt422016 "github.com/valyala/quicktemplate" +) + +//line app/vmselect/graphite/tag_values_response.qtpl:5 +var ( + _ = qtio422016.Copy + _ = qt422016.AcquireByteBuffer +) + +//line app/vmselect/graphite/tag_values_response.qtpl:5 +func StreamTagValuesResponse(qw422016 *qt422016.Writer, tagName string, tagValues []string) { +//line app/vmselect/graphite/tag_values_response.qtpl:5 + qw422016.N().S(`{"tag":`) +//line app/vmselect/graphite/tag_values_response.qtpl:7 + qw422016.N().Q(tagName) +//line app/vmselect/graphite/tag_values_response.qtpl:7 + qw422016.N().S(`,"values":[`) +//line app/vmselect/graphite/tag_values_response.qtpl:9 + for i, value := range tagValues { +//line app/vmselect/graphite/tag_values_response.qtpl:9 + qw422016.N().S(`{"count":1,"value":`) +//line app/vmselect/graphite/tag_values_response.qtpl:12 + qw422016.N().Q(value) +//line app/vmselect/graphite/tag_values_response.qtpl:12 + qw422016.N().S(`}`) +//line app/vmselect/graphite/tag_values_response.qtpl:14 + if i+1 < len(tagValues) { +//line app/vmselect/graphite/tag_values_response.qtpl:14 + qw422016.N().S(`,`) +//line app/vmselect/graphite/tag_values_response.qtpl:14 + } +//line app/vmselect/graphite/tag_values_response.qtpl:15 + } +//line app/vmselect/graphite/tag_values_response.qtpl:15 + qw422016.N().S(`]}`) +//line app/vmselect/graphite/tag_values_response.qtpl:18 +} + +//line app/vmselect/graphite/tag_values_response.qtpl:18 +func WriteTagValuesResponse(qq422016 qtio422016.Writer, tagName string, tagValues []string) { +//line app/vmselect/graphite/tag_values_response.qtpl:18 + qw422016 := qt422016.AcquireWriter(qq422016) +//line app/vmselect/graphite/tag_values_response.qtpl:18 + StreamTagValuesResponse(qw422016, tagName, tagValues) +//line app/vmselect/graphite/tag_values_response.qtpl:18 + qt422016.ReleaseWriter(qw422016) +//line app/vmselect/graphite/tag_values_response.qtpl:18 +} + +//line app/vmselect/graphite/tag_values_response.qtpl:18 +func TagValuesResponse(tagName string, tagValues []string) string { +//line app/vmselect/graphite/tag_values_response.qtpl:18 + qb422016 := qt422016.AcquireByteBuffer() +//line app/vmselect/graphite/tag_values_response.qtpl:18 + WriteTagValuesResponse(qb422016, tagName, tagValues) +//line app/vmselect/graphite/tag_values_response.qtpl:18 + qs422016 := string(qb422016.B) +//line app/vmselect/graphite/tag_values_response.qtpl:18 + qt422016.ReleaseByteBuffer(qb422016) +//line app/vmselect/graphite/tag_values_response.qtpl:18 + return qs422016 +//line app/vmselect/graphite/tag_values_response.qtpl:18 +} diff --git a/app/vmselect/graphite/tags_api.go b/app/vmselect/graphite/tags_api.go new file mode 100644 index 000000000..df0d613e7 --- /dev/null +++ b/app/vmselect/graphite/tags_api.go @@ -0,0 +1,368 @@ +package graphite + +import ( + "fmt" + "net/http" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" + "github.com/VictoriaMetrics/metrics" +) + +// TagsAutoCompleteValuesHandler implements /tags/autoComplete/values 
endpoint from Graphite Tags API. +// +// See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support +func TagsAutoCompleteValuesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error { + deadline := searchutils.GetDeadlineForQuery(r, startTime) + if err := r.ParseForm(); err != nil { + return fmt.Errorf("cannot parse form values: %w", err) + } + limit, err := getInt(r, "limit") + if err != nil { + return err + } + if limit <= 0 { + // Use limit=100 by default. See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support + limit = 100 + } + tag := r.FormValue("tag") + if len(tag) == 0 { + return fmt.Errorf("missing `tag` query arg") + } + valuePrefix := r.FormValue("valuePrefix") + exprs := r.Form["expr"] + var tagValues []string + if len(exprs) == 0 { + // Fast path: there are no `expr` filters, so use netstorage.GetGraphiteTagValues. + // Escape special chars in valuePrefix as Graphite does. + // See https://github.com/graphite-project/graphite-web/blob/3ad279df5cb90b211953e39161df416e54a84948/webapp/graphite/tags/base.py#L228 + filter := regexp.QuoteMeta(valuePrefix) + tagValues, err = netstorage.GetGraphiteTagValues(tag, filter, limit, deadline) + if err != nil { + return err + } + } else { + // Slow path: use netstorage.SearchMetricNames for applying `expr` filters. + sq, err := getSearchQueryForExprs(exprs) + if err != nil { + return err + } + mns, err := netstorage.SearchMetricNames(sq, deadline) + if err != nil { + return fmt.Errorf("cannot fetch metric names for %q: %w", sq, err) + } + m := make(map[string]struct{}) + if tag == "name" { + tag = "__name__" + } + for _, mn := range mns { + tagValue := mn.GetTagValue(tag) + if len(tagValue) == 0 { + continue + } + m[string(tagValue)] = struct{}{} + } + if len(valuePrefix) > 0 { + for tagValue := range m { + if !strings.HasPrefix(tagValue, valuePrefix) { + delete(m, tagValue) + } + } + } + tagValues = make([]string, 0, len(m)) + for tagValue := range m { + tagValues = append(tagValues, tagValue) + } + sort.Strings(tagValues) + if limit > 0 && limit < len(tagValues) { + tagValues = tagValues[:limit] + } + } + + jsonp := r.FormValue("jsonp") + contentType := getContentType(jsonp) + w.Header().Set("Content-Type", contentType) + bw := bufferedwriter.Get(w) + defer bufferedwriter.Put(bw) + WriteTagsAutoCompleteResponse(bw, tagValues, jsonp) + if err := bw.Flush(); err != nil { + return err + } + tagsAutoCompleteValuesDuration.UpdateDuration(startTime) + return nil +} + +var tagsAutoCompleteValuesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags/autoComplete/values"}`) + +// TagsAutoCompleteTagsHandler implements /tags/autoComplete/tags endpoint from Graphite Tags API. +// +// See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support +func TagsAutoCompleteTagsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error { + deadline := searchutils.GetDeadlineForQuery(r, startTime) + if err := r.ParseForm(); err != nil { + return fmt.Errorf("cannot parse form values: %w", err) + } + limit, err := getInt(r, "limit") + if err != nil { + return err + } + if limit <= 0 { + // Use limit=100 by default. See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support + limit = 100 + } + tagPrefix := r.FormValue("tagPrefix") + exprs := r.Form["expr"] + var labels []string + if len(exprs) == 0 { + // Fast path: there are no `expr` filters, so use netstorage.GetGraphiteTags. + + // Escape special chars in tagPrefix as Graphite does. 
+ // See https://github.com/graphite-project/graphite-web/blob/3ad279df5cb90b211953e39161df416e54a84948/webapp/graphite/tags/base.py#L181 + filter := regexp.QuoteMeta(tagPrefix) + labels, err = netstorage.GetGraphiteTags(filter, limit, deadline) + if err != nil { + return err + } + } else { + // Slow path: use netstorage.SearchMetricNames for applying `expr` filters. + sq, err := getSearchQueryForExprs(exprs) + if err != nil { + return err + } + mns, err := netstorage.SearchMetricNames(sq, deadline) + if err != nil { + return fmt.Errorf("cannot fetch metric names for %q: %w", sq, err) + } + m := make(map[string]struct{}) + for _, mn := range mns { + m["name"] = struct{}{} + for _, tag := range mn.Tags { + m[string(tag.Key)] = struct{}{} + } + } + if len(tagPrefix) > 0 { + for label := range m { + if !strings.HasPrefix(label, tagPrefix) { + delete(m, label) + } + } + } + labels = make([]string, 0, len(m)) + for label := range m { + labels = append(labels, label) + } + sort.Strings(labels) + if limit > 0 && limit < len(labels) { + labels = labels[:limit] + } + } + + jsonp := r.FormValue("jsonp") + contentType := getContentType(jsonp) + w.Header().Set("Content-Type", contentType) + bw := bufferedwriter.Get(w) + defer bufferedwriter.Put(bw) + WriteTagsAutoCompleteResponse(bw, labels, jsonp) + if err := bw.Flush(); err != nil { + return err + } + tagsAutoCompleteTagsDuration.UpdateDuration(startTime) + return nil +} + +var tagsAutoCompleteTagsDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags/autoComplete/tags"}`) + +// TagsFindSeriesHandler implements /tags/findSeries endpoint from Graphite Tags API. +// +// See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags +func TagsFindSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error { + deadline := searchutils.GetDeadlineForQuery(r, startTime) + if err := r.ParseForm(); err != nil { + return fmt.Errorf("cannot parse form values: %w", err) + } + limit, err := getInt(r, "limit") + if err != nil { + return err + } + exprs := r.Form["expr"] + if len(exprs) == 0 { + return fmt.Errorf("expecting at least one `expr` query arg") + } + sq, err := getSearchQueryForExprs(exprs) + if err != nil { + return err + } + mns, err := netstorage.SearchMetricNames(sq, deadline) + if err != nil { + return fmt.Errorf("cannot fetch metric names for %q: %w", sq, err) + } + paths := getCanonicalPaths(mns) + if limit > 0 && limit < len(paths) { + paths = paths[:limit] + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + bw := bufferedwriter.Get(w) + defer bufferedwriter.Put(bw) + WriteTagsFindSeriesResponse(bw, paths) + if err := bw.Flush(); err != nil { + return err + } + tagsFindSeriesDuration.UpdateDuration(startTime) + return nil +} + +func getCanonicalPaths(mns []storage.MetricName) []string { + paths := make([]string, 0, len(mns)) + var b []byte + var tags []storage.Tag + for _, mn := range mns { + b = append(b[:0], mn.MetricGroup...) + tags = append(tags[:0], mn.Tags...) + sort.Slice(tags, func(i, j int) bool { + return string(tags[i].Key) < string(tags[j].Key) + }) + for _, tag := range tags { + b = append(b, ';') + b = append(b, tag.Key...) + b = append(b, '=') + b = append(b, tag.Value...) + } + paths = append(paths, string(b)) + } + sort.Strings(paths) + return paths +} + +var tagsFindSeriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags/findSeries"}`) + +// TagValuesHandler implements /tags/ endpoint from Graphite Tags API. 
+// +// See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags +func TagValuesHandler(startTime time.Time, tagName string, w http.ResponseWriter, r *http.Request) error { + deadline := searchutils.GetDeadlineForQuery(r, startTime) + if err := r.ParseForm(); err != nil { + return fmt.Errorf("cannot parse form values: %w", err) + } + limit, err := getInt(r, "limit") + if err != nil { + return err + } + filter := r.FormValue("filter") + tagValues, err := netstorage.GetGraphiteTagValues(tagName, filter, limit, deadline) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + bw := bufferedwriter.Get(w) + defer bufferedwriter.Put(bw) + WriteTagValuesResponse(bw, tagName, tagValues) + if err := bw.Flush(); err != nil { + return err + } + tagValuesDuration.UpdateDuration(startTime) + return nil +} + +var tagValuesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags/"}`) + +// TagsHandler implements /tags endpoint from Graphite Tags API. +// +// See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags +func TagsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error { + deadline := searchutils.GetDeadlineForQuery(r, startTime) + if err := r.ParseForm(); err != nil { + return fmt.Errorf("cannot parse form values: %w", err) + } + limit, err := getInt(r, "limit") + if err != nil { + return err + } + filter := r.FormValue("filter") + labels, err := netstorage.GetGraphiteTags(filter, limit, deadline) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + bw := bufferedwriter.Get(w) + defer bufferedwriter.Put(bw) + WriteTagsResponse(bw, labels) + if err := bw.Flush(); err != nil { + return err + } + tagsDuration.UpdateDuration(startTime) + return nil +} + +var tagsDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags"}`) + +func getInt(r *http.Request, argName string) (int, error) { + argValue := r.FormValue(argName) + if len(argValue) == 0 { + return 0, nil + } + n, err := strconv.Atoi(argValue) + if err != nil { + return 0, fmt.Errorf("cannot parse %q=%q: %w", argName, argValue, err) + } + return n, nil +} + +func getSearchQueryForExprs(exprs []string) (*storage.SearchQuery, error) { + tfs, err := exprsToTagFilters(exprs) + if err != nil { + return nil, err + } + ct := time.Now().UnixNano() / 1e6 + sq := storage.NewSearchQuery(0, ct, [][]storage.TagFilter{tfs}) + return sq, nil +} + +func exprsToTagFilters(exprs []string) ([]storage.TagFilter, error) { + tfs := make([]storage.TagFilter, 0, len(exprs)) + for _, expr := range exprs { + tf, err := parseFilterExpr(expr) + if err != nil { + return nil, fmt.Errorf("cannot parse `expr` query arg: %w", err) + } + tfs = append(tfs, *tf) + } + return tfs, nil +} + +func parseFilterExpr(s string) (*storage.TagFilter, error) { + n := strings.Index(s, "=") + if n < 0 { + return nil, fmt.Errorf("missing tag value in filter expression %q", s) + } + tagName := s[:n] + tagValue := s[n+1:] + isNegative := false + if strings.HasSuffix(tagName, "!") { + isNegative = true + tagName = tagName[:len(tagName)-1] + } + if tagName == "name" { + tagName = "" + } + isRegexp := false + if strings.HasPrefix(tagValue, "~") { + isRegexp = true + tagValue = "^(?:" + tagValue[1:] + ").*" + } + return &storage.TagFilter{ + Key: []byte(tagName), + Value: []byte(tagValue), + IsNegative: isNegative, + IsRegexp: isRegexp, + }, nil +} diff --git a/app/vmselect/graphite/tags_autocomplete_response.qtpl 
b/app/vmselect/graphite/tags_autocomplete_response.qtpl new file mode 100644 index 000000000..a6ad1ef05 --- /dev/null +++ b/app/vmselect/graphite/tags_autocomplete_response.qtpl @@ -0,0 +1,16 @@ +{% stripspace %} + +TagsAutoCompleteResponse generates responses for /tags/autoComplete/{tags,values} handlers in Graphite Tags API. +See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support +{% func TagsAutoCompleteResponse(ss []string, jsonp string) %} + {% if jsonp != "" %}{%s= jsonp %}({% endif %} + [ + {% for i, s := range ss %} + {%q= s %} + {% if i+1 < len(ss) %},{% endif %} + {% endfor %} + ] + {% if jsonp != "" %}){% endif %} +{% endfunc %} + +{% endstripspace %} diff --git a/app/vmselect/graphite/tags_autocomplete_response.qtpl.go b/app/vmselect/graphite/tags_autocomplete_response.qtpl.go new file mode 100644 index 000000000..a7cf0f21c --- /dev/null +++ b/app/vmselect/graphite/tags_autocomplete_response.qtpl.go @@ -0,0 +1,81 @@ +// Code generated by qtc from "tags_autocomplete_response.qtpl". DO NOT EDIT. +// See https://github.com/valyala/quicktemplate for details. + +// TagsAutoCompleteResponse generates responses for /tags/autoComplete/{tags,values} handlers in Graphite Tags API.See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support + +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:5 +package graphite + +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:5 +import ( + qtio422016 "io" + + qt422016 "github.com/valyala/quicktemplate" +) + +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:5 +var ( + _ = qtio422016.Copy + _ = qt422016.AcquireByteBuffer +) + +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:5 +func StreamTagsAutoCompleteResponse(qw422016 *qt422016.Writer, ss []string, jsonp string) { +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:6 + if jsonp != "" { +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:6 + qw422016.N().S(jsonp) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:6 + qw422016.N().S(`(`) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:6 + } +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:6 + qw422016.N().S(`[`) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:8 + for i, s := range ss { +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:9 + qw422016.N().Q(s) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:10 + if i+1 < len(ss) { +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:10 + qw422016.N().S(`,`) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:10 + } +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:11 + } +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:11 + qw422016.N().S(`]`) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:13 + if jsonp != "" { +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:13 + qw422016.N().S(`)`) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:13 + } +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 +} + +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 +func WriteTagsAutoCompleteResponse(qq422016 qtio422016.Writer, ss []string, jsonp string) { +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 + qw422016 := qt422016.AcquireWriter(qq422016) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 + StreamTagsAutoCompleteResponse(qw422016, ss, jsonp) +//line 
app/vmselect/graphite/tags_autocomplete_response.qtpl:14 + qt422016.ReleaseWriter(qw422016) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 +} + +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 +func TagsAutoCompleteResponse(ss []string, jsonp string) string { +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 + qb422016 := qt422016.AcquireByteBuffer() +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 + WriteTagsAutoCompleteResponse(qb422016, ss, jsonp) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 + qs422016 := string(qb422016.B) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 + qt422016.ReleaseByteBuffer(qb422016) +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 + return qs422016 +//line app/vmselect/graphite/tags_autocomplete_response.qtpl:14 +} diff --git a/app/vmselect/graphite/tags_find_series_response.qtpl b/app/vmselect/graphite/tags_find_series_response.qtpl new file mode 100644 index 000000000..a22df4359 --- /dev/null +++ b/app/vmselect/graphite/tags_find_series_response.qtpl @@ -0,0 +1,12 @@ +{% stripspace %} + +{% func TagsFindSeriesResponse(paths []string) %} +[ + {% for i, path := range paths %} + {%q= path %} + {% if i+1 < len(paths) %},{% endif %} + {% endfor %} +] +{% endfunc %} + +{% endstripspace %} diff --git a/app/vmselect/graphite/tags_find_series_response.qtpl.go b/app/vmselect/graphite/tags_find_series_response.qtpl.go new file mode 100644 index 000000000..670f1f530 --- /dev/null +++ b/app/vmselect/graphite/tags_find_series_response.qtpl.go @@ -0,0 +1,65 @@ +// Code generated by qtc from "tags_find_series_response.qtpl". DO NOT EDIT. +// See https://github.com/valyala/quicktemplate for details. + +//line app/vmselect/graphite/tags_find_series_response.qtpl:3 +package graphite + +//line app/vmselect/graphite/tags_find_series_response.qtpl:3 +import ( + qtio422016 "io" + + qt422016 "github.com/valyala/quicktemplate" +) + +//line app/vmselect/graphite/tags_find_series_response.qtpl:3 +var ( + _ = qtio422016.Copy + _ = qt422016.AcquireByteBuffer +) + +//line app/vmselect/graphite/tags_find_series_response.qtpl:3 +func StreamTagsFindSeriesResponse(qw422016 *qt422016.Writer, paths []string) { +//line app/vmselect/graphite/tags_find_series_response.qtpl:3 + qw422016.N().S(`[`) +//line app/vmselect/graphite/tags_find_series_response.qtpl:5 + for i, path := range paths { +//line app/vmselect/graphite/tags_find_series_response.qtpl:6 + qw422016.N().Q(path) +//line app/vmselect/graphite/tags_find_series_response.qtpl:7 + if i+1 < len(paths) { +//line app/vmselect/graphite/tags_find_series_response.qtpl:7 + qw422016.N().S(`,`) +//line app/vmselect/graphite/tags_find_series_response.qtpl:7 + } +//line app/vmselect/graphite/tags_find_series_response.qtpl:8 + } +//line app/vmselect/graphite/tags_find_series_response.qtpl:8 + qw422016.N().S(`]`) +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 +} + +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 +func WriteTagsFindSeriesResponse(qq422016 qtio422016.Writer, paths []string) { +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 + qw422016 := qt422016.AcquireWriter(qq422016) +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 + StreamTagsFindSeriesResponse(qw422016, paths) +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 + qt422016.ReleaseWriter(qw422016) +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 +} + +//line 
app/vmselect/graphite/tags_find_series_response.qtpl:10 +func TagsFindSeriesResponse(paths []string) string { +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 + qb422016 := qt422016.AcquireByteBuffer() +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 + WriteTagsFindSeriesResponse(qb422016, paths) +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 + qs422016 := string(qb422016.B) +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 + qt422016.ReleaseByteBuffer(qb422016) +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 + return qs422016 +//line app/vmselect/graphite/tags_find_series_response.qtpl:10 +} diff --git a/app/vmselect/graphite/tags_response.qtpl b/app/vmselect/graphite/tags_response.qtpl new file mode 100644 index 000000000..c562d409e --- /dev/null +++ b/app/vmselect/graphite/tags_response.qtpl @@ -0,0 +1,16 @@ +{% stripspace %} + +Tags generates response for /tags handler +See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags +{% func TagsResponse(tags []string) %} +[ + {% for i, tag := range tags %} + { + "tag":{%q= tag %} + } + {% if i+1 < len(tags) %},{% endif %} + {% endfor %} +] +{% endfunc %} + +{% endstripspace %} diff --git a/app/vmselect/graphite/tags_response.qtpl.go b/app/vmselect/graphite/tags_response.qtpl.go new file mode 100644 index 000000000..f95b63d4a --- /dev/null +++ b/app/vmselect/graphite/tags_response.qtpl.go @@ -0,0 +1,71 @@ +// Code generated by qtc from "tags_response.qtpl". DO NOT EDIT. +// See https://github.com/valyala/quicktemplate for details. + +// Tags generates response for /tags handlerSee https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags + +//line app/vmselect/graphite/tags_response.qtpl:5 +package graphite + +//line app/vmselect/graphite/tags_response.qtpl:5 +import ( + qtio422016 "io" + + qt422016 "github.com/valyala/quicktemplate" +) + +//line app/vmselect/graphite/tags_response.qtpl:5 +var ( + _ = qtio422016.Copy + _ = qt422016.AcquireByteBuffer +) + +//line app/vmselect/graphite/tags_response.qtpl:5 +func StreamTagsResponse(qw422016 *qt422016.Writer, tags []string) { +//line app/vmselect/graphite/tags_response.qtpl:5 + qw422016.N().S(`[`) +//line app/vmselect/graphite/tags_response.qtpl:7 + for i, tag := range tags { +//line app/vmselect/graphite/tags_response.qtpl:7 + qw422016.N().S(`{"tag":`) +//line app/vmselect/graphite/tags_response.qtpl:9 + qw422016.N().Q(tag) +//line app/vmselect/graphite/tags_response.qtpl:9 + qw422016.N().S(`}`) +//line app/vmselect/graphite/tags_response.qtpl:11 + if i+1 < len(tags) { +//line app/vmselect/graphite/tags_response.qtpl:11 + qw422016.N().S(`,`) +//line app/vmselect/graphite/tags_response.qtpl:11 + } +//line app/vmselect/graphite/tags_response.qtpl:12 + } +//line app/vmselect/graphite/tags_response.qtpl:12 + qw422016.N().S(`]`) +//line app/vmselect/graphite/tags_response.qtpl:14 +} + +//line app/vmselect/graphite/tags_response.qtpl:14 +func WriteTagsResponse(qq422016 qtio422016.Writer, tags []string) { +//line app/vmselect/graphite/tags_response.qtpl:14 + qw422016 := qt422016.AcquireWriter(qq422016) +//line app/vmselect/graphite/tags_response.qtpl:14 + StreamTagsResponse(qw422016, tags) +//line app/vmselect/graphite/tags_response.qtpl:14 + qt422016.ReleaseWriter(qw422016) +//line app/vmselect/graphite/tags_response.qtpl:14 +} + +//line app/vmselect/graphite/tags_response.qtpl:14 +func TagsResponse(tags []string) string { +//line app/vmselect/graphite/tags_response.qtpl:14 + qb422016 := 
qt422016.AcquireByteBuffer() +//line app/vmselect/graphite/tags_response.qtpl:14 + WriteTagsResponse(qb422016, tags) +//line app/vmselect/graphite/tags_response.qtpl:14 + qs422016 := string(qb422016.B) +//line app/vmselect/graphite/tags_response.qtpl:14 + qt422016.ReleaseByteBuffer(qb422016) +//line app/vmselect/graphite/tags_response.qtpl:14 + return qs422016 +//line app/vmselect/graphite/tags_response.qtpl:14 +} diff --git a/app/vmselect/main.go b/app/vmselect/main.go index d184eba77..cc8aab099 100644 --- a/app/vmselect/main.go +++ b/app/vmselect/main.go @@ -132,6 +132,16 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { return true } } + if strings.HasPrefix(path, "/tags/") && !isGraphiteTagsPath(path) { + tagName := r.URL.Path[len("/tags/"):] + graphiteTagValuesRequests.Inc() + if err := graphite.TagValuesHandler(startTime, tagName, w, r); err != nil { + graphiteTagValuesErrors.Inc() + httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) + return true + } + return true + } switch path { case "/api/v1/query": @@ -259,22 +269,56 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { return true } return true + case "/tags": + graphiteTagsRequests.Inc() + if err := graphite.TagsHandler(startTime, w, r); err != nil { + graphiteTagsErrors.Inc() + httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) + return true + } + return true + case "/tags/findSeries": + graphiteTagsFindSeriesRequests.Inc() + if err := graphite.TagsFindSeriesHandler(startTime, w, r); err != nil { + graphiteTagsFindSeriesErrors.Inc() + httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) + return true + } + return true + case "/tags/autoComplete/tags": + graphiteTagsAutoCompleteTagsRequests.Inc() + httpserver.EnableCORS(w, r) + if err := graphite.TagsAutoCompleteTagsHandler(startTime, w, r); err != nil { + graphiteTagsAutoCompleteTagsErrors.Inc() + httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) + return true + } + return true + case "/tags/autoComplete/values": + graphiteTagsAutoCompleteValuesRequests.Inc() + httpserver.EnableCORS(w, r) + if err := graphite.TagsAutoCompleteValuesHandler(startTime, w, r); err != nil { + graphiteTagsAutoCompleteValuesErrors.Inc() + httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) + return true + } + return true case "/api/v1/rules": // Return dumb placeholder rulesRequests.Inc() - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") fmt.Fprintf(w, "%s", `{"status":"success","data":{"groups":[]}}`) return true case "/api/v1/alerts": // Return dumb placeholder alertsRequests.Inc() - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") fmt.Fprintf(w, "%s", `{"status":"success","data":{"alerts":[]}}`) return true case "/api/v1/metadata": // Return dumb placeholder metadataRequests.Inc() - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") fmt.Fprintf(w, "%s", `{"status":"success","data":{}}`) return true case "/api/v1/admin/tsdb/delete_series": @@ -296,10 +340,22 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { } } +func isGraphiteTagsPath(path string) bool { + switch path { + // See https://graphite.readthedocs.io/en/stable/tags.html for a list of Graphite Tags API paths. + // Do not include `/tags/` here, since this will fool the caller.
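+ // Requests to "/tags/<tag_name>" must keep being routed to TagValuesHandler via the prefix check above.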
+ case "/tags/tagSeries", "/tags/tagMultiSeries", "/tags/findSeries", + "/tags/autoComplete/tags", "/tags/autoComplete/values", "/tags/delSeries": + return true + default: + return false + } +} + func sendPrometheusError(w http.ResponseWriter, r *http.Request, err error) { logger.Warnf("error in %q: %s", r.RequestURI, err) - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") statusCode := http.StatusUnprocessableEntity var esc *httpserver.ErrorWithStatusCode if errors.As(err, &esc) { @@ -360,6 +416,21 @@ var ( graphiteMetricsIndexRequests = metrics.NewCounter(`vm_http_requests_total{path="/metrics/index.json"}`) graphiteMetricsIndexErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/metrics/index.json"}`) + graphiteTagsRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags"}`) + graphiteTagsErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags"}`) + + graphiteTagValuesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/"}`) + graphiteTagValuesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/"}`) + + graphiteTagsFindSeriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/findSeries"}`) + graphiteTagsFindSeriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/findSeries"}`) + + graphiteTagsAutoCompleteTagsRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/autoComplete/tags"}`) + graphiteTagsAutoCompleteTagsErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/autoComplete/tags"}`) + + graphiteTagsAutoCompleteValuesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/autoComplete/values"}`) + graphiteTagsAutoCompleteValuesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/autoComplete/values"}`) + rulesRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/rules"}`) alertsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/alerts"}`) metadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/metadata"}`) diff --git a/app/vmselect/netstorage/netstorage.go b/app/vmselect/netstorage/netstorage.go index ccffc49e7..8af6c3f82 100644 --- a/app/vmselect/netstorage/netstorage.go +++ b/app/vmselect/netstorage/netstorage.go @@ -5,6 +5,7 @@ import ( "errors" "flag" "fmt" + "regexp" "runtime" "sort" "sync" @@ -523,6 +524,35 @@ func GetLabelsOnTimeRange(tr storage.TimeRange, deadline searchutils.Deadline) ( return labels, nil } +// GetGraphiteTags returns Graphite tags until the given deadline. +func GetGraphiteTags(filter string, limit int, deadline searchutils.Deadline) ([]string, error) { + if deadline.Exceeded() { + return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String()) + } + labels, err := GetLabels(deadline) + if err != nil { + return nil, err + } + // Substitute "__name__" with "name" for Graphite compatibility + for i := range labels { + if labels[i] == "__name__" { + labels[i] = "name" + sort.Strings(labels) + break + } + } + if len(filter) > 0 { + labels, err = applyGraphiteRegexpFilter(filter, labels) + if err != nil { + return nil, err + } + } + if limit > 0 && limit < len(labels) { + labels = labels[:limit] + } + return labels, nil +} + // GetLabels returns labels until the given deadline. 
func GetLabels(deadline searchutils.Deadline) ([]string, error) { if deadline.Exceeded() { @@ -599,6 +629,30 @@ func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline return labelValues, nil } +// GetGraphiteTagValues returns tag values for the given tagName until the given deadline. +func GetGraphiteTagValues(tagName, filter string, limit int, deadline searchutils.Deadline) ([]string, error) { + if deadline.Exceeded() { + return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String()) + } + if tagName == "name" { + tagName = "" + } + tagValues, err := GetLabelValues(tagName, deadline) + if err != nil { + return nil, err + } + if len(filter) > 0 { + tagValues, err = applyGraphiteRegexpFilter(filter, tagValues) + if err != nil { + return nil, err + } + } + if limit > 0 && limit < len(tagValues) { + tagValues = tagValues[:limit] + } + return tagValues, nil +} + // GetLabelValues returns label values for the given labelName // until the given deadline. func GetLabelValues(labelName string, deadline searchutils.Deadline) ([]string, error) { @@ -819,6 +873,32 @@ var exportWorkPool = &sync.Pool{ }, } +// SearchMetricNames returns all the metric names matching sq until the given deadline. +func SearchMetricNames(sq *storage.SearchQuery, deadline searchutils.Deadline) ([]storage.MetricName, error) { + if deadline.Exceeded() { + return nil, fmt.Errorf("timeout exceeded before starting to search metric names: %s", deadline.String()) + } + + // Setup search. + tfss, err := setupTfss(sq.TagFilterss) + if err != nil { + return nil, err + } + tr := storage.TimeRange{ + MinTimestamp: sq.MinTimestamp, + MaxTimestamp: sq.MaxTimestamp, + } + if err := vmstorage.CheckTimeRange(tr); err != nil { + return nil, err + } + + mns, err := vmstorage.SearchMetricNames(tfss, tr, *maxMetricsPerSearch, deadline.Deadline()) + if err != nil { + return nil, fmt.Errorf("cannot find metric names: %w", err) + } + return mns, nil +} + // ProcessSearchQuery performs sq until the given deadline. // // Results.RunParallel or Results.Cancel must be called on the returned Results. @@ -951,3 +1031,20 @@ func setupTfss(tagFilterss [][]storage.TagFilter) ([]*storage.TagFilters, error) } return tfss, nil } + +func applyGraphiteRegexpFilter(filter string, ss []string) ([]string, error) { + // Anchor filter regexp to the beginning of the string as Graphite does. 
+ // See https://github.com/graphite-project/graphite-web/blob/3ad279df5cb90b211953e39161df416e54a84948/webapp/graphite/tags/localdatabase.py#L157 + filter = "^(?:" + filter + ")" + re, err := regexp.Compile(filter) + if err != nil { + return nil, fmt.Errorf("cannot parse regexp filter=%q: %w", filter, err) + } + dst := ss[:0] + for _, s := range ss { + if re.MatchString(s) { + dst = append(dst, s) + } + } + return dst, nil +} diff --git a/app/vmselect/prometheus/prometheus.go b/app/vmselect/prometheus/prometheus.go index cdee46c67..8eba99869 100644 --- a/app/vmselect/prometheus/prometheus.go +++ b/app/vmselect/prometheus/prometheus.go @@ -78,17 +78,13 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request if err != nil { return err } - sq := &storage.SearchQuery{ - MinTimestamp: start, - MaxTimestamp: end, - TagFilterss: tagFilterss, - } + sq := storage.NewSearchQuery(start, end, tagFilterss) rss, err := netstorage.ProcessSearchQuery(sq, true, deadline) if err != nil { return fmt.Errorf("cannot fetch data for %q: %w", sq, err) } - w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Type", "text/plain; charset=utf-8") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error { @@ -146,12 +142,8 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques if err != nil { return err } - sq := &storage.SearchQuery{ - MinTimestamp: start, - MaxTimestamp: end, - TagFilterss: tagFilterss, - } - w.Header().Set("Content-Type", "text/csv") + sq := storage.NewSearchQuery(start, end, tagFilterss) + w.Header().Set("Content-Type", "text/csv; charset=utf-8") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) @@ -227,11 +219,7 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Req if err != nil { return err } - sq := &storage.SearchQuery{ - MinTimestamp: start, - MaxTimestamp: end, - TagFilterss: tagFilterss, - } + sq := storage.NewSearchQuery(start, end, tagFilterss) w.Header().Set("Content-Type", "VictoriaMetrics/native") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) @@ -331,9 +319,9 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo WriteExportJSONLine(bb, xb) resultsCh <- bb } - contentType := "application/stream+json" + contentType := "application/stream+json; charset=utf-8" if format == "prometheus" { - contentType = "text/plain" + contentType = "text/plain; charset=utf-8" writeLineFunc = func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) { bb := quicktemplate.AcquireByteBuffer() WriteExportPrometheusLine(bb, xb) @@ -381,11 +369,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo if err != nil { return err } - sq := &storage.SearchQuery{ - MinTimestamp: start, - MaxTimestamp: end, - TagFilterss: tagFilterss, - } + sq := storage.NewSearchQuery(start, end, tagFilterss) w.Header().Set("Content-Type", contentType) bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) @@ -486,9 +470,7 @@ func DeleteHandler(startTime time.Time, r *http.Request) error { if err != nil { return err } - sq := &storage.SearchQuery{ - TagFilterss: tagFilterss, - } + sq := storage.NewSearchQuery(0, 0, tagFilterss) deletedCount, err := netstorage.DeleteSeries(sq) if err != nil { return fmt.Errorf("cannot delete time series matching %q: %w", matches, err) @@ -561,7 +543,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr } } - 
w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) WriteLabelValuesResponse(bw, labelValues) @@ -596,32 +578,41 @@ func labelValuesWithMatches(labelName string, matches []string, start, end int64 if start >= end { end = start + defaultStep } - sq := &storage.SearchQuery{ - MinTimestamp: start, - MaxTimestamp: end, - TagFilterss: tagFilterss, - } - rss, err := netstorage.ProcessSearchQuery(sq, false, deadline) - if err != nil { - return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err) - } - + sq := storage.NewSearchQuery(start, end, tagFilterss) m := make(map[string]struct{}) - var mLock sync.Mutex - err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error { - labelValue := rs.MetricName.GetTagValue(labelName) - if len(labelValue) == 0 { - return nil + if end-start > 24*3600*1000 { + // It is cheaper to call SearchMetricNames on time ranges exceeding a day. + mns, err := netstorage.SearchMetricNames(sq, deadline) + if err != nil { + return nil, fmt.Errorf("cannot fetch time series for %q: %w", sq, err) + } + for _, mn := range mns { + labelValue := mn.GetTagValue(labelName) + if len(labelValue) == 0 { + continue + } + m[string(labelValue)] = struct{}{} + } + } else { + rss, err := netstorage.ProcessSearchQuery(sq, false, deadline) + if err != nil { + return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err) + } + var mLock sync.Mutex + err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error { + labelValue := rs.MetricName.GetTagValue(labelName) + if len(labelValue) == 0 { + return nil + } + mLock.Lock() + m[string(labelValue)] = struct{}{} + mLock.Unlock() + return nil + }) + if err != nil { + return nil, fmt.Errorf("error during data fetching: %w", err) } - mLock.Lock() - m[string(labelValue)] = struct{}{} - mLock.Unlock() - return nil - }) - if err != nil { - return nil, fmt.Errorf("error when data fetching: %w", err) - } - labelValues := make([]string, 0, len(m)) for labelValue := range m { labelValues = append(labelValues, labelValue) @@ -639,7 +630,7 @@ func LabelsCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ if err != nil { return fmt.Errorf(`cannot obtain label entries: %w`, err) } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) WriteLabelsCountResponse(bw, labelEntries) @@ -690,7 +681,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque if err != nil { return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err) } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) WriteTSDBStatusResponse(bw, status) @@ -760,7 +751,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) } } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) WriteLabelsResponse(bw, labels) @@ -782,33 +773,41 @@ func labelsWithMatches(matches []string, start, end int64, deadline searchutils.
if start >= end { end = start + defaultStep } - sq := &storage.SearchQuery{ - MinTimestamp: start, - MaxTimestamp: end, - TagFilterss: tagFilterss, - } - rss, err := netstorage.ProcessSearchQuery(sq, false, deadline) - if err != nil { - return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err) - } - + sq := storage.NewSearchQuery(start, end, tagFilterss) m := make(map[string]struct{}) - var mLock sync.Mutex - err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error { - mLock.Lock() - tags := rs.MetricName.Tags - for i := range tags { - t := &tags[i] - m[string(t.Key)] = struct{}{} + if end-start > 24*3600*1000 { + // It is cheaper to call SearchMetricNames on time ranges exceeding a day. + mns, err := netstorage.SearchMetricNames(sq, deadline) + if err != nil { + return nil, fmt.Errorf("cannot fetch time series for %q: %w", sq, err) + } + for _, mn := range mns { + for _, tag := range mn.Tags { + m[string(tag.Key)] = struct{}{} + } + } + if len(mns) > 0 { + m["__name__"] = struct{}{} + } + } else { + rss, err := netstorage.ProcessSearchQuery(sq, false, deadline) + if err != nil { + return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err) + } + var mLock sync.Mutex + err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error { + mLock.Lock() + for _, tag := range rs.MetricName.Tags { + m[string(tag.Key)] = struct{}{} + } + m["__name__"] = struct{}{} + mLock.Unlock() + return nil + }) + if err != nil { + return nil, fmt.Errorf("error during data fetching: %w", err) } - m["__name__"] = struct{}{} - mLock.Unlock() - return nil - }) - if err != nil { - return nil, fmt.Errorf("error when data fetching: %w", err) } - labels := make([]string, 0, len(m)) for label := range m { labels = append(labels, label) @@ -826,7 +825,7 @@ func SeriesCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ if err != nil { return fmt.Errorf("cannot obtain series count: %w", err) } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) WriteSeriesCountResponse(bw, n) @@ -873,17 +872,39 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) if start >= end { end = start + defaultStep } - sq := &storage.SearchQuery{ - MinTimestamp: start, - MaxTimestamp: end, - TagFilterss: tagFilterss, + sq := storage.NewSearchQuery(start, end, tagFilterss) + if end-start > 24*3600*1000 { + // It is cheaper to call SearchMetricNames on time ranges exceeding a day. + mns, err := netstorage.SearchMetricNames(sq, deadline) + if err != nil { + return fmt.Errorf("cannot fetch time series for %q: %w", sq, err) + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + bw := bufferedwriter.Get(w) + defer bufferedwriter.Put(bw) + resultsCh := make(chan *quicktemplate.ByteBuffer) + go func() { + for i := range mns { + bb := quicktemplate.AcquireByteBuffer() + writemetricNameObject(bb, &mns[i]) + resultsCh <- bb + } + close(resultsCh) + }() + // WriteSeriesResponse must consume all the data from resultsCh.
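+ // Otherwise the producer goroutine above would block forever on sending to the unbuffered channel.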
+ WriteSeriesResponse(bw, resultsCh) + if err := bw.Flush(); err != nil { + return err + } + seriesDuration.UpdateDuration(startTime) + return nil } rss, err := netstorage.ProcessSearchQuery(sq, false, deadline) if err != nil { return fmt.Errorf("cannot fetch data for %q: %w", sq, err) } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) resultsCh := make(chan *quicktemplate.ByteBuffer) @@ -1020,7 +1041,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e } } - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) WriteQueryResponse(bw, result) @@ -1119,7 +1140,7 @@ func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/153 result = removeEmptyValuesAndTimeseries(result) - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") bw := bufferedwriter.Get(w) defer bufferedwriter.Put(bw) WriteQueryRangeResponse(bw, result) diff --git a/app/vmselect/promql/eval.go b/app/vmselect/promql/eval.go index affb04e7e..d0c6c99ef 100644 --- a/app/vmselect/promql/eval.go +++ b/app/vmselect/promql/eval.go @@ -653,11 +653,7 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc, } else { minTimestamp -= ec.Step } - sq := &storage.SearchQuery{ - MinTimestamp: minTimestamp, - MaxTimestamp: ec.End, - TagFilterss: [][]storage.TagFilter{tfs}, - } + sq := storage.NewSearchQuery(minTimestamp, ec.End, [][]storage.TagFilter{tfs}) rss, err := netstorage.ProcessSearchQuery(sq, true, ec.Deadline) if err != nil { return nil, err diff --git a/app/vmselect/promql/exec.go b/app/vmselect/promql/exec.go index 789f74ab6..741511503 100644 --- a/app/vmselect/promql/exec.go +++ b/app/vmselect/promql/exec.go @@ -5,6 +5,7 @@ import ( "fmt" "math" "sort" + "strings" "sync" "sync/atomic" "time" @@ -15,7 +16,13 @@ import ( "github.com/VictoriaMetrics/metricsql" ) -var logSlowQueryDuration = flag.Duration("search.logSlowQueryDuration", 5*time.Second, "Log queries with execution time exceeding this value. Zero disables slow query logging") +var ( + logSlowQueryDuration = flag.Duration("search.logSlowQueryDuration", 5*time.Second, "Log queries with execution time exceeding this value. Zero disables slow query logging") + treatDotsAsIsInRegexps = flag.Bool("search.treatDotsAsIsInRegexps", false, "Whether to treat dots as is in regexp label filters used in queries. "+ + `For example, foo{bar=~"a.b.c"} will be automatically converted to foo{bar=~"a\\.b\\.c"}, i.e. all the dots in regexp filters will be automatically escaped `+ + `in order to match only dot char instead of matching any char. Dots in ".+", ".*" and ".{n}" regexps aren't escaped. 
`+ + `Such escaping can be useful when querying Graphite data`) +) var slowQueries = metrics.NewCounter(`vm_slow_queries_total`) @@ -177,6 +184,9 @@ func parsePromQLWithCache(q string) (metricsql.Expr, error) { if err == nil { e = metricsql.Optimize(e) e = adjustCmpOps(e) + if *treatDotsAsIsInRegexps { + e = escapeDotsInRegexpLabelFilters(e) + } } pcv = &parseCacheValue{ e: e, @@ -190,6 +200,41 @@ func parsePromQLWithCache(q string) (metricsql.Expr, error) { return pcv.e, nil } +func escapeDotsInRegexpLabelFilters(e metricsql.Expr) metricsql.Expr { + metricsql.VisitAll(e, func(expr metricsql.Expr) { + me, ok := expr.(*metricsql.MetricExpr) + if !ok { + return + } + for i := range me.LabelFilters { + f := &me.LabelFilters[i] + if f.IsRegexp { + f.Value = escapeDots(f.Value) + } + } + }) + return e +} + +func escapeDots(s string) string { + dotsCount := strings.Count(s, ".") + if dotsCount <= 0 { + return s + } + result := make([]byte, 0, len(s)+2*dotsCount) + for i := 0; i < len(s); i++ { + if s[i] == '.' && (i == 0 || s[i-1] != '\\') && (i+1 == len(s) || i+1 < len(s) && s[i+1] != '*' && s[i+1] != '+' && s[i+1] != '{') { + // Escape a dot if the following conditions are met: + // - if it isn't escaped already, i.e. if there is no `\` char before the dot. + // - if there are no regexp modifiers such as '+', '*' or '{' after the dot. + result = append(result, '\\', '.') + } else { + result = append(result, s[i]) + } + } + return string(result) +} + var parseCacheV = func() *parseCache { pc := &parseCache{ m: make(map[string]*parseCacheValue), diff --git a/app/vmselect/promql/exec_test.go b/app/vmselect/promql/exec_test.go index 977ae0575..44a9cf70c 100644 --- a/app/vmselect/promql/exec_test.go +++ b/app/vmselect/promql/exec_test.go @@ -7,8 +7,46 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" + "github.com/VictoriaMetrics/metricsql" ) +func TestEscapeDots(t *testing.T) { + f := func(s, resultExpected string) { + t.Helper() + result := escapeDots(s) + if result != resultExpected { + t.Fatalf("unexpected result for escapeDots(%q); got\n%s\nwant\n%s", s, result, resultExpected) + } + } + f("", "") + f("a", "a") + f("foobar", "foobar") + f(".", `\.`) + f(".*", `.*`) + f(".+", `.+`) + f("..", `\.\.`) + f("foo.b.{2}ar..+baz.*", `foo\.b.{2}ar\..+baz.*`) +} + +func TestEscapeDotsInRegexpLabelFilters(t *testing.T) { + f := func(s, resultExpected string) { + t.Helper() + e, err := metricsql.Parse(s) + if err != nil { + t.Fatalf("unexpected error in metricsql.Parse(%q): %s", s, err) + } + e = escapeDotsInRegexpLabelFilters(e) + result := e.AppendString(nil) + if string(result) != resultExpected { + t.Fatalf("unexpected result for escapeDotsInRegexpLabelFilters(%q);\ngot\n%s\nwant\n%s", s, result, resultExpected) + } + } + f("2", "2") + f(`foo.bar + 123`, `foo.bar + 123`) + f(`foo{bar=~"baz.xx.yyy"}`, `foo{bar=~"baz\\.xx\\.yyy"}`) + f(`foo(a.b{c="d.e",x=~"a.b.+[.a]",y!~"aaa.bb|cc.dd"}) + x.y(1,sum({x=~"aa.bb"}))`, `foo(a.b{c="d.e", x=~"a\\.b.+[\\.a]", y!~"aaa\\.bb|cc\\.dd"}) + x.y(1, sum({x=~"aa\\.bb"}))`) +} + func TestExecSuccess(t *testing.T) { start := int64(1000e3) end := int64(2000e3) diff --git a/app/vmselect/promql/rollup.go b/app/vmselect/promql/rollup.go index 44d5fca63..c711f04ad 100644 --- a/app/vmselect/promql/rollup.go +++ b/app/vmselect/promql/rollup.go @@ -15,7 +15,7 @@ import ( "github.com/valyala/histogram" ) -var
minStalenessInterval = flag.Duration("search.minStalenessInterval", 0, "The mimimum interval for staleness calculations. "+ +var minStalenessInterval = flag.Duration("search.minStalenessInterval", 0, "The minimum interval for staleness calculations. "+ "This flag could be useful for removing gaps on graphs generated from time series with irregular intervals between samples. "+ "See also '-search.maxStalenessInterval'") @@ -326,14 +326,32 @@ func getRollupFunc(funcName string) newRollupFunc { } type rollupFuncArg struct { - prevValue float64 - prevTimestamp int64 - values []float64 - timestamps []int64 + // The value preceding values if it fits staleness interval. + prevValue float64 + // The timestamp for prevValue. + prevTimestamp int64 + + // Values that fit window ending at currTimestamp. + values []float64 + + // Timestamps for values. + timestamps []int64 + + // Real value preceding values without restrictions on staleness interval. + realPrevValue float64 + + // Real value which goes after values. + realNextValue float64 + + // Current timestamp for rollup evaluation. currTimestamp int64 - idx int - window int64 + + // Index for the currently evaluated point relative to time range for query evaluation. + idx int + + // Time window for rollup calculations. + window int64 tsm *timeseriesMap } @@ -507,7 +525,9 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu ni := 0 nj := 0 stalenessInterval := int64(float64(scrapeInterval) * 0.9) - canDropLastSample := rc.CanDropLastSample + // Do not drop trailing data points for queries returning 1 or 2 points (aka instant queries). + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/845 + canDropLastSample := rc.CanDropLastSample && len(rc.Timestamps) > 2 for _, tEnd := range rc.Timestamps { tStart := tEnd - window ni = seekFirstTimestampIdxAfter(timestamps[i:], tStart, ni) @@ -526,17 +546,26 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu } rfa.values = values[i:j] rfa.timestamps = timestamps[i:j] - if canDropLastSample && j == len(timestamps) && j > 0 && (tEnd-timestamps[j-1] > stalenessInterval || i == j && len(timestamps) == 1) && rc.End-tEnd >= 2*rc.Step { + if canDropLastSample && j == len(timestamps) && j > 0 && (tEnd-timestamps[j-1] > stalenessInterval || i == j && len(timestamps) == 1) { // Drop trailing data points in the following cases: // - if the distance between the last raw sample and tEnd exceeds stalenessInterval // - if time series contains only a single raw sample // This should prevent from double counting when a label changes in time series (for instance, // during new deployment in K8S). See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/748 // Do not drop trailing data points for instant queries. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/845 rfa.prevValue = nan rfa.values = nil rfa.timestamps = nil } + if i > 0 { + rfa.realPrevValue = values[i-1] + } else { + rfa.realPrevValue = nan + } + if j < len(values) { + rfa.realNextValue = values[j] + } else { + rfa.realNextValue = nan + } rfa.currTimestamp = tEnd value := rc.Func(rfa) rfa.idx++ @@ -1243,6 +1272,12 @@ func rollupDelta(rfa *rollupFuncArg) float64 { if len(values) == 0 { return nan } + if !math.IsNaN(rfa.realPrevValue) { + // Assume that the value didn't change during the current gap. + // This should fix high delta() and increase() values at the end of gaps.
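+ // For example, with realPrevValue=8 and values=[10], the returned delta is 2 instead of 10.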
+ // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/894 + return values[len(values)-1] - rfa.realPrevValue + } // Assume that the previous non-existing value was 0 // only if the first value doesn't exceed too much the delta with the next value. // @@ -1255,6 +1290,8 @@ func rollupDelta(rfa *rollupFuncArg) float64 { d := float64(10) if len(values) > 1 { d = values[1] - values[0] + } else if !math.IsNaN(rfa.realNextValue) { + d = rfa.realNextValue - values[0] } if math.Abs(values[0]) < 10*(math.Abs(d)+1) { prevValue = 0 diff --git a/app/vmselect/promql/rollup_test.go b/app/vmselect/promql/rollup_test.go index fb7083d80..dff89e2c8 100644 --- a/app/vmselect/promql/rollup_test.go +++ b/app/vmselect/promql/rollup_test.go @@ -1103,11 +1103,13 @@ func testRowsEqual(t *testing.T, values []float64, timestamps []int64, valuesExp } func TestRollupDelta(t *testing.T) { - f := func(prevValue float64, values []float64, resultExpected float64) { + f := func(prevValue, realPrevValue, realNextValue float64, values []float64, resultExpected float64) { t.Helper() rfa := &rollupFuncArg{ - prevValue: prevValue, - values: values, + prevValue: prevValue, + values: values, + realPrevValue: realPrevValue, + realNextValue: realNextValue, } result := rollupDelta(rfa) if math.IsNaN(result) { @@ -1120,22 +1122,36 @@ func TestRollupDelta(t *testing.T) { t.Fatalf("unexpected result; got %v; want %v", result, resultExpected) } } - f(nan, nil, nan) + f(nan, nan, nan, nil, nan) // Small initial value - f(nan, []float64{1}, 1) - f(nan, []float64{10}, 10) - f(nan, []float64{100}, 100) - f(nan, []float64{1, 2, 3}, 3) - f(1, []float64{1, 2, 3}, 2) - f(nan, []float64{5, 6, 8}, 8) - f(2, []float64{5, 6, 8}, 6) + f(nan, nan, nan, []float64{1}, 1) + f(nan, nan, nan, []float64{10}, 10) + f(nan, nan, nan, []float64{100}, 100) + f(nan, nan, nan, []float64{1, 2, 3}, 3) + f(1, nan, nan, []float64{1, 2, 3}, 2) + f(nan, nan, nan, []float64{5, 6, 8}, 8) + f(2, nan, nan, []float64{5, 6, 8}, 6) // Too big initial value must be skipped. 
- f(nan, []float64{1000}, 0) - f(nan, []float64{1000, 1001, 1002}, 2) + f(nan, nan, nan, []float64{1000}, 0) + f(nan, nan, nan, []float64{1000, 1001, 1002}, 2) + + // Non-nan realPrevValue + f(nan, 900, nan, []float64{1000}, 100) + f(nan, 1000, nan, []float64{1000}, 0) + f(nan, 1100, nan, []float64{1000}, -100) + f(nan, 900, nan, []float64{1000, 1001, 1002}, 102) + + // Small delta between realNextValue and values + f(nan, nan, 990, []float64{1000}, 0) + f(nan, nan, 1005, []float64{1000}, 0) + + // Big delta between realNextValue and values + f(nan, nan, 800, []float64{1000}, 1000) + f(nan, nan, 1300, []float64{1000}, 1000) // Empty values - f(1, nil, 0) - f(100, nil, 0) + f(1, nan, nan, nil, 0) + f(100, nan, nan, nil, 0) } diff --git a/app/vmstorage/main.go b/app/vmstorage/main.go index 82b58aacd..ca9378663 100644 --- a/app/vmstorage/main.go +++ b/app/vmstorage/main.go @@ -24,6 +24,7 @@ var ( retentionPeriod = flagutil.NewDuration("retentionPeriod", 1, "Data with timestamps outside the retentionPeriod is automatically deleted") snapshotAuthKey = flag.String("snapshotAuthKey", "", "authKey, which must be passed in query string to /snapshot* pages") forceMergeAuthKey = flag.String("forceMergeAuthKey", "", "authKey, which must be passed in query string to /internal/force_merge pages") + forceFlushAuthKey = flag.String("forceFlushAuthKey", "", "authKey, which must be passed in query string to /internal/force_flush pages") precisionBits = flag.Int("precisionBits", 64, "The number of precision bits to store per each value. Lower precision bits improves data compression at the cost of precision loss") @@ -115,6 +116,14 @@ func AddRows(mrs []storage.MetricRow) error { return err } +// RegisterMetricNames registers all the metrics from mrs in the storage. +func RegisterMetricNames(mrs []storage.MetricRow) error { + WG.Add(1) + err := Storage.RegisterMetricNames(mrs) + WG.Done() + return err +} + // DeleteMetrics deletes metrics matching tfss. // // Returns the number of deleted metrics. @@ -125,6 +134,14 @@ func DeleteMetrics(tfss []*storage.TagFilters) (int, error) { return n, err } +// SearchMetricNames returns metric names for the given tfss on the given tr. +func SearchMetricNames(tfss []*storage.TagFilters, tr storage.TimeRange, maxMetrics int, deadline uint64) ([]storage.MetricName, error) { + WG.Add(1) + mns, err := Storage.SearchMetricNames(tfss, tr, maxMetrics, deadline) + WG.Done() + return mns, err +} + // SearchTagKeysOnTimeRange searches for tag keys on tr. func SearchTagKeysOnTimeRange(tr storage.TimeRange, maxTagKeys int, deadline uint64) ([]string, error) { WG.Add(1) @@ -226,6 +243,16 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { }() return true } + if path == "/internal/force_flush" { + authKey := r.FormValue("authKey") + if authKey != *forceFlushAuthKey { + httpserver.Errorf(w, r, "invalid authKey %q. It must match the value from -forceFlushAuthKey command line flag", authKey) + return true + } + logger.Infof("flushing storage to make pending data available for reading") + Storage.DebugFlush() + return true + } prometheusCompatibleResponse := false if path == "/api/v1/admin/tsdb/snapshot" { // Handle Prometheus API - https://prometheus.io/docs/prometheus/latest/querying/api/#snapshot .
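For illustration, the new `/internal/force_flush` handler above can be exercised with a tiny client like the following sketch. The listen address and the auth key value are assumptions for the example; the auth key must match the `-forceFlushAuthKey` command-line flag, and judging by the `Storage.DebugFlush` call, the handler is meant for troubleshooting and tests rather than regular use:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Assumed single-node listen address; "my-secret" is a placeholder auth key.
	resp, err := http.Get("http://localhost:8428/internal/force_flush?authKey=my-secret")
	if err != nil {
		log.Fatalf("cannot call /internal/force_flush: %s", err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("cannot read response: %s", err)
	}
	fmt.Printf("status=%d body=%q\n", resp.StatusCode, body)
}
```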
@@ -244,7 +271,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { switch path { case "/create": - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") snapshotPath, err := Storage.CreateSnapshot() if err != nil { err = fmt.Errorf("cannot create snapshot: %w", err) @@ -258,7 +285,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { } return true case "/list": - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") snapshots, err := Storage.ListSnapshots() if err != nil { err = fmt.Errorf("cannot list snapshots: %w", err) @@ -275,7 +302,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { fmt.Fprintf(w, `]}`) return true case "/delete": - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") snapshotName := r.FormValue("snapshot") if err := Storage.DeleteSnapshot(snapshotName); err != nil { err = fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err) @@ -285,7 +312,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { fmt.Fprintf(w, `{"status":"ok"}`) return true case "/delete_all": - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json; charset=utf-8") snapshots, err := Storage.ListSnapshots() if err != nil { err = fmt.Errorf("cannot list snapshots: %w", err) diff --git a/deployment/docker/Makefile b/deployment/docker/Makefile index 0d1eb66cd..9e60a4e3d 100644 --- a/deployment/docker/Makefile +++ b/deployment/docker/Makefile @@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics ROOT_IMAGE ?= alpine:3.12.1 CERTS_IMAGE := alpine:3.12.1 -GO_BUILDER_IMAGE := golang:1.15.4 +GO_BUILDER_IMAGE := golang:1.15.5 BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr : _) BASE_IMAGE := local/base:1.1.1-$(shell echo $(ROOT_IMAGE) | tr : _)-$(shell echo $(CERTS_IMAGE) | tr : _) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 812d26f60..8cd081e14 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -2,8 +2,35 @@ # tip + +# [v1.47.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.47.0) + +* FEATURE: vmselect: return the original error from `vmstorage` node in query response if `-search.denyPartialResponse` is set. + See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/891 +* FEATURE: vmselect: add `"isPartial":{true|false}` field in JSON output for `/api/v1/*` functions + from [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/). `"isPartial":true` is set if the response contains partial data + because some of the `vmstorage` nodes were unavailable during query processing. +* FEATURE: improve performance for `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/<label_name>/values` on time ranges exceeding one day. +* FEATURE: vmagent: reduce memory usage when service discovery detects big number of scrape targets and the set of discovered targets changes over time. + See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 * FEATURE: vmagent: add `-promscrape.dropOriginalLabels` command-line option, which can be used for reducing memory usage when scraping big number of targets. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-724308361 for details. +* FEATURE: vmalert: explicitly set extra labels to alert entities.
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/870 +* FEATURE: add `-search.treatDotsAsIsInRegexps` command-line flag, which can be used for automatic escaping of dots in regexp label filters used in queries. + For example, if `-search.treatDotsAsIsInRegexps` is set, then the query `foo{bar=~"aaa.bb.cc|dd.eee"}` is automatically converted to `foo{bar=~"aaa\\.bb\\.cc|dd\\.eee"}`. + This may be useful for querying Graphite data. +* FEATURE: consistently return text-based HTTP responses such as `text/plain` and `application/json` with `charset=utf-8`. + See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/897 +* FEATURE: update Go builder from v1.15.4 to v1.15.5. This should fix [these issues in Go](https://github.com/golang/go/issues?q=milestone%3AGo1.15.5+label%3ACherryPickApproved). +* FEATURE: added `/internal/force_flush` http handler for flushing recently ingested data from in-memory buffers to persistent storage. + See [troubleshooting docs](https://victoriametrics.github.io/#troubleshooting) for more details. +* FEATURE: added [Graphite Tags API](https://graphite.readthedocs.io/en/stable/tags.html) support. + See [these docs](https://victoriametrics.github.io/#graphite-tags-api-usage) for details. + +* BUGFIX: do not return data points at the end of the selected time range for time series ending in the middle of the selected time range. + See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/887 and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/845 +* BUGFIX: remove spikes at the end of time series gaps for `increase()` or `delta()` functions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/894 +* BUGFIX: vminsert: properly return HTTP 503 status code when all the vmstorage nodes are unavailable. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/896 # [v1.46.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.46.0) diff --git a/docs/Cluster-VictoriaMetrics.md b/docs/Cluster-VictoriaMetrics.md index 8c7668dec..1214efa5b 100644 --- a/docs/Cluster-VictoriaMetrics.md +++ b/docs/Cluster-VictoriaMetrics.md @@ -181,7 +181,7 @@ or [an alternative dashboard for VictoriaMetrics cluster](https://grafana.com/gr - `prometheus/api/v1/import/csv` - for importing arbitrary CSV data. See [these docs](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-import-csv-data) for details. - `prometheus/api/v1/import/prometheus` - for importing data in Prometheus exposition format. See [these docs](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-import-data-in-prometheus-exposition-format) for details. -* URLs for [Prmetheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/): `http://<vmselect>:8481/select/<accountID>/prometheus/<suffix>`, where: +* URLs for [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/): `http://<vmselect>:8481/select/<accountID>/prometheus/<suffix>`, where: - `<accountID>` is an arbitrary number identifying data namespace for the query (aka tenant) - `<suffix>` may have the following values: - `api/v1/query` - performs [PromQL instant query](https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries). @@ -194,6 +194,8 @@ or [an alternative dashboard for VictoriaMetrics cluster](https://grafana.com/gr - `api/v1/export/native` - exports raw data in native binary format. It may be imported into another VictoriaMetrics via `api/v1/import/native` (see above). - `api/v1/export/csv` - exports data in CSV.
It may be imported into another VictoriaMetrics via `api/v1/import/csv` (see above). - `api/v1/status/tsdb` - for time series stats. See [these docs](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats) for details. + VictoriaMetrics accepts optional `topN=N` and `date=YYYY-MM-DD` query args for this handler, where `N` is the number of top entries to return in the response + and `YYYY-MM-DD` is the date for collecting the stats. By default the stats are collected for the current day. - `api/v1/status/active_queries` - for currently executed active queries. Note that every `vmselect` maintains an independent list of active queries, which is returned in the response. @@ -203,6 +205,11 @@ or [an alternative dashboard for VictoriaMetrics cluster](https://grafana.com/gr - `metrics/find` - searches Graphite metrics. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find). - `metrics/expand` - expands Graphite metrics. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-expand). - `metrics/index.json` - returns all the metric names. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-index-json). + - `tags` - returns tag names. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags). + - `tags/<tag_name>` - returns tag values for the given `<tag_name>`. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags). + - `tags/findSeries` - returns series matching the given `expr`. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags). + - `tags/autoComplete/tags` - returns tags matching the given `tagPrefix` and/or `expr`. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support). + - `tags/autoComplete/values` - returns tag values matching the given `valuePrefix` and/or `expr`. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support). * URL for time series deletion: `http://<vmselect>:8481/delete/<accountID>/prometheus/api/v1/admin/tsdb/delete_series?match[]=<timeseries_selector_for_delete>`. Note that the `delete_series` handler should be used only in exceptional cases such as deletion of accidentally ingested incorrect time series. It shouldn't diff --git a/docs/Release-Guide.md b/docs/Release-Guide.md index 01ac033b2..24afb76ff 100644 --- a/docs/Release-Guide.md +++ b/docs/Release-Guide.md @@ -2,7 +2,7 @@ Release process guidance ## Release version and Docker images -0. Document all the changes for new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/CHANGELOG.md). +0. Document all the changes for new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md). 1. Create release tag with `git tag v1.xx.y`. 2. Run `make release` for creating `*.tar.gz` release archive with the corresponding `_checksums.txt` inside `bin` directory. 3. Run `make publish` for creating and publishing Docker images. diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index 3bd12a676..f581651eb 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -10,7 +10,7 @@ ## VictoriaMetrics -VictoriaMetrics is fast, cost-effective and scalable time-series database. +VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
It is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), [docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and @@ -21,11 +21,13 @@ Cluster version is available [here](https://github.com/VictoriaMetrics/VictoriaM See our [Wiki](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki) for additional documentation. [Contact us](mailto:info@victoriametrics.com) if you need paid enterprise support for VictoriaMetrics. -See [features available for enterprise customers](https://github.com/VictoriaMetrics/VictoriaMetrics/issues?q=is%3Aissue+label%3Aenterprise). +See [features available for enterprise customers](https://victoriametrics.com/enterprise.html). ## Case studies and talks +Click on a link in order to read the corresponding case study. + * [Adidas](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/CaseStudies#adidas) * [CERN](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/CaseStudies#cern) * [COLOPL](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/CaseStudies#colopl) @@ -46,8 +48,8 @@ See [features available for enterprise customers](https://github.com/VictoriaMet * VictoriaMetrics can be used as long-term storage for Prometheus or for [vmagent](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmagent/README.md). See [these docs](#prometheus-setup) for details. * Supports [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/), so it can be used as Prometheus drop-in replacement in Grafana. - VictoriaMetrics implements [MetricsQL](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/MetricsQL) query language, which is inspired by PromQL. -* Supports global query view. Multiple Prometheus instances may write data into VictoriaMetrics. Later this data may be used in a single query. + VictoriaMetrics implements [MetricsQL](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/MetricsQL) query language, which is inspired by PromQL. MetricsQL is backwards-compatible with PromQL (see the query sketch after this list). +* Supports global query view. Multiple Prometheus instances or any other data sources may write data into VictoriaMetrics. Later this data may be queried via a single query. * High performance and good scalability for both [inserts](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and [selects](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4). [Outperforms InfluxDB and TimescaleDB by up to 20x](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
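A minimal query sketch illustrating the drop-in Prometheus API compatibility from the feature list above; the localhost address, the default 8428 port and the `up` metric name are illustrative assumptions, not values taken from the docs:

```bash
# Instant PromQL/MetricsQL query through the Prometheus querying API
# of a single-node VictoriaMetrics instance
curl -s 'http://localhost:8428/api/v1/query?query=up'
```

Prometheus-compatible clients such as Grafana can issue the same request against the same endpoint.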
@@ -104,7 +106,9 @@ See [features available for enterprise customers](https://github.com/VictoriaMet * [How to send data from OpenTSDB-compatible agents](#how-to-send-data-from-opentsdb-compatible-agents) * [Prometheus querying API usage](#prometheus-querying-api-usage) * [Prometheus querying API enhancements](#prometheus-querying-api-enhancements) -* [Graphite Metrics API usage](#graphite-metrics-api-usage) +* [Graphite API usage](#graphite-api-usage) + * [Graphite Metrics API usage](#graphite-metrics-api-usage) + * [Graphite Tags API usage](#graphite-tags-api-usage) * [How to build from sources](#how-to-build-from-sources) * [Development build](#development-build) * [Production build](#production-build) @@ -410,6 +414,7 @@ Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read via t * [Prometheus querying API](#prometheus-querying-api-usage) * Metric names can be explored via [Graphite metrics API](#graphite-metrics-api-usage) +* Tags can be explored via [Graphite tags API](#graphite-tags-api-usage) * [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/master/cmd/carbonapi/carbonapi.example.prometheus.yaml) ### How to send data from OpenTSDB-compatible agents @@ -495,7 +500,9 @@ VictoriaMetrics supports the following handlers from [Prometheus querying API](h * [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers) * [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names) * [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values) -* [/api/v1/status/tsdb](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats) +* [/api/v1/status/tsdb](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats). VictoriaMetrics accepts optional `topN=N` and `date=YYYY-MM-DD` + query args for this handler, where `N` is the number of top entries to return in the response and `YYYY-MM-DD` is the date for collecting the stats. + By default top 10 entries are returned and the stats are collected for the current day. * [/api/v1/targets](https://prometheus.io/docs/prometheus/latest/querying/api/#targets) - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter) for more details. These handlers can be queried from Prometheus-compatible clients such as Grafana or curl. @@ -522,7 +529,15 @@ Additionally VictoriaMetrics provides the following handlers: * `/api/v1/status/active_queries` - returns a list of currently running queries. -### Graphite Metrics API usage +### Graphite API usage + +VictoriaMetrics supports the following Graphite APIs: + +* Metrics API - see [these docs](#graphite-metrics-api-usage). +* Tags API - see [these docs](#graphite-tags-api-usage). + + +#### Graphite Metrics API usage VictoriaMetrics supports the following handlers from [Graphite Metrics API](https://graphite-api.readthedocs.io/en/latest/api.html#the-metrics-api): @@ -536,6 +551,19 @@ VictoriaMetrics accepts the following additional query args at `/metrics/find` a that start with `node_`. By default `delimiter=.`.
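A hedged usage sketch for the handlers and query args described above; the host, the `topN` value, the `date` and the `node_*` query are illustrative assumptions:

```bash
# Top 5 TSDB stats entries for a given day (date format is YYYY-MM-DD)
curl -s 'http://localhost:8428/api/v1/status/tsdb?topN=5&date=2020-11-01'

# List currently running queries
curl -s 'http://localhost:8428/api/v1/status/active_queries'

# Graphite Metrics API: find metric names starting with node_
curl -s 'http://localhost:8428/metrics/find?query=node_*'
```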
+#### Graphite Tags API usage + +VictoriaMetrics supports the following handlers from [Graphite Tags API](https://graphite.readthedocs.io/en/stable/tags.html): + +* [/tags/tagSeries](https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb) +* [/tags/tagMultiSeries](https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb) +* [/tags](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags) +* [/tags/tag_name](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags) +* [/tags/findSeries](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags) +* [/tags/autoComplete/tags](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support) +* [/tags/autoComplete/values](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support) + + ### How to build from sources We recommend using either [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or @@ -695,7 +723,16 @@ VictoriaMetrics provides the following handlers for exporting data: Send a request to `http://<victoriametrics-addr>:8428/api/v1/export/native?match[]=<timeseries_selector_for_export>`, where `<timeseries_selector_for_export>` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) -for metrics to export. Use `{__name__!=""}` selector for fetching all the time series. +for metrics to export. Use `{__name__=~".*"}` selector for fetching all the time series. + +On large databases you may experience problems with the limit on unique time series (the default value is 300000). In this case you may need to adjust the `-search.maxUniqueTimeseries` parameter: + +```bash +# count unique time series in the database +wget -O- -q 'http://your_victoriametrics_instance:8428/api/v1/series/count' | jq '.data[0]' + +# relaunch VictoriaMetrics with -search.maxUniqueTimeseries set above the value reported by the previous command +``` Optional `start` and `end` args may be added to the request in order to limit the time frame for the exported data. These args may contain either unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) values. @@ -1196,13 +1233,18 @@ VictoriaMetrics also exposes currently running queries with their execution time * It is recommended inspecting logs during troubleshooting, since they may contain useful information. +* VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage. + This may lead to the following "issues": + * Data becomes available for querying only a few seconds after insertion. It is possible to flush in-memory buffers to persistent storage + by requesting the `/internal/force_flush` http handler. + * The last few seconds of inserted data may be lost on unclean shutdown (e.g. OOM, `kill -9` or hardware reset). + See [this article for technical details](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704). + * If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second, then it is likely you have too many active time series for the current amount of RAM. VictoriaMetrics [exposes](#monitoring) `vm_slow_*` metrics, which could be used as an indicator of low amounts of RAM. It is recommended increasing the amount of RAM on the node with VictoriaMetrics in order to improve ingestion and query performance in this case. - Another option is to increase `-memory.allowedPercent` command-line flag value.
Be careful with this - option, since too big value for `-memory.allowedPercent` may result in high I/O usage. * VictoriaMetrics prioritizes data ingestion over data querying. So if it does not have enough resources for data ingestion, then data querying may slow down significantly. @@ -1217,9 +1259,9 @@ VictoriaMetrics also exposes currently running queries with their execution time which would start background merge if they had more free disk space. * If VictoriaMetrics doesn't work because certain parts are corrupted due to disk errors, - then just remove directories with broken parts. This will recover VictoriaMetrics at the cost - of data loss stored in the broken parts. In the future, `vmrecover` tool will be created - for automatic recovering from such errors. + then just remove directories with broken parts. It is safe to remove subdirectories under `<-storageDataPath>/data/{big,small}/YYYY_MM` directories + when VictoriaMetrics isn't running. This recovers VictoriaMetrics at the cost of data loss stored in the deleted broken parts. + In the future, a `vmrecover` tool will be created for automatic recovering from such errors. * If you see gaps on the graphs, try resetting the cache by sending a request to `/internal/resetRollupResultCache`. If this removes gaps on the graphs, then it is likely data with timestamps older than `-search.cacheTimestampOffset` @@ -1241,6 +1283,11 @@ VictoriaMetrics also exposes currently running queries with their execution time This prevents ingestion of metrics with too many labels. It is recommended [monitoring](#monitoring) `vm_metrics_with_dropped_labels_total` metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload. +* If you store Graphite metrics like `foo.bar.baz` in VictoriaMetrics, then the `-search.treatDotsAsIsInRegexps` command-line flag could be useful. + By default `.` chars in regexps match any char. If you need to match only dots, then `\\.` must be used in regexp filters. + When the `-search.treatDotsAsIsInRegexps` option is enabled, dots in regexps are automatically escaped so they match only dots instead of arbitrary chars. + This may significantly increase performance when locating time series for the given label filters. + * VictoriaMetrics ignores `NaN` values during data ingestion. diff --git a/docs/vmagent.md b/docs/vmagent.md index b21928d40..d9ad2772e 100644 --- a/docs/vmagent.md +++ b/docs/vmagent.md @@ -63,6 +63,22 @@ Then send Influx data to `http://vmagent-host:8429`. See [these docs](https://gi Pass `-help` to `vmagent` in order to see the full list of supported command-line flags with their descriptions. +### Configuration update + +`vmagent` should be restarted in order to update config options set via command-line args. + +`vmagent` supports multiple approaches for reloading configs from updated config files such as `-promscrape.config`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`: + +* Sending `SIGHUP` signal to `vmagent` process: ```bash kill -SIGHUP `pidof vmagent` ``` + +* Sending HTTP request to `http://vmagent:8429/-/reload` endpoint. + There is also the `-promscrape.configCheckInterval` command-line option, which can be used for automatically reloading configs from the updated `-promscrape.config` file.
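A short sketch of the reload approaches described above; the host names, the `prometheus.yml` path, the `1m` interval and the remote storage URL are placeholders rather than values from the docs:

```bash
# Reload updated config files over HTTP instead of sending SIGHUP
curl 'http://vmagent:8429/-/reload'

# Alternatively, let vmagent re-read -promscrape.config periodically
vmagent -promscrape.config=prometheus.yml -promscrape.configCheckInterval=1m \
    -remoteWrite.url='http://victoriametrics:8428/api/v1/write'
```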
+ + ### Use cases @@ -197,6 +213,7 @@ The relabeling can be defined in the following places: Read more about relabeling in the following articles: +* [How to use Relabeling in Prometheus and VictoriaMetrics](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2) * [Life of a label](https://www.robustperception.io/life-of-a-label) * [Discarding targets and timeseries with relabeling](https://www.robustperception.io/relabelling-can-discard-targets-timeseries-and-alerts) * [Dropping labels at scrape time](https://www.robustperception.io/dropping-metrics-at-scrape-time-with-prometheus) diff --git a/go.mod b/go.mod index 1a9e48bc2..290536faf 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics require ( - cloud.google.com/go v0.71.0 // indirect + cloud.google.com/go v0.72.0 // indirect cloud.google.com/go/storage v1.12.0 github.com/VictoriaMetrics/fastcache v1.5.7 @@ -10,12 +10,13 @@ require ( github.com/VictoriaMetrics/fasthttp v1.0.7 github.com/VictoriaMetrics/metrics v1.12.3 github.com/VictoriaMetrics/metricsql v0.7.2 - github.com/aws/aws-sdk-go v1.35.23 + github.com/aws/aws-sdk-go v1.35.28 github.com/cespare/xxhash/v2 v2.1.1 github.com/go-kit/kit v0.10.0 github.com/golang/snappy v0.0.2 - github.com/klauspost/compress v1.11.2 + github.com/klauspost/compress v1.11.3 github.com/prometheus/client_golang v1.8.0 // indirect + github.com/prometheus/common v0.15.0 // indirect github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9 github.com/valyala/fastjson v1.6.1 github.com/valyala/fastrand v1.0.0 @@ -23,15 +24,13 @@ require ( github.com/valyala/gozstd v1.8.3 github.com/valyala/histogram v1.1.2 github.com/valyala/quicktemplate v1.6.3 - golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 // indirect - golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 - golang.org/x/sys v0.0.0-20201106081118-db71ae66460a - golang.org/x/text v0.3.4 // indirect - golang.org/x/tools v0.0.0-20201105220310-78b158585360 // indirect + golang.org/x/net v0.0.0-20201110031124-69a78807bb2b // indirect + golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 + golang.org/x/sys v0.0.0-20201116161645-c061ba923fbb + golang.org/x/tools v0.0.0-20201116182000-1d699438d2cf // indirect google.golang.org/api v0.35.0 google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20201106154455-f9bfe239b0ba // indirect - google.golang.org/grpc v1.33.2 // indirect + google.golang.org/genproto v0.0.0-20201116144945-7adebfbe6a3f // indirect gopkg.in/yaml.v2 v2.3.0 ) diff --git a/go.sum b/go.sum index 588af6be8..a8c8c9c48 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko= -cloud.google.com/go v0.71.0 h1:2ha722Z08cmRa0orJrzBaszYQcLbLFcsZHsGSj/kIF4= -cloud.google.com/go v0.71.0/go.mod h1:qZfY4Y7AEIQwG/fQYD3xrxLNkQZ0Xzf3HGeqCkA6LVM= +cloud.google.com/go v0.72.0 h1:eWRCuwubtDrCJG0oSUMgnsbD4CmPFQF2ei4OFbXvwww= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= 
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -116,8 +116,8 @@ github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:o github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= -github.com/aws/aws-sdk-go v1.35.23 h1:SCP0d0XvyJTDmfnHEQPvBaYi3kea1VNUo7uQmkVgFts= -github.com/aws/aws-sdk-go v1.35.23/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= +github.com/aws/aws-sdk-go v1.35.28 h1:S2LuRnfC8X05zgZLC8gy/Sb82TGv2Cpytzbzz7tkeHc= +github.com/aws/aws-sdk-go v1.35.28/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -464,8 +464,8 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.2 h1:MiK62aErc3gIiVEtyzKfeOHgW7atJb5g/KNX5m3c2nQ= -github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8h3jc= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -614,8 +614,9 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.14.0 h1:RHRyE8UocrbjU+6UvRzwi6HjiDfxrrBU91TtbKzkGp4= github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -837,16 +838,17 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 h1:42cLlJJdEh+ySyeUUbEQ5bsTiq8voBeTuweGVkY6Puw= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 h1:Mj83v+wSRNEar42a/MQgxk9X42TdEmrOl9i+y8WbxLo= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -920,8 +922,8 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201106081118-db71ae66460a h1:ALUFBKlIyeY7y5ZgPJmblk/vKz+zBQSnNiPkt41sgeg= -golang.org/x/sys v0.0.0-20201106081118-db71ae66460a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201116161645-c061ba923fbb h1:+EHGEcgeA7ESswi5i4ojbo7sRzlz7vWoxFGcMuEZtu8= +golang.org/x/sys v0.0.0-20201116161645-c061ba923fbb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1001,9 +1003,9 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201020161133-226fd2f889ca/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= 
-golang.org/x/tools v0.0.0-20201030143252-cf7a54d06671/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201105220310-78b158585360 h1:/9CzsU8hOpnSUCtem1vfWNgsVeCTgkMdx+VE5YIYxnU= -golang.org/x/tools v0.0.0-20201105220310-78b158585360/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201116182000-1d699438d2cf h1:sDQg8i3k24bqfv1V4MugOhRCHMRzkrHdLJX5QraRSt4= +golang.org/x/tools v0.0.0-20201116182000-1d699438d2cf/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1034,8 +1036,6 @@ google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSr google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo= google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.34.0 h1:k40adF3uR+6x/+hO5Dh4ZFUqFp67vxvbpafFiJxl10A= -google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1084,9 +1084,9 @@ google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201030142918-24207fddd1c3/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201106154455-f9bfe239b0ba h1:HocWKLuilwaaLY56cHV38rw84wJ1nscA0Rs7OnO8mm8= -google.golang.org/genproto v0.0.0-20201106154455-f9bfe239b0ba/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201116144945-7adebfbe6a3f h1:YZKfGrT39pgYIg+3cfyIdK1z4VLjUPVboS1Ob49DyDA= +google.golang.org/genproto v0.0.0-20201116144945-7adebfbe6a3f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1106,8 +1106,6 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1 
h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/lib/httpserver/httpserver.go b/lib/httpserver/httpserver.go index 1cf52c77a..daa8c8e59 100644 --- a/lib/httpserver/httpserver.go +++ b/lib/httpserver/httpserver.go @@ -208,7 +208,7 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques r.URL.Path = path switch r.URL.Path { case "/health": - w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Type", "text/plain; charset=utf-8") deadline := atomic.LoadInt64(&s.shutdownDelayDeadline) if deadline <= 0 { w.Write([]byte("OK")) @@ -244,7 +244,7 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques return } startTime := time.Now() - w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Type", "text/plain; charset=utf-8") WritePrometheusMetrics(w) metricsHandlerDuration.UpdateDuration(startTime) return @@ -395,7 +395,7 @@ func (zrw *gzipResponseWriter) Write(p []byte) (int, error) { if h.Get("Content-Type") == "" { // Disable auto-detection of content-type, since it // is incorrectly detected after the compression. - h.Set("Content-Type", "text/html") + h.Set("Content-Type", "text/html; charset=utf-8") } } zrw.writeHeader() diff --git a/lib/promauth/config.go b/lib/promauth/config.go index 838b42be6..184f872bf 100644 --- a/lib/promauth/config.go +++ b/lib/promauth/config.go @@ -13,18 +13,18 @@ import ( // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config type TLSConfig struct { - CAFile string `yaml:"ca_file"` - CertFile string `yaml:"cert_file"` - KeyFile string `yaml:"key_file"` - ServerName string `yaml:"server_name"` - InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + CAFile string `yaml:"ca_file,omitempty"` + CertFile string `yaml:"cert_file,omitempty"` + KeyFile string `yaml:"key_file,omitempty"` + ServerName string `yaml:"server_name,omitempty"` + InsecureSkipVerify bool `yaml:"insecure_skip_verify,omitempty"` } // BasicAuthConfig represents basic auth config. type BasicAuthConfig struct { Username string `yaml:"username"` - Password string `yaml:"password"` - PasswordFile string `yaml:"password_file"` + Password string `yaml:"password,omitempty"` + PasswordFile string `yaml:"password_file,omitempty"` } // Config is auth config. 
diff --git a/lib/promrelabel/config.go b/lib/promrelabel/config.go index 911eea2a3..abd9419a4 100644 --- a/lib/promrelabel/config.go +++ b/lib/promrelabel/config.go @@ -14,13 +14,13 @@ import ( // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config type RelabelConfig struct { - SourceLabels []string `yaml:"source_labels"` - Separator *string `yaml:"separator"` - TargetLabel string `yaml:"target_label"` - Regex *string `yaml:"regex"` - Modulus uint64 `yaml:"modulus"` - Replacement *string `yaml:"replacement"` - Action string `yaml:"action"` + SourceLabels []string `yaml:"source_labels,flow,omitempty"` + Separator *string `yaml:"separator,omitempty"` + TargetLabel string `yaml:"target_label,omitempty"` + Regex *string `yaml:"regex,omitempty"` + Modulus uint64 `yaml:"modulus,omitempty"` + Replacement *string `yaml:"replacement,omitempty"` + Action string `yaml:"action,omitempty"` } // LoadRelabelConfigs loads relabel configs from the given path. diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go index 6d2f1ea63..9ada106c6 100644 --- a/lib/promscrape/config.go +++ b/lib/promscrape/config.go @@ -50,9 +50,9 @@ type Config struct { // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/ type GlobalConfig struct { - ScrapeInterval time.Duration `yaml:"scrape_interval"` - ScrapeTimeout time.Duration `yaml:"scrape_timeout"` - ExternalLabels map[string]string `yaml:"external_labels"` + ScrapeInterval time.Duration `yaml:"scrape_interval,omitempty"` + ScrapeTimeout time.Duration `yaml:"scrape_timeout,omitempty"` + ExternalLabels map[string]string `yaml:"external_labels,omitempty"` } // ScrapeConfig represents essential parts for `scrape_config` section of Prometheus config. @@ -60,34 +60,34 @@ type GlobalConfig struct { // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config type ScrapeConfig struct { JobName string `yaml:"job_name"` - ScrapeInterval time.Duration `yaml:"scrape_interval"` - ScrapeTimeout time.Duration `yaml:"scrape_timeout"` - MetricsPath string `yaml:"metrics_path"` - HonorLabels bool `yaml:"honor_labels"` - HonorTimestamps bool `yaml:"honor_timestamps"` - Scheme string `yaml:"scheme"` - Params map[string][]string `yaml:"params"` - BasicAuth *promauth.BasicAuthConfig `yaml:"basic_auth"` - BearerToken string `yaml:"bearer_token"` - BearerTokenFile string `yaml:"bearer_token_file"` - TLSConfig *promauth.TLSConfig `yaml:"tls_config"` - StaticConfigs []StaticConfig `yaml:"static_configs"` - FileSDConfigs []FileSDConfig `yaml:"file_sd_configs"` - KubernetesSDConfigs []kubernetes.SDConfig `yaml:"kubernetes_sd_configs"` - OpenStackSDConfigs []openstack.SDConfig `yaml:"openstack_sd_configs"` - ConsulSDConfigs []consul.SDConfig `yaml:"consul_sd_configs"` - DockerSwarmConfigs []dockerswarm.SDConfig `yaml:"dockerswarm_sd_configs"` - DNSSDConfigs []dns.SDConfig `yaml:"dns_sd_configs"` - EC2SDConfigs []ec2.SDConfig `yaml:"ec2_sd_configs"` - GCESDConfigs []gce.SDConfig `yaml:"gce_sd_configs"` - RelabelConfigs []promrelabel.RelabelConfig `yaml:"relabel_configs"` - MetricRelabelConfigs []promrelabel.RelabelConfig `yaml:"metric_relabel_configs"` - SampleLimit int `yaml:"sample_limit"` + ScrapeInterval time.Duration `yaml:"scrape_interval,omitempty"` + ScrapeTimeout time.Duration `yaml:"scrape_timeout,omitempty"` + MetricsPath string `yaml:"metrics_path,omitempty"` + HonorLabels bool `yaml:"honor_labels,omitempty"` + HonorTimestamps bool `yaml:"honor_timestamps,omitempty"` + 
Scheme string `yaml:"scheme,omitempty"` + Params map[string][]string `yaml:"params,omitempty"` + BasicAuth *promauth.BasicAuthConfig `yaml:"basic_auth,omitempty"` + BearerToken string `yaml:"bearer_token,omitempty"` + BearerTokenFile string `yaml:"bearer_token_file,omitempty"` + TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` + StaticConfigs []StaticConfig `yaml:"static_configs,omitempty"` + FileSDConfigs []FileSDConfig `yaml:"file_sd_configs,omitempty"` + KubernetesSDConfigs []kubernetes.SDConfig `yaml:"kubernetes_sd_configs,omitempty"` + OpenStackSDConfigs []openstack.SDConfig `yaml:"openstack_sd_configs,omitempty"` + ConsulSDConfigs []consul.SDConfig `yaml:"consul_sd_configs,omitempty"` + DockerSwarmConfigs []dockerswarm.SDConfig `yaml:"dockerswarm_sd_configs,omitempty"` + DNSSDConfigs []dns.SDConfig `yaml:"dns_sd_configs,omitempty"` + EC2SDConfigs []ec2.SDConfig `yaml:"ec2_sd_configs,omitempty"` + GCESDConfigs []gce.SDConfig `yaml:"gce_sd_configs,omitempty"` + RelabelConfigs []promrelabel.RelabelConfig `yaml:"relabel_configs,omitempty"` + MetricRelabelConfigs []promrelabel.RelabelConfig `yaml:"metric_relabel_configs,omitempty"` + SampleLimit int `yaml:"sample_limit,omitempty"` // These options are supported only by lib/promscrape. - DisableCompression bool `yaml:"disable_compression"` - DisableKeepAlive bool `yaml:"disable_keepalive"` - StreamParse bool `yaml:"stream_parse"` + DisableCompression bool `yaml:"disable_compression,omitempty"` + DisableKeepAlive bool `yaml:"disable_keepalive,omitempty"` + StreamParse bool `yaml:"stream_parse,omitempty"` // This is set in loadConfig swc *scrapeWorkConfig @@ -106,7 +106,7 @@ type FileSDConfig struct { // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config type StaticConfig struct { Targets []string `yaml:"targets"` - Labels map[string]string `yaml:"labels"` + Labels map[string]string `yaml:"labels,omitempty"` } func loadStaticConfigs(path string) ([]StaticConfig, error) { @@ -184,7 +184,7 @@ func getSWSByJob(sws []ScrapeWork) map[string][]ScrapeWork { // getKubernetesSDScrapeWork returns `kubernetes_sd_configs` ScrapeWork from cfg. func (cfg *Config) getKubernetesSDScrapeWork(prev []ScrapeWork) []ScrapeWork { swsPrevByJob := getSWSByJob(prev) - var dst []ScrapeWork + dst := make([]ScrapeWork, 0, len(prev)) for i := range cfg.ScrapeConfigs { sc := &cfg.ScrapeConfigs[i] dstLen := len(dst) @@ -212,7 +212,7 @@ func (cfg *Config) getKubernetesSDScrapeWork(prev []ScrapeWork) []ScrapeWork { // getOpenStackSDScrapeWork returns `openstack_sd_configs` ScrapeWork from cfg. func (cfg *Config) getOpenStackSDScrapeWork(prev []ScrapeWork) []ScrapeWork { swsPrevByJob := getSWSByJob(prev) - var dst []ScrapeWork + dst := make([]ScrapeWork, 0, len(prev)) for i := range cfg.ScrapeConfigs { sc := &cfg.ScrapeConfigs[i] dstLen := len(dst) @@ -240,7 +240,7 @@ func (cfg *Config) getOpenStackSDScrapeWork(prev []ScrapeWork) []ScrapeWork { // getDockerSwarmSDScrapeWork returns `dockerswarm_sd_configs` ScrapeWork from cfg. func (cfg *Config) getDockerSwarmSDScrapeWork(prev []ScrapeWork) []ScrapeWork { swsPrevByJob := getSWSByJob(prev) - var dst []ScrapeWork + dst := make([]ScrapeWork, 0, len(prev)) for i := range cfg.ScrapeConfigs { sc := &cfg.ScrapeConfigs[i] dstLen := len(dst) @@ -268,7 +268,7 @@ func (cfg *Config) getDockerSwarmSDScrapeWork(prev []ScrapeWork) []ScrapeWork { // getConsulSDScrapeWork returns `consul_sd_configs` ScrapeWork from cfg. 
func (cfg *Config) getConsulSDScrapeWork(prev []ScrapeWork) []ScrapeWork { swsPrevByJob := getSWSByJob(prev) - var dst []ScrapeWork + dst := make([]ScrapeWork, 0, len(prev)) for i := range cfg.ScrapeConfigs { sc := &cfg.ScrapeConfigs[i] dstLen := len(dst) @@ -296,7 +296,7 @@ func (cfg *Config) getConsulSDScrapeWork(prev []ScrapeWork) []ScrapeWork { // getDNSSDScrapeWork returns `dns_sd_configs` ScrapeWork from cfg. func (cfg *Config) getDNSSDScrapeWork(prev []ScrapeWork) []ScrapeWork { swsPrevByJob := getSWSByJob(prev) - var dst []ScrapeWork + dst := make([]ScrapeWork, 0, len(prev)) for i := range cfg.ScrapeConfigs { sc := &cfg.ScrapeConfigs[i] dstLen := len(dst) @@ -324,7 +324,7 @@ func (cfg *Config) getDNSSDScrapeWork(prev []ScrapeWork) []ScrapeWork { // getEC2SDScrapeWork returns `ec2_sd_configs` ScrapeWork from cfg. func (cfg *Config) getEC2SDScrapeWork(prev []ScrapeWork) []ScrapeWork { swsPrevByJob := getSWSByJob(prev) - var dst []ScrapeWork + dst := make([]ScrapeWork, 0, len(prev)) for i := range cfg.ScrapeConfigs { sc := &cfg.ScrapeConfigs[i] dstLen := len(dst) @@ -352,7 +352,7 @@ func (cfg *Config) getEC2SDScrapeWork(prev []ScrapeWork) []ScrapeWork { // getGCESDScrapeWork returns `gce_sd_configs` ScrapeWork from cfg. func (cfg *Config) getGCESDScrapeWork(prev []ScrapeWork) []ScrapeWork { swsPrevByJob := getSWSByJob(prev) - var dst []ScrapeWork + dst := make([]ScrapeWork, 0, len(prev)) for i := range cfg.ScrapeConfigs { sc := &cfg.ScrapeConfigs[i] dstLen := len(dst) @@ -390,7 +390,7 @@ func (cfg *Config) getFileSDScrapeWork(prev []ScrapeWork) []ScrapeWork { swsMapPrev[filepath] = append(swsMapPrev[filepath], *sw) } } - var dst []ScrapeWork + dst := make([]ScrapeWork, 0, len(prev)) for i := range cfg.ScrapeConfigs { sc := &cfg.ScrapeConfigs[i] for j := range sc.FileSDConfigs { diff --git a/lib/promscrape/config_test.go b/lib/promscrape/config_test.go index 380c481e7..b215359bc 100644 --- a/lib/promscrape/config_test.go +++ b/lib/promscrape/config_test.go @@ -475,7 +475,7 @@ scrape_configs: - job_name: foo static_configs: - targets: ["xxx"] -`, nil) +`, []ScrapeWork{}) f(` scrape_configs: - job_name: foo diff --git a/lib/promscrape/discovery/consul/consul.go b/lib/promscrape/discovery/consul/consul.go index 25b8c501a..735190839 100644 --- a/lib/promscrape/discovery/consul/consul.go +++ b/lib/promscrape/discovery/consul/consul.go @@ -10,18 +10,18 @@ import ( // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config type SDConfig struct { - Server string `yaml:"server"` + Server string `yaml:"server,omitempty"` Token *string `yaml:"token"` Datacenter string `yaml:"datacenter"` - Scheme string `yaml:"scheme"` + Scheme string `yaml:"scheme,omitempty"` Username string `yaml:"username"` Password string `yaml:"password"` - TLSConfig *promauth.TLSConfig `yaml:"tls_config"` - Services []string `yaml:"services"` - Tags []string `yaml:"tags"` - NodeMeta map[string]string `yaml:"node_meta"` - TagSeparator *string `yaml:"tag_separator"` - AllowStale bool `yaml:"allow_stale"` + TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` + Services []string `yaml:"services,omitempty"` + Tags []string `yaml:"tags,omitempty"` + NodeMeta map[string]string `yaml:"node_meta,omitempty"` + TagSeparator *string `yaml:"tag_separator,omitempty"` + AllowStale bool `yaml:"allow_stale,omitempty"` // RefreshInterval time.Duration `yaml:"refresh_interval"` // refresh_interval is obtained from `-promscrape.consulSDCheckInterval` command-line option. 
} diff --git a/lib/promscrape/discovery/dns/dns.go b/lib/promscrape/discovery/dns/dns.go index 03615b3c0..40d8c580d 100644 --- a/lib/promscrape/discovery/dns/dns.go +++ b/lib/promscrape/discovery/dns/dns.go @@ -17,8 +17,8 @@ import ( // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config type SDConfig struct { Names []string `yaml:"names"` - Type string `yaml:"type"` - Port *int `yaml:"port"` + Type string `yaml:"type,omitempty"` + Port *int `yaml:"port,omitempty"` // RefreshInterval time.Duration `yaml:"refresh_interval"` // refresh_interval is obtained from `-promscrape.dnsSDCheckInterval` command-line option. } diff --git a/lib/promscrape/discovery/dockerswarm/dockerswarm.go b/lib/promscrape/discovery/dockerswarm/dockerswarm.go index 59baecbe9..3d49365a3 100644 --- a/lib/promscrape/discovery/dockerswarm/dockerswarm.go +++ b/lib/promscrape/discovery/dockerswarm/dockerswarm.go @@ -12,13 +12,13 @@ import ( type SDConfig struct { Host string `yaml:"host"` // TODO: add support for proxy_url - TLSConfig *promauth.TLSConfig `yaml:"tls_config"` + TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` Role string `yaml:"role"` - Port int `yaml:"port"` + Port int `yaml:"port,omitempty"` // refresh_interval is obtained from `-promscrape.dockerswarmSDCheckInterval` command-line option - BasicAuth *promauth.BasicAuthConfig `yaml:"basic_auth"` - BearerToken string `yaml:"bearer_token"` - BearerTokenFile string `yaml:"bearer_token_file"` + BasicAuth *promauth.BasicAuthConfig `yaml:"basic_auth,omitempty"` + BearerToken string `yaml:"bearer_token,omitempty"` + BearerTokenFile string `yaml:"bearer_token_file,omitempty"` } // GetLabels returns dockerswarm labels according to sdc. diff --git a/lib/promscrape/discovery/ec2/ec2.go b/lib/promscrape/discovery/ec2/ec2.go index c81d790b0..8ffa1697e 100644 --- a/lib/promscrape/discovery/ec2/ec2.go +++ b/lib/promscrape/discovery/ec2/ec2.go @@ -8,17 +8,17 @@ import ( // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config type SDConfig struct { - Region string `yaml:"region"` - Endpoint string `yaml:"endpoint"` - AccessKey string `yaml:"access_key"` - SecretKey string `yaml:"secret_key"` + Region string `yaml:"region,omitempty"` + Endpoint string `yaml:"endpoint,omitempty"` + AccessKey string `yaml:"access_key,omitempty"` + SecretKey string `yaml:"secret_key,omitempty"` // TODO add support for Profile, not working atm - Profile string `yaml:"profile"` - RoleARN string `yaml:"role_arn"` + Profile string `yaml:"profile,omitempty"` + RoleARN string `yaml:"role_arn,omitempty"` // RefreshInterval time.Duration `yaml:"refresh_interval"` // refresh_interval is obtained from `-promscrape.ec2SDCheckInterval` command-line option. - Port *int `yaml:"port"` - Filters []Filter `yaml:"filters"` + Port *int `yaml:"port,omitempty"` + Filters []Filter `yaml:"filters,omitempty"` } // Filter is ec2 filter. diff --git a/lib/promscrape/discovery/gce/gce.go b/lib/promscrape/discovery/gce/gce.go index 9227fe813..f0629d2ef 100644 --- a/lib/promscrape/discovery/gce/gce.go +++ b/lib/promscrape/discovery/gce/gce.go @@ -10,11 +10,11 @@ import ( type SDConfig struct { Project string `yaml:"project"` Zone ZoneYAML `yaml:"zone"` - Filter string `yaml:"filter"` + Filter string `yaml:"filter,omitempty"` // RefreshInterval time.Duration `yaml:"refresh_interval"` // refresh_interval is obtained from `-promscrape.gceSDCheckInterval` command-line option. 
- Port *int `yaml:"port"` - TagSeparator *string `yaml:"tag_separator"` + Port *int `yaml:"port,omitempty"` + TagSeparator *string `yaml:"tag_separator,omitempty"` } // ZoneYAML holds info about zones. diff --git a/lib/promscrape/discovery/kubernetes/kubernetes.go b/lib/promscrape/discovery/kubernetes/kubernetes.go index 1b8d76809..87c381963 100644 --- a/lib/promscrape/discovery/kubernetes/kubernetes.go +++ b/lib/promscrape/discovery/kubernetes/kubernetes.go @@ -10,14 +10,14 @@ import ( // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config type SDConfig struct { - APIServer string `yaml:"api_server"` + APIServer string `yaml:"api_server,omitempty"` Role string `yaml:"role"` - BasicAuth *promauth.BasicAuthConfig `yaml:"basic_auth"` - BearerToken string `yaml:"bearer_token"` - BearerTokenFile string `yaml:"bearer_token_file"` - TLSConfig *promauth.TLSConfig `yaml:"tls_config"` - Namespaces Namespaces `yaml:"namespaces"` - Selectors []Selector `yaml:"selectors"` + BasicAuth *promauth.BasicAuthConfig `yaml:"basic_auth,omitempty"` + BearerToken string `yaml:"bearer_token,omitempty"` + BearerTokenFile string `yaml:"bearer_token_file,omitempty"` + TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` + Namespaces Namespaces `yaml:"namespaces,omitempty"` + Selectors []Selector `yaml:"selectors,omitempty"` } // Namespaces represents namespaces for SDConfig diff --git a/lib/promscrape/discovery/openstack/openstack.go b/lib/promscrape/discovery/openstack/openstack.go index 52f0822e3..0a1e9bd1d 100644 --- a/lib/promscrape/discovery/openstack/openstack.go +++ b/lib/promscrape/discovery/openstack/openstack.go @@ -10,25 +10,25 @@ import ( // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config type SDConfig struct { - IdentityEndpoint string `yaml:"identity_endpoint"` - Username string `yaml:"username"` - UserID string `yaml:"userid"` - Password string `yaml:"password"` - ProjectName string `yaml:"project_name"` - ProjectID string `yaml:"project_id"` - DomainName string `yaml:"domain_name"` - DomainID string `yaml:"domain_id"` - ApplicationCredentialName string `yaml:"application_credential_name"` - ApplicationCredentialID string `yaml:"application_credential_id"` - ApplicationCredentialSecret string `yaml:"application_credential_secret"` + IdentityEndpoint string `yaml:"identity_endpoint,omitempty"` + Username string `yaml:"username,omitempty"` + UserID string `yaml:"userid,omitempty"` + Password string `yaml:"password,omitempty"` + ProjectName string `yaml:"project_name,omitempty"` + ProjectID string `yaml:"project_id,omitempty"` + DomainName string `yaml:"domain_name,omitempty"` + DomainID string `yaml:"domain_id,omitempty"` + ApplicationCredentialName string `yaml:"application_credential_name,omitempty"` + ApplicationCredentialID string `yaml:"application_credential_id,omitempty"` + ApplicationCredentialSecret string `yaml:"application_credential_secret,omitempty"` Role string `yaml:"role"` Region string `yaml:"region"` // RefreshInterval time.Duration `yaml:"refresh_interval"` // refresh_interval is obtained from `-promscrape.openstackSDCheckInterval` command-line option. 
- Port int `yaml:"port"` - AllTenants bool `yaml:"all_tenants"` - TLSConfig *promauth.TLSConfig `yaml:"tls_config"` - Availability string `yaml:"availability"` + Port int `yaml:"port,omitempty"` + AllTenants bool `yaml:"all_tenants,omitempty"` + TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` + Availability string `yaml:"availability,omitempty"` } // GetLabels returns gce labels according to sdc. diff --git a/lib/promscrape/targetstatus.go b/lib/promscrape/targetstatus.go index cb4c8c0af..6479d581c 100644 --- a/lib/promscrape/targetstatus.go +++ b/lib/promscrape/targetstatus.go @@ -64,7 +64,7 @@ func (tsm *targetStatusMap) Reset() { func (tsm *targetStatusMap) Register(sw *ScrapeWork) { tsm.mu.Lock() tsm.m[sw.ID] = targetStatus{ - sw: sw, + sw: *sw, } tsm.mu.Unlock() } @@ -78,7 +78,7 @@ func (tsm *targetStatusMap) Unregister(sw *ScrapeWork) { func (tsm *targetStatusMap) Update(sw *ScrapeWork, group string, up bool, scrapeTime, scrapeDuration int64, err error) { tsm.mu.Lock() tsm.m[sw.ID] = targetStatus{ - sw: sw, + sw: *sw, up: up, scrapeGroup: group, scrapeTime: scrapeTime, @@ -221,7 +221,7 @@ type jobStatus struct { } type targetStatus struct { - sw *ScrapeWork + sw ScrapeWork up bool scrapeGroup string scrapeTime int64 diff --git a/lib/protoparser/csvimport/streamparser.go b/lib/protoparser/csvimport/streamparser.go index 027ecc458..0b318775a 100644 --- a/lib/protoparser/csvimport/streamparser.go +++ b/lib/protoparser/csvimport/streamparser.go @@ -11,7 +11,6 @@ import ( "time" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" "github.com/VictoriaMetrics/metrics" ) @@ -24,7 +23,6 @@ var ( // ParseStream parses csv from req and calls callback for the parsed rows. // // The callback can be called concurrently multiple times for streamed data from req. -// The callback can be called after ParseStream returns. // // callback shouldn't hold rows after returning. 
func ParseStream(req *http.Request, callback func(rows []Row) error) error { @@ -47,12 +45,26 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error { defer putStreamContext(ctx) for ctx.Read() { uw := getUnmarshalWork() - uw.callback = callback + uw.callback = func(rows []Row) { + if err := callback(rows); err != nil { + ctx.callbackErrLock.Lock() + if ctx.callbackErr == nil { + ctx.callbackErr = fmt.Errorf("error when processing imported data: %w", err) + } + ctx.callbackErrLock.Unlock() + } + ctx.wg.Done() + } uw.cds = cds uw.reqBuf, ctx.reqBuf = ctx.reqBuf, uw.reqBuf + ctx.wg.Add(1) common.ScheduleUnmarshalWork(uw) } - return ctx.Error() + ctx.wg.Wait() + if err := ctx.Error(); err != nil { + return err + } + return ctx.callbackErr } func (ctx *streamContext) Read() bool { @@ -82,6 +94,10 @@ type streamContext struct { reqBuf []byte tailBuf []byte err error + + wg sync.WaitGroup + callbackErrLock sync.Mutex + callbackErr error } func (ctx *streamContext) Error() error { @@ -96,6 +112,7 @@ func (ctx *streamContext) reset() { ctx.reqBuf = ctx.reqBuf[:0] ctx.tailBuf = ctx.tailBuf[:0] ctx.err = nil + ctx.callbackErr = nil } func getStreamContext(r io.Reader) *streamContext { @@ -129,7 +146,7 @@ var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) type unmarshalWork struct { rows Rows - callback func(rows []Row) error + callback func(rows []Row) cds []ColumnDescriptor reqBuf []byte } @@ -164,11 +181,7 @@ func (uw *unmarshalWork) Unmarshal() { } } - if err := uw.callback(rows); err != nil { - logger.Errorf("error when processing imported data: %s", err) - putUnmarshalWork(uw) - return - } + uw.callback(rows) putUnmarshalWork(uw) } diff --git a/lib/protoparser/graphite/parser.go b/lib/protoparser/graphite/parser.go index e679f9524..c2e20b3d5 100644 --- a/lib/protoparser/graphite/parser.go +++ b/lib/protoparser/graphite/parser.go @@ -55,6 +55,33 @@ func (r *Row) reset() { r.Timestamp = 0 } +// UnmarshalMetricAndTags unmarshals metric and optional tags from s. 
+func (r *Row) UnmarshalMetricAndTags(s string, tagsPool []Tag) ([]Tag, error) { + if strings.Contains(s, " ") { + return tagsPool, fmt.Errorf("unexpected whitespace found in %q", s) + } + n := strings.IndexByte(s, ';') + if n < 0 { + // No tags + r.Metric = s + } else { + // Tags found + r.Metric = s[:n] + tagsStart := len(tagsPool) + var err error + tagsPool, err = unmarshalTags(tagsPool, s[n+1:]) + if err != nil { + return tagsPool, fmt.Errorf("cannot unmarshal tags: %w", err) + } + tags := tagsPool[tagsStart:] + r.Tags = tags[:len(tags):len(tags)] + } + if len(r.Metric) == 0 { + return tagsPool, fmt.Errorf("metric cannot be empty") + } + return tagsPool, nil +} + func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) { r.reset() n := strings.IndexByte(s, ' ') @@ -64,24 +91,9 @@ func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) { metricAndTags := s[:n] tail := s[n+1:] - n = strings.IndexByte(metricAndTags, ';') - if n < 0 { - // No tags - r.Metric = metricAndTags - } else { - // Tags found - r.Metric = metricAndTags[:n] - tagsStart := len(tagsPool) - var err error - tagsPool, err = unmarshalTags(tagsPool, metricAndTags[n+1:]) - if err != nil { - return tagsPool, fmt.Errorf("cannot umarshal tags: %w", err) - } - tags := tagsPool[tagsStart:] - r.Tags = tags[:len(tags):len(tags)] - } - if len(r.Metric) == 0 { - return tagsPool, fmt.Errorf("metric cannot be empty") + tagsPool, err := r.UnmarshalMetricAndTags(metricAndTags, tagsPool) + if err != nil { + return tagsPool, err } n = strings.IndexByte(tail, ' ') diff --git a/lib/protoparser/graphite/parser_test.go b/lib/protoparser/graphite/parser_test.go index 6217adfc3..b04314f1a 100644 --- a/lib/protoparser/graphite/parser_test.go +++ b/lib/protoparser/graphite/parser_test.go @@ -7,6 +7,57 @@ import ( "testing" ) +func TestUnmarshalMetricAndTagsFailure(t *testing.T) { + f := func(s string) { + t.Helper() + var r Row + _, err := r.UnmarshalMetricAndTags(s, nil) + if err == nil { + t.Fatalf("expecting non-nil error for UnmarshalMetricAndTags(%q)", s) + } + } + f("") + f(";foo=bar") + f(" ") + f("foo;bar") + f("foo ;bar=baz") + f("f oo;bar=baz") + f("foo;bar=baz ") + f("foo;bar= baz") + f("foo;bar=b az") + f("foo;b ar=baz") +} + +func TestUnmarshalMetricAndTagsSuccess(t *testing.T) { + f := func(s string, rExpected *Row) { + t.Helper() + var r Row + _, err := r.UnmarshalMetricAndTags(s, nil) + if err != nil { + t.Fatalf("unexpected error in UnmarshalMetricAndTags(%q): %s", s, err) + } + if !reflect.DeepEqual(&r, rExpected) { + t.Fatalf("unexpected row;\ngot\n%+v\nwant\n%+v", &r, rExpected) + } + } + f("foo", &Row{ + Metric: "foo", + }) + f("foo;bar=123;baz=aabb", &Row{ + Metric: "foo", + Tags: []Tag{ + { + Key: "bar", + Value: "123", + }, + { + Key: "baz", + Value: "aabb", + }, + }, + }) +} + func TestRowsUnmarshalFailure(t *testing.T) { f := func(s string) { t.Helper() @@ -200,7 +251,7 @@ func Test_streamContext_Read(t *testing.T) { } uw := getUnmarshalWork() callbackCalls := 0 - uw.callback = func(rows []Row) error { + uw.callback = func(rows []Row) { callbackCalls++ if len(rows) != len(rowsExpected.Rows) { t.Fatalf("different len of expected rows;\ngot\n%+v;\nwant\n%+v", rows, rowsExpected.Rows) @@ -208,7 +259,6 @@ func Test_streamContext_Read(t *testing.T) { if !reflect.DeepEqual(rows, rowsExpected.Rows) { t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows, rowsExpected.Rows) } - return nil } uw.reqBuf = append(uw.reqBuf[:0], ctx.reqBuf...)
uw.Unmarshal() diff --git a/lib/protoparser/graphite/streamparser.go b/lib/protoparser/graphite/streamparser.go index 7fa734f9f..26b490162 100644 --- a/lib/protoparser/graphite/streamparser.go +++ b/lib/protoparser/graphite/streamparser.go @@ -11,7 +11,6 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" "github.com/VictoriaMetrics/metrics" ) @@ -24,7 +23,6 @@ var ( // ParseStream parses Graphite lines from r and calls callback for the parsed rows. // // The callback can be called concurrently multiple times for streamed data from r. -// The callback can be called after ParseStream returns. // // callback shouldn't hold rows after returning. func ParseStream(r io.Reader, callback func(rows []Row) error) error { @@ -33,11 +31,25 @@ func ParseStream(r io.Reader, callback func(rows []Row) error) error { for ctx.Read() { uw := getUnmarshalWork() - uw.callback = callback + uw.callback = func(rows []Row) { + if err := callback(rows); err != nil { + ctx.callbackErrLock.Lock() + if ctx.callbackErr == nil { + ctx.callbackErr = fmt.Errorf("error when processing imported data: %w", err) + } + ctx.callbackErrLock.Unlock() + } + ctx.wg.Done() + } uw.reqBuf, ctx.reqBuf = ctx.reqBuf, uw.reqBuf + ctx.wg.Add(1) common.ScheduleUnmarshalWork(uw) } - return ctx.Error() + ctx.wg.Wait() + if err := ctx.Error(); err != nil { + return err + } + return ctx.callbackErr } func (ctx *streamContext) Read() bool { @@ -61,6 +73,10 @@ type streamContext struct { reqBuf []byte tailBuf []byte err error + + wg sync.WaitGroup + callbackErrLock sync.Mutex + callbackErr error } func (ctx *streamContext) Error() error { @@ -75,6 +91,7 @@ func (ctx *streamContext) reset() { ctx.reqBuf = ctx.reqBuf[:0] ctx.tailBuf = ctx.tailBuf[:0] ctx.err = nil + ctx.callbackErr = nil } var ( @@ -114,7 +131,7 @@ var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) type unmarshalWork struct { rows Rows - callback func(rows []Row) error + callback func(rows []Row) reqBuf []byte } @@ -152,11 +169,7 @@ func (uw *unmarshalWork) Unmarshal() { } } - if err := uw.callback(rows); err != nil { - logger.Errorf("error when processing imported data: %s", err) - putUnmarshalWork(uw) - return - } + uw.callback(rows) putUnmarshalWork(uw) } diff --git a/lib/protoparser/influx/streamparser.go b/lib/protoparser/influx/streamparser.go index d302e3ef0..fc6bc0215 100644 --- a/lib/protoparser/influx/streamparser.go +++ b/lib/protoparser/influx/streamparser.go @@ -11,7 +11,6 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" "github.com/VictoriaMetrics/metrics" ) @@ -25,7 +24,6 @@ var ( // ParseStream parses r with the given args and calls callback for the parsed rows. // // The callback can be called concurrently multiple times for streamed data from r. -// The callback can be called after ParseStream returns. // // callback shouldn't hold rows after returning. 
func ParseStream(r io.Reader, isGzipped bool, precision, db string, callback func(db string, rows []Row) error) error { @@ -59,13 +57,27 @@ func ParseStream(r io.Reader, isGzipped bool, precision, db string, callback fun defer putStreamContext(ctx) for ctx.Read() { uw := getUnmarshalWork() - uw.callback = callback + uw.callback = func(db string, rows []Row) { + if err := callback(db, rows); err != nil { + ctx.callbackErrLock.Lock() + if ctx.callbackErr == nil { + ctx.callbackErr = fmt.Errorf("error when processing imported data: %w", err) + } + ctx.callbackErrLock.Unlock() + } + ctx.wg.Done() + } uw.db = db uw.tsMultiplier = tsMultiplier uw.reqBuf, ctx.reqBuf = ctx.reqBuf, uw.reqBuf + ctx.wg.Add(1) common.ScheduleUnmarshalWork(uw) } - return ctx.Error() + ctx.wg.Wait() + if err := ctx.Error(); err != nil { + return err + } + return ctx.callbackErr } func (ctx *streamContext) Read() bool { @@ -95,6 +107,10 @@ type streamContext struct { reqBuf []byte tailBuf []byte err error + + wg sync.WaitGroup + callbackErrLock sync.Mutex + callbackErr error } func (ctx *streamContext) Error() error { @@ -109,6 +125,7 @@ func (ctx *streamContext) reset() { ctx.reqBuf = ctx.reqBuf[:0] ctx.tailBuf = ctx.tailBuf[:0] ctx.err = nil + ctx.callbackErr = nil } func getStreamContext(r io.Reader) *streamContext { @@ -142,7 +159,7 @@ var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) type unmarshalWork struct { rows Rows - callback func(db string, rows []Row) error + callback func(db string, rows []Row) db string tsMultiplier int64 reqBuf []byte @@ -195,11 +212,7 @@ func (uw *unmarshalWork) Unmarshal() { } } - if err := uw.callback(uw.db, rows); err != nil { - logger.Errorf("error when processing imported data: %s", err) - putUnmarshalWork(uw) - return - } + uw.callback(uw.db, rows) putUnmarshalWork(uw) } diff --git a/lib/protoparser/native/streamparser.go b/lib/protoparser/native/streamparser.go index 0e87d8787..fa4c5e3ce 100644 --- a/lib/protoparser/native/streamparser.go +++ b/lib/protoparser/native/streamparser.go @@ -18,10 +18,8 @@ import ( // ParseStream parses /api/v1/import/native lines from req and calls callback for parsed blocks. // // The callback can be called concurrently multiple times for streamed data from req. -// The callback can be called after ParseStream returns. // // callback shouldn't hold block after returning. -// callback can be called in parallel from multiple concurrent goroutines. func ParseStream(req *http.Request, callback func(block *Block) error) error { r := req.Body if req.Header.Get("Content-Encoding") == "gzip" { @@ -47,30 +45,49 @@ func ParseStream(req *http.Request, callback func(block *Block) error) error { // Read native blocks and feed workers with work. 
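The context lines above also show the two-level pooling used for stream contexts: a channel bounded by `GOMAXPROCS` for the hot path, backed by a `sync.Pool`. The pool internals are only partially visible in this diff, so the following is a generic sketch of the technique rather than the exact implementation:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

type streamContext struct {
	reqBuf []byte
}

func (ctx *streamContext) reset() { ctx.reqBuf = ctx.reqBuf[:0] }

var (
	ctxPool sync.Pool
	// Bounded channel keeps up to GOMAXPROCS warm contexts around.
	ctxPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1))
)

func getStreamContext() *streamContext {
	select {
	case ctx := <-ctxPoolCh:
		return ctx
	default:
		if v := ctxPool.Get(); v != nil {
			return v.(*streamContext)
		}
		return &streamContext{}
	}
}

func putStreamContext(ctx *streamContext) {
	ctx.reset()
	select {
	case ctxPoolCh <- ctx:
	default:
		ctxPool.Put(ctx)
	}
}

func main() {
	ctx := getStreamContext()
	ctx.reqBuf = append(ctx.reqBuf, "line"...)
	putStreamContext(ctx)
	fmt.Println("context recycled")
}
```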
sizeBuf := make([]byte, 4) + var wg sync.WaitGroup + var ( + callbackErrLock sync.Mutex + callbackErr error + ) for { uw := getUnmarshalWork() uw.tr = tr - uw.callback = callback + uw.callback = func(block *Block) { + if err := callback(block); err != nil { + processErrors.Inc() + callbackErrLock.Lock() + if callbackErr == nil { + callbackErr = fmt.Errorf("error when processing native block: %w", err) + } + callbackErrLock.Unlock() + } + wg.Done() + } // Read uw.metricNameBuf if _, err := io.ReadFull(br, sizeBuf); err != nil { if err == io.EOF { // End of stream putUnmarshalWork(uw) - return nil + wg.Wait() + return callbackErr } readErrors.Inc() + wg.Wait() return fmt.Errorf("cannot read metricName size: %w", err) } readCalls.Inc() bufSize := encoding.UnmarshalUint32(sizeBuf) if bufSize > 1024*1024 { parseErrors.Inc() + wg.Wait() return fmt.Errorf("too big metricName size; got %d; shouldn't exceed %d", bufSize, 1024*1024) } uw.metricNameBuf = bytesutil.Resize(uw.metricNameBuf, int(bufSize)) if _, err := io.ReadFull(br, uw.metricNameBuf); err != nil { readErrors.Inc() + wg.Wait() return fmt.Errorf("cannot read metricName with size %d bytes: %w", bufSize, err) } readCalls.Inc() @@ -78,22 +95,26 @@ func ParseStream(req *http.Request, callback func(block *Block) error) error { // Read uw.blockBuf if _, err := io.ReadFull(br, sizeBuf); err != nil { readErrors.Inc() + wg.Wait() return fmt.Errorf("cannot read native block size: %w", err) } readCalls.Inc() bufSize = encoding.UnmarshalUint32(sizeBuf) if bufSize > 1024*1024 { parseErrors.Inc() + wg.Wait() return fmt.Errorf("too big native block size; got %d; shouldn't exceed %d", bufSize, 1024*1024) } uw.blockBuf = bytesutil.Resize(uw.blockBuf, int(bufSize)) if _, err := io.ReadFull(br, uw.blockBuf); err != nil { readErrors.Inc() + wg.Wait() return fmt.Errorf("cannot read native block with size %d bytes: %w", bufSize, err) } readCalls.Inc() blocksRead.Inc() + wg.Add(1) common.ScheduleUnmarshalWork(uw) } } @@ -123,7 +144,7 @@ var ( type unmarshalWork struct { tr storage.TimeRange - callback func(block *Block) error + callback func(block *Block) metricNameBuf []byte blockBuf []byte block Block @@ -144,12 +165,7 @@ func (uw *unmarshalWork) Unmarshal() { putUnmarshalWork(uw) return } - if err := uw.callback(&uw.block); err != nil { - processErrors.Inc() - logger.Errorf("error when processing native block: %s", err) - putUnmarshalWork(uw) - return - } + uw.callback(&uw.block) putUnmarshalWork(uw) } diff --git a/lib/protoparser/opentsdb/streamparser.go b/lib/protoparser/opentsdb/streamparser.go index 752f1cd5e..aaab8da92 100644 --- a/lib/protoparser/opentsdb/streamparser.go +++ b/lib/protoparser/opentsdb/streamparser.go @@ -11,7 +11,6 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" "github.com/VictoriaMetrics/metrics" ) @@ -24,7 +23,6 @@ var ( // ParseStream parses OpenTSDB lines from r and calls callback for the parsed rows. // // The callback can be called concurrently multiple times for streamed data from r. -// The callback can be called after ParseStream returns. // // callback shouldn't hold rows after returning. 
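The native-format parser above has no streamContext, so ParseStream keeps a local WaitGroup and calls `wg.Wait()` before every return, including each error path; otherwise a scheduled unmarshal work could still be in flight and its callback could fire after ParseStream has returned. A condensed sketch of that control flow, with the size-prefixed reads and the work scheduler stubbed out:

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// readBlock stands in for the size-prefixed io.ReadFull calls in the hunk above.
func readBlock(r io.Reader, buf []byte) (int, error) {
	return r.Read(buf)
}

func parseStream(r io.Reader, callback func(block []byte) error) error {
	var (
		wg      sync.WaitGroup
		errLock sync.Mutex
		cbErr   error
	)
	buf := make([]byte, 4)
	for {
		n, err := readBlock(r, buf)
		if err != nil {
			if err == io.EOF {
				wg.Wait() // drain outstanding callbacks before the normal return
				return cbErr
			}
			wg.Wait() // ... and before every error return as well
			return fmt.Errorf("cannot read block: %w", err)
		}
		block := append([]byte(nil), buf[:n]...)
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := callback(block); err != nil {
				errLock.Lock()
				if cbErr == nil {
					cbErr = err
				}
				errLock.Unlock()
			}
		}()
	}
}

func main() {
	err := parseStream(strings.NewReader("abcd"), func(b []byte) error {
		fmt.Printf("got %q\n", b)
		return nil
	})
	fmt.Println("err:", err)
}
```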
func ParseStream(r io.Reader, callback func(rows []Row) error) error { @@ -32,11 +30,25 @@ func ParseStream(r io.Reader, callback func(rows []Row) error) error { defer putStreamContext(ctx) for ctx.Read() { uw := getUnmarshalWork() - uw.callback = callback + uw.callback = func(rows []Row) { + if err := callback(rows); err != nil { + ctx.callbackErrLock.Lock() + if ctx.callbackErr == nil { + ctx.callbackErr = fmt.Errorf("error when processing imported data: %w", err) + } + ctx.callbackErrLock.Unlock() + } + ctx.wg.Done() + } uw.reqBuf, ctx.reqBuf = ctx.reqBuf, uw.reqBuf + ctx.wg.Add(1) common.ScheduleUnmarshalWork(uw) } - return ctx.Error() + ctx.wg.Wait() + if err := ctx.Error(); err != nil { + return err + } + return ctx.callbackErr } func (ctx *streamContext) Read() bool { @@ -60,6 +72,10 @@ type streamContext struct { reqBuf []byte tailBuf []byte err error + + wg sync.WaitGroup + callbackErrLock sync.Mutex + callbackErr error } func (ctx *streamContext) Error() error { @@ -74,6 +90,7 @@ func (ctx *streamContext) reset() { ctx.reqBuf = ctx.reqBuf[:0] ctx.tailBuf = ctx.tailBuf[:0] ctx.err = nil + ctx.callbackErr = nil } var ( @@ -113,7 +130,7 @@ var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) type unmarshalWork struct { rows Rows - callback func(rows []Row) error + callback func(rows []Row) reqBuf []byte } @@ -151,11 +168,7 @@ func (uw *unmarshalWork) Unmarshal() { } } - if err := uw.callback(rows); err != nil { - logger.Errorf("error when processing imported data: %s", err) - putUnmarshalWork(uw) - return - } + uw.callback(rows) putUnmarshalWork(uw) } diff --git a/lib/protoparser/opentsdbhttp/streamparser.go b/lib/protoparser/opentsdbhttp/streamparser.go index 5a1f9012e..ad2d87dd8 100644 --- a/lib/protoparser/opentsdbhttp/streamparser.go +++ b/lib/protoparser/opentsdbhttp/streamparser.go @@ -13,7 +13,6 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" "github.com/VictoriaMetrics/metrics" ) @@ -27,7 +26,6 @@ var ( // ParseStream parses OpenTSDB http lines from req and calls callback for the parsed rows. // // The callback can be called concurrently multiple times for streamed data from req. -// The callback can be called after ParseStream returns. // // callback shouldn't hold rows after returning. func ParseStream(req *http.Request, callback func(rows []Row) error) error { @@ -58,10 +56,50 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error { return fmt.Errorf("too big HTTP OpenTSDB request; mustn't exceed `-opentsdbhttp.maxInsertRequestSize=%d` bytes", maxInsertRequestSize.N) } - uw := getUnmarshalWork() - uw.callback = callback - uw.reqBuf, ctx.reqBuf.B = ctx.reqBuf.B, uw.reqBuf - common.ScheduleUnmarshalWork(uw) + // Process the request synchronously, since there is no sense in processing a single request asynchronously. + // Sync code is easier to read and understand. 
+ p := getJSONParser() + defer putJSONParser(p) + v, err := p.ParseBytes(ctx.reqBuf.B) + if err != nil { + unmarshalErrors.Inc() + return fmt.Errorf("cannot parse HTTP OpenTSDB json: %w", err) + } + rs := getRows() + defer putRows(rs) + rs.Unmarshal(v) + rows := rs.Rows + rowsRead.Add(len(rows)) + + // Fill in missing timestamps + currentTimestamp := int64(fasttime.UnixTimestamp()) + for i := range rows { + r := &rows[i] + if r.Timestamp == 0 { + r.Timestamp = currentTimestamp + } + } + + // Convert timestamps in seconds to milliseconds if needed. + // See http://opentsdb.net/docs/javadoc/net/opentsdb/core/Const.html#SECOND_MASK + for i := range rows { + r := &rows[i] + if r.Timestamp&secondMask == 0 { + r.Timestamp *= 1e3 + } + } + + // Trim timestamps if required. + if tsTrim := trimTimestamp.Milliseconds(); tsTrim > 1 { + for i := range rows { + row := &rows[i] + row.Timestamp -= row.Timestamp % tsTrim + } + } + + if err := callback(rows); err != nil { + return fmt.Errorf("error when processing imported data: %w", err) + } return nil } @@ -113,77 +151,17 @@ func putStreamContext(ctx *streamContext) { var streamContextPool sync.Pool var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) -type unmarshalWork struct { - rows Rows - callback func(rows []Row) error - reqBuf []byte -} - -func (uw *unmarshalWork) reset() { - uw.rows.Reset() - uw.callback = nil - uw.reqBuf = uw.reqBuf[:0] -} - -// Unmarshal implements common.UnmarshalWork -func (uw *unmarshalWork) Unmarshal() { - p := getJSONParser() - defer putJSONParser(p) - v, err := p.ParseBytes(uw.reqBuf) - if err != nil { - unmarshalErrors.Inc() - logger.Errorf("cannot parse HTTP OpenTSDB json: %s", err) - return - } - uw.rows.Unmarshal(v) - rows := uw.rows.Rows - rowsRead.Add(len(rows)) - - // Fill in missing timestamps - currentTimestamp := int64(fasttime.UnixTimestamp()) - for i := range rows { - r := &rows[i] - if r.Timestamp == 0 { - r.Timestamp = currentTimestamp - } - } - - // Convert timestamps in seconds to milliseconds if needed. - // See http://opentsdb.net/docs/javadoc/net/opentsdb/core/Const.html#SECOND_MASK - for i := range rows { - r := &rows[i] - if r.Timestamp&secondMask == 0 { - r.Timestamp *= 1e3 - } - } - - // Trim timestamps if required. 
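The synchronous OpenTSDB handler above normalizes timestamps in three passes: missing timestamps get the current time, second-resolution timestamps are detected via OpenTSDB's SECOND_MASK heuristic and scaled to milliseconds, and timestamps are then rounded down to the trim interval. A self-contained sketch of that normalization; the `secondMask` constant below is adapted to int64 from the OpenTSDB docs linked above and may differ from the parser's exact value, and the flag plumbing is omitted:

```go
package main

import (
	"fmt"
	"time"
)

// secondMask follows OpenTSDB's Const.SECOND_MASK heuristic: a timestamp whose
// high bits are all zero is assumed to be in seconds rather than milliseconds.
const secondMask int64 = 0x7FFFFFFF00000000

type row struct{ Timestamp int64 }

func normalizeTimestamps(rows []row, trimMillis int64) {
	currentTimestamp := time.Now().Unix() // seconds; converted to ms below
	for i := range rows {
		r := &rows[i]
		if r.Timestamp == 0 {
			r.Timestamp = currentTimestamp // fill in a missing timestamp
		}
		if r.Timestamp&secondMask == 0 {
			r.Timestamp *= 1e3 // seconds -> milliseconds
		}
		if trimMillis > 1 {
			r.Timestamp -= r.Timestamp % trimMillis // round down to the trim interval
		}
	}
}

func main() {
	rows := []row{{0}, {1604883600}, {1604883600123}}
	normalizeTimestamps(rows, 1000)
	for _, r := range rows {
		fmt.Println(r.Timestamp)
	}
}
```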
- if tsTrim := trimTimestamp.Milliseconds(); tsTrim > 1 { - for i := range rows { - row := &rows[i] - row.Timestamp -= row.Timestamp % tsTrim - } - } - - if err := uw.callback(rows); err != nil { - logger.Errorf("error when processing imported data: %s", err) - putUnmarshalWork(uw) - return - } - putUnmarshalWork(uw) -} - -func getUnmarshalWork() *unmarshalWork { - v := unmarshalWorkPool.Get() +func getRows() *Rows { + v := rowsPool.Get() if v == nil { - return &unmarshalWork{} + return &Rows{} } - return v.(*unmarshalWork) + return v.(*Rows) } -func putUnmarshalWork(uw *unmarshalWork) { - uw.reset() - unmarshalWorkPool.Put(uw) +func putRows(rs *Rows) { + rs.Reset() + rowsPool.Put(rs) } -var unmarshalWorkPool sync.Pool +var rowsPool sync.Pool diff --git a/lib/protoparser/prometheus/streamparser.go b/lib/protoparser/prometheus/streamparser.go index 8c915c9fd..2c6aa46c0 100644 --- a/lib/protoparser/prometheus/streamparser.go +++ b/lib/protoparser/prometheus/streamparser.go @@ -9,7 +9,6 @@ import ( "time" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" "github.com/VictoriaMetrics/metrics" ) @@ -17,7 +16,6 @@ import ( // ParseStream parses lines with Prometheus exposition format from r and calls callback for the parsed rows. // // The callback can be called concurrently multiple times for streamed data from r. -// It is guaranteed that the callback isn't called after ParseStream returns. // // callback shouldn't hold rows after returning. func ParseStream(r io.Reader, defaultTimestamp int64, isGzipped bool, callback func(rows []Row) error) error { @@ -33,18 +31,26 @@ func ParseStream(r io.Reader, defaultTimestamp int64, isGzipped bool, callback f defer putStreamContext(ctx) for ctx.Read() { uw := getUnmarshalWork() - uw.callback = func(rows []Row) error { - err := callback(rows) + uw.callback = func(rows []Row) { + if err := callback(rows); err != nil { + ctx.callbackErrLock.Lock() + if ctx.callbackErr == nil { + ctx.callbackErr = fmt.Errorf("error when processing imported data: %w", err) + } + ctx.callbackErrLock.Unlock() + } ctx.wg.Done() - return err } uw.defaultTimestamp = defaultTimestamp uw.reqBuf, ctx.reqBuf = ctx.reqBuf, uw.reqBuf ctx.wg.Add(1) common.ScheduleUnmarshalWork(uw) } - ctx.wg.Wait() // wait for all the outstanding callback calls before returning - return ctx.Error() + ctx.wg.Wait() + if err := ctx.Error(); err != nil { + return err + } + return ctx.callbackErr } func (ctx *streamContext) Read() bool { @@ -69,7 +75,9 @@ type streamContext struct { tailBuf []byte err error - wg sync.WaitGroup + wg sync.WaitGroup + callbackErrLock sync.Mutex + callbackErr error } func (ctx *streamContext) Error() error { @@ -84,6 +92,7 @@ func (ctx *streamContext) reset() { ctx.reqBuf = ctx.reqBuf[:0] ctx.tailBuf = ctx.tailBuf[:0] ctx.err = nil + ctx.callbackErr = nil } var ( @@ -123,7 +132,7 @@ var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) type unmarshalWork struct { rows Rows - callback func(rows []Row) error + callback func(rows []Row) defaultTimestamp int64 reqBuf []byte } @@ -153,11 +162,7 @@ func (uw *unmarshalWork) Unmarshal() { } } - if err := uw.callback(rows); err != nil { - logger.Errorf("error when processing imported data: %s", err) - putUnmarshalWork(uw) - return - } + uw.callback(rows) putUnmarshalWork(uw) } diff --git a/lib/protoparser/promremotewrite/streamparser.go 
b/lib/protoparser/promremotewrite/streamparser.go index 0ce5632ce..ad447f979 100644 --- a/lib/protoparser/promremotewrite/streamparser.go +++ b/lib/protoparser/promremotewrite/streamparser.go @@ -9,10 +9,9 @@ import ( "sync" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" "github.com/VictoriaMetrics/metrics" "github.com/golang/snappy" ) @@ -21,9 +20,6 @@ var maxInsertRequestSize = flagutil.NewBytes("maxInsertRequestSize", 32*1024*102 // ParseStream parses Prometheus remote_write message req and calls callback for the parsed timeseries. // -// The callback can be called concurrently multiple times for streamed data from req. -// The callback can be called after ParseStream returns. -// // callback shouldn't hold tss after returning. func ParseStream(req *http.Request, callback func(tss []prompb.TimeSeries) error) error { ctx := getPushCtx(req.Body) @@ -31,13 +27,42 @@ func ParseStream(req *http.Request, callback func(tss []prompb.TimeSeries) error if err := ctx.Read(); err != nil { return err } - uw := getUnmarshalWork() - uw.callback = callback - uw.reqBuf, ctx.reqBuf.B = ctx.reqBuf.B, uw.reqBuf - common.ScheduleUnmarshalWork(uw) + + // Process the request synchronously in order to return processing errors to the ParseStream caller, + // so it can properly return HTTP 503 status code in the response. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/896 + bb := bodyBufferPool.Get() + defer bodyBufferPool.Put(bb) + var err error + bb.B, err = snappy.Decode(bb.B[:cap(bb.B)], ctx.reqBuf.B) + if err != nil { + return fmt.Errorf("cannot decompress request with length %d: %w", len(ctx.reqBuf.B), err) + } + if len(bb.B) > maxInsertRequestSize.N { + return fmt.Errorf("too big unpacked request; mustn't exceed `-maxInsertRequestSize=%d` bytes; got %d bytes", maxInsertRequestSize.N, len(bb.B)) + } + wr := getWriteRequest() + defer putWriteRequest(wr) + if err := wr.Unmarshal(bb.B); err != nil { + unmarshalErrors.Inc() + return fmt.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %w", len(bb.B), err) + } + + rows := 0 + tss := wr.Timeseries + for i := range tss { + rows += len(tss[i].Samples) + } + rowsRead.Add(rows) + + if err := callback(tss); err != nil { + return fmt.Errorf("error when processing imported data: %w", err) + } return nil } +var bodyBufferPool bytesutil.ByteBufferPool + type pushCtx struct { br *bufio.Reader reqBuf bytesutil.ByteBuffer @@ -51,10 +76,11 @@ func (ctx *pushCtx) reset() { func (ctx *pushCtx) Read() error { readCalls.Inc() lr := io.LimitReader(ctx.br, int64(maxInsertRequestSize.N)+1) + startTime := fasttime.UnixTimestamp() reqLen, err := ctx.reqBuf.ReadFrom(lr) if err != nil { readErrors.Inc() - return fmt.Errorf("cannot read compressed request: %w", err) + return fmt.Errorf("cannot read compressed request in %d seconds: %w", fasttime.UnixTimestamp()-startTime, err) } if reqLen > int64(maxInsertRequestSize.N) { readErrors.Inc() @@ -99,66 +125,17 @@ func putPushCtx(ctx *pushCtx) { var pushCtxPool sync.Pool var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1)) -type unmarshalWork struct { - wr prompb.WriteRequest - callback func(tss []prompb.TimeSeries) error - reqBuf []byte -} - -func (uw *unmarshalWork) reset() { - uw.wr.Reset() -
uw.callback = nil - uw.reqBuf = uw.reqBuf[:0] -} - -// Unmarshal implements common.UnmarshalWork -func (uw *unmarshalWork) Unmarshal() { - bb := bodyBufferPool.Get() - defer bodyBufferPool.Put(bb) - var err error - bb.B, err = snappy.Decode(bb.B[:cap(bb.B)], uw.reqBuf) - if err != nil { - logger.Errorf("cannot decompress request with length %d: %s", len(uw.reqBuf), err) - return - } - if len(bb.B) > maxInsertRequestSize.N { - logger.Errorf("too big unpacked request; mustn't exceed `-maxInsertRequestSize=%d` bytes; got %d bytes", maxInsertRequestSize.N, len(bb.B)) - return - } - if err := uw.wr.Unmarshal(bb.B); err != nil { - unmarshalErrors.Inc() - logger.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %s", len(bb.B), err) - return - } - - rows := 0 - tss := uw.wr.Timeseries - for i := range tss { - rows += len(tss[i].Samples) - } - rowsRead.Add(rows) - - if err := uw.callback(tss); err != nil { - logger.Errorf("error when processing imported data: %s", err) - putUnmarshalWork(uw) - return - } - putUnmarshalWork(uw) -} - -var bodyBufferPool bytesutil.ByteBufferPool - -func getUnmarshalWork() *unmarshalWork { - v := unmarshalWorkPool.Get() +func getWriteRequest() *prompb.WriteRequest { + v := writeRequestPool.Get() if v == nil { - return &unmarshalWork{} + return &prompb.WriteRequest{} } - return v.(*unmarshalWork) + return v.(*prompb.WriteRequest) } -func putUnmarshalWork(uw *unmarshalWork) { - uw.reset() - unmarshalWorkPool.Put(uw) +func putWriteRequest(wr *prompb.WriteRequest) { + wr.Reset() + writeRequestPool.Put(wr) } -var unmarshalWorkPool sync.Pool +var writeRequestPool sync.Pool diff --git a/lib/protoparser/vmimport/streamparser.go b/lib/protoparser/vmimport/streamparser.go index 66ad6c322..52063ae2d 100644 --- a/lib/protoparser/vmimport/streamparser.go +++ b/lib/protoparser/vmimport/streamparser.go @@ -10,7 +10,6 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" "github.com/VictoriaMetrics/metrics" ) @@ -21,7 +20,6 @@ var maxLineLen = flagutil.NewBytes("import.maxLineLen", 100*1024*1024, "The maxi // ParseStream parses /api/v1/import lines from req and calls callback for the parsed rows. // // The callback can be called concurrently multiple times for streamed data from req. -// The callback can be called after ParseStream returns. // // callback shouldn't hold rows after returning. 
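The remote-write hunk above is the motivating change for this whole diff: decoding and callback invocation now happen synchronously inside ParseStream, so an error from the storage callback propagates to the HTTP handler, which can answer with 503 instead of only logging (see the issue 896 reference above). The sequence is: snappy-decode into a pooled buffer, size-check, unmarshal into a pooled prompb.WriteRequest, count samples, invoke the callback. A compressed sketch of the same shape, with a hypothetical stand-in for the pooled request type:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

// writeRequest is a stand-in for prompb.WriteRequest in the hunk above.
type writeRequest struct{ payload []byte }

func (wr *writeRequest) unmarshal(b []byte) error {
	wr.payload = append(wr.payload[:0], b...)
	return nil
}

const maxInsertRequestSize = 32 * 1024 * 1024

func parseRemoteWrite(compressed []byte, callback func(wr *writeRequest) error) error {
	// Decode synchronously so that any error reaches the HTTP handler.
	body, err := snappy.Decode(nil, compressed)
	if err != nil {
		return fmt.Errorf("cannot decompress request with length %d: %w", len(compressed), err)
	}
	if len(body) > maxInsertRequestSize {
		return fmt.Errorf("too big unpacked request; got %d bytes", len(body))
	}
	var wr writeRequest
	if err := wr.unmarshal(body); err != nil {
		return fmt.Errorf("cannot unmarshal request: %w", err)
	}
	// The callback error is returned to the caller instead of being logged,
	// so the HTTP handler can reply with 503 and the client can retry.
	if err := callback(&wr); err != nil {
		return fmt.Errorf("error when processing imported data: %w", err)
	}
	return nil
}

func main() {
	data := snappy.Encode(nil, []byte("timeseries"))
	err := parseRemoteWrite(data, func(wr *writeRequest) error {
		fmt.Printf("processing %d bytes\n", len(wr.payload))
		return nil
	})
	fmt.Println("err:", err)
}
```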
func ParseStream(req *http.Request, callback func(rows []Row) error) error { @@ -38,11 +36,25 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error { defer putStreamContext(ctx) for ctx.Read() { uw := getUnmarshalWork() - uw.callback = callback + uw.callback = func(rows []Row) { + if err := callback(rows); err != nil { + ctx.callbackErrLock.Lock() + if ctx.callbackErr == nil { + ctx.callbackErr = fmt.Errorf("error when processing imported data: %w", err) + } + ctx.callbackErrLock.Unlock() + } + ctx.wg.Done() + } uw.reqBuf, ctx.reqBuf = ctx.reqBuf, uw.reqBuf + ctx.wg.Add(1) common.ScheduleUnmarshalWork(uw) } - return ctx.Error() + ctx.wg.Wait() + if err := ctx.Error(); err != nil { + return err + } + return ctx.callbackErr } func (ctx *streamContext) Read() bool { @@ -72,6 +84,10 @@ type streamContext struct { reqBuf []byte tailBuf []byte err error + + wg sync.WaitGroup + callbackErrLock sync.Mutex + callbackErr error } func (ctx *streamContext) Error() error { @@ -86,6 +102,7 @@ func (ctx *streamContext) reset() { ctx.reqBuf = ctx.reqBuf[:0] ctx.tailBuf = ctx.tailBuf[:0] ctx.err = nil + ctx.callbackErr = nil } func getStreamContext(r io.Reader) *streamContext { @@ -119,7 +136,7 @@ var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) type unmarshalWork struct { rows Rows - callback func(rows []Row) error + callback func(rows []Row) reqBuf []byte } @@ -137,11 +154,7 @@ func (uw *unmarshalWork) Unmarshal() { row := &rows[i] rowsRead.Add(len(row.Timestamps)) } - if err := uw.callback(rows); err != nil { - logger.Errorf("error when processing imported data: %s", err) - putUnmarshalWork(uw) - return - } + uw.callback(rows) putUnmarshalWork(uw) } diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go index aecd4ea44..f9d0d3649 100644 --- a/lib/storage/index_db.go +++ b/lib/storage/index_db.go @@ -722,6 +722,10 @@ func (db *indexDB) SearchTagKeysOnTimeRange(tr TimeRange, maxTagKeys int, deadli keys := make([]string, 0, len(tks)) for key := range tks { + if key == string(graphiteReverseTagKey) { + // Do not show artificially created graphiteReverseTagKey to the caller. + continue + } // Do not skip empty keys, since they are converted to __name__ keys = append(keys, key) } @@ -833,6 +837,10 @@ func (db *indexDB) SearchTagKeys(maxTagKeys int, deadline uint64) ([]string, err keys := make([]string, 0, len(tks)) for key := range tks { + if key == string(graphiteReverseTagKey) { + // Do not show artificially created graphiteReverseTagKey to the caller. + continue + } // Do not skip empty keys, since they are converted to __name__ keys = append(keys, key) } diff --git a/lib/storage/search.go b/lib/storage/search.go index 0ce480a95..ac644cc65 100644 --- a/lib/storage/search.go +++ b/lib/storage/search.go @@ -220,6 +220,15 @@ type SearchQuery struct { TagFilterss [][]TagFilter } +// NewSearchQuery creates a new search query for the given args. +func NewSearchQuery(start, end int64, tagFilterss [][]TagFilter) *SearchQuery { + return &SearchQuery{ + MinTimestamp: start, + MaxTimestamp: end, + TagFilterss: tagFilterss, + } +} + // TagFilter represents a single tag filter from SearchQuery.
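`NewSearchQuery` above is a small convenience constructor; callers previously filled `SearchQuery` literals by hand. A usage sketch; the `Value` field of `TagFilter` is assumed from the wider codebase, since this diff only shows its `Key` field:

```go
package main

import (
	"fmt"
	"time"

	// Assumed import path for the storage package in this repository.
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func main() {
	end := time.Now().UnixNano() / 1e6      // MaxTimestamp, milliseconds
	start := end - time.Hour.Milliseconds() // MinTimestamp, one hour back
	sq := storage.NewSearchQuery(start, end, [][]storage.TagFilter{{
		// Only Key is visible in this diff; Value is an assumption here.
		{Key: []byte("job"), Value: []byte("webservice")},
	}})
	fmt.Printf("%+v\n", sq)
}
```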
type TagFilter struct { Key []byte diff --git a/lib/storage/storage.go b/lib/storage/storage.go index d79b38a55..e474a0d20 100644 --- a/lib/storage/storage.go +++ b/lib/storage/storage.go @@ -191,8 +191,8 @@ func OpenStorage(path string, retentionMsecs int64) (*Storage, error) { return s, nil } -// debugFlush flushes recently added storage data, so it becomes visible to search. -func (s *Storage) debugFlush() { +// DebugFlush flushes recently added storage data, so it becomes visible to search. +func (s *Storage) DebugFlush() { s.tb.flushRawRows() s.idb().tb.DebugFlush() } @@ -796,6 +796,41 @@ func nextRetentionDuration(retentionMonths int) time.Duration { return deadline.Sub(t) } +// SearchMetricNames returns metric names matching the given tfss on the given tr. +func (s *Storage) SearchMetricNames(tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]MetricName, error) { + tsids, err := s.searchTSIDs(tfss, tr, maxMetrics, deadline) + if err != nil { + return nil, err + } + if err = s.prefetchMetricNames(tsids, deadline); err != nil { + return nil, err + } + idb := s.idb() + is := idb.getIndexSearch(deadline) + defer idb.putIndexSearch(is) + mns := make([]MetricName, 0, len(tsids)) + var metricName []byte + for i := range tsids { + metricID := tsids[i].MetricID + var err error + metricName, err = is.searchMetricName(metricName[:0], metricID) + if err != nil { + if err == io.EOF { + // Skip missing metricName for metricID. + // It should be automatically fixed. See indexDB.searchMetricName for details. + continue + } + return nil, fmt.Errorf("error when searching metricName for metricID=%d: %w", metricID, err) + } + mns = mns[:len(mns)+1] + mn := &mns[len(mns)-1] + if err = mn.Unmarshal(metricName); err != nil { + return nil, fmt.Errorf("cannot unmarshal metricName=%q: %w", metricName, err) + } + } + return mns, nil +} + // searchTSIDs returns sorted TSIDs for the given tfss and the given tr. func (s *Storage) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) { // Do not cache tfss -> tsids here, since the caching is performed @@ -1070,7 +1105,6 @@ func (s *Storage) AddRows(mrs []MetricRow, precisionBits uint8) error { if len(mrs) == 0 { return nil } - atomic.AddUint64(&rowsAddedTotal, uint64(len(mrs))) // Limit the number of concurrent goroutines that may add rows to the storage. // This should prevent from out of memory errors and CPU trashing when too many @@ -1107,6 +1141,7 @@ func (s *Storage) AddRows(mrs []MetricRow, precisionBits uint8) error { <-addRowsConcurrencyCh + atomic.AddUint64(&rowsAddedTotal, uint64(len(mrs))) return err } @@ -1118,6 +1153,64 @@ var ( addRowsTimeout = 30 * time.Second ) +// RegisterMetricNames registers all the metric names from mrs in the indexdb, so they can be queried later. +// +// The MetricRow.Timestamp is used for registering the metric name starting from the given timestamp. +// The MetricRow.Value field is ignored. +func (s *Storage) RegisterMetricNames(mrs []MetricRow) error { + var ( + tsid TSID + mn MetricName + metricName []byte + ) + idb := s.idb() + is := idb.getIndexSearch(noDeadline) + defer idb.putIndexSearch(is) + for i := range mrs { + mr := &mrs[i] + if s.getTSIDFromCache(&tsid, mr.MetricNameRaw) { + // Fast path - mr.MetricNameRaw has already been registered. + continue + } + + // Slow path - register mr.MetricNameRaw.
+ if err := mn.unmarshalRaw(mr.MetricNameRaw); err != nil { + return fmt.Errorf("cannot register the metric: cannot unmarshal MetricNameRaw %q: %w", mr.MetricNameRaw, err) + } + mn.sortTags() + metricName = mn.Marshal(metricName[:0]) + if err := is.GetOrCreateTSIDByName(&tsid, metricName); err != nil { + return fmt.Errorf("cannot register the metric: cannot create TSID for metricName %q: %w", metricName, err) + } + s.putTSIDToCache(&tsid, mr.MetricNameRaw) + + // Register the metric in per-day inverted index. + date := uint64(mr.Timestamp) / msecPerDay + metricID := tsid.MetricID + if s.dateMetricIDCache.Has(date, metricID) { + // Fast path: the metric has already been registered in per-day inverted index + continue + } + + // Slow path: actually register the metric in per-day inverted index. + ok, err := is.hasDateMetricID(date, metricID) + if err != nil { + return fmt.Errorf("cannot register the metric in per-date inverted index because of error when locating (date=%d, metricID=%d) in database: %w", + date, metricID, err) + } + if !ok { + // The (date, metricID) entry is missing in the indexDB. Add it there. + if err := is.storeDateMetricID(date, metricID); err != nil { + return fmt.Errorf("cannot register the metric in per-date inverted index because of error when storing (date=%d, metricID=%d) in database: %w", + date, metricID, err) + } + } + // The metric must be added to cache only after it has been successfully added to indexDB. + s.dateMetricIDCache.Set(date, metricID) + } + return nil +} + func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]rawRow, error) { idb := s.idb() rowsLen := len(rows) diff --git a/lib/storage/storage_test.go b/lib/storage/storage_test.go index f3ae42486..ecacd69ef 100644 --- a/lib/storage/storage_test.go +++ b/lib/storage/storage_test.go @@ -5,6 +5,7 @@ import ( "math/rand" "os" "reflect" + "sort" "strings" "testing" "testing/quick" @@ -103,7 +104,7 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) { s.pendingHourEntries = &uint64set.Set{} return &s } - t.Run("empty_pedning_metric_ids_stale_curr_hour", func(t *testing.T) { + t.Run("empty_pending_metric_ids_stale_curr_hour", func(t *testing.T) { s := newStorage() hour := uint64(timestampFromTime(time.Now())) / msecPerHour hmOrig := &hourMetricIDs{ @@ -138,7 +139,7 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) { t.Fatalf("unexpected s.pendingHourEntries.Len(); got %d; want %d", s.pendingHourEntries.Len(), 0) } }) - t.Run("empty_pedning_metric_ids_valid_curr_hour", func(t *testing.T) { + t.Run("empty_pending_metric_ids_valid_curr_hour", func(t *testing.T) { s := newStorage() hour := uint64(timestampFromTime(time.Now())) / msecPerHour hmOrig := &hourMetricIDs{ @@ -557,7 +558,7 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error { return fmt.Errorf("unexpected error when adding mrs: %w", err) } } - s.debugFlush() + s.DebugFlush() // Verify tag values exist tvs, err := s.SearchTagValues(workerTag, 1e5, noDeadline) @@ -664,6 +665,167 @@ func checkTagKeys(tks []string, tksExpected map[string]bool) error { return nil } +func TestStorageRegisterMetricNamesSerial(t *testing.T) { + path := "TestStorageRegisterMetricNamesSerial" + s, err := OpenStorage(path, 0) + if err != nil { + t.Fatalf("cannot open storage: %s", err) + } + if err := testStorageRegisterMetricNames(s); err != nil { + t.Fatalf("unexpected error: %s", err) + } + s.MustClose() + if err := os.RemoveAll(path); err != nil { + t.Fatalf("cannot remove %q: %s", path, err) + } +} + +func
TestStorageRegisterMetricNamesConcurrent(t *testing.T) { + path := "TestStorageRegisterMetricNamesConcurrent" + s, err := OpenStorage(path, 0) + if err != nil { + t.Fatalf("cannot open storage: %s", err) + } + ch := make(chan error, 3) + for i := 0; i < cap(ch); i++ { + go func() { + ch <- testStorageRegisterMetricNames(s) + }() + } + for i := 0; i < cap(ch); i++ { + select { + case err := <-ch: + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + case <-time.After(10 * time.Second): + t.Fatalf("timeout") + } + } + s.MustClose() + if err := os.RemoveAll(path); err != nil { + t.Fatalf("cannot remove %q: %s", path, err) + } +} + +func testStorageRegisterMetricNames(s *Storage) error { + const metricsPerAdd = 1e3 + const addsCount = 10 + + addIDsMap := make(map[string]struct{}) + for i := 0; i < addsCount; i++ { + var mrs []MetricRow + var mn MetricName + addID := fmt.Sprintf("%d", i) + addIDsMap[addID] = struct{}{} + mn.Tags = []Tag{ + {[]byte("job"), []byte("webservice")}, + {[]byte("instance"), []byte("1.2.3.4")}, + {[]byte("add_id"), []byte(addID)}, + } + now := timestampFromTime(time.Now()) + for j := 0; j < metricsPerAdd; j++ { + mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", j)) + metricNameRaw := mn.marshalRaw(nil) + + mr := MetricRow{ + MetricNameRaw: metricNameRaw, + Timestamp: now, + } + mrs = append(mrs, mr) + } + if err := s.RegisterMetricNames(mrs); err != nil { + return fmt.Errorf("unexpected error in RegisterMetricNames: %w", err) + } + } + var addIDsExpected []string + for k := range addIDsMap { + addIDsExpected = append(addIDsExpected, k) + } + sort.Strings(addIDsExpected) + + // Verify the storage contains the added metric names. + s.DebugFlush() + + // Verify that SearchTagKeys returns correct result. + tksExpected := []string{ + "", + "add_id", + "instance", + "job", + } + tks, err := s.SearchTagKeys(100, noDeadline) + if err != nil { + return fmt.Errorf("error in SearchTagKeys: %w", err) + } + sort.Strings(tks) + if !reflect.DeepEqual(tks, tksExpected) { + return fmt.Errorf("unexpected tag keys returned from SearchTagKeys;\ngot\n%q\nwant\n%q", tks, tksExpected) + } + + // Verify that SearchTagKeysOnTimeRange returns correct result. + now := timestampFromTime(time.Now()) + start := now - msecPerDay + end := now + 60*1000 + tr := TimeRange{ + MinTimestamp: start, + MaxTimestamp: end, + } + tks, err = s.SearchTagKeysOnTimeRange(tr, 100, noDeadline) + if err != nil { + return fmt.Errorf("error in SearchTagKeysOnTimeRange: %w", err) + } + sort.Strings(tks) + if !reflect.DeepEqual(tks, tksExpected) { + return fmt.Errorf("unexpected tag keys returned from SearchTagKeysOnTimeRange;\ngot\n%q\nwant\n%q", tks, tksExpected) + } + + // Verify that SearchTagValues returns correct result. + addIDs, err := s.SearchTagValues([]byte("add_id"), addsCount+100, noDeadline) + if err != nil { + return fmt.Errorf("error in SearchTagValues: %w", err) + } + sort.Strings(addIDs) + if !reflect.DeepEqual(addIDs, addIDsExpected) { + return fmt.Errorf("unexpected tag values returned from SearchTagValues;\ngot\n%q\nwant\n%q", addIDs, addIDsExpected) + } + + // Verify that SearchTagValuesOnTimeRange returns correct result.
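The test above drives the new RegisterMetricNames end to end; the SearchTagValuesOnTimeRange verification resumes right after this sketch. For a caller the flow is: build a raw metric name, wrap it in a MetricRow with a timestamp (the Value is ignored), register, then flush. Since marshalRaw and timestampFromTime are unexported, the sketch below is written as it would appear inside package storage, next to the test:

```go
package storage

import (
	"fmt"
	"time"
)

// registerNamesExample mirrors testStorageRegisterMetricNames above: build raw
// metric names, register them, then flush so they become visible to search.
func registerNamesExample(s *Storage) error {
	var mn MetricName
	mn.MetricGroup = []byte("example_metric") // hypothetical metric name
	mn.Tags = []Tag{
		{[]byte("job"), []byte("webservice")},
	}
	mrs := []MetricRow{{
		MetricNameRaw: mn.marshalRaw(nil),
		Timestamp:     timestampFromTime(time.Now()), // Value is ignored by RegisterMetricNames
	}}
	if err := s.RegisterMetricNames(mrs); err != nil {
		return fmt.Errorf("cannot register metric names: %w", err)
	}
	s.DebugFlush() // make the registered names searchable
	return nil
}
```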
+ addIDs, err = s.SearchTagValuesOnTimeRange([]byte("add_id"), tr, addsCount+100, noDeadline) + if err != nil { + return fmt.Errorf("error in SearchTagValuesOnTimeRange: %w", err) + } + sort.Strings(addIDs) + if !reflect.DeepEqual(addIDs, addIDsExpected) { + return fmt.Errorf("unexpected tag values returned from SearchTagValuesOnTimeRange;\ngot\n%q\nwant\n%q", addIDs, addIDsExpected) + } + + // Verify that SearchMetricNames returns correct result. + tfs := NewTagFilters() + if err := tfs.Add([]byte("add_id"), []byte("0"), false, false); err != nil { + return fmt.Errorf("unexpected error in TagFilters.Add: %w", err) + } + mns, err := s.SearchMetricNames([]*TagFilters{tfs}, tr, metricsPerAdd*addsCount*100+100, noDeadline) + if err != nil { + return fmt.Errorf("error in SearchMetricNames: %w", err) + } + if len(mns) < metricsPerAdd { + return fmt.Errorf("unexpected number of metricNames returned from SearchMetricNames; got %d; want at least %d", len(mns), int(metricsPerAdd)) + } + for i, mn := range mns { + addID := mn.GetTagValue("add_id") + if string(addID) != "0" { + return fmt.Errorf("unexpected addID for metricName #%d; got %q; want %q", i, addID, "0") + } + job := mn.GetTagValue("job") + if string(job) != "webservice" { + return fmt.Errorf("unexpected job for metricName #%d; got %q; want %q", i, job, "webservice") + } + } + + return nil +} + func TestStorageAddRowsSerial(t *testing.T) { path := "TestStorageAddRowsSerial" s, err := OpenStorage(path, 0) diff --git a/vendor/cloud.google.com/go/.gitignore b/vendor/cloud.google.com/go/.gitignore index ee9694b87..cc7e53b46 100644 --- a/vendor/cloud.google.com/go/.gitignore +++ b/vendor/cloud.google.com/go/.gitignore @@ -2,6 +2,7 @@ .idea .vscode *.swp +.history # Test files *.test diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index d7487c8aa..4c762e636 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,6 +1,20 @@ # Changes +## [0.72.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.71.0...v0.72.0) (2020-11-10) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3177](https://www.github.com/googleapis/google-cloud-go/issues/3177) [#3164](https://www.github.com/googleapis/google-cloud-go/issues/3164) [#3149](https://www.github.com/googleapis/google-cloud-go/issues/3149) [#3142](https://www.github.com/googleapis/google-cloud-go/issues/3142) [#3136](https://www.github.com/googleapis/google-cloud-go/issues/3136) [#3130](https://www.github.com/googleapis/google-cloud-go/issues/3130) [#3121](https://www.github.com/googleapis/google-cloud-go/issues/3121) [#3119](https://www.github.com/googleapis/google-cloud-go/issues/3119) + + +### Bug Fixes + +* **all:** Update hand-written clients to not use WithEndpoint override ([#3111](https://www.github.com/googleapis/google-cloud-go/issues/3111)) ([f0cfd05](https://www.github.com/googleapis/google-cloud-go/commit/f0cfd0532f5204ff16f7bae406efa72603d16f44)) +* **internal/godocfx:** rename README files to pkg-readme ([#3185](https://www.github.com/googleapis/google-cloud-go/issues/3185)) ([d3a8571](https://www.github.com/googleapis/google-cloud-go/commit/d3a85719be411b692aede3331abb29b5a7b3da9a)) + + ## [0.71.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.70.0...v0.71.0) (2020-10-30) diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md index ace2c003f..12e0c6104 100644 --- a/vendor/cloud.google.com/go/RELEASING.md +++ 
b/vendor/cloud.google.com/go/RELEASING.md @@ -131,8 +131,6 @@ To release a submodule: 1. On master, run `git log $CV.. -- datastore/` to list all the changes to the submodule directory since the last release. 1. Edit `datastore/CHANGES.md` to include a summary of the changes. -1. In `internal/version/version.go`, update `const Repo` to today's date with - the format `YYYYMMDD`. 1. In `internal/version` run `go generate`. 1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork, and create a PR titled `chore(datastore): release $NV`. diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod index d2e0502fb..45d251b54 100644 --- a/vendor/cloud.google.com/go/go.mod +++ b/vendor/cloud.google.com/go/go.mod @@ -13,11 +13,11 @@ require ( github.com/jstemmer/go-junit-report v0.9.1 go.opencensus.io v0.22.5 golang.org/x/lint v0.0.0-20200302205851-738671d3881b - golang.org/x/net v0.0.0-20201026091529-146b70c837a4 + golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 - golang.org/x/text v0.3.3 - golang.org/x/tools v0.0.0-20201030143252-cf7a54d06671 - google.golang.org/api v0.34.0 - google.golang.org/genproto v0.0.0-20201030142918-24207fddd1c3 - google.golang.org/grpc v1.33.1 + golang.org/x/text v0.3.4 + golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd + google.golang.org/api v0.35.0 + google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb + google.golang.org/grpc v1.33.2 ) diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum index d45d0018f..3de6286e8 100644 --- a/vendor/cloud.google.com/go/go.sum +++ b/vendor/cloud.google.com/go/go.sum @@ -265,8 +265,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201026091529-146b70c837a4 h1:awiuzyrRjJDb+OXi9ceHO3SDxVoN3JER57mhtqkdQBs= -golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 h1:42cLlJJdEh+ySyeUUbEQ5bsTiq8voBeTuweGVkY6Puw= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -338,6 +338,8 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= @@ -391,8 +393,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d h1:szSOL78iTCl0LF1AMjhSWJj golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201030143252-cf7a54d06671 h1:8ylPbtgKXakJwDQKPjMJ6BSnlEIFViV0tYnu5/1Omk8= -golang.org/x/tools v0.0.0-20201030143252-cf7a54d06671/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd h1:kJP9fbfkpUoA4y03Nxor8be+YbShcXP16fc7G4nlgpw= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -423,8 +425,8 @@ google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.34.0 h1:k40adF3uR+6x/+hO5Dh4ZFUqFp67vxvbpafFiJxl10A= -google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -474,8 +476,8 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c h1:Lq4llNryJoaVFRm google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201030142918-24207fddd1c3 h1:sg8vLDNIxFPHTchfhH1E3AI32BL3f23oie38xUWnJM8= -google.golang.org/genproto v0.0.0-20201030142918-24207fddd1c3/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb h1:MoNcrN5yaH+35Ge8RUwFbL7ekwq9ED2fiDpgWKrR29w= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= @@ -497,8 +499,8 @@ google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= google.golang.org/grpc v1.31.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index 80536ea1c..fd9dd91e9 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -26,7 +26,7 @@ import ( // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. -const Repo = "20201027" +const Repo = "20201104" // Go returns the Go runtime version. The returned string // has no whitespace. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index c30391084..1d1070a55 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -763,6 +763,7 @@ var awsPartition = partition{ "appsync": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -775,6 +776,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2996,6 +2998,7 @@ var awsPartition = partition{ "glue": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -6790,7 +6793,8 @@ var awscnPartition = partition{ "appsync": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "athena": service{ @@ -8485,6 +8489,18 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "lakeformation": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -9593,6 +9609,14 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "workspaces": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index c9b740572..1c4ff26bd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws 
const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.35.23" +const SDKVersion = "1.35.28" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index bcf7f0344..2ab5d1dad 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -1212,6 +1212,106 @@ func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBuc return out, req.Send() } +const opDeleteBucketIntelligentTieringConfiguration = "DeleteBucketIntelligentTieringConfiguration" + +// DeleteBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketIntelligentTieringConfiguration for more information on using the DeleteBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.DeleteBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBucketIntelligentTieringConfigurationInput) (req *request.Request, output *DeleteBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketIntelligentTieringConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + output = &DeleteBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. 
For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfiguration(input *DeleteBucketIntelligentTieringConfigurationInput) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketIntelligentTieringConfigurationWithContext is the same as DeleteBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *DeleteBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -2798,6 +2898,105 @@ func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEnc return out, req.Send() } +const opGetBucketIntelligentTieringConfiguration = "GetBucketIntelligentTieringConfiguration" + +// GetBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketIntelligentTieringConfiguration for more information on using the GetBucketIntelligentTieringConfiguration +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.GetBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketIntelligentTieringConfigurationInput) (req *request.Request, output *GetBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketIntelligentTieringConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &GetBucketIntelligentTieringConfigurationInput{} + } + + output = &GetBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketIntelligentTieringConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfiguration(input *GetBucketIntelligentTieringConfigurationInput) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketIntelligentTieringConfigurationWithContext is the same as GetBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *GetBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -4314,9 +4513,10 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // For more information about returning the ACL of an object, see GetObjectAcl // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). // -// If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE -// storage classes, before you can retrieve the object you must first restore -// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// If the object you are retrieving is stored in the S3 Glacier, S3 Glacier +// Deep Archive, S3 Intelligent-Tiering Archive, or S3 Intelligent-Tiering Deep +// Archive storage classes, before you can retrieve the object you must first +// restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). // Otherwise, this operation returns an InvalidObjectStateError error. For information // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). // @@ -4429,6 +4629,9 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // +// * ErrCodeInvalidObjectState "InvalidObjectState" +// Object is archived and inaccessible until restored. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) @@ -5379,6 +5582,105 @@ func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input return out, req.Send() } +const opListBucketIntelligentTieringConfigurations = "ListBucketIntelligentTieringConfigurations" + +// ListBucketIntelligentTieringConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketIntelligentTieringConfigurations operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketIntelligentTieringConfigurations for more information on using the ListBucketIntelligentTieringConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketIntelligentTieringConfigurationsRequest method. +// req, resp := client.ListBucketIntelligentTieringConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucketIntelligentTieringConfigurationsInput) (req *request.Request, output *ListBucketIntelligentTieringConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketIntelligentTieringConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &ListBucketIntelligentTieringConfigurationsInput{} + } + + output = &ListBucketIntelligentTieringConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketIntelligentTieringConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurations(input *ListBucketIntelligentTieringConfigurationsInput) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketIntelligentTieringConfigurationsWithContext is the same as ListBucketIntelligentTieringConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketIntelligentTieringConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketIntelligentTieringConfigurationsWithContext(ctx aws.Context, input *ListBucketIntelligentTieringConfigurationsInput, opts ...request.Option) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the @@ -7066,6 +7368,106 @@ func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEnc return out, req.Send() } +const opPutBucketIntelligentTieringConfiguration = "PutBucketIntelligentTieringConfiguration" + +// PutBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketIntelligentTieringConfiguration for more information on using the PutBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketIntelligentTieringConfigurationRequest method. 
+// req, resp := client.PutBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketIntelligentTieringConfigurationInput) (req *request.Request, output *PutBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketIntelligentTieringConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &PutBucketIntelligentTieringConfigurationInput{} + } + + output = &PutBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Puts an S3 Intelligent-Tiering configuration to the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketIntelligentTieringConfiguration for usage and error information. 
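+//
+//    // Editor's sketch (not generated SDK text): a minimal configuration that moves
+//    // objects under a "logs/" prefix into the Archive Access tier after 90 days,
+//    // using the IntelligentTieringConfiguration and Tiering types defined later in
+//    // this file. All literal values are placeholders.
+//    cfg := &s3.IntelligentTieringConfiguration{
+//        Id:     aws.String("archive-logs"),
+//        Status: aws.String(s3.IntelligentTieringStatusEnabled),
+//        Filter: &s3.IntelligentTieringFilter{Prefix: aws.String("logs/")},
+//        Tierings: []*s3.Tiering{{
+//            AccessTier: aws.String(s3.IntelligentTieringAccessTierArchiveAccess),
+//            Days:       aws.Int64(90),
+//        }},
+//    }
+//    _, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
+//        Bucket:                          aws.String("examplebucket"),
+//        Id:                              cfg.Id,
+//        IntelligentTieringConfiguration: cfg,
+//    })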
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfiguration(input *PutBucketIntelligentTieringConfigurationInput) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketIntelligentTieringConfigurationWithContext is the same as PutBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *PutBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -7950,14 +8352,14 @@ func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControls // PutBucketOwnershipControls API operation for Amazon Simple Storage Service. // // Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this -// operation, you must have the s3:GetBucketOwnershipControls permission. For +// operation, you must have the s3:PutBucketOwnershipControls permission. For // more information about Amazon S3 permissions, see Specifying Permissions // in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). // // For information about Amazon S3 Object Ownership, see Using Object Ownership // (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). // -// The following operations are related to GetBucketOwnershipControls: +// The following operations are related to PutBucketOwnershipControls: // // * GetBucketOwnershipControls // @@ -9599,58 +10001,56 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Restoring Archives // -// Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To -// access an archived object, you must first initiate a restore request. This -// restores a temporary copy of the archived object. In a restore request, you -// specify the number of days that you want the restored copy to exist. After -// the specified period, Amazon S3 deletes the temporary copy but the object -// remains archived in the GLACIER or DEEP_ARCHIVE storage class that object -// was restored from. +// Objects that you archive to the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering +// Archive, or S3 Intelligent-Tiering Deep Archive storage classes are not accessible +// in real time. For objects in Archive Access tier or Deep Archive Access tier +// you must first initiate a restore request, and then wait until the object +// is moved into the Frequent Access tier. 
For objects in S3 Glacier or S3 Glacier +// Deep Archive you must first initiate a restore request, and then wait until +// a temporary copy of the object is available. To access an archived object, +// you must restore the object for the duration (number of days) that you specify. // // To restore a specific object version, you can provide a version ID. If you // don't provide a version ID, Amazon S3 restores the current version. // -// The time it takes restore jobs to finish depends on which storage class the -// object is being restored from and which data access tier you specify. -// // When restoring an archived object (or using a select request), you can specify // one of the following data access tier options in the Tier element of the // request body: // // * Expedited - Expedited retrievals allow you to quickly access your data -// stored in the GLACIER storage class when occasional urgent requests for -// a subset of archives are required. For all but the largest archived objects -// (250 MB+), data accessed using Expedited retrievals are typically made -// available within 1–5 minutes. Provisioned capacity ensures that retrieval -// capacity for Expedited retrievals is available when you need it. Expedited -// retrievals and provisioned capacity are not available for the DEEP_ARCHIVE -// storage class. +// stored in the S3 Glacier or S3 Intelligent-Tiering Archive storage class +// when occasional urgent requests for a subset of archives are required. +// For all but the largest archived objects (250 MB+), data accessed using +// Expedited retrievals is typically made available within 1–5 minutes. +// Provisioned capacity ensures that retrieval capacity for Expedited retrievals +// is available when you need it. Expedited retrievals and provisioned capacity +// are not available for objects stored in the S3 Glacier Deep Archive or +// S3 Intelligent-Tiering Deep Archive storage class. // -// * Standard - S3 Standard retrievals allow you to access any of your archived -// objects within several hours. This is the default option for the GLACIER -// and DEEP_ARCHIVE retrieval requests that do not specify the retrieval -// option. S3 Standard retrievals typically complete within 3-5 hours from -// the GLACIER storage class and typically complete within 12 hours from -// the DEEP_ARCHIVE storage class. +// * Standard - Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for retrieval +// requests that do not specify the retrieval option. Standard retrievals +// typically finish within 3–5 hours for objects stored in the S3 Glacier +// or S3 Intelligent-Tiering Archive storage class. They typically finish +// within 12 hours for objects stored in the S3 Glacier Deep Archive or S3 +// Intelligent-Tiering Deep Archive storage class. Standard retrievals are +// free for objects stored in S3 Intelligent-Tiering. // -// * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval -// option, enabling you to retrieve large amounts, even petabytes, of data -// inexpensively in a day. Bulk retrievals typically complete within 5-12 -// hours from the GLACIER storage class and typically complete within 48 -// hours from the DEEP_ARCHIVE storage class. +// * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, +// enabling you to retrieve large amounts, even petabytes, of data inexpensively. 
+// Bulk retrievals typically finish within 5–12 hours for objects stored +// in the S3 Glacier or S3 Intelligent-Tiering Archive storage class. They +// typically finish within 48 hours for objects stored in the S3 Glacier +// Deep Archive or S3 Intelligent-Tiering Deep Archive storage class. Bulk +// retrievals are free for objects stored in S3 Intelligent-Tiering. // // For more information about archive retrieval options and provisioned capacity // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) // in the Amazon Simple Storage Service Developer Guide. // // You can use Amazon S3 restore speed upgrade to change the restore speed to -// a faster speed while it is in progress. You upgrade the speed of an in-progress -// restoration by issuing another restore request to the same object, setting -// a new Tier request element. When issuing a request to upgrade the restore -// tier, you must choose a tier that is faster than the tier that the in-progress -// restore is using. You must not change any other parameters, such as the Days -// request element. For more information, see Upgrading the Speed of an In-Progress -// Restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// a faster speed while it is in progress. For more information, see Upgrading +// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) // in the Amazon Simple Storage Service Developer Guide. // // To get the status of object restoration, you can send a HEAD request. Operations @@ -9679,11 +10079,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // A successful operation returns either the 200 OK or 202 Accepted status code. // -// * If the object copy is not previously restored, then Amazon S3 returns -// 202 Accepted in the response. +// * If the object is not previously restored, then Amazon S3 returns 202 +// Accepted in the response. // -// * If the object copy is previously restored, Amazon S3 returns 200 OK -// in the response. +// * If the object is previously restored, Amazon S3 returns 200 OK in the +// response. // // Special Errors // @@ -9691,11 +10091,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // (This error does not apply to SELECT type requests.) HTTP Status Code: // 409 Conflict SOAP Fault Code Prefix: Client // -// * Code: GlacierExpeditedRetrievalNotAvailable Cause: S3 Glacier expedited -// retrievals are currently not available. Try again later. (Returned if -// there is insufficient capacity to process the Expedited request. This -// error applies only to Expedited retrievals and not to S3 Standard or Bulk -// retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A +// * Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals +// are currently not available. Try again later. (Returned if there is insufficient +// capacity to process the Expedited request. This error applies only to +// Expedited retrievals and not to S3 Standard or Bulk retrievals.) 
HTTP +// Status Code: 503 SOAP Fault Code Prefix: N/A // // Related Resources // @@ -13919,6 +14319,110 @@ func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) { return &s, nil } +type DeleteBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetBucket(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetId(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + type DeleteBucketInventoryConfigurationInput struct { _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` @@ -14269,6 +14773,9 @@ type DeleteBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -14819,24 +15326,25 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -// Specifies whether Amazon S3 replicates the delete markers. If you specify -// a Filter, you must specify this element. However, in the latest version of -// replication configuration (when Filter is specified), Amazon S3 doesn't replicate -// delete markers. Therefore, the DeleteMarkerReplication element can contain -// only Disabled. For an example configuration, see Basic Rule -// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). +// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter +// in your replication configuration, you must also include a DeleteMarkerReplication +// element. If your Filter includes a Tag element, the DeleteMarkerReplication +// Status must be set to Disabled, because Amazon S3 does not support replicating +// delete markers for tag-based rules. For an example configuration, see Basic +// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). // -// If you don't specify the Filter element, Amazon S3 assumes that the replication -// configuration is the earlier version, V1. In the earlier version, Amazon -// S3 handled replication of delete markers differently. For more information, +// For more information about delete marker replication, see Basic Rule Configuration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). +// +// If you are using an earlier version of the replication configuration, Amazon +// S3 handles replication of delete markers differently. For more information, // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). type DeleteMarkerReplication struct { _ struct{} `type:"structure"` // Indicates whether to replicate delete markers. // - // In the current implementation, Amazon S3 doesn't replicate the delete markers. - // The status must be Disabled. 
Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"` } @@ -15597,9 +16105,8 @@ type Destination struct { // is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // A container specifying replication metrics-related settings enabling metrics - // and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified - // together with a ReplicationTime block. + // A container specifying replication metrics-related settings enabling replication + // metrics and events. Metrics *Metrics `type:"structure"` // A container specifying S3 Replication Time Control (S3 RTC), including whether @@ -16928,6 +17435,119 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv return s } +type GetBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"GetBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetBucket(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetId(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure" payload:"IntelligentTieringConfiguration"` + + // Container for S3 Intelligent-Tiering configuration. + IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *GetBucketIntelligentTieringConfigurationOutput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *GetBucketIntelligentTieringConfigurationOutput { + s.IntelligentTieringConfiguration = v + return s +} + type GetBucketInventoryConfigurationInput struct { _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` @@ -17709,6 +18329,9 @@ type GetBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20261,7 +20884,7 @@ func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *Public type GlacierJobParameters struct { _ struct{} `type:"structure"` - // S3 Glacier retrieval tier at which the restore will be processed. + // Retrieval tier at which the restore will be processed. // // Tier is a required field Tier *string `type:"string" required:"true" enum:"Tier"` @@ -20809,6 +21432,9 @@ type HeadObjectOutput struct { // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + // The archive state of the head object. + ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` + // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` @@ -20980,6 +21606,12 @@ func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { return s } +// SetArchiveStatus sets the ArchiveStatus field's value. +func (s *HeadObjectOutput) SetArchiveStatus(v string) *HeadObjectOutput { + s.ArchiveStatus = &v + return s +} + // SetCacheControl sets the CacheControl field's value. func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { s.CacheControl = &v @@ -21271,6 +21903,224 @@ func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { return s } +// A container for specifying S3 Intelligent-Tiering filters. The filters determine +// the subset of objects to which the rule applies. +type IntelligentTieringAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the configuration applies. 
+ Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the configuration + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s IntelligentTieringAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringAndOperator) SetPrefix(v string) *IntelligentTieringAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *IntelligentTieringAndOperator) SetTags(v []*Tag) *IntelligentTieringAndOperator { + s.Tags = v + return s +} + +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see Storage +// class for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +type IntelligentTieringConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a bucket filter. The configuration only includes objects that meet + // the filter's criteria. + Filter *IntelligentTieringFilter `type:"structure"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies the status of the configuration. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"IntelligentTieringStatus"` + + // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. + // + // Tierings is a required field + Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s IntelligentTieringConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *IntelligentTieringConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Tierings == nil { + invalidParams.Add(request.NewErrParamRequired("Tierings")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Tierings != nil { + for i, v := range s.Tierings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tierings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *IntelligentTieringConfiguration) SetFilter(v *IntelligentTieringFilter) *IntelligentTieringConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *IntelligentTieringConfiguration) SetId(v string) *IntelligentTieringConfiguration { + s.Id = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *IntelligentTieringConfiguration) SetStatus(v string) *IntelligentTieringConfiguration { + s.Status = &v + return s +} + +// SetTierings sets the Tierings field's value. +func (s *IntelligentTieringConfiguration) SetTierings(v []*Tiering) *IntelligentTieringConfiguration { + s.Tierings = v + return s +} + +// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration +// applies to. +type IntelligentTieringFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // S3 Intelligent-Tiering filter. The operator must have at least two predicates, + // and an object must match all of the predicates in order for the filter to apply. + And *IntelligentTieringAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // A container for a key-value pair. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s IntelligentTieringFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *IntelligentTieringFilter) SetAnd(v *IntelligentTieringAndOperator) *IntelligentTieringFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringFilter) SetPrefix(v string) *IntelligentTieringFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. 
+func (s *IntelligentTieringFilter) SetTag(v *Tag) *IntelligentTieringFilter { + s.Tag = v + return s +} + // Specifies the inventory configuration for an Amazon S3 bucket. For more information, // see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) // in the Amazon Simple Storage Service API Reference. @@ -22331,6 +23181,147 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str return s } +type ListBucketIntelligentTieringConfigurationsInput struct { + _ struct{} `locationName:"ListBucketIntelligentTieringConfigurationsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketIntelligentTieringConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketIntelligentTieringConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetBucket(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.ContinuationToken = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s ListBucketIntelligentTieringConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketIntelligentTieringConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `type:"string"` + + // The list of S3 Intelligent-Tiering configurations for a bucket. + IntelligentTieringConfigurationList []*IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of S3 Intelligent-Tiering configurations + // is complete. A value of true indicates that the list is not complete and the + // NextContinuationToken will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this S3 Intelligent-Tiering configuration listing. + // Use the NextContinuationToken from this response to continue the listing in + // a subsequent request. The continuation token is an opaque value that Amazon + // S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIntelligentTieringConfigurationList sets the IntelligentTieringConfigurationList field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIntelligentTieringConfigurationList(v []*IntelligentTieringConfiguration) *ListBucketIntelligentTieringConfigurationsOutput { + s.IntelligentTieringConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIsTruncated(v bool) *ListBucketIntelligentTieringConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the @@ -24548,17 +25539,14 @@ func (s *MetadataEntry) SetValue(v string) *MetadataEntry { return s } -// A container specifying replication metrics-related settings enabling metrics -// and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified -// together with a ReplicationTime block. +// A container specifying replication metrics-related settings enabling replication +// metrics and events. type Metrics struct { _ struct{} `type:"structure"` // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold // event. 
- // - // EventThreshold is a required field - EventThreshold *ReplicationTimeValue `type:"structure" required:"true"` + EventThreshold *ReplicationTimeValue `type:"structure"` // Specifies whether the replication metrics are enabled. // @@ -24579,9 +25567,6 @@ func (s Metrics) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *Metrics) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Metrics"} - if s.EventThreshold == nil { - invalidParams.Add(request.NewErrParamRequired("EventThreshold")) - } if s.Status == nil { invalidParams.Add(request.NewErrParamRequired("Status")) } @@ -25852,8 +26837,8 @@ type PublicAccessBlockConfiguration struct { // Specifies whether Amazon S3 should restrict public bucket policies for this // bucket. Setting this element to TRUE restricts access to this bucket to only - // AWS services and authorized users within this account if the bucket has a - // public policy. + // AWS service principals and authorized users within this account if the bucket + // has a public policy. // // Enabling this setting doesn't affect previously stored bucket policies, except // that public and cross-account access within any public bucket policy, including @@ -26556,6 +27541,129 @@ func (s PutBucketEncryptionOutput) GoString() string { return s.String() } +type PutBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"PutBucketIntelligentTieringConfigurationRequest" type:"structure" payload:"IntelligentTieringConfiguration"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Container for S3 Intelligent-Tiering configuration. + // + // IntelligentTieringConfiguration is a required field + IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IntelligentTieringConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("IntelligentTieringConfiguration")) + } + if s.IntelligentTieringConfiguration != nil { + if err := s.IntelligentTieringConfiguration.Validate(); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetBucket(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetId(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *PutBucketIntelligentTieringConfigurationInput { + s.IntelligentTieringConfiguration = v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + type PutBucketInventoryConfigurationInput struct { _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` @@ -27409,6 +28517,9 @@ type PutBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want @@ -27661,6 +28772,7 @@ type PutBucketReplicationInput struct { // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // A token to allow Object Lock to be enabled for an existing bucket. Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } @@ -30211,16 +31323,18 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo type ReplicationRule struct { _ struct{} `type:"structure"` - // Specifies whether Amazon S3 replicates the delete markers. If you specify - // a Filter, you must specify this element. However, in the latest version of - // replication configuration (when Filter is specified), Amazon S3 doesn't replicate - // delete markers. Therefore, the DeleteMarkerReplication element can contain - // only Disabled. For an example configuration, see Basic Rule - // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter + // in your replication configuration, you must also include a DeleteMarkerReplication + // element. If your Filter includes a Tag element, the DeleteMarkerReplication + // Status must be set to Disabled, because Amazon S3 does not support replicating + // delete markers for tag-based rules. For an example configuration, see Basic + // Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). // - // If you don't specify the Filter element, Amazon S3 assumes that the replication - // configuration is the earlier version, V1. In the earlier version, Amazon - // S3 handled replication of delete markers differently. For more information, + // For more information about delete marker replication, see Basic Rule Configuration + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). + // + // If you are using an earlier version of the replication configuration, Amazon + // S3 handles replication of delete markers differently. For more information, // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` @@ -30666,7 +31780,7 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { type RestoreObjectInput struct { _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` - // The bucket name or containing the object to restore. + // The bucket name containing the object to restore. // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. @@ -30857,6 +31971,9 @@ type RestoreRequest struct { // Lifetime of the active copy in days. Do not use with restores that specify // OutputLocation. 
+ // + // The Days element is required for regular restores, and must not be provided + // for select requests. Days *int64 `type:"integer"` // The optional description for the job. @@ -30872,7 +31989,7 @@ type RestoreRequest struct { // Describes the parameters for Select job types. SelectParameters *SelectParameters `type:"structure"` - // S3 Glacier retrieval tier at which the restore will be processed. + // Retrieval tier at which the restore will be processed. Tier *string `type:"string" enum:"Tier"` // Type of restore request. @@ -32348,6 +33465,65 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { return s } +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. +type Tiering struct { + _ struct{} `type:"structure"` + + // S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing + // frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // + // AccessTier is a required field + AccessTier *string `type:"string" required:"true" enum:"IntelligentTieringAccessTier"` + + // The number of days that you want your archived data to be accessible. The + // minimum number of days specified in the restore request must be at least + // 90 days. If a smaller value is specified it will be ignored. + // + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Tiering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tiering) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tiering) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tiering"} + if s.AccessTier == nil { + invalidParams.Add(request.NewErrParamRequired("AccessTier")) + } + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessTier sets the AccessTier field's value. +func (s *Tiering) SetAccessTier(v string) *Tiering { + s.AccessTier = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Tiering) SetDays(v int64) *Tiering { + s.Days = &v + return s +} + // A container for specifying the configuration for publication of messages // to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 // detects specified events.
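The Tiering type above, together with the PutBucketIntelligentTieringConfigurationInput plumbing added earlier in this diff and the enum values added just below, is enough to drive the new API end to end. As a rough sketch of how a caller might wire the pieces together — the session setup and bucket name are placeholders, and the Id/Status/Tierings fields on IntelligentTieringConfiguration are assumed from the upstream SDK, since that struct's definition is not shown in these hunks:

```Go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Move objects to ARCHIVE_ACCESS after 90 days without access; per the
	// Tiering docs above, 90 is the minimum and smaller values are ignored.
	cfg := &s3.IntelligentTieringConfiguration{
		Id:     aws.String("archive-after-90d"),
		Status: aws.String(s3.IntelligentTieringStatusEnabled),
		Tierings: []*s3.Tiering{{
			AccessTier: aws.String(s3.IntelligentTieringAccessTierArchiveAccess),
			Days:       aws.Int64(90),
		}},
	}

	_, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket:                          aws.String("example-bucket"), // placeholder
		Id:                              cfg.Id,
		IntelligentTieringConfiguration: cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

The Validate methods in the hunks above run client-side, so a missing Bucket, Id, or nested configuration is rejected as request.ErrInvalidParams before any request goes over the wire.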
@@ -33406,6 +34582,22 @@ func AnalyticsS3ExportFileFormat_Values() []string { } } +const ( + // ArchiveStatusArchiveAccess is a ArchiveStatus enum value + ArchiveStatusArchiveAccess = "ARCHIVE_ACCESS" + + // ArchiveStatusDeepArchiveAccess is a ArchiveStatus enum value + ArchiveStatusDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// ArchiveStatus_Values returns all elements of the ArchiveStatus enum +func ArchiveStatus_Values() []string { + return []string{ + ArchiveStatusArchiveAccess, + ArchiveStatusDeepArchiveAccess, + } +} + const ( // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value BucketAccelerateStatusEnabled = "Enabled" @@ -33801,6 +34993,38 @@ func FilterRuleName_Values() []string { } } +const ( + // IntelligentTieringAccessTierArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierArchiveAccess = "ARCHIVE_ACCESS" + + // IntelligentTieringAccessTierDeepArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// IntelligentTieringAccessTier_Values returns all elements of the IntelligentTieringAccessTier enum +func IntelligentTieringAccessTier_Values() []string { + return []string{ + IntelligentTieringAccessTierArchiveAccess, + IntelligentTieringAccessTierDeepArchiveAccess, + } +} + +const ( + // IntelligentTieringStatusEnabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusEnabled = "Enabled" + + // IntelligentTieringStatusDisabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusDisabled = "Disabled" +) + +// IntelligentTieringStatus_Values returns all elements of the IntelligentTieringStatus enum +func IntelligentTieringStatus_Values() []string { + return []string{ + IntelligentTieringStatusEnabled, + IntelligentTieringStatusDisabled, + } +} + const ( // InventoryFormatCsv is a InventoryFormat enum value InventoryFormatCsv = "CSV" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index dd73d460c..f64b55135 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -21,6 +21,12 @@ const ( // bucket access control lists (ACLs). ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + // ErrCodeInvalidObjectState for service response error code + // "InvalidObjectState". + // + // Object is archived and inaccessible until restored. + ErrCodeInvalidObjectState = "InvalidObjectState" + // ErrCodeNoSuchBucket for service response error code // "NoSuchBucket". 
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go index bca091d75..7c6221878 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -96,6 +96,10 @@ type S3API interface { DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) + DeleteBucketIntelligentTieringConfiguration(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.DeleteBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationRequest(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.DeleteBucketIntelligentTieringConfigurationOutput) + DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) @@ -164,6 +168,10 @@ type S3API interface { GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error) GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) + GetBucketIntelligentTieringConfiguration(*s3.GetBucketIntelligentTieringConfigurationInput) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + GetBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.GetBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + GetBucketIntelligentTieringConfigurationRequest(*s3.GetBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.GetBucketIntelligentTieringConfigurationOutput) + GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) @@ -272,6 +280,10 @@ type S3API interface { ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) + ListBucketIntelligentTieringConfigurations(*s3.ListBucketIntelligentTieringConfigurationsInput) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsWithContext(aws.Context, *s3.ListBucketIntelligentTieringConfigurationsInput, ...request.Option) 
(*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsRequest(*s3.ListBucketIntelligentTieringConfigurationsInput) (*request.Request, *s3.ListBucketIntelligentTieringConfigurationsOutput) + + ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error) ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) @@ -339,6 +351,10 @@ type S3API interface { PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error) PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) + PutBucketIntelligentTieringConfiguration(*s3.PutBucketIntelligentTieringConfigurationInput) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.PutBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationRequest(*s3.PutBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.PutBucketIntelligentTieringConfigurationOutput) + + PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go index b26d19ec2..35fc072a3 100644 --- a/vendor/github.com/klauspost/compress/flate/gen_inflate.go +++ b/vendor/github.com/klauspost/compress/flate/gen_inflate.go @@ -42,16 +42,6 @@ func (f *decompressor) $FUNCNAME$() { stateDict ) fr := f.r.($TYPE$) - moreBits := func() error { - c, err := fr.ReadByte() - if err != nil { - return noEOF(err) - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - return nil - } switch f.stepState { case stateInit: @@ -112,9 +102,7 @@ readLiteral: } } - var n uint // number of bits extra var length int - var err error switch { case v < 256: f.dict.writeByte(byte(v)) @@ -131,25 +119,26 @@ readLiteral: // otherwise, reference to older data case v < 265: length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 case v < maxNumLit: - length = 258 - n = 0 + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for f.nb < n { + c, err := fr.ReadByte() + if err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + } + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 + f.nb -= n default: if debugDecode { fmt.Println(v, ">= 
maxNumLit") @@ -157,45 +146,70 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - if n > 0 { - for f.nb < n { - if err = moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - } var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = f.moreBits(); err != nil { + c, err := fr.ReadByte() + if err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } f.err = err return } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 } dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it to + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + dist = uint32(chunk >> huffmanValueShift) + break } - f.err = err - return } - dist = uint32(sym) } switch { @@ -206,13 +220,17 @@ readLiteral: // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = f.moreBits(); err != nil { + c, err := fr.ReadByte() + if err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 } extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) f.b >>= nb & regSizeMaskUint32 diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 189e9fe0b..16bc51408 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -29,6 +29,13 @@ const ( debugDecode = false ) +// Value of length - 3 and extra bits. 
+type lengthExtra struct { + length, extra uint8 +} + +var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} + // Initialize the fixedHuffmanDecoder only once upon first use. var fixedOnce sync.Once var fixedHuffmanDecoder huffmanDecoder diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index 9a92a1b30..cc6db2792 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -20,16 +20,6 @@ func (f *decompressor) huffmanBytesBuffer() { stateDict ) fr := f.r.(*bytes.Buffer) - moreBits := func() error { - c, err := fr.ReadByte() - if err != nil { - return noEOF(err) - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - return nil - } switch f.stepState { case stateInit: @@ -90,9 +80,7 @@ readLiteral: } } - var n uint // number of bits extra var length int - var err error switch { case v < 256: f.dict.writeByte(byte(v)) @@ -109,25 +97,26 @@ readLiteral: // otherwise, reference to older data case v < 265: length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 case v < maxNumLit: - length = 258 - n = 0 + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for f.nb < n { + c, err := fr.ReadByte() + if err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + } + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 + f.nb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") @@ -135,45 +124,70 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - if n > 0 { - for f.nb < n { - if err = moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - } var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = f.moreBits(); err != nil { + c, err := fr.ReadByte() + if err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } f.err = err return } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 } dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) + // Since a huffmanDecoder can be 
empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it to + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + dist = uint32(chunk >> huffmanValueShift) + break } - f.err = err - return } - dist = uint32(sym) } switch { @@ -184,13 +198,17 @@ readLiteral: // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = f.moreBits(); err != nil { + c, err := fr.ReadByte() + if err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 } extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) f.b >>= nb & regSizeMaskUint32 @@ -246,16 +264,6 @@ func (f *decompressor) huffmanBytesReader() { stateDict ) fr := f.r.(*bytes.Reader) - moreBits := func() error { - c, err := fr.ReadByte() - if err != nil { - return noEOF(err) - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - return nil - } switch f.stepState { case stateInit: @@ -316,9 +324,7 @@ readLiteral: } } - var n uint // number of bits extra var length int - var err error switch { case v < 256: f.dict.writeByte(byte(v)) @@ -335,25 +341,26 @@ readLiteral: // otherwise, reference to older data case v < 265: length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 case v < maxNumLit: - length = 258 - n = 0 + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for f.nb < n { + c, err := fr.ReadByte() + if err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + } + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 + f.nb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") @@ -361,45 +368,70 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - if n > 0 { - for f.nb < n { - if err = moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - } var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = f.moreBits(); err != nil { + c, err := fr.ReadByte() + if err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } f.err = err return } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 } dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 
3))) f.b >>= 5 f.nb -= 5 } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it to + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + dist = uint32(chunk >> huffmanValueShift) + break } - f.err = err - return } - dist = uint32(sym) } switch { @@ -410,13 +442,17 @@ readLiteral: // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = f.moreBits(); err != nil { + c, err := fr.ReadByte() + if err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 } extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) f.b >>= nb & regSizeMaskUint32 @@ -472,16 +508,6 @@ func (f *decompressor) huffmanBufioReader() { stateDict ) fr := f.r.(*bufio.Reader) - moreBits := func() error { - c, err := fr.ReadByte() - if err != nil { - return noEOF(err) - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - return nil - } switch f.stepState { case stateInit: @@ -542,9 +568,7 @@ readLiteral: } } - var n uint // number of bits extra var length int - var err error switch { case v < 256: f.dict.writeByte(byte(v)) @@ -561,25 +585,26 @@ readLiteral: // otherwise, reference to older data case v < 265: length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 case v < maxNumLit: - length = 258 - n = 0 + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for f.nb < n { + c, err := fr.ReadByte() + if err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + } + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 + f.nb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") @@ -587,45 +612,70 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - if n > 0 { - for f.nb < n { - if err = moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - } var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = f.moreBits(); err != nil { + c, err := fr.ReadByte() + if err != nil { if debugDecode 
{ fmt.Println("morebits f.nb<5:", err) } f.err = err return } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 } dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it to + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + dist = uint32(chunk >> huffmanValueShift) + break } - f.err = err - return } - dist = uint32(sym) } switch { @@ -636,13 +686,17 @@ readLiteral: // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = f.moreBits(); err != nil { + c, err := fr.ReadByte() + if err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 } extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) f.b >>= nb & regSizeMaskUint32 @@ -698,16 +752,6 @@ func (f *decompressor) huffmanStringsReader() { stateDict ) fr := f.r.(*strings.Reader) - moreBits := func() error { - c, err := fr.ReadByte() - if err != nil { - return noEOF(err) - } - f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 - return nil - } switch f.stepState { case stateInit: @@ -768,9 +812,7 @@ readLiteral: } } - var n uint // number of bits extra var length int - var err error switch { case v < 256: f.dict.writeByte(byte(v)) @@ -787,25 +829,26 @@ readLiteral: // otherwise, reference to older data case v < 265: length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 case v < maxNumLit: - length = 258 - n = 0 + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for f.nb < n { + c, err := fr.ReadByte() + if err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + } + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 + f.nb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") @@ -813,45 +856,70 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - if n > 0 { - for f.nb < n { - if err = moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & 
regSizeMaskUint32 - f.nb -= n - } var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = f.moreBits(); err != nil { + c, err := fr.ReadByte() + if err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } f.err = err return } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 } dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it to + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := fr.ReadByte() + if err != nil { + f.b = b + f.nb = nb + f.err = noEOF(err) + return + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + dist = uint32(chunk >> huffmanValueShift) + break } - f.err = err - return } - dist = uint32(sym) } switch { @@ -862,13 +930,17 @@ readLiteral: // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = f.moreBits(); err != nil { + c, err := fr.ReadByte() + if err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 } extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) f.b >>= nb & regSizeMaskUint32 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 07f7285f0..08e553f75 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -54,11 +54,11 @@ To create a writer with default options, do like this: ```Go // Compress input to output. func Compress(in io.Reader, out io.Writer) error { - w, err := NewWriter(output) + enc, err := zstd.NewWriter(out) if err != nil { return err } - _, err := io.Copy(w, input) + _, err = io.Copy(enc, in) if err != nil { enc.Close() return err diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index d78be6d42..cdda0de58 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -323,19 +323,23 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { // Never preallocate more than 1 GB up front. - if uint64(cap(dst)) < frame.FrameContentSize { + if cap(dst)-len(dst) < int(frame.FrameContentSize) { dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) copy(dst2, dst) dst = dst2 } } if cap(dst) == 0 { - // Allocate window size * 2 by default if nothing is provided and we didn't get frame content size. 
- size := frame.WindowSize * 2 + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 // Cap to 1 MB. if size > 1<<20 { size = 1 << 20 } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } dst = make([]byte, 0, size) } diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 342e5940d..b6079b31e 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -299,6 +299,17 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) return nil } + // Check for duplicate label names. + labels := make(map[string]struct{}) + for _, l := range p.currentMetric.Label { + lName := l.GetName() + if _, exists := labels[lName]; !exists { + labels[lName] = struct{}{} + } else { + p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + return nil + } + } return p.startLabelValue } diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index 06f84b855..6b4027b33 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s index 8a7278319..8a06b87d7 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s index 6321421f2..f2397fde5 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s index 333242d50..c9e6b6fc8 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc // +build arm,darwin #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s index 97e017437..89843f8f4 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !gccgo +// +build gc // +build arm64,darwin #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s index 603dd5728..27674e1ca 100644 --- a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s index c9a0a2601..49f0ac236 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s index 35172477c..f2dfc57b8 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s index 9227c875b..6d740db2c 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s index d9318cbf0..a8f5a29b3 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 448bebbb5..0655ecbfb 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index c6468a958..bc3fb6ac3 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index cf0f3575c..55b13c7ba 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index afe6fdf6b..22a83d8e3 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -4,7 +4,7 @@ // +build linux // +build arm64 -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index ab9d63831..dc222b90c 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -4,7 +4,7 @@ // +build linux // +build mips64 mips64le -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 99e539904..d333f13cf 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -4,7 +4,7 @@ // +build linux // +build mips mipsle -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 88f712557..459a629c2 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -4,7 +4,7 @@ // +build linux // +build ppc64 ppc64le -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index 3cfefed2e..04d38497c 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build riscv64,!gccgo +// +build riscv64,gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index a5a863c6b..cc303989e 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -4,7 +4,7 @@ // +build s390x // +build linux -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s index 48bdcd763..ae7b498d5 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s index 2ede05c72..e57367c17 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s index e8928571c..d7da175e1 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s index 6f98ba5a3..e7cbe1904 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s index 00576f3c8..2f00b0310 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s index 790ef77f8..07632c99c 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s index 469bfa100..73e997320 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s index 0cedea3d3..c47302aa4 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 567a4763c..47c93fcb6 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index ded8260f3..1f2c755a7 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index 5e9269063..86781eac2 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// -// +build ppc64 s390x mips mips64 +// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index bcdb5d30e..8822d8541 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le riscv64 +// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 21a4946ba..baa771f8a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build amd64,linux -// +build !gccgo +// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index c26e6ec23..9edf3961b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux,!gccgo +// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 070bd3899..90e33d8cf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux,!gccgo,386 +// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index 8c514c95e..1a97baae7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build arm,!gccgo,linux +// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 1c70d1b69..87bd161ce 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. 
// +build darwin dragonfly freebsd linux netbsd openbsd solaris -// +build !gccgo,!ppc64le,!ppc64 +// +build gc,!ppc64le,!ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index 86dc765ab..d36216c3c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -4,7 +4,7 @@ // +build linux // +build ppc64le ppc64 -// +build !gccgo +// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 4b3a8ad7b..0550da06d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. // +build aix,ppc64 -// +build !gccgo +// +build gc package unix diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 82076fb74..9cd147b7e 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -32,6 +32,8 @@ type DLLError struct { func (e *DLLError) Error() string { return e.Msg } +func (e *DLLError) Unwrap() error { return e.Err } + // A DLL implements access to a single DLL. type DLL struct { Name string diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go index e409d76f0..1adb60739 100644 --- a/vendor/golang.org/x/sys/windows/memory_windows.go +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -16,13 +16,19 @@ const ( MEM_RESET_UNDO = 0x01000000 MEM_LARGE_PAGES = 0x20000000 - PAGE_NOACCESS = 0x01 - PAGE_READONLY = 0x02 - PAGE_READWRITE = 0x04 - PAGE_WRITECOPY = 0x08 - PAGE_EXECUTE_READ = 0x20 - PAGE_EXECUTE_READWRITE = 0x40 - PAGE_EXECUTE_WRITECOPY = 0x80 + PAGE_NOACCESS = 0x00000001 + PAGE_READONLY = 0x00000002 + PAGE_READWRITE = 0x00000004 + PAGE_WRITECOPY = 0x00000008 + PAGE_EXECUTE = 0x00000010 + PAGE_EXECUTE_READ = 0x00000020 + PAGE_EXECUTE_READWRITE = 0x00000040 + PAGE_EXECUTE_WRITECOPY = 0x00000080 + PAGE_GUARD = 0x00000100 + PAGE_NOCACHE = 0x00000200 + PAGE_WRITECOMBINE = 0x00000400 + PAGE_TARGETS_INVALID = 0x40000000 + PAGE_TARGETS_NO_UPDATE = 0x40000000 QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002 QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001 diff --git a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go new file mode 100644 index 000000000..1681810e0 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go @@ -0,0 +1,100 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +import "syscall" + +const ( + ERROR_EXPECTED_SECTION_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0 + ERROR_BAD_SECTION_NAME_LINE syscall.Errno = 0x20000000 | 0xC0000000 | 1 + ERROR_SECTION_NAME_TOO_LONG syscall.Errno = 0x20000000 | 0xC0000000 | 2 + ERROR_GENERAL_SYNTAX syscall.Errno = 0x20000000 | 0xC0000000 | 3 + ERROR_WRONG_INF_STYLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x100 + ERROR_SECTION_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x101 + ERROR_LINE_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x102 + ERROR_NO_BACKUP syscall.Errno = 0x20000000 | 0xC0000000 | 0x103 + ERROR_NO_ASSOCIATED_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x200 + ERROR_CLASS_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x201 + ERROR_DUPLICATE_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x202 + ERROR_NO_DRIVER_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x203 + ERROR_KEY_DOES_NOT_EXIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x204 + ERROR_INVALID_DEVINST_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x205 + ERROR_INVALID_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x206 + ERROR_DEVINST_ALREADY_EXISTS syscall.Errno = 0x20000000 | 0xC0000000 | 0x207 + ERROR_DEVINFO_NOT_REGISTERED syscall.Errno = 0x20000000 | 0xC0000000 | 0x208 + ERROR_INVALID_REG_PROPERTY syscall.Errno = 0x20000000 | 0xC0000000 | 0x209 + ERROR_NO_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x20A + ERROR_NO_SUCH_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x20B + ERROR_CANT_LOAD_CLASS_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x20C + ERROR_INVALID_CLASS_INSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x20D + ERROR_DI_DO_DEFAULT syscall.Errno = 0x20000000 | 0xC0000000 | 0x20E + ERROR_DI_NOFILECOPY syscall.Errno = 0x20000000 | 0xC0000000 | 0x20F + ERROR_INVALID_HWPROFILE syscall.Errno = 0x20000000 | 0xC0000000 | 0x210 + ERROR_NO_DEVICE_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x211 + ERROR_DEVINFO_LIST_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x212 + ERROR_DEVINFO_DATA_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x213 + ERROR_DI_BAD_PATH syscall.Errno = 0x20000000 | 0xC0000000 | 0x214 + ERROR_NO_CLASSINSTALL_PARAMS syscall.Errno = 0x20000000 | 0xC0000000 | 0x215 + ERROR_FILEQUEUE_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x216 + ERROR_BAD_SERVICE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x217 + ERROR_NO_CLASS_DRIVER_LIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x218 + ERROR_NO_ASSOCIATED_SERVICE syscall.Errno = 0x20000000 | 0xC0000000 | 0x219 + ERROR_NO_DEFAULT_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21A + ERROR_DEVICE_INTERFACE_ACTIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21B + ERROR_DEVICE_INTERFACE_REMOVED syscall.Errno = 0x20000000 | 0xC0000000 | 0x21C + ERROR_BAD_INTERFACE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x21D + ERROR_NO_SUCH_INTERFACE_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x21E + ERROR_INVALID_REFERENCE_STRING syscall.Errno = 0x20000000 | 0xC0000000 | 0x21F + ERROR_INVALID_MACHINENAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x220 + ERROR_REMOTE_COMM_FAILURE syscall.Errno = 0x20000000 | 0xC0000000 | 0x221 + ERROR_MACHINE_UNAVAILABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x222 + ERROR_NO_CONFIGMGR_SERVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x223 + ERROR_INVALID_PROPPAGE_PROVIDER syscall.Errno = 0x20000000 | 0xC0000000 | 0x224 + ERROR_NO_SUCH_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x225 + ERROR_DI_POSTPROCESSING_REQUIRED 
syscall.Errno = 0x20000000 | 0xC0000000 | 0x226 + ERROR_INVALID_COINSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x227 + ERROR_NO_COMPAT_DRIVERS syscall.Errno = 0x20000000 | 0xC0000000 | 0x228 + ERROR_NO_DEVICE_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x229 + ERROR_INVALID_INF_LOGCONFIG syscall.Errno = 0x20000000 | 0xC0000000 | 0x22A + ERROR_DI_DONT_INSTALL syscall.Errno = 0x20000000 | 0xC0000000 | 0x22B + ERROR_INVALID_FILTER_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22C + ERROR_NON_WINDOWS_NT_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22D + ERROR_NON_WINDOWS_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22E + ERROR_NO_CATALOG_FOR_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x22F + ERROR_DEVINSTALL_QUEUE_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x230 + ERROR_NOT_DISABLEABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x231 + ERROR_CANT_REMOVE_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x232 + ERROR_INVALID_TARGET syscall.Errno = 0x20000000 | 0xC0000000 | 0x233 + ERROR_DRIVER_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x234 + ERROR_IN_WOW64 syscall.Errno = 0x20000000 | 0xC0000000 | 0x235 + ERROR_SET_SYSTEM_RESTORE_POINT syscall.Errno = 0x20000000 | 0xC0000000 | 0x236 + ERROR_SCE_DISABLED syscall.Errno = 0x20000000 | 0xC0000000 | 0x238 + ERROR_UNKNOWN_EXCEPTION syscall.Errno = 0x20000000 | 0xC0000000 | 0x239 + ERROR_PNP_REGISTRY_ERROR syscall.Errno = 0x20000000 | 0xC0000000 | 0x23A + ERROR_REMOTE_REQUEST_UNSUPPORTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x23B + ERROR_NOT_AN_INSTALLED_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x23C + ERROR_INF_IN_USE_BY_DEVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x23D + ERROR_DI_FUNCTION_OBSOLETE syscall.Errno = 0x20000000 | 0xC0000000 | 0x23E + ERROR_NO_AUTHENTICODE_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x23F + ERROR_AUTHENTICODE_DISALLOWED syscall.Errno = 0x20000000 | 0xC0000000 | 0x240 + ERROR_AUTHENTICODE_TRUSTED_PUBLISHER syscall.Errno = 0x20000000 | 0xC0000000 | 0x241 + ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED syscall.Errno = 0x20000000 | 0xC0000000 | 0x242 + ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x243 + ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x244 + ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE syscall.Errno = 0x20000000 | 0xC0000000 | 0x245 + ERROR_DEVICE_INSTALLER_NOT_READY syscall.Errno = 0x20000000 | 0xC0000000 | 0x246 + ERROR_DRIVER_STORE_ADD_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x247 + ERROR_DEVICE_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x248 + ERROR_DRIVER_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x249 + ERROR_WRONG_INF_TYPE syscall.Errno = 0x20000000 | 0xC0000000 | 0x24A + ERROR_FILE_HASH_NOT_IN_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x24B + ERROR_DRIVER_STORE_DELETE_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x24C + ERROR_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = 0x20000000 | 0xC0000000 | 0x300 + EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW + ERROR_NO_DEFAULT_INTERFACE_DEVICE syscall.Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE + ERROR_INTERFACE_DEVICE_ACTIVE syscall.Errno = ERROR_DEVICE_INTERFACE_ACTIVE + ERROR_INTERFACE_DEVICE_REMOVED syscall.Errno = ERROR_DEVICE_INTERFACE_REMOVED + ERROR_NO_SUCH_INTERFACE_DEVICE syscall.Errno = ERROR_NO_SUCH_DEVICE_INTERFACE +) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go 
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 598e8ce58..3b6c5ae7d 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -174,6 +174,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys	FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW
 //sys	ExitProcess(exitcode uint32)
 //sys	IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process
+//sys	IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2?
 //sys	CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW
 //sys	ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error)
 //sys	WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error)
@@ -275,7 +276,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys	GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode
 //sys	SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode
 //sys	GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo
-//sys	SetConsoleCursorPosition(console Handle, position Coord) (err error) = kernel32.SetConsoleCursorPosition
+//sys	setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition
 //sys	WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
 //sys	ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
 //sys	CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
@@ -1480,3 +1481,7 @@ func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf
 		return languages, nil
 	}
 }
+
+func SetConsoleCursorPosition(console Handle, position Coord) error {
+	return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position))))
+}
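The new `//sys` line for `IsWow64Process2` ends with `?` because the API only exists on newer Windows releases; the generated wrapper therefore probes for the symbol at call time instead of failing at load time. A hedged, Windows-only usage sketch against the updated `golang.org/x/sys/windows` (`CurrentProcess` is the package's existing pseudo-handle helper):

```
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Both out-params receive an IMAGE_FILE_MACHINE_* value; processMachine
	// is IMAGE_FILE_MACHINE_UNKNOWN (0) when the process is not under WOW64.
	var processMachine, nativeMachine uint16
	err := windows.IsWow64Process2(windows.CurrentProcess(), &processMachine, &nativeMachine)
	if err != nil {
		// Expected on Windows releases that predate IsWow64Process2.
		fmt.Println("IsWow64Process2 not available:", err)
		return
	}
	fmt.Printf("process machine: %#x, native machine: %#x\n", processMachine, nativeMachine)
}
```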
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 5d0a54e69..cc5d74f56 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -248,6 +248,7 @@ var (
 	procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW")
 	procGetWindowsDirectoryW             = modkernel32.NewProc("GetWindowsDirectoryW")
 	procIsWow64Process                   = modkernel32.NewProc("IsWow64Process")
+	procIsWow64Process2                  = modkernel32.NewProc("IsWow64Process2")
 	procLoadLibraryExW                   = modkernel32.NewProc("LoadLibraryExW")
 	procLoadLibraryW                     = modkernel32.NewProc("LoadLibraryW")
 	procLocalFree                        = modkernel32.NewProc("LocalFree")
@@ -2055,6 +2056,18 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) {
 	return
 }
 
+func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) {
+	err = procIsWow64Process2.Find()
+	if err != nil {
+		return
+	}
+	r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) {
 	var _p0 *uint16
 	_p0, err = syscall.UTF16PtrFromString(libname)
@@ -2316,8 +2329,8 @@ func ResumeThread(thread Handle) (ret uint32, err error) {
 	return
 }
 
-func SetConsoleCursorPosition(console Handle, position Coord) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(*((*uint32)(unsafe.Pointer(&position)))), 0)
+func setConsoleCursorPosition(console Handle, position uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0)
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
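The `SetConsoleCursorPosition` split above exists because the Win32 `COORD` argument is a four-byte struct passed by value in a single 32-bit slot, which the `//sys`-generated code cannot express directly; the exported wrapper therefore reinterprets the `Coord` bytes as a `uint32`. A small self-contained check of that reinterpretation (a sketch, not part of the patch; `coord` mirrors `windows.Coord`, and the shift form assumes the little-endian layout used on Windows):

```
package main

import (
	"fmt"
	"unsafe"
)

// coord mirrors windows.Coord: two int16 fields, four bytes total.
type coord struct {
	X int16
	Y int16
}

func main() {
	c := coord{X: 3, Y: 7}
	// The wrapper's trick: view the struct's four bytes as one uint32.
	viaUnsafe := *(*uint32)(unsafe.Pointer(&c))
	// On little-endian machines this equals X in the low 16 bits and
	// Y in the high 16 bits.
	viaShifts := uint32(uint16(c.X)) | uint32(uint16(c.Y))<<16
	fmt.Printf("unsafe=%#x shifts=%#x equal=%v\n", viaUnsafe, viaShifts, viaUnsafe == viaShifts)
}
```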
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index 8ba225383..f65aad4ec 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -13,6 +13,7 @@ import (
 	"os"
 	"os/exec"
 	"regexp"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -133,6 +134,9 @@ type Invocation struct {
 	ModFlag    string
 	ModFile    string
 	Overlay    string
+	// If CleanEnv is set, the invocation will run only with the environment
+	// in Env, not starting with os.Environ.
+	CleanEnv   bool
 	Env        []string
 	WorkingDir string
 	Logf       func(format string, args ...interface{})
@@ -207,7 +211,10 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 	// The Go stdlib has a special feature where if the cwd and the PWD are the
 	// same node then it trusts the PWD, so by setting it in the env for the child
 	// process we fix up all the paths returned by the go command.
-	cmd.Env = append(os.Environ(), i.Env...)
+	if !i.CleanEnv {
+		cmd.Env = os.Environ()
+	}
+	cmd.Env = append(cmd.Env, i.Env...)
 	if i.WorkingDir != "" {
 		cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
 		cmd.Dir = i.WorkingDir
 	}
@@ -248,10 +255,19 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
 func cmdDebugStr(cmd *exec.Cmd) string {
 	env := make(map[string]string)
 	for _, kv := range cmd.Env {
-		split := strings.Split(kv, "=")
+		split := strings.SplitN(kv, "=", 2)
 		k, v := split[0], split[1]
 		env[k] = v
 	}
 
-	return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args)
+	var args []string
+	for _, arg := range cmd.Args {
+		quoted := strconv.Quote(arg)
+		if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
+			args = append(args, quoted)
+		} else {
+			args = append(args, arg)
+		}
+	}
+	return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
 }
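Two behavioral notes on the `invoke.go` hunks above: the new `CleanEnv` field lets a caller run the `go` command with only the explicitly supplied `Env` instead of inheriting `os.Environ()`, and `cmdDebugStr` now uses `strings.SplitN` because an environment value may itself contain `=`. A short sketch, not part of the patch, demonstrating why the `SplitN` change matters:

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	kv := "GOFLAGS=-ldflags=-w"

	// The old code split on every "=", shearing the value apart.
	split := strings.Split(kv, "=") // ["GOFLAGS", "-ldflags", "-w"]
	// The fixed code splits only on the first "=".
	splitN := strings.SplitN(kv, "=", 2) // ["GOFLAGS", "-ldflags=-w"]

	fmt.Println(split[1])  // "-ldflags": value truncated
	fmt.Println(splitN[1]) // "-ldflags=-w": full value preserved
}
```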
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b69eda674..553ee1356 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# cloud.google.com/go v0.71.0
+# cloud.google.com/go v0.72.0
 cloud.google.com/go
 cloud.google.com/go/compute/metadata
 cloud.google.com/go/iam
@@ -19,7 +19,7 @@ github.com/VictoriaMetrics/metrics
 # github.com/VictoriaMetrics/metricsql v0.7.2
 github.com/VictoriaMetrics/metricsql
 github.com/VictoriaMetrics/metricsql/binaryop
-# github.com/aws/aws-sdk-go v1.35.23
+# github.com/aws/aws-sdk-go v1.35.28
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
 github.com/aws/aws-sdk-go/aws/awserr
@@ -95,7 +95,7 @@ github.com/jmespath/go-jmespath
 github.com/jstemmer/go-junit-report
 github.com/jstemmer/go-junit-report/formatter
 github.com/jstemmer/go-junit-report/parser
-# github.com/klauspost/compress v1.11.2
+# github.com/klauspost/compress v1.11.3
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/gzip
@@ -115,7 +115,7 @@ github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal
 # github.com/prometheus/client_model v0.2.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.14.0
+# github.com/prometheus/common v0.15.0
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
@@ -179,7 +179,7 @@ golang.org/x/lint/golint
 # golang.org/x/mod v0.3.0
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20201031054903-ff519b6c9102
+# golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
 golang.org/x/net/http/httpguts
@@ -188,7 +188,7 @@ golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43
+# golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58
 golang.org/x/oauth2
 golang.org/x/oauth2/google
 golang.org/x/oauth2/internal
@@ -196,7 +196,7 @@ golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
 # golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.0.0-20201106081118-db71ae66460a
+# golang.org/x/sys v0.0.0-20201116161645-c061ba923fbb
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/unix
 golang.org/x/sys/windows
@@ -205,7 +205,7 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.0.0-20201105220310-78b158585360
+# golang.org/x/tools v0.0.0-20201116182000-1d699438d2cf
 golang.org/x/tools/cmd/goimports
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/gcexportdata
@@ -247,7 +247,7 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20201106154455-f9bfe239b0ba
+# google.golang.org/genproto v0.0.0-20201116144945-7adebfbe6a3f
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/iam/v1
 google.golang.org/genproto/googleapis/rpc/code