Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

This commit is contained in:
Aliaksandr Valialkin 2023-11-15 20:05:11 +01:00
commit b65a9f2057
44 changed files with 603 additions and 144 deletions

View file

@ -34,7 +34,7 @@ all: \
clean:
rm -rf bin/*
publish: package-base \
publish: \
publish-victoria-metrics \
publish-vmagent \
publish-vmalert \

View file

@ -338,7 +338,8 @@ which can be used as faster and less resource-hungry alternative to Prometheus.
## Grafana setup
Create [Prometheus datasource](http://docs.grafana.org/features/datasources/prometheus/) in Grafana with the following url:
Create [Prometheus datasource](https://grafana.com/docs/grafana/latest/datasources/prometheus/configure-prometheus-data-source/)
in Grafana with the following url:
```url
http://<victoriametrics-addr>:8428
@ -352,6 +353,9 @@ or [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
Alternatively, use VictoriaMetrics [datasource plugin](https://github.com/VictoriaMetrics/grafana-datasource) with support of extra features.
See more in [description](https://github.com/VictoriaMetrics/grafana-datasource#victoriametrics-data-source-for-grafana).
Creating a datasource may require [specific permissions](https://grafana.com/docs/grafana/latest/administration/data-source-management/).
If you don't see an option to create a data source, try contacting your system administrator.
## How to upgrade VictoriaMetrics
VictoriaMetrics is developed at a fast pace, so it is recommended to periodically check [the CHANGELOG page](https://docs.victoriametrics.com/CHANGELOG.html) and perform regular upgrades.

View file

@ -503,13 +503,16 @@ with [additional enhancements](#relabeling-enhancements). The relabeling can be
This relabeling can be debugged via `http://vmagent:8429/metric-relabel-debug` page. See [these docs](#relabel-debug) for details.
* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is used for modifying labels for metrics
and for dropping unneeded metrics before sending them to a particular `-remoteWrite.url`.
and for dropping unneeded metrics before sending them to the particular `-remoteWrite.url`.
This relabeling can be debugged via `http://vmagent:8429/metric-relabel-debug` page. See [these docs](#relabel-debug) for details.
All the files with relabeling configs can contain special placeholders in the form `%{ENV_VAR}`,
which are replaced by the corresponding environment variable values.
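As an illustration, here is a minimal Go sketch of such `%{ENV_VAR}` placeholder expansion. It is a hedged approximation of the described behavior, not vmagent's actual implementation:

```go
package main

import (
	"fmt"
	"os"
	"regexp"
)

// expandEnvPlaceholders replaces %{ENV_VAR} placeholders with the
// corresponding environment variable values (sketch only).
func expandEnvPlaceholders(cfg string) string {
	re := regexp.MustCompile(`%\{([^}]+)\}`)
	return re.ReplaceAllStringFunc(cfg, func(m string) string {
		return os.Getenv(m[2 : len(m)-1])
	})
}

func main() {
	os.Setenv("ENV_NAME", "prod")
	fmt.Println(expandEnvPlaceholders(`regex: "%{ENV_NAME}-.*"`))
	// Output: regex: "prod-.*"
}
```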
[Streaming aggregation](https://docs.victoriametrics.com/stream-aggregation.html), if configured,
is performed after applying all the relabeling stages mentioned above.
The following articles contain useful information about Prometheus relabeling:
* [Cookbook for common relabeling tasks](https://docs.victoriametrics.com/relabeling.html)

View file

@ -0,0 +1,12 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image as certs
RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 8429
ENTRYPOINT ["/vmalert-tool-prod"]
ARG TARGETARCH
COPY vmalert-tool-linux-${TARGETARCH}-prod ./vmalert-tool-prod

View file

@ -810,12 +810,11 @@ at least two times bigger than the resolution.
> Please note, data delay is inevitable in distributed systems. It is better to account for it than to ignore it.
By default, recently written samples to VictoriaMetrics [aren't visible for queries](https://docs.victoriametrics.com/keyConcepts.html#query-latency)
for up to 30s (see `-search.latencyOffset` command-line flag at vmselect). Such delay is needed to eliminate risk of
for up to 30s (see the `-search.latencyOffset` command-line flag at vmselect or VictoriaMetrics single-node). Such a delay is needed to eliminate the risk of
incomplete data at the moment of querying, due to the chance that metrics collectors won't be able to deliver the data in time.
To compensate the latency in timestamps for produced evaluation results, `-rule.evalDelay` is also set to 30s by default.
If you changed the `-search.latencyOffset` (cmd-line flag configured for VictoriaMetrics single-node or vmselect) value
or specified custom `latency_offset` param via [Group](#groups) and observed a delay in timestamps for produced
evaluation results - try changing `-rule.evalDelay` equal to `-search.latencyOffset`.
To compensate for this latency in the timestamps of produced evaluation results, `-rule.evalDelay` is also set to `30s` by default.
If you expect data to be delayed for longer intervals (it gets buffered or queued, or the network is sometimes slow),
consider increasing the `-rule.evalDelay` value accordingly.
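The interplay between the two flags can be sketched in Go as follows; this is a conceptual illustration under the default values, not vmalert's actual code:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Defaults: -search.latencyOffset=30s, -rule.evalDelay=30s.
	latencyOffset := 30 * time.Second
	evalDelay := 30 * time.Second

	// The rule is evaluated "in the past" by evalDelay, so the queried
	// time range ends where the window hidden by latencyOffset begins.
	evalTime := time.Now().Add(-evalDelay)
	visibleUntil := time.Now().Add(-latencyOffset)
	fmt.Println("evaluation timestamp:", evalTime.Format(time.RFC3339))
	fmt.Println("data visible until: ", visibleUntil.Format(time.RFC3339))
}
```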
### Alerts state

View file

@ -1472,10 +1472,10 @@ func getMinMaxInstantValues(tssCached, tssStart, tssEnd []*timeseries, f func(a,
mStart := make(map[string]*timeseries, len(tssStart))
for _, ts := range tssStart {
bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
if _, ok := m[string(bb.B)]; ok {
if _, ok := mStart[string(bb.B)]; ok {
logger.Panicf("BUG: duplicate series found: %s", &ts.MetricName)
}
m[string(bb.B)] = ts
mStart[string(bb.B)] = ts
tsCached := m[string(bb.B)]
if tsCached != nil && !math.IsNaN(tsCached.Values[0]) {
if !math.IsNaN(ts.Values[0]) {

View file

@ -65,7 +65,7 @@
"uid": "$ds"
},
"enable": true,
"expr": "sum(ALERTS{alertgroup=\"vmcluster\",alertstate=\"firing\",show_at=\"dashboard\"}) by(alertname)",
"expr": "sum(ALERTS{job=~\"$job\", instance=~\"$instance\", alertgroup=\"vmcluster\",alertstate=\"firing\",show_at=\"dashboard\"}) by(alertname)",
"iconColor": "red",
"name": "alerts",
"titleFormat": "{{alertname}}"
@ -1594,7 +1594,7 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Percentage of used memory (resident).\nThe application's performance will significantly degrade when memory usage is close to 100%.",
"description": "Percentage of used RSS memory (resident).\nThe RSS memory shows the amount of memory recently accessed by the application. It includes anonymous memory and data from recently accessed files (aka page cache).\nThe application's performance will significantly degrade when memory usage is close to 100%.\n\nClick on the line and choose Drilldown to show memory usage per instance",
"fieldConfig": {
"defaults": {
"color": {
@ -1706,7 +1706,7 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Share for memory allocated by the process itself. When memory usage reaches 100% it will be likely OOM-killed.",
"description": "Share for memory allocated by the process itself. When memory usage reaches 100% it will be likely OOM-killed.\nSafe memory usage % considered to be below 80%\n\nClick on the line and choose Drilldown to show memory usage per instance",
"fieldConfig": {
"defaults": {
"color": {

View file

@ -65,7 +65,7 @@
"uid": "$ds"
},
"enable": true,
"expr": "sum(ALERTS{alertgroup=\"vmsingle\",alertstate=\"firing\",show_at=\"dashboard\"}) by(alertname)",
"expr": "sum(ALERTS{job=~\"$job\", instance=~\"$instance\", alertgroup=\"vmsingle\",alertstate=\"firing\",show_at=\"dashboard\"}) by(alertname)",
"iconColor": "red",
"name": "alerts",
"titleFormat": "{{alertname}}"
@ -1485,7 +1485,7 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Percentage of used memory (resident).\nThe application's performance will significantly degrade when memory usage is close to 100%.",
"description": "Percentage of used RSS memory (resident).\nThe RSS memory shows the amount of memory recently accessed by the application. It includes anonymous memory and data from recently accessed files (aka page cache).\nThe application's performance will significantly degrade when memory usage is close to 100%.\n\nClick on the line and choose Drilldown to show memory usage per instance",
"fieldConfig": {
"defaults": {
"color": {
@ -1747,7 +1747,7 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Share for memory allocated by the process itself. When memory usage reaches 100% it will be likely OOM-killed.\nSafe memory usage % considered to be below 80%",
"description": "Share for memory allocated by the process itself. When memory usage reaches 100% it will be likely OOM-killed.\nSafe memory usage % considered to be below 80%\n\nClick on the line and choose Drilldown to show memory usage per instance",
"fieldConfig": {
"defaults": {
"color": {

View file

@ -66,7 +66,7 @@
"uid": "$ds"
},
"enable": true,
"expr": "sum(ALERTS{alertgroup=\"vmcluster\",alertstate=\"firing\",show_at=\"dashboard\"}) by(alertname)",
"expr": "sum(ALERTS{job=~\"$job\", instance=~\"$instance\", alertgroup=\"vmcluster\",alertstate=\"firing\",show_at=\"dashboard\"}) by(alertname)",
"iconColor": "red",
"name": "alerts",
"titleFormat": "{{alertname}}"
@ -1595,7 +1595,7 @@
"type": "victoriametrics-datasource",
"uid": "$ds"
},
"description": "Percentage of used memory (resident).\nThe application's performance will significantly degrade when memory usage is close to 100%.",
"description": "Percentage of used RSS memory (resident).\nThe RSS memory shows the amount of memory recently accessed by the application. It includes anonymous memory and data from recently accessed files (aka page cache).\nThe application's performance will significantly degrade when memory usage is close to 100%.\n\nClick on the line and choose Drilldown to show memory usage per instance",
"fieldConfig": {
"defaults": {
"color": {
@ -1707,7 +1707,7 @@
"type": "victoriametrics-datasource",
"uid": "$ds"
},
"description": "Share for memory allocated by the process itself. When memory usage reaches 100% it will be likely OOM-killed.",
"description": "Share for memory allocated by the process itself. When memory usage reaches 100% it will be likely OOM-killed.\nSafe memory usage % considered to be below 80%\n\nClick on the line and choose Drilldown to show memory usage per instance",
"fieldConfig": {
"defaults": {
"color": {

View file

@ -66,7 +66,7 @@
"uid": "$ds"
},
"enable": true,
"expr": "sum(ALERTS{alertgroup=\"vmsingle\",alertstate=\"firing\",show_at=\"dashboard\"}) by(alertname)",
"expr": "sum(ALERTS{job=~\"$job\", instance=~\"$instance\", alertgroup=\"vmsingle\",alertstate=\"firing\",show_at=\"dashboard\"}) by(alertname)",
"iconColor": "red",
"name": "alerts",
"titleFormat": "{{alertname}}"
@ -1486,7 +1486,7 @@
"type": "victoriametrics-datasource",
"uid": "$ds"
},
"description": "Percentage of used memory (resident).\nThe application's performance will significantly degrade when memory usage is close to 100%.",
"description": "Percentage of used RSS memory (resident).\nThe RSS memory shows the amount of memory recently accessed by the application. It includes anonymous memory and data from recently accessed files (aka page cache).\nThe application's performance will significantly degrade when memory usage is close to 100%.\n\nClick on the line and choose Drilldown to show memory usage per instance",
"fieldConfig": {
"defaults": {
"color": {
@ -1748,7 +1748,7 @@
"type": "victoriametrics-datasource",
"uid": "$ds"
},
"description": "Share for memory allocated by the process itself. When memory usage reaches 100% it will be likely OOM-killed.\nSafe memory usage % considered to be below 80%",
"description": "Share for memory allocated by the process itself. When memory usage reaches 100% it will be likely OOM-killed.\nSafe memory usage % considered to be below 80%\n\nClick on the line and choose Drilldown to show memory usage per instance",
"fieldConfig": {
"defaults": {
"color": {

View file

@ -1615,7 +1615,7 @@
"type": "victoriametrics-datasource",
"uid": "$ds"
},
"description": "Percentage of used memory (resident).\nThe application's performance will significantly degrade when memory usage is close to 100%.\n\nClick on the line and choose Drilldown to show Mem usage per instance",
"description": "Percentage of used RSS memory (resident).\nThe RSS memory shows the amount of memory recently accessed by the application. It includes anonymous memory and data from recently accessed files (aka page cache).\nThe application's performance will significantly degrade when memory usage is close to 100%.\n\nClick on the line and choose Drilldown to show memory usage per instance",
"fieldConfig": {
"defaults": {
"color": {

View file

@ -1614,7 +1614,7 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Percentage of used memory (resident).\nThe application's performance will significantly degrade when memory usage is close to 100%.\n\nClick on the line and choose Drilldown to show Mem usage per instance",
"description": "Percentage of used RSS memory (resident).\nThe RSS memory shows the amount of memory recently accessed by the application. It includes anonymous memory and data from recently accessed files (aka page cache).\nThe application's performance will significantly degrade when memory usage is close to 100%.\n\nClick on the line and choose Drilldown to show memory usage per instance",
"fieldConfig": {
"defaults": {
"color": {

View file

@ -28,8 +28,13 @@ The sandbox cluster installation is running under the constant load generated by
## tip
**vmalert's cmd-line flag `datasource.queryTimeAlignment` was deprecated and will have no effect anymore. It will be completely removed in next releases. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5049) and more detailed changes below.**
**vmalert's cmd-line flag `datasource.lookback` will be deprecated soon. Please use `-rule.evalDelay` command-line flag instead. It will have no effect in next release and be removed in future releases. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155).**
## [v1.95.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.95.0)
Released at 2023-11-15
**vmalert's cmd-line flag `-datasource.lookback` will be deprecated soon. Please use the `-rule.evalDelay` command-line flag instead and see more details on how to use it [here](https://docs.victoriametrics.com/vmalert.html#data-delay). The flag `-datasource.lookback` will have no effect in the next release and will be removed in future releases. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155).**
**vmalert's cmd-line flag `-datasource.queryTimeAlignment` was deprecated and no longer has any effect. It will be completely removed in upcoming releases. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5049) and the more detailed vmalert-related changes below.**
* SECURITY: upgrade Go builder from Go1.21.1 to Go1.21.4. See [the list of issues addressed in Go1.21.2](https://github.com/golang/go/issues?q=milestone%3AGo1.21.2+label%3ACherryPickApproved), [the list of issues addressed in Go1.21.3](https://github.com/golang/go/issues?q=milestone%3AGo1.21.3+label%3ACherryPickApproved) and [the list of issues addressed in Go1.21.4](https://github.com/golang/go/issues?q=milestone%3AGo1.21.4+label%3ACherryPickApproved).
@ -93,6 +98,8 @@ The sandbox cluster installation is running under the constant load generated by
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): increment `vmalert_remotewrite_dropped_rows_total` and `vmalert_remotewrite_dropped_bytes_total` metrics if remote-write client's buffer is overloaded. Before, these metrics were incremented only after unsuccessful HTTP calls.
* BUGFIX: `vmselect`: improve performance and memory usage during query processing on machines with big number of CPU cores. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5087).
* BUGFIX: dashboards: fix vminsert/vmstorage/vmselect metrics filtering when dashboard is used to display data from many sub-clusters with unique job names. Before, only one specific job could have been accounted for component-specific panels, instead of all available jobs for the component.
* BUGFIX: dashboards: respect `job` and `instance` filters for `alerts` annotation in cluster and single-node dashboards.
* BUGFIX: dashboards: update description for RSS and anonymous memory panels to be consistent for single-node, cluster and vmagent dashboards.
* BUGFIX: dashboards/vmalert: apply `desc` sorting in tooltips for vmalert dashboard in order to improve visibility of the outliers on graph.
* BUGFIX: dashboards/vmalert: properly apply time series filter for panel `No data errors`. Before, the panel didn't respect `job` or `instance` filters.
* BUGFIX: dashboards/vmalert: fix panel `Errors rate to Alertmanager` not showing any data due to wrong label filters.
@ -116,6 +123,7 @@ The sandbox cluster installation is running under the constant load generated by
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): correctly display query errors on [Explore Prometheus Metrics](https://docs.victoriametrics.com/#metrics-explorer) page. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5202) for details.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly handle trailing slash in the server URL. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5203).
* BUGFIX: [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html): correctly print error in logs when copying backup fails. Previously, error was displayed in metrics but was missing in logs.
* BUGFIX: fix panic, which could occur when [query tracing](https://docs.victoriametrics.com/#query-tracing) is enabled. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5319).
## [v1.94.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.94.0)
@ -159,6 +167,19 @@ Released at 2023-10-02
* BUGFIX: [vminsert](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): fix ingestion via [multitenant url](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy-via-labels) for opentsdbhttp. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5061). The bug has been introduced in [v1.93.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.2).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): fix support of legacy DataDog agent, which adds trailing slashes to urls. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5078). Thanks to @maxb for spotting the issue.
## [v1.93.8](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.8)
Released at 2023-11-15
**v1.93.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
The v1.93.x line will be supported for at least 12 months since the [v1.93.0](https://docs.victoriametrics.com/CHANGELOG.html#v1930) release.**
* SECURITY: upgrade Go builder from Go1.21.3 to Go1.21.4. See [the list of issues addressed in Go1.21.4](https://github.com/golang/go/issues?q=milestone%3AGo1.21.4+label%3ACherryPickApproved).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly apply [relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling) with `regex` values that start and end with `.+` or `.*` and contain alternate sub-regexps, for example `.+;|;.+` or `.*foo|bar|baz.*`. Previously such regexps were improperly parsed, which could result in unexpected relabeling results. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5297).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly decode Snappy-encoded data blocks received via [VictoriaMetrics remote_write protocol](https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5301).
* BUGFIX: fix panic, which could occur when [query tracing](https://docs.victoriametrics.com/#query-tracing) is enabled. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5319).
## [v1.93.7](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.7)
Released at 2023-11-02
@ -630,6 +651,19 @@ Released at 2023-02-24
* BUGFIX: properly parse timestamps in milliseconds when [ingesting data via OpenTSDB telnet put protocol](https://docs.victoriametrics.com/#sending-data-via-telnet-put-protocol). Previously timestamps in milliseconds were mistakenly multiplied by 1000. Thanks to @Droxenator for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3810).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): do not add extrapolated points outside the real points when using [interpolate()](https://docs.victoriametrics.com/MetricsQL.html#interpolate) function. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3816).
## [v1.87.11](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.11)
Released at 2023-11-14
**v1.87.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
The v1.87.x line will be supported for at least 12 months since the [v1.87.0](https://docs.victoriametrics.com/CHANGELOG.html#v1870) release.**
* SECURITY: upgrade Go builder from Go1.21.3 to Go1.21.4. See [the list of issues addressed in Go1.21.4](https://github.com/golang/go/issues?q=milestone%3AGo1.21.4+label%3ACherryPickApproved).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly apply [relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling) with `regex` values that start and end with `.+` or `.*` and contain alternate sub-regexps, for example `.+;|;.+` or `.*foo|bar|baz.*`. Previously such regexps were improperly parsed, which could result in unexpected relabeling results. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5297).
* BUGFIX: fix panic, which could occur when [query tracing](https://docs.victoriametrics.com/#query-tracing) is enabled. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5319).
* BUGFIX: [vmstorage](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): log warning about switching to ReadOnly mode only on state change. Before, vmstorage would log this warning every 1s. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5159) for details.
## [v1.87.10](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.10)
Released at 2023-10-16

View file

@ -484,7 +484,10 @@ during the config update / version upgrade. In this case the following strategy
since they need to process higher load when some of `vmstorage` nodes are temporarily unavailable in the cluster.
It is possible to reduce resource usage spikes by running more `vminsert` nodes and by passing bigger values
to `-storage.vminsertConnsShutdownDuration` command-line flag at `vmstorage` nodes.
Make sure that the `-storage.vminsertConnsShutdownDuration` is smaller than the graceful timeout configured at the system which manages `vmstorage`
In this case `vmstorage` increases the interval between closing individual `vminsert` connections during graceful shutdown.
This reduces the data ingestion slowdown during rollout restarts.
Make sure that the `-storage.vminsertConnsShutdownDuration` is smaller than the graceful shutdown timeout configured at the system which manages `vmstorage`
(e.g. Docker, Kubernetes, systemd, etc.). Otherwise the system may kill `vmstorage` node before it finishes gradual closing of `vminsert` connections.
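A rough Go sketch of the pacing implied here, using hypothetical numbers (the actual logic lives in `ConnsMap.CloseAll`, shown later in this diff):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical values: with -storage.vminsertConnsShutdownDuration=25s
	// and 50 open vminsert connections, one connection is closed every 500ms.
	shutdownDuration := 25 * time.Second
	openConns := 50
	interval := shutdownDuration / time.Duration(openConns)
	fmt.Printf("closing one vminsert connection every %s\n", interval)
}
```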
### Minimum downtime strategy

View file

@ -341,7 +341,8 @@ which can be used as faster and less resource-hungry alternative to Prometheus.
## Grafana setup
Create [Prometheus datasource](http://docs.grafana.org/features/datasources/prometheus/) in Grafana with the following url:
Create [Prometheus datasource](https://grafana.com/docs/grafana/latest/datasources/prometheus/configure-prometheus-data-source/)
in Grafana with the following url:
```url
http://<victoriametrics-addr>:8428
@ -355,6 +356,9 @@ or [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
Alternatively, use VictoriaMetrics [datasource plugin](https://github.com/VictoriaMetrics/grafana-datasource) with support of extra features.
See more in [description](https://github.com/VictoriaMetrics/grafana-datasource#victoriametrics-data-source-for-grafana).
Creating a datasource may require [specific permissions](https://grafana.com/docs/grafana/latest/administration/data-source-management/).
If you don't see an option to create a data source, try contacting your system administrator.
## How to upgrade VictoriaMetrics
VictoriaMetrics is developed at a fast pace, so it is recommended to periodically check [the CHANGELOG page](https://docs.victoriametrics.com/CHANGELOG.html) and perform regular upgrades.

View file

@ -349,7 +349,8 @@ which can be used as faster and less resource-hungry alternative to Prometheus.
## Grafana setup
Create [Prometheus datasource](http://docs.grafana.org/features/datasources/prometheus/) in Grafana with the following url:
Create [Prometheus datasource](https://grafana.com/docs/grafana/latest/datasources/prometheus/configure-prometheus-data-source/)
in Grafana with the following url:
```url
http://<victoriametrics-addr>:8428
@ -363,6 +364,9 @@ or [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
Alternatively, use VictoriaMetrics [datasource plugin](https://github.com/VictoriaMetrics/grafana-datasource) with support of extra features.
See more in [description](https://github.com/VictoriaMetrics/grafana-datasource#victoriametrics-data-source-for-grafana).
Creating a datasource may require [specific permissions](https://grafana.com/docs/grafana/latest/administration/data-source-management/).
If you don't see an option to create a data source, try contacting your system administrator.
## How to upgrade VictoriaMetrics
VictoriaMetrics is developed at a fast pace, so it is recommended to periodically check [the CHANGELOG page](https://docs.victoriametrics.com/CHANGELOG.html) and perform regular upgrades.

View file

@ -28,7 +28,7 @@ The relabeling is mostly used for the following tasks:
* Removing prefixes from target label names. See [how to remove prefixes from target label names](#how-to-remove-prefixes-from-target-label-names).
* Removing some labels from discovered targets. See [how to remove labels from targets](#how-to-remove-labels-from-targets).
* Dropping some metrics during scrape. See [how to drop metrics during scrape](#how-to-drop-metrics-during-scrape).
* Removing some labels from scraped metrics. See [how to remove labels from metrics](#how-to-remove-labels-from-metrics).
* Removing some labels from scraped metrics. See [how to remove labels from scraped metrics](#how-to-remove-labels-from-scraped-metrics).
* Removing some labels from metrics matching some [series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors).
See [how to remove labels from metrics subset](#how-to-remove-labels-from-metrics-subset).

View file

@ -15,7 +15,8 @@ aliases:
[vmagent](https://docs.victoriametrics.com/vmagent.html) and [single-node VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html)
can aggregate incoming [samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) in streaming mode by time and by labels before data is written to remote storage.
The aggregation is applied to all the metrics received via any [supported data ingestion protocol](https://docs.victoriametrics.com/#how-to-import-time-series-data)
and/or scraped from [Prometheus-compatible targets](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter).
and/or scraped from [Prometheus-compatible targets](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
after applying all the configured [relabeling stages](https://docs.victoriametrics.com/vmagent.html#relabeling).
Stream aggregation ignores timestamps associated with the input [samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
It expects that the ingested samples have timestamps close to the current time.
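A small Go sketch of this behavior (conceptual only, with a hypothetical `1m` aggregation interval): the sample's own timestamp is ignored, and the sample is assigned to the bucket of the moment it arrives:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The bucket is chosen by the sample's arrival time, not by the
	// timestamp carried inside the sample itself.
	interval := time.Minute
	arrival := time.Now()
	bucket := arrival.Truncate(interval)
	fmt.Println("sample aggregated into the bucket starting at", bucket)
}
```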
@ -389,7 +390,10 @@ Both input and output metric names can be modified if needed via relabeling acco
## Relabeling
It is possible to apply [arbitrary relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling) to input and output metrics
during stream aggregation via `input_relabel_configs` and `output_relabel_config` options in [stream aggregation config](#stream-aggregation-config).
during stream aggregation via `input_relabel_configs` and `output_relabel_configs` options in [stream aggregation config](#stream-aggregation-config).
Relabeling rules inside `input_relabel_configs` are applied to samples matching the `match` filters.
Relabeling rules inside `output_relabel_configs` are applied to aggregated samples before sending them to the remote storage.
For example, the following config removes the `:1m_sum_samples` suffix added [to the output metric name](#output-metric-names):

View file

@ -514,13 +514,16 @@ with [additional enhancements](#relabeling-enhancements). The relabeling can be
This relabeling can be debugged via `http://vmagent:8429/metric-relabel-debug` page. See [these docs](#relabel-debug) for details.
* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is used for modifying labels for metrics
and for dropping unneeded metrics before sending them to a particular `-remoteWrite.url`.
and for dropping unneeded metrics before sending them to the particular `-remoteWrite.url`.
This relabeling can be debugged via `http://vmagent:8429/metric-relabel-debug` page. See [these docs](#relabel-debug) for details.
All the files with relabeling configs can contain special placeholders in the form `%{ENV_VAR}`,
which are replaced by the corresponding environment variable values.
[Streaming aggregation](https://docs.victoriametrics.com/stream-aggregation.html), if configured,
is performed after applying all the relabeling stages mentioned above.
The following articles contain useful information about Prometheus relabeling:
* [Cookbook for common relabeling tasks](https://docs.victoriametrics.com/relabeling.html)

View file

@ -821,12 +821,11 @@ at least two times bigger than the resolution.
> Please note, data delay is inevitable in distributed systems. It is better to account for it than to ignore it.
By default, recently written samples to VictoriaMetrics [aren't visible for queries](https://docs.victoriametrics.com/keyConcepts.html#query-latency)
for up to 30s (see `-search.latencyOffset` command-line flag at vmselect). Such delay is needed to eliminate risk of
for up to 30s (see the `-search.latencyOffset` command-line flag at vmselect or VictoriaMetrics single-node). Such a delay is needed to eliminate the risk of
incomplete data at the moment of querying, due to the chance that metrics collectors won't be able to deliver the data in time.
To compensate the latency in timestamps for produced evaluation results, `-rule.evalDelay` is also set to 30s by default.
If you changed the `-search.latencyOffset` (cmd-line flag configured for VictoriaMetrics single-node or vmselect) value
or specified custom `latency_offset` param via [Group](#groups) and observed a delay in timestamps for produced
evaluation results - try changing `-rule.evalDelay` equal to `-search.latencyOffset`.
To compensate for this latency in the timestamps of produced evaluation results, `-rule.evalDelay` is also set to `30s` by default.
If you expect data to be delayed for longer intervals (it gets buffered or queued, or the network is sometimes slow),
consider increasing the `-rule.evalDelay` value accordingly.
### Alerts state

go.mod (14 changed lines)
View file

@ -17,8 +17,8 @@ require (
github.com/VictoriaMetrics/metrics v1.24.0
github.com/VictoriaMetrics/metricsql v0.69.0
github.com/aws/aws-sdk-go-v2 v1.22.2
github.com/aws/aws-sdk-go-v2/config v1.24.0
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6
github.com/aws/aws-sdk-go-v2/config v1.25.0
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.7
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1
github.com/bmatcuk/doublestar/v4 v4.6.1
github.com/cespare/xxhash/v2 v2.2.0
@ -30,7 +30,7 @@ require (
github.com/golang/snappy v0.0.4
github.com/googleapis/gax-go/v2 v2.12.0
github.com/influxdata/influxdb v1.11.2
github.com/klauspost/compress v1.17.2
github.com/klauspost/compress v1.17.3
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/oklog/ulid v1.3.1
@ -60,9 +60,9 @@ require (
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/aws/aws-sdk-go v1.47.10 // indirect
github.com/aws/aws-sdk-go v1.47.11 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.15.2 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.16.0 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 // indirect
@ -113,8 +113,8 @@ require (
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 // indirect
go.opentelemetry.io/collector/semconv v0.88.0 // indirect
go.opentelemetry.io/collector/pdata v1.0.0-rcv0018 // indirect
go.opentelemetry.io/collector/semconv v0.89.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect
go.opentelemetry.io/otel v1.20.0 // indirect
go.opentelemetry.io/otel/metric v1.20.0 // indirect

go.sum (28 changed lines)
View file

@ -85,20 +85,20 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.47.10 h1:cvufN7WkD1nlOgpRopsmxKQlFp5X1MfyAw4r7BBORQc=
github.com/aws/aws-sdk-go v1.47.10/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.47.11 h1:Dol+MA+hQblbnXUI3Vk9qvoekU6O1uDEuAItezjiWNQ=
github.com/aws/aws-sdk-go v1.47.11/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.22.2 h1:lV0U8fnhAnPz8YcdmZVV60+tr6CakHzqA6P8T46ExJI=
github.com/aws/aws-sdk-go-v2 v1.22.2/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 h1:hHgLiIrTRtddC0AKcJr5s7i/hLgcpTt+q/FKxf1Zayk=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0/go.mod h1:w4I/v3NOWgD+qvs1NPEwhd++1h3XPHFaVxasfY6HlYQ=
github.com/aws/aws-sdk-go-v2/config v1.24.0 h1:4LEk29JO3w+y9dEo/5Tq5QTP7uIEw+KQrKiHOs4xlu4=
github.com/aws/aws-sdk-go-v2/config v1.24.0/go.mod h1:11nNDAuK86kOUHeuEQo8f3CkcV5xuUxvPwFjTZE/PnQ=
github.com/aws/aws-sdk-go-v2/credentials v1.15.2 h1:rKH7khRMxPdD0u3dHecd0Q7NOVw3EUe7AqdkUOkiOGI=
github.com/aws/aws-sdk-go-v2/credentials v1.15.2/go.mod h1:tXM8wmaeAhfC7nZoCxb0FzM/aRaB1m1WQ7x0qlBLq80=
github.com/aws/aws-sdk-go-v2/config v1.25.0 h1:WCwAqyrM/kqYi6pHjVpq/w2pLydeGKv8Af9vdtO3ciM=
github.com/aws/aws-sdk-go-v2/config v1.25.0/go.mod h1:1QMnmhoWcR6957nC1MUUhhOLx9NOGFSVNG3Mag9vLU4=
github.com/aws/aws-sdk-go-v2/credentials v1.16.0 h1:sSEHkXonpZBSPcyUBDRlZjxOi14qM/UK7/vfKhGwmTo=
github.com/aws/aws-sdk-go-v2/credentials v1.16.0/go.mod h1:tXM8wmaeAhfC7nZoCxb0FzM/aRaB1m1WQ7x0qlBLq80=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 h1:G5KawTAkyHH6WyKQCdHiW4h3PmAXNJpOgwKg3H7sDRE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3/go.mod h1:hugKmSFnZB+HgNI1sYGT14BUPZkO6alC/e0AWu+0IAQ=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6 h1:IpQbitxCZeC64C1ALz9QZu6AHHWundnU2evQ9xbp5k8=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6/go.mod h1:27jIVQK+al9s0yTo3pkMdahRinbscqSC6zNGfNWXPZc=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.7 h1:HDsYN1Qm6fFDKzaGfYVGGBNkifZAHWVBrzrILGhpdIU=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.7/go.mod h1:998wVfFSQY1hGhRXfv6QYGY08qi/L7Apr1XmJSWS5YI=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 h1:AaQsr5vvGR7rmeSWBtTCcw16tT9r51mWijuCQhzLnq8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2/go.mod h1:o1IiRn7CWocIFTXJjGKJDOwxv1ibL53NpcvcqGWyRBA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 h1:UZx8SXZ0YtzRiALzYAWcjb9Y9hZUR7MBKaBQ5ouOjPs=
@ -319,8 +319,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -459,10 +459,10 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4=
go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE=
go.opentelemetry.io/collector/semconv v0.88.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0018 h1:a2IHOZKphRzPagcvOHQHHUE0DlITFSKlIBwaWhPZpl4=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0018/go.mod h1:oNIcTRyEJYIfMcRYyyh5lquDU0Vl+ktTL6ka+p+dYvg=
go.opentelemetry.io/collector/semconv v0.89.0 h1:Sw+MiI3/oiYIY+ebkanZsOaBxXMx3sqnH1/6NaD4rLQ=
go.opentelemetry.io/collector/semconv v0.89.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U=
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=

View file

@ -46,7 +46,7 @@ func (cm *ConnsMap) Delete(c net.Conn) {
// CloseAll gradually closes all the cm conns during the given shutdownDuration.
func (cm *ConnsMap) CloseAll(shutdownDuration time.Duration) {
cm.mu.Lock()
conns := make([]net.Conn, len(cm.m))
conns := make([]net.Conn, 0, len(cm.m))
for c := range cm.m {
conns = append(conns, c)
delete(cm.m, c)
@ -82,8 +82,7 @@ func (cm *ConnsMap) CloseAll(shutdownDuration time.Duration) {
remoteAddr := conns[0].RemoteAddr().String()
_ = conns[0].Close()
logger.Infof("closed %s connection %s", cm.clientName, remoteAddr)
conns = conns[1:]
for _, c := range conns {
for _, c := range conns[1:] {
time.Sleep(shutdownInterval)
remoteAddr := c.RemoteAddr().String()
_ = c.Close()

View file

@ -24,7 +24,7 @@ var (
loggerTimezone = flag.String("loggerTimezone", "UTC", "Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. "+
"For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local")
disableTimestamps = flag.Bool("loggerDisableTimestamps", false, "Whether to disable writing timestamps in logs")
maxLogArgLen = flag.Int("loggerMaxArgLen", 500, "The maximum length of a single logged argument. Longer arguments are replaced with 'arg_start..arg_end', "+
maxLogArgLen = flag.Int("loggerMaxArgLen", 1000, "The maximum length of a single logged argument. Longer arguments are replaced with 'arg_start..arg_end', "+
"where 'arg_start' and 'arg_end' is prefix and suffix of the arg with the length not exceeding -loggerMaxArgLen / 2")
errorsPerSecondLimit = flag.Int("loggerErrorsPerSecondLimit", 0, `Per-second limit on the number of ERROR messages. If more than the given number of errors are emitted per second, the remaining errors are suppressed. Zero values disable the rate limit`)
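A hedged sketch of the truncation this flag describes; it mirrors the flag's documentation rather than the exact implementation:

```go
package main

import "fmt"

// truncateArg sketches the truncation described by -loggerMaxArgLen:
// a long argument becomes 'arg_start..arg_end', where each part is at
// most maxLen/2 bytes long.
func truncateArg(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	half := maxLen / 2
	return s[:half] + ".." + s[len(s)-half:]
}

func main() {
	fmt.Println(truncateArg("abcdefghij", 6)) // abc..hij
}
```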

View file

@ -7,6 +7,7 @@ import (
"fmt"
"io"
"strings"
"sync/atomic"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
@ -22,6 +23,14 @@ var denyQueryTracing = flag.Bool("denyQueryTracing", false, "Whether to disable
// Tracer may contain sub-tracers (branches) in order to build tree-like execution order.
// Call Tracer.NewChild func for adding sub-tracer.
type Tracer struct {
// isDone is set to true after Done* call.
//
// It is used for determining whether it is safe to print the trace.
// It is unsafe to print the trace when it isn't closed yet, since it may be modified
// by concurrently running goroutines.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5319
isDone atomic.Bool
// startTime is the time when Tracer was created
startTime time.Time
// doneTime is the time when Done or Donef was called
@ -30,7 +39,7 @@ type Tracer struct {
message string
// children is a list of children Tracer objects
children []*Tracer
// span contains span for the given Tracer. It is added via Tracer.AddSpan().
// span contains span for the given Tracer. It is added via Tracer.AddJSON().
// If span is non-nil, then the remaining fields aren't used.
span *span
}
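For reference, a usage sketch of this API. The constructor signature is taken from the tests later in this diff, and the import path assumes `lib/querytracer` (consistent with the `lib/buildinfo` import above):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

func main() {
	qt := querytracer.New(true, "parent query")
	child := qt.NewChild("fetch blocks")
	child.Printf("fetched %d blocks", 3)
	child.Done() // sets isDone on the child
	qt.Done()    // sets isDone on the parent
	// Printing is safe now; an unfinished child would be rendered as a
	// "missing Tracer.Done() call" span instead of racing with writers.
	fmt.Println(qt.String())
}
```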
@ -68,7 +77,7 @@ func (t *Tracer) NewChild(format string, args ...interface{}) *Tracer {
if t == nil {
return nil
}
if !t.doneTime.IsZero() {
if t.isDone.Load() {
panic(fmt.Errorf("BUG: NewChild() cannot be called after Donef(%q) call", t.message))
}
child := &Tracer{
@ -87,10 +96,11 @@ func (t *Tracer) Done() {
if t == nil {
return
}
if !t.doneTime.IsZero() {
if t.isDone.Load() {
panic(fmt.Errorf("BUG: Donef(%q) already called", t.message))
}
t.doneTime = time.Now()
t.isDone.Store(true)
}
// Donef appends the given fmt.Sprintf(format, args...) message to t and finishes it.
@ -101,11 +111,12 @@ func (t *Tracer) Donef(format string, args ...interface{}) {
if t == nil {
return
}
if !t.doneTime.IsZero() {
if t.isDone.Load() {
panic(fmt.Errorf("BUG: Donef(%q) already called", t.message))
}
t.message += ": " + fmt.Sprintf(format, args...)
t.doneTime = time.Now()
t.isDone.Store(true)
}
// Printf adds new fmt.Sprintf(format, args...) message to t.
@ -115,7 +126,7 @@ func (t *Tracer) Printf(format string, args ...interface{}) {
if t == nil {
return
}
if !t.doneTime.IsZero() {
if t.isDone.Load() {
panic(fmt.Errorf("BUG: Printf() cannot be called after Done(%q) call", t.message))
}
now := time.Now()
@ -124,6 +135,7 @@ func (t *Tracer) Printf(format string, args ...interface{}) {
doneTime: now,
message: fmt.Sprintf(format, args...),
}
child.isDone.Store(true)
t.children = append(t.children, child)
}
@ -153,6 +165,9 @@ func (t *Tracer) AddJSON(jsonTrace []byte) error {
// String returns string representation of t.
//
// String must be called when t methods aren't called by other goroutines.
//
// It is safe calling String() when child tracers aren't finished yet.
// In this case they will contain the corresponding message.
func (t *Tracer) String() string {
if t == nil {
return ""
@ -166,6 +181,9 @@ func (t *Tracer) String() string {
// ToJSON returns JSON representation of t.
//
// ToJSON must be called when t methods aren't called by other goroutines.
//
// It is safe calling ToJSON() when child tracers aren't finished yet.
// In this case they will contain the corresponding message.
func (t *Tracer) ToJSON() string {
if t == nil {
return ""
@ -187,6 +205,12 @@ func (t *Tracer) toSpanInternal(prevTime time.Time) (*span, time.Time) {
if t.span != nil {
return t.span, prevTime
}
if !t.isDone.Load() {
s := &span{
Message: fmt.Sprintf("missing Tracer.Done() call for the trace with message=%s", t.message),
}
return s, prevTime
}
if t.doneTime == t.startTime {
// a single-line trace
d := t.startTime.Sub(prevTime)
@ -199,10 +223,6 @@ func (t *Tracer) toSpanInternal(prevTime time.Time) (*span, time.Time) {
// tracer with children
msg := t.message
doneTime := t.doneTime
if doneTime.IsZero() {
msg += ": missing Tracer.Done() call"
doneTime = t.getLastChildDoneTime(t.startTime)
}
d := doneTime.Sub(t.startTime)
var children []*span
var sChild *span
@ -219,14 +239,6 @@ func (t *Tracer) toSpanInternal(prevTime time.Time) (*span, time.Time) {
return s, doneTime
}
func (t *Tracer) getLastChildDoneTime(defaultTime time.Time) time.Time {
if len(t.children) == 0 {
return defaultTime
}
lastChild := t.children[len(t.children)-1]
return lastChild.getLastChildDoneTime(lastChild.startTime)
}
// span represents a single trace span
type span struct {
// DurationMsec is the duration for the current trace span in milliseconds.

View file

@ -1,7 +1,9 @@
package querytracer
import (
"fmt"
"regexp"
"sync"
"testing"
)
@ -137,11 +139,39 @@ func TestTraceMissingDonef(t *testing.T) {
qtChild.Printf("child printf")
qt.Printf("another parent printf")
s := qt.String()
sExpected := `- 0ms: : parent: missing Tracer.Done() call
| - 0ms: parent printf
| - 0ms: child: missing Tracer.Done() call
| | - 0ms: child printf
| - 0ms: another parent printf
sExpected := `- 0.000ms: missing Tracer.Done() call for the trace with message=: parent
`
if !areEqualTracesSkipDuration(s, sExpected) {
t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", s, sExpected)
}
}
func TestTraceConcurrent(t *testing.T) {
qt := New(true, "parent")
childLocal := qt.NewChild("local")
childLocal.Printf("abc")
childLocal.Done()
var wg sync.WaitGroup
for i := 0; i < 3; i++ {
child := qt.NewChild(fmt.Sprintf("child %d", i))
wg.Add(1)
go func() {
for j := 0; j < 100; j++ {
child.Printf(fmt.Sprintf("message %d", j))
}
wg.Done()
}()
}
qt.Done()
// Verify that it is safe to call qt.String() when child traces aren't done yet
s := qt.String()
wg.Wait()
sExpected := `- 0.008ms: : parent
| - 0.002ms: local
| | - 0.000ms: abc
| - 0.000ms: missing Tracer.Done() call for the trace with message=child 0
| - 0.000ms: missing Tracer.Done() call for the trace with message=child 1
| - 0.000ms: missing Tracer.Done() call for the trace with message=child 2
`
if !areEqualTracesSkipDuration(s, sExpected) {
t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", s, sExpected)

View file

@ -1,3 +1,8 @@
# v1.25.0 (2023-11-14)
* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.24.0 (2023-11-13)
* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section.

View file

@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.24.0"
const goModuleVersion = "1.25.0"

View file

@ -3,7 +3,10 @@ package config
import (
"context"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
@ -21,11 +24,33 @@ import (
const (
// valid credential source values
credSourceEc2Metadata = "Ec2InstanceMetadata"
credSourceEnvironment = "Environment"
credSourceECSContainer = "EcsContainer"
credSourceEc2Metadata = "Ec2InstanceMetadata"
credSourceEnvironment = "Environment"
credSourceECSContainer = "EcsContainer"
httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
)
// direct representation of the IPv4 address for the ECS container
// "169.254.170.2"
var ecsContainerIPv4 net.IP = []byte{
169, 254, 170, 2,
}
// direct representation of the IPv4 address for the EKS container
// "169.254.170.23"
var eksContainerIPv4 net.IP = []byte{
169, 254, 170, 23,
}
// direct representation of the IPv6 address for the EKS container
// "fd00:ec2::23"
var eksContainerIPv6 net.IP = []byte{
0xFD, 0, 0xE, 0xC2,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0x23,
}
var (
ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing
)
@ -222,6 +247,36 @@ func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *Shar
return nil
}
// isAllowedHost allows host to be loopback or known ECS/EKS container IPs
//
// host can either be an IP address OR an unresolved hostname - resolution will
// be automatically performed in the latter case
func isAllowedHost(host string) (bool, error) {
if ip := net.ParseIP(host); ip != nil {
return isIPAllowed(ip), nil
}
addrs, err := lookupHostFn(host)
if err != nil {
return false, err
}
for _, addr := range addrs {
if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
return false, nil
}
}
return true, nil
}
func isIPAllowed(ip net.IP) bool {
return ip.IsLoopback() ||
ip.Equal(ecsContainerIPv4) ||
ip.Equal(eksContainerIPv4) ||
ip.Equal(eksContainerIPv6)
}
func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error {
var resolveErr error
@ -232,10 +287,12 @@ func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpoint
host := parsed.Hostname()
if len(host) == 0 {
resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL")
} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, loopbackErr)
} else if !isLoopback {
resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback hosts are allowed", host)
} else if parsed.Scheme == "http" {
if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, allowHostErr)
} else if !isAllowedHost {
resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed", host)
}
}
}
@ -252,6 +309,16 @@ func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToke
if len(authToken) != 0 {
options.AuthorizationToken = authToken
}
if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
options.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
var contents []byte
var err error
if contents, err = ioutil.ReadFile(authFilePath); err != nil {
return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
}
return string(contents), nil
})
}
options.APIOptions = cfg.APIOptions
if cfg.Retryer != nil {
options.Retryer = cfg.Retryer()

View file

@ -1,3 +1,7 @@
# v1.16.0 (2023-11-14)
* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider.
# v1.15.2 (2023-11-09)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -36,6 +36,7 @@ import (
"context"
"fmt"
"net/http"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client"
@ -81,7 +82,37 @@ type Options struct {
// Optional authorization token value if set will be used as the value of
// the Authorization header of the endpoint credential request.
//
// When constructed from environment, the provider will use the value of
// AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token
//
// Will be overridden if AuthorizationTokenProvider is configured
AuthorizationToken string
// Optional auth provider func to dynamically load the auth token from a file
// every time a credential is retrieved
//
// When constructed from environment, the provider will read and use the content
// of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable
// as the auth token every time credentials are retrieved
//
// Will override AuthorizationToken if configured
AuthorizationTokenProvider AuthTokenProvider
}
// AuthTokenProvider defines an interface to dynamically load a value to be passed
// for the Authorization header of a credentials request.
type AuthTokenProvider interface {
GetToken() (string, error)
}
// TokenProviderFunc is a func type implementing AuthTokenProvider interface
// and enables customizing token provider behavior
type TokenProviderFunc func() (string, error)
// GetToken func retrieves auth token according to TokenProviderFunc implementation
func (p TokenProviderFunc) GetToken() (string, error) {
return p()
}
// New returns a credentials Provider for retrieving AWS credentials
@ -132,5 +163,30 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
}
func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) {
return p.client.GetCredentials(ctx, &client.GetCredentialsInput{AuthorizationToken: p.options.AuthorizationToken})
authToken, err := p.resolveAuthToken()
if err != nil {
return nil, fmt.Errorf("resolve auth token: %v", err)
}
return p.client.GetCredentials(ctx, &client.GetCredentialsInput{
AuthorizationToken: authToken,
})
}
func (p *Provider) resolveAuthToken() (string, error) {
authToken := p.options.AuthorizationToken
var err error
if p.options.AuthorizationTokenProvider != nil {
authToken, err = p.options.AuthorizationTokenProvider.GetToken()
if err != nil {
return "", err
}
}
if strings.ContainsAny(authToken, "\r\n") {
return "", fmt.Errorf("authorization token contains invalid newline sequence")
}
return authToken, nil
}
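A hedged usage sketch of the new token provider wiring shown above. The endpoint URL and token file path are illustrative only; the `New`, `Options`, and `TokenProviderFunc` names come from this diff and the `endpointcreds` package:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
)

func main() {
	// Hypothetical endpoint and token path for illustration.
	p := endpointcreds.New("http://169.254.170.23/v1/credentials",
		func(o *endpointcreds.Options) {
			o.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(
				func() (string, error) {
					b, err := os.ReadFile("/var/run/container/token")
					if err != nil {
						return "", err
					}
					// resolveAuthToken above rejects tokens containing \r or \n.
					return strings.TrimSpace(string(b)), nil
				})
		})
	creds, err := p.Retrieve(context.Background())
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println("access key:", creds.AccessKeyID)
}
```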

View file

@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.15.2"
const goModuleVersion = "1.16.0"

View file

@ -1,3 +1,7 @@
# v1.13.7 (2023-11-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.6 (2023-11-13)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package manager
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.13.6"
const goModuleVersion = "1.13.7"

View file

@ -31,6 +31,8 @@ package endpointcreds
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
@ -69,7 +71,37 @@ type Provider struct {
// Optional authorization token value if set will be used as the value of
// the Authorization header of the endpoint credential request.
//
// When constructed from environment, the provider will use the value of
// AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token
//
// Will be overridden if AuthorizationTokenProvider is configured
AuthorizationToken string
// Optional auth provider func to dynamically load the auth token from a file
// every time a credential is retrieved
//
// When constructed from environment, the provider will read and use the content
// of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable
// as the auth token every time credentials are retrieved
//
// Will override AuthorizationToken if configured
AuthorizationTokenProvider AuthTokenProvider
}
// AuthTokenProvider defines an interface to dynamically load a value to be passed
// for the Authorization header of a credentials request.
type AuthTokenProvider interface {
GetToken() (string, error)
}
// TokenProviderFunc is a func type implementing AuthTokenProvider interface
// and enables customizing token provider behavior
type TokenProviderFunc func() (string, error)
// GetToken func retrieves auth token according to TokenProviderFunc implementation
func (p TokenProviderFunc) GetToken() (string, error) {
return p()
}
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
@ -164,7 +196,20 @@ func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error
req := p.Client.NewRequest(op, nil, out)
req.SetContext(ctx)
req.HTTPRequest.Header.Set("Accept", "application/json")
if authToken := p.AuthorizationToken; len(authToken) != 0 {
authToken := p.AuthorizationToken
var err error
if p.AuthorizationTokenProvider != nil {
authToken, err = p.AuthorizationTokenProvider.GetToken()
if err != nil {
return nil, fmt.Errorf("get authorization token: %v", err)
}
}
if strings.ContainsAny(authToken, "\r\n") {
return nil, fmt.Errorf("authorization token contains invalid newline sequence")
}
if len(authToken) != 0 {
req.HTTPRequest.Header.Set("Authorization", authToken)
}
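For this provider, `TokenProviderFunc` makes the file-backed pattern a one-liner. A fragment, not a complete program (`cfg` and `handlers` are assumed to come from the caller's session, and the endpoint URL and token path are hypothetical):

```go
// The token file is re-read on every credential retrieval, unlike the
// static AuthorizationToken field, which is captured once.
provider := endpointcreds.NewProviderClient(cfg, handlers,
	"http://169.254.170.23/v1/credentials", // hypothetical full URI
	func(p *endpointcreds.Provider) {
		p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
			b, err := ioutil.ReadFile("/var/run/secrets/pod-token") // hypothetical path
			if err != nil {
				return "", err
			}
			return string(b), nil
		})
	},
)
```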

View file

@ -9,6 +9,7 @@ package defaults
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
@ -115,9 +116,31 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro
const (
httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
)
// direct representation of the IPv4 address for the ECS container
// "169.254.170.2"
var ecsContainerIPv4 net.IP = []byte{
169, 254, 170, 2,
}
// direct representation of the IPv4 address for the EKS container
// "169.254.170.23"
var eksContainerIPv4 net.IP = []byte{
169, 254, 170, 23,
}
// direct representation of the IPv6 address for the EKS container
// "fd00:ec2::23"
var eksContainerIPv6 net.IP = []byte{
0xFD, 0, 0xE, 0xC2,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0x23,
}
// RemoteCredProvider returns a credentials provider for the default remote
// endpoints such as EC2 or ECS Roles.
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
@ -135,19 +158,22 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
var lookupHostFn = net.LookupHost
func isLoopbackHost(host string) (bool, error) {
ip := net.ParseIP(host)
if ip != nil {
return ip.IsLoopback(), nil
// isAllowedHost reports whether host is a loopback address or one of the
// known ECS/EKS container IPs
//
// host can be either an IP address or an unresolved hostname; resolution is
// performed automatically in the latter case
func isAllowedHost(host string) (bool, error) {
if ip := net.ParseIP(host); ip != nil {
return isIPAllowed(ip), nil
}
// Host is not an IP address, perform lookup
addrs, err := lookupHostFn(host)
if err != nil {
return false, err
}
for _, addr := range addrs {
if !net.ParseIP(addr).IsLoopback() {
if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
return false, nil
}
}
@ -155,6 +181,13 @@ func isLoopbackHost(host string) (bool, error) {
return true, nil
}
func isIPAllowed(ip net.IP) bool {
return ip.IsLoopback() ||
ip.Equal(ecsContainerIPv4) ||
ip.Equal(eksContainerIPv4) ||
ip.Equal(eksContainerIPv6)
}
func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
var errMsg string
@ -165,10 +198,12 @@ func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string)
host := aws.URLHostname(parsed)
if len(host) == 0 {
errMsg = "unable to parse host from local HTTP cred provider URL"
} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
} else if !isLoopback {
errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
} else if parsed.Scheme == "http" {
if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr)
} else if !isAllowedHost {
errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host)
}
}
}
@ -190,6 +225,15 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede
func(p *endpointcreds.Provider) {
p.ExpiryWindow = 5 * time.Minute
p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
if contents, err := ioutil.ReadFile(authFilePath); err != nil {
return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
} else {
return string(contents), nil
}
})
}
},
)
}
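Taken together, the allow-list now admits loopback plus the three fixed container addresses. A standalone restatement of the check (the function name is hypothetical; the addresses are the ones defined above):

```go
package main

import (
	"fmt"
	"net"
)

// isAllowedCredentialIP restates the isIPAllowed logic above: a full-URI
// credentials endpoint must be loopback or one of the fixed container IPs.
func isAllowedCredentialIP(ip net.IP) bool {
	return ip.IsLoopback() ||
		ip.Equal(net.IPv4(169, 254, 170, 2)) || // ECS container endpoint
		ip.Equal(net.IPv4(169, 254, 170, 23)) || // EKS container endpoint
		ip.Equal(net.ParseIP("fd00:ec2::23")) // EKS IPv6 endpoint
}

func main() {
	for _, h := range []string{"127.0.0.1", "169.254.170.23", "10.0.0.5"} {
		fmt.Println(h, isAllowedCredentialIP(net.ParseIP(h)))
	}
}
```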

View file

@ -6229,6 +6229,12 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -6250,6 +6256,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -6304,6 +6313,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@ -7002,6 +7014,14 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
endpointKey{
Region: "ap-south-2",
}: endpoint{
Hostname: "compute-optimizer.ap-south-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-south-2",
},
},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@ -7018,6 +7038,22 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{
Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-3",
},
},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{
Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-4",
},
},
endpointKey{
Region: "ca-central-1",
}: endpoint{
@ -7034,6 +7070,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
endpointKey{
Region: "eu-central-2",
}: endpoint{
Hostname: "compute-optimizer.eu-central-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-central-2",
},
},
endpointKey{
Region: "eu-north-1",
}: endpoint{
@ -7050,6 +7094,14 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
endpointKey{
Region: "eu-south-2",
}: endpoint{
Hostname: "compute-optimizer.eu-south-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-south-2",
},
},
endpointKey{
Region: "eu-west-1",
}: endpoint{
@ -7074,6 +7126,22 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
endpointKey{
Region: "il-central-1",
}: endpoint{
Hostname: "compute-optimizer.il-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "il-central-1",
},
},
endpointKey{
Region: "me-central-1",
}: endpoint{
Hostname: "compute-optimizer.me-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "me-central-1",
},
},
endpointKey{
Region: "me-south-1",
}: endpoint{
@ -35266,12 +35334,42 @@ var awsusgovPartition = partition{
},
"appconfigdata": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
},
},
},
"application-autoscaling": service{

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.47.10"
const SDKVersion = "1.47.11"

View file

@ -16,6 +16,14 @@ This package provides various compression algorithms.
# changelog
* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
* s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
* s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838

View file

@ -212,7 +212,7 @@ func (s *Scratch) writeCount() error {
previous0 bool
charnum uint16
maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
// Write Table Size
bitStream = uint32(tableLog - minTablelog)

View file

@ -335,7 +335,16 @@ func (w *GzipResponseWriter) Close() error {
ce = w.Header().Get(contentEncoding)
cr = w.Header().Get(contentRange)
)
// fmt.Println(len(w.buf) == 0, len(w.buf) < w.minSize, len(w.Header()[HeaderNoCompression]) != 0, ce != "", cr != "", !w.contentTypeFilter(ct))
if ct == "" {
ct = http.DetectContentType(w.buf)
// Handles the intended case of setting a nil Content-Type (as for http/server or http/fs)
// Set the header only if the key does not exist
if _, ok := w.Header()[contentType]; w.setContentType && !ok {
w.Header().Set(contentType, ct)
}
}
if len(w.buf) == 0 || len(w.buf) < w.minSize || len(w.Header()[HeaderNoCompression]) != 0 || ce != "" || cr != "" || !w.contentTypeFilter(ct) {
// GZIP not triggered, write out regular response.
return w.startPlain()
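The new branch falls back to `http.DetectContentType` when the handler never set a Content-Type itself. The stdlib sniffer considers at most the first 512 bytes and always returns a valid MIME type, for example:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// DetectContentType sniffs at most 512 bytes and falls back to
	// "application/octet-stream" when it cannot decide.
	fmt.Println(http.DetectContentType([]byte("<html><body>hi</body></html>")))
	// Output: text/html; charset=utf-8
}
```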

View file

@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) {
if m.rep < 0 {
ofc = ofCode(uint32(m.s-m.offset) + 3)
} else {
ofc = ofCode(uint32(m.rep))
ofc = ofCode(uint32(m.rep) & 3)
}
// Cost, excluding
ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
@ -227,7 +227,7 @@ encodeLoop:
}
}
l := 4 + e.matchlen(s+4, offset+4, src)
if rep < 0 {
if true {
// Extend candidate match backwards as far as possible.
tMin := s - e.maxMatchOff
if tMin < 0 {
@ -282,6 +282,7 @@ encodeLoop:
// Load next and check...
e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
index0 := s + 1
// Look far ahead, unless we have a really long match already...
if best.length < goodEnough {
@ -357,19 +358,16 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)
// Index old s + 1 -> s - 1
index0 := s + 1
s = best.s + best.length
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, best.length)
}
break encodeLoop
}
// Index skipped...
end := s
if s > sLimit+4 {
end = sLimit + 4
}
off := index0 + e.cur
for index0 < s {
for index0 < end {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@ -378,6 +376,7 @@ encodeLoop:
off++
index0++
}
switch best.rep {
case 2, 4 | 1:
offset1, offset2 = offset2, offset1
@ -386,12 +385,17 @@ encodeLoop:
case 4 | 3:
offset1, offset2, offset3 = offset1-1, offset1, offset2
}
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, best.length)
}
break encodeLoop
}
continue
}
// A 4-byte match has been found. Update recent offsets.
// We'll later see if more than 4 bytes.
index0 := s + 1
s = best.s
t := best.offset
offset1, offset2, offset3 = s-t, offset1, offset2
@ -419,19 +423,25 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
nextEmit = s
if s >= sLimit {
break encodeLoop
// Index old s + 1 -> s - 1 or sLimit
end := s
if s > sLimit-4 {
end = sLimit - 4
}
// Index old s + 1 -> s - 1
for index0 < s {
off := index0 + e.cur
for index0 < end {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
index0++
off++
}
if s >= sLimit {
break encodeLoop
}
}

View file

@ -145,7 +145,7 @@ encodeLoop:
var t int32
// We allow the encoder to optionally turn off repeat offsets across blocks
canRepeat := len(blk.sequences) > 2
var matched int32
var matched, index0 int32
for {
if debugAsserts && canRepeat && offset1 == 0 {
@ -162,6 +162,7 @@ encodeLoop:
off := s + e.cur
e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
index0 = s + 1
if canRepeat {
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@ -258,7 +259,6 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
index0 := s + repOff2
s += lenght + repOff2
nextEmit = s
if s >= sLimit {
@ -498,15 +498,15 @@ encodeLoop:
}
// Index match start+1 (long) -> s - 1
index0 := s - l + 1
off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
off += 2
}
cv = load6432(src, s)
@ -672,7 +672,7 @@ encodeLoop:
var t int32
// We allow the encoder to optionally turn off repeat offsets across blocks
canRepeat := len(blk.sequences) > 2
var matched int32
var matched, index0 int32
for {
if debugAsserts && canRepeat && offset1 == 0 {
@ -691,6 +691,7 @@ encodeLoop:
e.markLongShardDirty(nextHashL)
e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
e.markShortShardDirty(nextHashS)
index0 = s + 1
if canRepeat {
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@ -726,7 +727,6 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
index0 := s + repOff
s += lenght + repOff
nextEmit = s
@ -790,7 +790,6 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
index0 := s + repOff2
s += lenght + repOff2
nextEmit = s
if s >= sLimit {
@ -1024,18 +1023,18 @@ encodeLoop:
}
// Index match start+1 (long) -> s - 1
index0 := s - l + 1
off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
off += 2
}
cv = load6432(src, s)

vendor/modules.txt vendored
View file

@ -110,7 +110,7 @@ github.com/VividCortex/ewma
# github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
## explicit; go 1.15
github.com/alecthomas/units
# github.com/aws/aws-sdk-go v1.47.10
# github.com/aws/aws-sdk-go v1.47.11
## explicit; go 1.19
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/auth/bearer
@ -182,10 +182,10 @@ github.com/aws/aws-sdk-go-v2/internal/timeconv
## explicit; go 1.19
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi
# github.com/aws/aws-sdk-go-v2/config v1.24.0
# github.com/aws/aws-sdk-go-v2/config v1.25.0
## explicit; go 1.19
github.com/aws/aws-sdk-go-v2/config
# github.com/aws/aws-sdk-go-v2/credentials v1.15.2
# github.com/aws/aws-sdk-go-v2/credentials v1.16.0
## explicit; go 1.19
github.com/aws/aws-sdk-go-v2/credentials
github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds
@ -198,7 +198,7 @@ github.com/aws/aws-sdk-go-v2/credentials/stscreds
## explicit; go 1.19
github.com/aws/aws-sdk-go-v2/feature/ec2/imds
github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config
# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.6
# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.7
## explicit; go 1.19
github.com/aws/aws-sdk-go-v2/feature/s3/manager
# github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2
@ -399,8 +399,8 @@ github.com/jpillora/backoff
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
# github.com/klauspost/compress v1.17.2
## explicit; go 1.18
# github.com/klauspost/compress v1.17.3
## explicit; go 1.19
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@ -573,7 +573,7 @@ go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/propagation
go.opencensus.io/trace/tracestate
# go.opentelemetry.io/collector/pdata v1.0.0-rcv0017
# go.opentelemetry.io/collector/pdata v1.0.0-rcv0018
## explicit; go 1.20
go.opentelemetry.io/collector/pdata/internal
go.opentelemetry.io/collector/pdata/internal/data
@ -590,7 +590,7 @@ go.opentelemetry.io/collector/pdata/internal/otlp
go.opentelemetry.io/collector/pdata/pcommon
go.opentelemetry.io/collector/pdata/pmetric
go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp
# go.opentelemetry.io/collector/semconv v0.88.0
# go.opentelemetry.io/collector/semconv v0.89.0
## explicit; go 1.20
go.opentelemetry.io/collector/semconv/v1.6.1
# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0