Merge branch 'public-single-node' into victorialogs-wip

Aliaksandr Valialkin 2024-05-22 23:08:21 +02:00
commit 984438b084
No known key found for this signature in database
GPG key ID: 52C003EE2BCDB9EB
184 changed files with 2584 additions and 1392 deletions

@ -2912,7 +2912,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
The maximum size in bytes of a single Prometheus remote_write API request
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
-maxLabelValueLen int
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 1024)
-maxLabelsPerTimeseries int
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
-memory.allowedBytes size

@ -101,3 +101,10 @@ victoria-logs-windows-amd64:
victoria-logs-pure:
	APP_NAME=victoria-logs $(MAKE) app-local-pure
run-victoria-logs:
	mkdir -p victoria-logs-data
	DOCKER_OPTS='-v $(shell pwd)/victoria-logs-data:/victoria-logs-data' \
		APP_NAME=victoria-logs \
		ARGS='' \
		$(MAKE) run-via-docker

@ -1,13 +1,13 @@
{
"files": {
"main.css": "./static/css/main.bc07cc78.css",
"main.js": "./static/js/main.8e7757ef.js",
"main.css": "./static/css/main.1c2f4a93.css",
"main.js": "./static/js/main.c3285306.js",
"static/js/685.bebe1265.chunk.js": "./static/js/685.bebe1265.chunk.js",
"static/media/MetricsQL.md": "./static/media/MetricsQL.da86c2db4f0b05e286b0.md",
"static/media/MetricsQL.md": "./static/media/MetricsQL.df7574389d8f8bbcf0c7.md",
"index.html": "./index.html"
},
"entrypoints": [
"static/css/main.bc07cc78.css",
"static/js/main.8e7757ef.js"
"static/css/main.1c2f4a93.css",
"static/js/main.c3285306.js"
]
}

@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.8e7757ef.js"></script><link href="./static/css/main.bc07cc78.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.c3285306.js"></script><link href="./static/css/main.1c2f4a93.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@ -79,6 +79,8 @@ The list of MetricsQL features on top of PromQL:
* [Series selectors](https://docs.victoriametrics.com/keyconcepts/#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
selects series with `{env="prod",job="a"}` or `{env="dev",job="b"}` labels.
See [these docs](https://docs.victoriametrics.com/keyconcepts/#filtering-by-multiple-or-filters) for details.
* Support for matching against multiple numeric constants via `q == (C1, ..., CN)` and `q != (C1, ..., CN)` syntax. For example, `status_code == (300, 301, 304)`
returns `status_code` metrics with one of `300`, `301` or `304` values.
* Support for `group_left(*)` and `group_right(*)` for copying all the labels from time series on the `one` side
of [many-to-one operations](https://prometheus.io/docs/prometheus/latest/querying/operators/#many-to-one-and-one-to-many-vector-matches).
The copied label names may clash with the existing label names, so MetricsQL provides an ability to add prefix to the copied metric names
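As an illustration of the `q == (C1, ..., CN)` syntax described above, a query of the following shape (the `status_code` metric name is hypothetical) should behave the same as chaining the comparisons with `or`:

```metricsql
# keep only series whose current value is 300, 301 or 304
status_code == (300, 301, 304)
```

which is shorthand for `(status_code == 300) or (status_code == 301) or (status_code == 304)`.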
@ -152,9 +154,9 @@ MetricsQL provides the following functions:
### Rollup functions
**Rollup functions** (aka range functions or window functions) calculate rollups over **raw samples**
**Rollup functions** (aka range functions or window functions) calculate rollups over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window for the [selected time series](https://docs.victoriametrics.com/keyconcepts/#filtering).
For example, `avg_over_time(temperature[24h])` calculates the average temperature over raw samples for the last 24 hours.
For example, `avg_over_time(temperature[24h])` calculates the average temperature over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the last 24 hours.
Additional details:
@ -184,7 +186,7 @@ The list of supported rollup functions:
#### absent_over_time
`absent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1
if the given lookbehind window `d` doesn't contain raw samples. Otherwise, it returns an empty result.
if the given lookbehind window `d` doesn't contain [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples). Otherwise, it returns an empty result.
This function is supported by PromQL.
@ -193,7 +195,7 @@ See also [present_over_time](#present_over_time).
#### aggr_over_time
`aggr_over_time(("rollup_func1", "rollup_func2", ...), series_selector[d])` is a [rollup function](#rollup-functions),
which calculates all the listed `rollup_func*` for raw samples on the given lookbehind window `d`.
which calculates all the listed `rollup_func*` for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`.
The calculations are performed individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
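A sketch of typical usage, assuming a hypothetical `temperature` gauge:

```metricsql
# min, max and average temperature per series over the last hour, in a single query
aggr_over_time(("min_over_time", "max_over_time", "avg_over_time"), temperature[1h])
```

Each listed rollup function should yield its own output series per input series.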
@ -203,7 +205,7 @@ would calculate [min_over_time](#min_over_time), [max_over_time](#max_over_time)
#### ascent_over_time
`ascent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates
ascent of raw sample values on the given lookbehind window `d`. The calculations are performed individually
ascent of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples) values on the given lookbehind window `d`. The calculations are performed individually
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
This function is useful for tracking height gains in GPS tracking. Metric names are stripped from the resulting rollups.
@ -215,7 +217,7 @@ See also [descent_over_time](#descent_over_time).
#### avg_over_time
`avg_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average value
over raw samples on the given lookbehind window `d` per each time series returned
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
This function is supported by PromQL.
@ -225,7 +227,7 @@ See also [median_over_time](#median_over_time).
#### changes
`changes(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of times
the raw samples changed on the given lookbehind window `d` per each time series returned
the [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) changed on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Unlike `changes()` in Prometheus it takes into account the change from the last sample before the given lookbehind window `d`.
@ -240,7 +242,7 @@ See also [changes_prometheus](#changes_prometheus).
#### changes_prometheus
`changes_prometheus(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of times
the raw samples changed on the given lookbehind window `d` per each time series returned
the [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) changed on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
It doesn't take into account the change from the last sample before the given lookbehind window `d` in the same way as Prometheus does.
@ -254,7 +256,7 @@ See also [changes](#changes).
#### count_eq_over_time
`count_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
`count_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`, which are equal to `eq`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -264,7 +266,7 @@ See also [count_over_time](#count_over_time), [share_eq_over_time](#share_eq_ove
#### count_gt_over_time
`count_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
`count_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`, which are bigger than `gt`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
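For example, a query of this shape (the metric name and threshold are illustrative) counts how many raw samples exceeded 0.5 during the last hour, per series:

```metricsql
# number of samples above 0.5 in the trailing hour
count_gt_over_time(request_duration_seconds[1h], 0.5)
```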
@ -274,7 +276,7 @@ See also [count_over_time](#count_over_time) and [share_gt_over_time](#share_gt_
#### count_le_over_time
`count_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
`count_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`, which don't exceed `le`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -284,7 +286,7 @@ See also [count_over_time](#count_over_time) and [share_le_over_time](#share_le_
#### count_ne_over_time
`count_ne_over_time(series_selector[d], ne)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
`count_ne_over_time(series_selector[d], ne)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`, which aren't equal to `ne`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -294,7 +296,7 @@ See also [count_over_time](#count_over_time).
#### count_over_time
`count_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw samples
`count_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -305,7 +307,7 @@ See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_
#### count_values_over_time
`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of raw samples
`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
with the same value over the given lookbehind window and stores the counts in a time series with an additional `label`, which contains each initial value.
The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
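A hypothetical example, assuming a gauge whose value is an HTTP status code:

```metricsql
# how many times each distinct status value was observed during the last 10 minutes,
# with the observed value exposed via the "status" label
count_values_over_time("status", http_response_status_code[10m])
```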
@ -315,8 +317,8 @@ See also [count_eq_over_time](#count_eq_over_time), [count_values](#count_values
#### decreases_over_time
`decreases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw sample value decreases
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`decreases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
value decreases over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -324,8 +326,8 @@ See also [increases_over_time](#increases_over_time).
#### default_rollup
`default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
value on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
@ -375,7 +377,7 @@ See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).
#### deriv_fast
`deriv_fast(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivative
using the first and the last raw samples on the given lookbehind window `d` per each time series returned
using the first and the last [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -384,8 +386,8 @@ See also [deriv](#deriv) and [ideriv](#ideriv).
#### descent_over_time
`descent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates descent of raw sample values
on the given lookbehind window `d`. The calculations are performed individually per each time series returned
`descent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates descent of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values on the given lookbehind window `d`. The calculations are performed individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
This function is useful for tracking height loss in GPS tracking.
@ -396,8 +398,8 @@ See also [ascent_over_time](#ascent_over_time).
#### distinct_over_time
`distinct_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the number of distinct raw sample values
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`distinct_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the number of unique [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -416,15 +418,15 @@ See also [lifetime](#lifetime) and [lag](#lag).
#### first_over_time
`first_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the first raw sample value
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`first_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the first [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
value on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
See also [last_over_time](#last_over_time) and [tfirst_over_time](#tfirst_over_time).
#### geomean_over_time
`geomean_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [geometric mean](https://en.wikipedia.org/wiki/Geometric_mean)
over raw samples on the given lookbehind window `d` per each time series returned
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -432,8 +434,8 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
#### histogram_over_time
`histogram_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates
[VictoriaMetrics histogram](https://godoc.org/github.com/VictoriaMetrics/metrics#Histogram) over raw samples on the given lookbehind window `d`.
It is calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
[VictoriaMetrics histogram](https://godoc.org/github.com/VictoriaMetrics/metrics#Histogram) over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`. It is calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
The resulting histograms are useful to pass to [histogram_quantile](#histogram_quantile) for calculating quantiles
over multiple [gauges](https://docs.victoriametrics.com/keyconcepts/#gauge).
For example, the following query calculates median temperature by country over the last 24 hours:
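The referenced example block falls outside this hunk; in the MetricsQL docs it looks approximately like this:

```metricsql
# median (0.5 quantile) temperature per country over the last 24 hours
histogram_quantile(0.5, sum(histogram_over_time(temperature[24h])) by (vmrange, country))
```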
@ -457,7 +459,7 @@ See also [hoeffding_bound_lower](#hoeffding_bound_lower).
#### holt_winters
`holt_winters(series_selector[d], sf, tf)` is a [rollup function](#rollup-functions), which calculates Holt-Winters value
(aka [double exponential smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing#Double_exponential_smoothing)) for raw samples
(aka [double exponential smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing#Double_exponential_smoothing)) for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
over the given lookbehind window `d` using the given smoothing factor `sf` and the given trend factor `tf`.
Both `sf` and `tf` must be in the range `[0...1]`. It is expected that the [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering)
returns time series of [gauge type](https://docs.victoriametrics.com/keyconcepts/#gauge).
@ -468,7 +470,7 @@ See also [range_linear_regression](#range_linear_regression).
#### idelta
`idelta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between the last two raw samples
`idelta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between the last two [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -479,7 +481,8 @@ See also [delta](#delta).
#### ideriv
`ideriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the per-second derivative based on the last two raw samples
`ideriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the per-second derivative based
on the last two [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
over the given lookbehind window `d`. The derivative is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -522,8 +525,8 @@ while [increase](#increase) ignores the first value in a series if it is too big
#### increases_over_time
`increases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw sample value increases
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`increases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
value increases over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -531,14 +534,15 @@ See also [decreases_over_time](#decreases_over_time).
#### integrate
`integrate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the integral over raw samples on the given lookbehind window `d`
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`integrate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the integral over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
#### irate
`irate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the "instant" per-second increase rate over the last two raw samples
`irate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the "instant" per-second increase rate over
the last two [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).
@ -560,8 +564,8 @@ See also [lifetime](#lifetime) and [duration_over_time](#duration_over_time).
#### last_over_time
`last_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`last_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
value on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
This function is supported by PromQL.
@ -579,13 +583,14 @@ See also [duration_over_time](#duration_over_time) and [lag](#lag).
#### mad_over_time
`mad_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation)
over raw samples on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
See also [mad](#mad), [range_mad](#range_mad) and [outlier_iqr_over_time](#outlier_iqr_over_time).
#### max_over_time
`max_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the maximum value over raw samples
`max_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the maximum value over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
This function is supported by PromQL.
@ -594,7 +599,7 @@ See also [tmax_over_time](#tmax_over_time).
#### median_over_time
`median_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates median value over raw samples
`median_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates median value over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -602,7 +607,7 @@ See also [avg_over_time](#avg_over_time).
#### min_over_time
`min_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the minimum value over raw samples
`min_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the minimum value over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
This function is supported by PromQL.
@ -612,15 +617,16 @@ See also [tmin_over_time](#tmin_over_time).
#### mode_over_time
`mode_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [mode](https://en.wikipedia.org/wiki/Mode_(statistics))
for raw samples on the given lookbehind window `d`. It is calculated individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering). It is expected that raw sample values are discrete.
for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`. It is calculated individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering). It is expected that [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values are discrete.
#### outlier_iqr_over_time
`outlier_iqr_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last sample on the given lookbehind window `d`
if its value is either smaller than the `q25-1.5*iqr` or bigger than `q75+1.5*iqr` where:
- `iqr` is an [Interquartile range](https://en.wikipedia.org/wiki/Interquartile_range) over raw samples on the lookbehind window `d`
- `q25` and `q75` are 25th and 75th [percentiles](https://en.wikipedia.org/wiki/Percentile) over raw samples on the lookbehind window `d`.
- `iqr` is an [Interquartile range](https://en.wikipedia.org/wiki/Interquartile_range) over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the lookbehind window `d`
- `q25` and `q75` are 25th and 75th [percentiles](https://en.wikipedia.org/wiki/Percentile) over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the lookbehind window `d`.
The `outlier_iqr_over_time()` is useful for detecting anomalies in gauge values based on the previous history of values.
For example, `outlier_iqr_over_time(memory_usage_bytes[1h])` triggers when `memory_usage_bytes` suddenly goes outside the usual value range for the last hour.
@ -630,8 +636,8 @@ See also [outliers_iqr](#outliers_iqr).
#### predict_linear
`predict_linear(series_selector[d], t)` is a [rollup function](#rollup-functions), which calculates the value `t` seconds in the future using
linear interpolation over raw samples on the given lookbehind window `d`. The predicted value is calculated individually per each time series
returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
linear interpolation over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`.
The predicted value is calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
This function is supported by PromQL.
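For instance, a query along these lines (hypothetical metric name) estimates free disk space four hours from now based on the last hour of samples:

```metricsql
# projected free bytes in 4 hours, per filesystem series
predict_linear(node_filesystem_free_bytes[1h], 4 * 3600)
```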
@ -639,7 +645,7 @@ See also [range_linear_regression](#range_linear_regression).
#### present_over_time
`present_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1 if there is at least a single raw sample
`present_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1 if there is at least a single [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`. Otherwise, an empty result is returned.
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -648,7 +654,7 @@ This function is supported by PromQL.
#### quantile_over_time
`quantile_over_time(phi, series_selector[d])` is a [rollup function](#rollup-functions), which calculates `phi`-quantile over raw samples
`quantile_over_time(phi, series_selector[d])` is a [rollup function](#rollup-functions), which calculates `phi`-quantile over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
The `phi` value must be in the range `[0...1]`.
@ -659,7 +665,7 @@ See also [quantiles_over_time](#quantiles_over_time).
#### quantiles_over_time
`quantiles_over_time("phiLabel", phi1, ..., phiN, series_selector[d])` is a [rollup function](#rollup-functions), which calculates `phi*`-quantiles
over raw samples on the given lookbehind window `d` per each time series returned
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
The function returns individual series per each `phi*` with `{phiLabel="phi*"}` label. `phi*` values must be in the range `[0...1]`.
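An illustrative call (metric name assumed), producing one series per requested quantile, distinguished by the `phi` label:

```metricsql
# 50th, 90th and 99th percentiles over the last 5 minutes
quantiles_over_time("phi", 0.5, 0.9, 0.99, request_duration_seconds[5m])
```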
@ -667,7 +673,7 @@ See also [quantile_over_time](#quantile_over_time).
#### range_over_time
`range_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates value range over raw samples
`range_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates value range over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
E.g. it calculates `max_over_time(series_selector[d]) - min_over_time(series_selector[d])`.
@ -692,7 +698,7 @@ See also [irate](#irate) and [rollup_rate](#rollup_rate).
#### rate_over_sum
`rate_over_sum(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second rate over the sum of raw samples
`rate_over_sum(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second rate over the sum of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`. The calculations are performed individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -711,7 +717,7 @@ This function is supported by PromQL.
#### rollup
`rollup(series_selector[d])` is a [rollup function](#rollup-functions), which calculates `min`, `max` and `avg` values for raw samples
`rollup(series_selector[d])` is a [rollup function](#rollup-functions), which calculates `min`, `max` and `avg` values for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
These values are calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -721,7 +727,8 @@ See also [label_match](#label_match).
#### rollup_candlestick
`rollup_candlestick(series_selector[d])` is a [rollup function](#rollup-functions), which calculates `open`, `high`, `low` and `close` values (aka OHLC)
over raw samples on the given lookbehind window `d` and returns them in time series with `rollup="open"`, `rollup="high"`, `rollup="low"` and `rollup="close"` additional labels.
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` and returns them in time series
with `rollup="open"`, `rollup="high"`, `rollup="low"` and `rollup="close"` additional labels.
The calculations are performed individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering). This function is useful for financial applications.
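A sketch of typical usage with a hypothetical price gauge:

```metricsql
# open, high, low and close price over daily windows, per series
rollup_candlestick(stock_price_usd[1d])
```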
@ -730,7 +737,7 @@ See also [label_match](#label_match).
#### rollup_delta
`rollup_delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates differences between adjacent raw samples
`rollup_delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates differences between adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated differences
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -745,8 +752,8 @@ See also [rollup_increase](#rollup_increase).
#### rollup_deriv
`rollup_deriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivatives
for adjacent raw samples on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated per-second derivatives
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
for adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` and returns `min`, `max` and `avg` values
for the calculated per-second derivatives and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result and without adding a label.
@ -756,7 +763,7 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
#### rollup_increase
`rollup_increase(series_selector[d])` is a [rollup function](#rollup-functions), which calculates increases for adjacent raw samples
`rollup_increase(series_selector[d])` is a [rollup function](#rollup-functions), which calculates increases for adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated increases
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -768,7 +775,8 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
#### rollup_rate
`rollup_rate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second change rates for adjacent raw samples
`rollup_rate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second change rates
for adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated per-second change rates
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
@ -785,7 +793,7 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
#### rollup_scrape_interval
`rollup_scrape_interval(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the interval in seconds between
adjacent raw samples on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated interval
adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated interval
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -796,7 +804,8 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
#### scrape_interval
`scrape_interval(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average interval in seconds between raw samples
`scrape_interval(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average interval in seconds
between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -805,7 +814,8 @@ See also [rollup_scrape_interval](#rollup_scrape_interval).
#### share_gt_over_time
`share_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`) of raw samples
`share_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`)
of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`, which are bigger than `gt`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
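For example, with the standard `up` metric this returns per-target availability for the last 24 hours as a value in the `[0...1]` range:

```metricsql
# share of scrapes where the target was up during the last day
share_gt_over_time(up[24h], 0)
```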
@ -817,7 +827,8 @@ See also [share_le_over_time](#share_le_over_time) and [count_gt_over_time](#cou
#### share_le_over_time
`share_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`) of raw samples
`share_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`)
of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`, which are smaller or equal to `le`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -830,7 +841,8 @@ See also [share_gt_over_time](#share_gt_over_time) and [count_le_over_time](#cou
#### share_eq_over_time
`share_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`) of raw samples
`share_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`)
of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`, which are equal to `eq`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -848,7 +860,7 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
#### stddev_over_time
`stddev_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard deviation over raw samples
`stddev_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard deviation over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -859,7 +871,7 @@ See also [stdvar_over_time](#stdvar_over_time).
#### stdvar_over_time
`stdvar_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard variance over raw samples
`stdvar_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard variance over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -870,8 +882,8 @@ See also [stddev_over_time](#stddev_over_time).
#### sum_eq_over_time
`sum_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-function), which calculates the sum of raw sample values equal to `eq`
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`sum_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-function), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values equal to `eq` on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -879,8 +891,8 @@ See also [sum_over_time](#sum_over_time) and [count_eq_over_time](#count_eq_over
#### sum_gt_over_time
`sum_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-function), which calculates the sum of raw sample values bigger than `gt`
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`sum_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-function), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values bigger than `gt` on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -888,8 +900,8 @@ See also [sum_over_time](#sum_over_time) and [count_gt_over_time](#count_gt_over
#### sum_le_over_time
`sum_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-function), which calculates the sum of raw sample values smaller or equal to `le`
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`sum_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-function), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values smaller or equal to `le` on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -897,7 +909,7 @@ See also [sum_over_time](#sum_over_time) and [count_le_over_time](#count_le_over
#### sum_over_time
`sum_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values
`sum_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples) values
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -906,14 +918,15 @@ This function is supported by PromQL.
#### sum2_over_time
`sum2_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of squares for raw sample values
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
`sum2_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of squares for [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
#### timestamp
`timestamp(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last raw sample
`timestamp(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
for the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -924,7 +937,8 @@ See also [time](#time) and [now](#now).
#### timestamp_with_name
`timestamp_with_name(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last raw sample
`timestamp_with_name(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
for the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are preserved in the resulting rollups.
@ -933,7 +947,8 @@ See also [timestamp](#timestamp) and [keep_metric_names](#keep_metric_names) mod
#### tfirst_over_time
`tfirst_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the first raw sample
`tfirst_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
for the first [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -957,7 +972,8 @@ See also [tlast_change_over_time](#tlast_change_over_time).
#### tmax_over_time
`tmax_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the raw sample
`tmax_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
for the [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
with the maximum value on the given lookbehind window `d`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -967,7 +983,8 @@ See also [max_over_time](#max_over_time).
#### tmin_over_time
`tmin_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the raw sample
`tmin_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
for the [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
with the minimum value on the given lookbehind window `d`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
@ -978,7 +995,7 @@ See also [min_over_time](#min_over_time).
#### zscore_over_time
`zscore_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns [z-score](https://en.wikipedia.org/wiki/Standard_score)
for raw samples on the given lookbehind window `d`. It is calculated independently per each time series returned
for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
@ -2212,6 +2229,7 @@ Any [rollup function](#rollup-functions) for something other than [series select
Nested rollup functions can be implicit thanks to the [implicit query conversions](#implicit-query-conversions).
For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m))[1i:1i])`, so it becomes a subquery,
since it contains [default_rollup](#default_rollup) nested into [delta](#delta).
This behavior can be disabled or logged via cmd-line flags `-search.disableImplicitConversion` and `-search.logImplicitConversion` since v1.101.0.
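Subqueries can also be written explicitly instead of relying on the implicit conversion. For instance (hypothetical counter metric), the following computes the maximum 5-minute request rate observed over the last hour, evaluated at 1-minute steps:

```metricsql
# peak short-term rate within the trailing hour
max_over_time(rate(http_requests_total[5m])[1h:1m])
```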
VictoriaMetrics performs subqueries in the following way:
@ -2244,4 +2262,5 @@ VictoriaMetrics performs the following implicit conversions for incoming queries
For example, `avg_over_time(rate(http_requests_total[5m])[1h])` is automatically converted to `avg_over_time(rate(http_requests_total[5m])[1h:1i])`.
* If something other than [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering)
is passed to [rollup function](#rollup-functions), then a [subquery](#subqueries) with `1i` lookbehind window and `1i` step is automatically formed.
For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up)))[1i:1i])`.
For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up)))[1i:1i])`.
This behavior can be disabled or logged via cmd-line flags `-search.disableImplicitConversion` and `-search.logImplicitConversion` since v1.101.0.

@ -76,7 +76,7 @@ var (
configAuthKey = flagutil.NewPassword("configAuthKey", "Authorization key for accessing /config page. It must be passed via authKey query arg")
reloadAuthKey = flagutil.NewPassword("reloadAuthKey", "Auth key for /-/reload http endpoint. It must be passed as authKey=...")
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented")
maxLabelValueLen = flag.Int("maxLabelValueLen", 16*1024, "The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented")
maxLabelValueLen = flag.Int("maxLabelValueLen", 1024, "The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented")
)
var (

View file

@ -35,49 +35,22 @@ export const useFetchLogs = (server: string, query: string, limit: number) => {
};
const fetchLogs = useCallback(async () => {
const limit = Number(options.body.get("limit")) + 1;
const limit = Number(options.body.get("limit"));
setIsLoading(true);
setError(undefined);
try {
const response = await fetch(url, options);
const text = await response.text();
if (!response.ok || !response.body) {
const errorText = await response.text();
setError(errorText);
setError(text);
setLogs([]);
setIsLoading(false);
return;
}
const reader = response.body.getReader();
const decoder = new TextDecoder("utf-8");
const result = [];
while (reader) {
const { done, value } = await reader.read();
if (done) {
// "Stream finished, no more data."
break;
}
const lines = decoder.decode(value, { stream: true }).split("\n");
result.push(...lines);
// Trim result to limit
// This will lose its meaning with these changes:
// https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5778
if (result.length > limit) {
result.splice(0, result.length - limit);
}
if (result.length >= limit) {
// Reached the maximum line limit
reader.cancel();
break;
}
}
const data = result.map(parseLineToJSON).filter(line => line) as Logs[];
const lines = text.split("\n").filter(line => line).slice(0, limit);
const data = lines.map(parseLineToJSON).filter(line => line) as Logs[];
setLogs(data);
} catch (e) {
console.error(e);
@ -96,4 +69,3 @@ export const useFetchLogs = (server: string, query: string, limit: number) => {
fetchLogs,
};
};
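For reference, the same request/parse flow the hook now performs could be sketched in Go roughly as below. The `/select/logsql/query` endpoint, its `query`/`limit` form parameters and the server address are assumptions for illustration, not taken from this diff:

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Assumed endpoint and parameters; adjust to the actual VictoriaLogs address.
	form := url.Values{"query": {"error"}, "limit": {"10"}}
	resp, err := http.PostForm("http://localhost:9428/select/logsql/query", form)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Each response line is expected to be a separate JSON-encoded log entry.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		var entry map[string]any
		if err := json.Unmarshal([]byte(line), &entry); err != nil {
			log.Printf("skipping malformed line: %v", err)
			continue
		}
		fmt.Println(entry["_msg"])
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
```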

View file

@ -33,6 +33,7 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/).
**Update note 1: the `-remoteWrite.multitenantURL` command-line flag at `vmagent` was removed starting from this release. This flag was deprecated since [v1.96.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.96.0). Use `-enableMultitenantHandlers` instead, as it is easier to use and combine with [multitenant URL at vminsert](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy-via-labels). See these [docs for details](https://docs.victoriametrics.com/vmagent.html#multitenancy).**
**Update note 2: the `-streamAggr.dropInputLabels` command-line flag at `vmagent` was renamed to `-remoteWrite.streamAggr.dropInputLabels`. `-streamAggr.dropInputLabels` is now used for global streaming aggregation.**
**Update note 3: the default value for the `-maxLabelValueLen` command-line flag was changed from 16kB to 1kB. This may lead to truncation of labels with enormous values.**
* SECURITY: upgrade Go builder from Go1.22.2 to Go1.22.3. See [the list of issues addressed in Go1.22.3](https://github.com/golang/go/issues?q=milestone%3AGo1.22.3+label%3ACherryPickApproved).
@ -62,6 +63,7 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/).
* BUGFIX: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation/): prevent excessive resource usage when the stream aggregation config file is empty.
* BUGFIX: properly estimate the needed memory for query execution if it has the format [`aggr_func`](https://docs.victoriametrics.com/metricsql/#aggregate-functions)([`rollup_func[d]`](https://docs.victoriametrics.com/metricsql/#rollup-functions)) (for example, `sum(rate(request_duration_seconds_bucket[5m]))`). This should allow performing aggregations over a bigger number of time series when VictoriaMetrics runs in environments with small amounts of available memory. The issue was introduced in [this commit](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/5138eaeea0791caa34bcfab410e0ca9cd253cd8f) in [v1.83.0](https://docs.victoriametrics.com/changelog_2022/#v1830).
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): correctly apply `-inmemoryDataFlushInterval` when it's set to the minimum supported value of 1s.
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): reduce the default value for the `-maxLabelValueLen` command-line flag from `16KiB` to `1KiB`. This should prevent issues like [this one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6176) when time series with overly long label values are ingested into VictoriaMetrics.
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): properly release memory used for metrics during config reload. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6247).
* BUGFIX: [dashboards](https://grafana.com/orgs/victoriametrics): fix `AnnotationQueryRunner` error in Grafana when executing annotations query against Prometheus backend. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6309) for details.

View file

@ -2915,7 +2915,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
The maximum size in bytes of a single Prometheus remote_write API request
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
-maxLabelValueLen int
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 1024)
-maxLabelsPerTimeseries int
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
-memory.allowedBytes size

View file

@ -2923,7 +2923,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
The maximum size in bytes of a single Prometheus remote_write API request
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
-maxLabelValueLen int
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 16384)
The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented (default 1024)
-maxLabelsPerTimeseries int
The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented (default 30)
-memory.allowedBytes size

View file

@ -19,6 +19,10 @@ according to [these docs](https://docs.victoriametrics.com/VictoriaLogs/QuickSta
## tip
## [v0.9.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.9.0-victorialogs)
Released at 2024-05-22
* FEATURE: allow using `~"some_regexp"` [regexp filter](https://docs.victoriametrics.com/victorialogs/logsql/#regexp-filter) instead of `re("some_regexp")`.
* FEATURE: allow using `="some phrase"` [exact filter](https://docs.victoriametrics.com/victorialogs/logsql/#exact-filter) instead of `exact("some phrase")`.
* FEATURE: allow using `="some prefix"*` [exact prefix filter](https://docs.victoriametrics.com/victorialogs/logsql/#exact-prefix-filter) instead of `exact("some prefix"*)`.
@ -33,6 +37,8 @@ according to [these docs](https://docs.victoriametrics.com/VictoriaLogs/QuickSta
* FEATURE: [web UI](https://docs.victoriametrics.com/VictoriaLogs/querying/#web-ui): change time range limitation from `_time` in the expression to `start` and `end` query args.
* BUGFIX: fix `invalid memory address or nil pointer dereference` panic when using [`extract`](https://docs.victoriametrics.com/victorialogs/logsql/#extract-pipe), [`unpack_json`](https://docs.victoriametrics.com/victorialogs/logsql/#unpack_json-pipe) or [`unpack_logfmt`](https://docs.victoriametrics.com/victorialogs/logsql/#unpack_logfmt-pipe) pipes. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6306).
* BUGFIX: [web UI](https://docs.victoriametrics.com/VictoriaLogs/querying/#web-ui): fix an issue where logs with long `_msg` values might not display. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6281).
* BUGFIX: properly handle time range boundaries with millisecond precision. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6293).
## [v0.8.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.8.0-victorialogs)

View file

@ -1272,11 +1272,11 @@ See also:
### format pipe
`| format "pattern" as result_field` [pipe](#format-pipe) combines [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
`| format "pattern" as result_field` [pipe](#pipe) combines [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
according to the `pattern` and stores it to the `result_field`. All the other fields remain unchanged after the `| format ...` pipe.
For example, the following query stores `request from <ip>:<port>` text into [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field),
by substituting `<ip>` and `<port>` with the corresponding [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) names:
by substituting `<ip>` and `<port>` with the corresponding [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) values:
```logsql
_time:5m | format "request from <ip>:<port>" as _msg
@ -1670,6 +1670,10 @@ form `foo`:
_time:5m | unpack_json from foo result_prefix "foo_"
```
Performance tip: from a performance and resource usage standpoint, it is better to ingest parsed JSON logs into VictoriaLogs
according to the [supported data model](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
than to ingest unparsed JSON lines into VictoriaLogs and parse them at query time with the [`unpack_json` pipe](#unpack_json-pipe).
See also:
- [Conditional `unpack_json`](#conditional-unpack_json)
@ -1729,6 +1733,10 @@ from `foo` field:
_time:5m | unpack_logfmt from foo result_prefix "foo_"
```
Performance tip: from a performance and resource usage standpoint, it is better to ingest parsed [logfmt](https://brandur.org/logfmt) logs into VictoriaLogs
according to the [supported data model](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
than to ingest unparsed logfmt lines into VictoriaLogs and parse them at query time with the [`unpack_logfmt` pipe](#unpack_logfmt-pipe).
See also:
- [Conditional unpack_logfmt](#conditional-unpack_logfmt)
@ -2103,11 +2111,10 @@ LogsQL supports the following transformations on the log entries selected with [
See [these docs](#extract-pipe) for details.
- Unpacking JSON fields from [log fields](https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#data-model). See [these docs](#unpack_json-pipe).
- Unpacking [logfmt](https://brandur.org/logfmt) fields from [log fields](https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#data-model). See [these docs](#unpack_logfmt-pipe).
- Creating a new field from existing [log fields](https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#data-model) according to the provided format. See [these docs](#format-pipe).
LogsQL will support the following transformations in the future:
- Creating a new field from existing [log fields](https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#data-model)
according to the provided format.
- Creating a new field according to math calculations over existing [log fields](https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#data-model).
- Parsing duration strings into floating-point seconds for further [stats calculations](#stats-pipe).

View file

@ -349,7 +349,7 @@ This limit can be changed via `-maxLabelsPerTimeseries` command-line flag if nec
Every label value can contain an arbitrary string value. The good practice is to use short and meaningful label values to
describe the attribute of the metric, not to tell the story about it. For example, label-value pair
`environment="prod"` is ok, but `log_message="long log message with a lot of details..."` is not ok. By default,
VictoriaMetrics limits the label value size to 16kB. This limit can be changed via the `-maxLabelValueLen` command-line flag.
VictoriaMetrics limits the label value size to 1kB. This limit can be changed via the `-maxLabelValueLen` command-line flag.
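The truncation behavior itself is simple; here is a minimal Go sketch of what happens to an over-long value (a hypothetical helper for illustration, not the actual VictoriaMetrics code):

```go
package main

import "fmt"

// truncateLabelValue cuts value down to maxLen bytes and reports whether
// truncation happened, so the caller can increment the
// vm_too_long_label_values_total counter.
func truncateLabelValue(value string, maxLen int) (string, bool) {
	if len(value) <= maxLen {
		return value, false
	}
	return value[:maxLen], true
}

func main() {
	v, truncated := truncateLabelValue("long log message with a lot of details...", 16)
	fmt.Printf("%q truncated=%v\n", v, truncated) // "long log message" truncated=true
}
```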
It is very important to keep under control the number of unique label values, since every unique label value
leads to a new [time series](#time-series). Try to avoid using volatile label values such as session ID or query ID in order to

78
go.mod
View file

@ -3,17 +3,17 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.22.3
require (
cloud.google.com/go/storage v1.40.0
cloud.google.com/go/storage v1.41.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
github.com/VictoriaMetrics/easyproto v0.1.4
github.com/VictoriaMetrics/fastcache v1.12.2
github.com/VictoriaMetrics/metrics v1.33.1
github.com/VictoriaMetrics/metricsql v0.75.1
github.com/aws/aws-sdk-go-v2 v1.26.1
github.com/aws/aws-sdk-go-v2/config v1.27.13
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.17
github.com/aws/aws-sdk-go-v2/service/s3 v1.53.2
github.com/aws/aws-sdk-go-v2 v1.27.0
github.com/aws/aws-sdk-go-v2/config v1.27.15
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.20
github.com/aws/aws-sdk-go-v2/service/s3 v1.54.2
github.com/bmatcuk/doublestar/v4 v4.6.1
github.com/cespare/xxhash/v2 v2.3.0
github.com/cheggaaa/pb/v3 v3.1.5
@ -27,18 +27,18 @@ require (
github.com/valyala/fastjson v1.6.4
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.2
github.com/valyala/gozstd v1.20.1
github.com/valyala/gozstd v1.21.1
github.com/valyala/histogram v1.2.0
github.com/valyala/quicktemplate v1.7.0
golang.org/x/oauth2 v0.20.0
golang.org/x/sys v0.20.0
google.golang.org/api v0.180.0
google.golang.org/api v0.181.0
gopkg.in/yaml.v2 v2.4.0
)
require (
cloud.google.com/go v0.113.0 // indirect
cloud.google.com/go/auth v0.4.1 // indirect
cloud.google.com/go/auth v0.4.2 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
cloud.google.com/go/iam v1.1.8 // indirect
@ -47,32 +47,32 @@ require (
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/aws/aws-sdk-go v1.53.0 // indirect
github.com/aws/aws-sdk-go v1.53.8 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.13 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.15 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.20.6 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.28.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.20.8 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.28.9 // indirect
github.com/aws/smithy-go v1.20.2 // indirect
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/fatih/color v1.17.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@ -81,7 +81,7 @@ require (
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
@ -100,21 +100,21 @@ require (
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.53.0 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.14.0 // indirect
github.com/prometheus/procfs v0.15.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stretchr/testify v1.9.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/collector/featuregate v1.7.0 // indirect
go.opentelemetry.io/collector/pdata v1.7.0 // indirect
go.opentelemetry.io/collector/semconv v0.100.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect
go.opentelemetry.io/otel v1.26.0 // indirect
go.opentelemetry.io/otel/metric v1.26.0 // indirect
go.opentelemetry.io/otel/trace v1.26.0 // indirect
go.opentelemetry.io/collector/featuregate v1.8.0 // indirect
go.opentelemetry.io/collector/pdata v1.8.0 // indirect
go.opentelemetry.io/collector/semconv v0.101.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
go.opentelemetry.io/otel v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect
go.opentelemetry.io/otel/trace v1.27.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/goleak v1.3.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
@ -124,14 +124,14 @@ require (
golang.org/x/sync v0.7.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto v0.0.0-20240509183442-62759503f434 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 // indirect
google.golang.org/grpc v1.63.2 // indirect
google.golang.org/genproto v0.0.0-20240521202816-d264139d666e // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect
google.golang.org/grpc v1.64.0 // indirect
google.golang.org/protobuf v1.34.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.30.0 // indirect
k8s.io/client-go v0.30.0 // indirect
k8s.io/apimachinery v0.30.1 // indirect
k8s.io/client-go v0.30.1 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
)

164
go.sum
View file

@ -15,8 +15,8 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.113.0 h1:g3C70mn3lWfckKBiCVsAshabrDg01pQ0pnX1MNtnMkA=
cloud.google.com/go v0.113.0/go.mod h1:glEqlogERKYeePz6ZdkcLJ28Q2I6aERgDDErBg9GzO8=
cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg=
cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro=
cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg=
cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@ -40,8 +40,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw=
cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g=
cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0=
cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
@ -89,44 +89,44 @@ github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.53.0 h1:MMo1x1ggPPxDfHMXJnQudTbGXYlD4UigUAud1DJxPVo=
github.com/aws/aws-sdk-go v1.53.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
github.com/aws/aws-sdk-go v1.53.8 h1:eoqGb1WOHIrCFKo1d51cMcnt1ralfLFaEqRkC5Zzv8k=
github.com/aws/aws-sdk-go v1.53.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.27.0 h1:7bZWKoXhzI+mMR/HjdMx8ZCC5+6fY0lS5tr0bbgiLlo=
github.com/aws/aws-sdk-go-v2 v1.27.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
github.com/aws/aws-sdk-go-v2/config v1.27.13 h1:WbKW8hOzrWoOA/+35S5okqO/2Ap8hkkFUzoW8Hzq24A=
github.com/aws/aws-sdk-go-v2/config v1.27.13/go.mod h1:XLiyiTMnguytjRER7u5RIkhIqS8Nyz41SwAWb4xEjxs=
github.com/aws/aws-sdk-go-v2/credentials v1.17.13 h1:XDCJDzk/u5cN7Aple7D/MiAhx1Rjo/0nueJ0La8mRuE=
github.com/aws/aws-sdk-go-v2/credentials v1.17.13/go.mod h1:FMNcjQrmuBYvOTZDtOLCIu0esmxjF7RuA/89iSXWzQI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.17 h1:9b1Os1s11mF5qTIKLgSsyPG810di2+ySSLIIt9bwe9I=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.17/go.mod h1:9Wp7tDOMhv0+sb/FTRAkbHNQ7abYDnoJRzm5AAtCnTc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
github.com/aws/aws-sdk-go-v2/config v1.27.15 h1:uNnGLZ+DutuNEkuPh6fwqK7LpEiPmzb7MIMA1mNWEUc=
github.com/aws/aws-sdk-go-v2/config v1.27.15/go.mod h1:7j7Kxx9/7kTmL7z4LlhwQe63MYEE5vkVV6nWg4ZAI8M=
github.com/aws/aws-sdk-go-v2/credentials v1.17.15 h1:YDexlvDRCA8ems2T5IP1xkMtOZ1uLJOCJdTr0igs5zo=
github.com/aws/aws-sdk-go-v2/credentials v1.17.15/go.mod h1:vxHggqW6hFNaeNC0WyXS3VdyjcV0a4KMUY4dKJ96buU=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 h1:dQLK4TjtnlRGb0czOht2CevZ5l6RSyRWAnKeGd7VAFE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3/go.mod h1:TL79f2P6+8Q7dTsILpiVST+AL9lkF6PPGI167Ny0Cjw=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.20 h1:NCM9wYaJCmlIWZSO/JwUEveKf0NCvsSgo9V9BwOAolo=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.20/go.mod h1:dmxIx3qriuepxqZgFeFMitFuftWPB94+MZv/6Btpth4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 h1:lf/8VTF2cM+N4SLzaYJERKEWAXq8MOMpZfU6wEPWsPk=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7/go.mod h1:4SjkU7QiqK2M9oozyMzfZ/23LmUY+h3oFqhdeP5OMiI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 h1:4OYVp0705xu8yjdyoWix0r9wPIRXnIzzOoUpQVHIJ/g=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7/go.mod h1:vd7ESTEvI76T2Na050gODNmNU7+OyKrIKroYTu4ABiI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7 h1:/FUtT3xsoHO3cfh+I/kCbcMCN98QZRsiFet/V8QkWSs=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7/go.mod h1:MaCAgWpGooQoCWZnMur97rGn5dp350w2+CeiV5406wE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ=
github.com/aws/aws-sdk-go-v2/service/s3 v1.53.2 h1:rq2hglTQM3yHZvOPVMtNvLS5x6hijx7JvRDgKiTNDGQ=
github.com/aws/aws-sdk-go-v2/service/s3 v1.53.2/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.6 h1:o5cTaeunSpfXiLTIBx5xo2enQmiChtu1IBbzXnfU9Hs=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.6/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0 h1:Qe0r0lVURDDeBQJ4yP+BOrJkvkiCo/3FH/t+wY11dmw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.7 h1:et3Ta53gotFR4ERLXXHIHl/Uuk1qYpP5uU7cvNql8ns=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.7/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 h1:UXqEWQI0n+q0QixzU0yUUQBZXRd5037qdInTIHFTl98=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9/go.mod h1:xP6Gq6fzGZT8w/ZN+XvGMZ2RU1LeEs7b2yUP5DN8NY4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 h1:Wx0rlZoEJR7JwlSZcHnEa7CNjrSIyVxMFWGAaXy4fJY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9/go.mod h1:aVMHdE0aHO3v+f/iw01fmXV/5DbfQ3Bi9nN7nd9bE9Y=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7 h1:uO5XR6QGBcmPyo2gxofYJLFkcVQ4izOoGDNenlZhTEk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7/go.mod h1:feeeAYfAcwTReM6vbwjEyDmiGho+YgBhaFULuXDW8kc=
github.com/aws/aws-sdk-go-v2/service/s3 v1.54.2 h1:gYSJhNiOF6J9xaYxu2NFNstoiNELwt0T9w29FxSfN+Y=
github.com/aws/aws-sdk-go-v2/service/s3 v1.54.2/go.mod h1:739CllldowZiPPsDFcJHNF4FXrVxaSGVnZ9Ez9Iz9hc=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.8 h1:Kv1hwNG6jHC/sxMTe5saMjH6t6ZLkgfvVxyEjfWL1ks=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.8/go.mod h1:c1qtZUWtygI6ZdvKppzCSXsDOq5I4luJPZ0Ud3juFCA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.2 h1:nWBZ1xHCF+A7vv9sDzJOq4NWIdzFYm0kH7Pr4OjHYsQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.2/go.mod h1:9lmoVDVLz/yUZwLaQ676TK02fhCu4+PgRSmMaKR1ozk=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.9 h1:Qp6Boy0cGDloOE3zI6XhNLNZgjNS8YmiFQFHe71SaW0=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.9/go.mod h1:0Aqn1MnEuitqfsCNyKsdKLhDUOr4txD/g19EfiUqgws=
github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
@ -149,8 +149,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -183,8 +183,8 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@ -203,8 +203,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
@ -304,8 +304,8 @@ github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuEN
github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
@ -452,8 +452,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.14.0 h1:Lw4VdGGoKEZilJsayHf0B+9YgLGREba2C6xr+Fdfq6s=
github.com/prometheus/procfs v0.14.0/go.mod h1:XL+Iwz8k8ZabyZfMFHPiilCniixqQarAy5Mu67pHlNQ=
github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek=
github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk=
github.com/prometheus/prometheus v0.52.0 h1:f7kHJgr7+zShpWdTCeKqbCWR7nKTScgLYQwRux9h1V0=
github.com/prometheus/prometheus v0.52.0/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@ -497,8 +497,8 @@ github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/gozstd v1.20.1 h1:xPnnnvjmaDDitMFfDxmQ4vpx0+3CdTg2o3lALvXTU/g=
github.com/valyala/gozstd v1.20.1/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
github.com/valyala/gozstd v1.21.1 h1:TQFZVTk5zo7iJcX3o4XYBJujPdO31LFb4fVImwK873A=
github.com/valyala/gozstd v1.21.1/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
github.com/valyala/quicktemplate v1.7.0 h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTcjMJzCM=
@ -506,8 +506,8 @@ github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/V
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -519,24 +519,24 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/featuregate v1.7.0 h1:8tNgX2VaiR9jrpZevRSvStuJrvvL6WwScT264HNLk7U=
go.opentelemetry.io/collector/featuregate v1.7.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
go.opentelemetry.io/collector/pdata v1.7.0 h1:/WNsBbE6KM3TTPUb9v/5B7IDqnDkgf8GyFhVJJqu7II=
go.opentelemetry.io/collector/pdata v1.7.0/go.mod h1:ehCBBA5GoFrMZkwyZAKGY/lAVSgZf6rzUt3p9mddmPU=
go.opentelemetry.io/collector/semconv v0.100.0 h1:QArUvWcbmsMjM4PV0zngUHRizZeUXibsPBWjDuNJXAs=
go.opentelemetry.io/collector/semconv v0.100.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc=
go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs=
go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4=
go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30=
go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4=
go.opentelemetry.io/collector/featuregate v1.8.0 h1:p/bAuk5LiSfdYS88yFl/Jzao9bHEYqCh7YvZJ+L+IZg=
go.opentelemetry.io/collector/featuregate v1.8.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
go.opentelemetry.io/otel/sdk v1.25.0 h1:PDryEJPC8YJZQSyLY5eqLeafHtG+X7FWnf3aXMtxbqo=
go.opentelemetry.io/otel/sdk v1.25.0/go.mod h1:oFgzCM2zdsxKzz6zwpTZYLLQsFwc+K0daArPdIhuxkw=
go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA=
go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0=
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@ -768,8 +768,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.180.0 h1:M2D87Yo0rGBPWpo1orwfCLehUUL6E7/TYe5gvMQWDh4=
google.golang.org/api v0.180.0/go.mod h1:51AiyoEg1MJPSZ9zvklA8VnRILPXxn1iVen9v25XHAE=
google.golang.org/api v0.181.0 h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4=
google.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -805,12 +805,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240509183442-62759503f434 h1:+PQKEGakpJad0y8bF9UJlgg4dO2U5H+cydccJNjzkww=
google.golang.org/genproto v0.0.0-20240509183442-62759503f434/go.mod h1:i4np6Wrjp8EujFAUn0CM0SH+iZhY1EbrfzEIJbFkHFM=
google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 h1:OpXbo8JnN8+jZGPrL4SSfaDjSCjupr8lXyBAbexEm/U=
google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 h1:umK/Ey0QEzurTNlsV3R+MfxHAb78HCEX/IkuR+zH4WQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM=
google.golang.org/genproto v0.0.0-20240521202816-d264139d666e h1:axIBUGXSVho2zB+3tJj8l9Qvm/El5vVYPYqhGA5PmJM=
google.golang.org/genproto v0.0.0-20240521202816-d264139d666e/go.mod h1:gOvX/2dWTqh+u3+IHjFeCxinlz5AZ5qhOufbQPub/dE=
google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e h1:SkdGTrROJl2jRGT/Fxv5QUf9jtdKCQh4KQJXbXVLAi0=
google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e/go.mod h1:LweJcLbyVij6rCex8YunD8DYR5VDonap/jYl3ZRxcIU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -824,8 +824,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -868,12 +868,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA=
k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE=
k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA=
k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ=
k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY=
k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY=
k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM=
k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U=
k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q=
k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=

View file

@ -1307,7 +1307,8 @@ func parseFilterTime(lex *lexer) (*filterTime, error) {
if err != nil {
return nil, fmt.Errorf("cannot parse _time filter: %w", err)
}
startTime := int64(t * 1e9)
// Round to milliseconds
startTime := int64(math.Round(t*1e3)) * 1e6
endTime := getMatchingEndTime(startTime, s)
ft := &filterTime{
minTimestamp: startTime,
@ -1416,6 +1417,8 @@ func getMatchingEndTime(startTime int64, stringRepr string) int64 {
tEnd = tStart.Add(time.Minute)
case len(timeStr) == len("YYYY-MM-DDThh:mm:ss") && timeStr[len("YYYY")] == '-':
tEnd = tStart.Add(time.Second)
case len(timeStr) == len("YYYY-MM-DDThh:mm:ss.SSS") && timeStr[len("YYYY")] == '-':
tEnd = tStart.Add(time.Millisecond)
default:
tEnd = tStart.Add(time.Nanosecond)
}
@ -1459,7 +1462,8 @@ func parseTime(lex *lexer) (int64, string, error) {
if err != nil {
return 0, "", err
}
return int64(t * 1e9), s, nil
// round to milliseconds
return int64(math.Round(t*1e3)) * 1e6, s, nil
}
func quoteTokenIfNeeded(s string) string {

View file

@ -191,6 +191,21 @@ func TestParseTimeRange(t *testing.T) {
f("2023-02-28T23:59:59", minTimestamp, maxTimestamp)
f("2023-02-28T23:59:59Z", minTimestamp, maxTimestamp)
// _time:[YYYY-MM-DDTHH:MM:SS.sss, YYYY-MM-DDTHH:MM:SS.sss)
minTimestamp = time.Date(2024, time.May, 12, 0, 0, 0, 333000000, time.UTC).UnixNano()
maxTimestamp = time.Date(2024, time.May, 12, 0, 0, 0, 555000000, time.UTC).UnixNano() - 1
f("[2024-05-12T00:00:00.333+00:00,2024-05-12T00:00:00.555+00:00)", minTimestamp, maxTimestamp)
// _time:[YYYY-MM-DDTHH:MM:SS.sss, YYYY-MM-DDTHH:MM:SS.sss]
minTimestamp = time.Date(2024, time.May, 12, 0, 0, 0, 333000000, time.UTC).UnixNano()
maxTimestamp = time.Date(2024, time.May, 12, 0, 0, 0, 556000000, time.UTC).UnixNano() - 1
f("[2024-05-12T00:00:00.333+00:00,2024-05-12T00:00:00.555+00:00]", minTimestamp, maxTimestamp)
// _time:YYYY-MM-DDTHH:MM:SS.sss
minTimestamp = time.Date(2024, time.May, 14, 13, 54, 59, 134000000, time.UTC).UnixNano()
maxTimestamp = time.Date(2024, time.May, 14, 13, 54, 59, 135000000, time.UTC).UnixNano() - 1
f("2024-05-14T13:54:59.134Z", minTimestamp, maxTimestamp)
// _time:YYYY-MM-DDTHH:MM:SS-hh:mm
minTimestamp = time.Date(2023, time.February, 28, 23, 59, 59, 0, time.UTC).UnixNano()
maxTimestamp = time.Date(2023, time.March, 1, 0, 0, 0, 0, time.UTC).UnixNano() - 1

View file

@ -475,7 +475,7 @@ const maxLabelNameLen = 256
// The maximum length of label value.
//
// Longer values are truncated.
var maxLabelValueLen = 16 * 1024
var maxLabelValueLen = 1024
// SetMaxLabelValueLen sets the limit on the label value length.
//
@ -590,8 +590,10 @@ func trackTruncatedLabels(labels []prompb.Label, truncated *prompb.Label) {
}
}
var droppedLabelsLogTicker = time.NewTicker(5 * time.Second)
var truncatedLabelsLogTicker = time.NewTicker(5 * time.Second)
var (
droppedLabelsLogTicker = time.NewTicker(5 * time.Second)
truncatedLabelsLogTicker = time.NewTicker(5 * time.Second)
)
func labelsToString(labels []prompb.Label) string {
labelsCopy := append([]prompb.Label{}, labels...)
@ -767,6 +769,7 @@ func (ts *canonicalTagsSort) Less(i, j int) bool {
x := *ts
return string(x[i].key) < string(x[j].key)
}
func (ts *canonicalTagsSort) Swap(i, j int) {
x := *ts
x[i], x[j] = x[j], x[i]

View file

@ -1,5 +1,15 @@
# Changelog
## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16)
### Bug Fixes
* **auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15))
* **auth:** Handle non-Transport DefaultTransport ([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159)
* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8))
* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea))
## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09)

View file

@ -39,7 +39,7 @@ const (
// 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes,
// so we give it 15 seconds to refresh its cache before attempting to refresh a token.
defaultExpiryDelta = 215 * time.Second
defaultExpiryDelta = 225 * time.Second
universeDomainDefault = "googleapis.com"
)

View file

@ -64,9 +64,9 @@ func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) {
v.Set("scopes", strings.Join(cs.scopes, ","))
tokenURI.RawQuery = v.Encode()
}
tokenJSON, err := metadata.Get(tokenURI.String())
tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String())
if err != nil {
return nil, err
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
}
var res metadataTokenResp
if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil {

View file

@ -152,7 +152,14 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er
}
base := client.Transport
if base == nil {
base = http.DefaultTransport.(*http.Transport).Clone()
if dt, ok := http.DefaultTransport.(*http.Transport); ok {
base = dt.Clone()
} else {
// Directly reuse the DefaultTransport if the application has
// replaced it with an implementation of RoundTripper other than
// http.Transport.
base = http.DefaultTransport
}
}
client.Transport = &authTransport{
creds: creds,

View file

@ -217,7 +217,7 @@ func getTransportConfig(opts *Options) (*transportConfig, error) {
// encountered while initializing the default source will be reported as client
// error (ex. corrupt metadata file).
func getClientCertificateSource(opts *Options) (cert.Provider, error) {
if !isClientCertificateEnabled() {
if !isClientCertificateEnabled(opts) {
return nil, nil
} else if opts.ClientCertProvider != nil {
return opts.ClientCertProvider, nil
@ -226,14 +226,14 @@ func getClientCertificateSource(opts *Options) (cert.Provider, error) {
}
// isClientCertificateEnabled returns true by default, unless explicitly set to false via env var.
func isClientCertificateEnabled() bool {
// isClientCertificateEnabled returns true by default for all GDU universe domain, unless explicitly overridden by env var
func isClientCertificateEnabled(opts *Options) bool {
if value, ok := os.LookupEnv(googleAPIUseCertSource); ok {
// error as false is OK
b, _ := strconv.ParseBool(value)
return b
}
return true
return opts.isUniverseDomainGDU()
}
type transportConfig struct {

View file

@ -1,6 +1,30 @@
# Changes
## [1.41.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.40.0...storage/v1.41.0) (2024-05-13)
### Features
* **storage/control:** Make Managed Folders operations public ([264a6dc](https://github.com/googleapis/google-cloud-go/commit/264a6dcddbffaec987dce1dc00f6550c263d2df7))
* **storage:** Support for soft delete policies and restore ([#9520](https://github.com/googleapis/google-cloud-go/issues/9520)) ([985deb2](https://github.com/googleapis/google-cloud-go/commit/985deb2bdd1c79944cdd960bd3fbfa38cbfa1c91))
### Bug Fixes
* **storage/control:** An existing resource pattern value `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder=**}` to resource definition `storage.googleapis.com/ManagedFolder` is removed ([3e25053](https://github.com/googleapis/google-cloud-go/commit/3e250530567ee81ed4f51a3856c5940dbec35289))
* **storage:** Add internaloption.WithDefaultEndpointTemplate ([3b41408](https://github.com/googleapis/google-cloud-go/commit/3b414084450a5764a0248756e95e13383a645f90))
* **storage:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
* **storage:** Disable gax retries for gRPC ([#9747](https://github.com/googleapis/google-cloud-go/issues/9747)) ([bbfc0ac](https://github.com/googleapis/google-cloud-go/commit/bbfc0acc272f21bf1f558ea23648183d5a11cda5))
* **storage:** More strongly match regex ([#9706](https://github.com/googleapis/google-cloud-go/issues/9706)) ([3cfc8eb](https://github.com/googleapis/google-cloud-go/commit/3cfc8eb418e064d734bf3d8708162062dbbe988f)), refs [#9705](https://github.com/googleapis/google-cloud-go/issues/9705)
* **storage:** Retry net.OpError on connection reset ([#10154](https://github.com/googleapis/google-cloud-go/issues/10154)) ([54fab10](https://github.com/googleapis/google-cloud-go/commit/54fab107f98b4f79c9df2959a05b981be0a613c1)), refs [#9478](https://github.com/googleapis/google-cloud-go/issues/9478)
* **storage:** Wrap error when MaxAttempts is hit ([#9767](https://github.com/googleapis/google-cloud-go/issues/9767)) ([9cb262b](https://github.com/googleapis/google-cloud-go/commit/9cb262bb65a162665bfb8bed0022615131bae1f2)), refs [#9720](https://github.com/googleapis/google-cloud-go/issues/9720)
### Documentation
* **storage/control:** Update storage control documentation and add PHP for publishing ([1d757c6](https://github.com/googleapis/google-cloud-go/commit/1d757c66478963d6cbbef13fee939632c742759c))
## [1.40.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.1...storage/v1.40.0) (2024-03-29)

View file

@ -479,6 +479,13 @@ type BucketAttrs struct {
// cannot be modified once the bucket is created.
// ObjectRetention cannot be configured or reported through the gRPC API.
ObjectRetentionMode string
// SoftDeletePolicy contains the bucket's soft delete policy, which defines
// the period of time that soft-deleted objects will be retained, and cannot
// be permanently deleted. By default, new buckets will be created with a
// 7 day retention duration. In order to fully disable soft delete, you need
// to set a policy with a RetentionDuration of 0.
SoftDeletePolicy *SoftDeletePolicy
}
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@ -766,6 +773,19 @@ type Autoclass struct {
TerminalStorageClassUpdateTime time.Time
}
// SoftDeletePolicy contains the bucket's soft delete policy, which defines the
// period of time that soft-deleted objects will be retained, and cannot be
// permanently deleted.
type SoftDeletePolicy struct {
// EffectiveTime indicates the time from which the policy, or one with a
// greater retention, was effective. This field is read-only.
EffectiveTime time.Time
// RetentionDuration is the amount of time that soft-deleted objects in the
// bucket will be retained and cannot be permanently deleted.
RetentionDuration time.Duration
}
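As a usage illustration, a minimal sketch of configuring (and fully disabling) this policy through the public API; the project and bucket names are placeholders, and the client construction follows the usual storage.NewClient pattern rather than anything in this change:

package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("example-bucket") // hypothetical bucket name

	// Create a bucket that retains soft-deleted objects for 10 days.
	attrs := &storage.BucketAttrs{
		SoftDeletePolicy: &storage.SoftDeletePolicy{RetentionDuration: 10 * 24 * time.Hour},
	}
	if err := bkt.Create(ctx, "example-project", attrs); err != nil {
		log.Fatal(err)
	}

	// Fully disable soft delete by setting a zero RetentionDuration.
	if _, err := bkt.Update(ctx, storage.BucketAttrsToUpdate{
		SoftDeletePolicy: &storage.SoftDeletePolicy{RetentionDuration: 0},
	}); err != nil {
		log.Fatal(err)
	}
}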
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if b == nil {
return nil, nil
@ -803,6 +823,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
RPO: toRPO(b),
CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig),
Autoclass: toAutoclassFromRaw(b.Autoclass),
SoftDeletePolicy: toSoftDeletePolicyFromRaw(b.SoftDeletePolicy),
}, nil
}
@ -836,6 +857,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()),
ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 if the project resource name is ID based
Autoclass: toAutoclassFromProto(b.GetAutoclass()),
SoftDeletePolicy: toSoftDeletePolicyFromProto(b.SoftDeletePolicy),
}
}
@ -891,6 +913,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
Rpo: b.RPO.String(),
CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(),
Autoclass: b.Autoclass.toRawAutoclass(),
SoftDeletePolicy: b.SoftDeletePolicy.toRawSoftDeletePolicy(),
}
}
@ -951,6 +974,7 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
Rpo: b.RPO.String(),
CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(),
Autoclass: b.Autoclass.toProtoAutoclass(),
SoftDeletePolicy: b.SoftDeletePolicy.toProtoSoftDeletePolicy(),
}
}
@ -1032,6 +1056,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
IamConfig: bktIAM,
Rpo: ua.RPO.String(),
Autoclass: ua.Autoclass.toProtoAutoclass(),
SoftDeletePolicy: ua.SoftDeletePolicy.toProtoSoftDeletePolicy(),
Labels: ua.setLabels,
}
}
@ -1152,6 +1177,9 @@ type BucketAttrsToUpdate struct {
// See https://cloud.google.com/storage/docs/using-autoclass for more information.
Autoclass *Autoclass
// If set, updates the soft delete policy of the bucket.
SoftDeletePolicy *SoftDeletePolicy
// acl is the list of access control rules on the bucket.
// It is unexported and only used internally by the gRPC client.
// Library users should use ACLHandle methods directly.
@ -1273,6 +1301,14 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
}
rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass")
}
if ua.SoftDeletePolicy != nil {
if ua.SoftDeletePolicy.RetentionDuration == 0 {
rb.NullFields = append(rb.NullFields, "SoftDeletePolicy")
rb.SoftDeletePolicy = nil
} else {
rb.SoftDeletePolicy = ua.SoftDeletePolicy.toRawSoftDeletePolicy()
}
}
if ua.PredefinedACL != "" {
// Clear ACL or the call will fail.
rb.Acl = nil
@ -2053,6 +2089,53 @@ func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass {
}
}
func (p *SoftDeletePolicy) toRawSoftDeletePolicy() *raw.BucketSoftDeletePolicy {
if p == nil {
return nil
}
// Excluding read only field EffectiveTime.
return &raw.BucketSoftDeletePolicy{
RetentionDurationSeconds: int64(p.RetentionDuration.Seconds()),
}
}
func (p *SoftDeletePolicy) toProtoSoftDeletePolicy() *storagepb.Bucket_SoftDeletePolicy {
if p == nil {
return nil
}
// Excluding read only field EffectiveTime.
return &storagepb.Bucket_SoftDeletePolicy{
RetentionDuration: durationpb.New(p.RetentionDuration),
}
}
func toSoftDeletePolicyFromRaw(p *raw.BucketSoftDeletePolicy) *SoftDeletePolicy {
if p == nil {
return nil
}
policy := &SoftDeletePolicy{
RetentionDuration: time.Duration(p.RetentionDurationSeconds) * time.Second,
}
// Return EffectiveTime only if parsed to a valid value.
if t, err := time.Parse(time.RFC3339, p.EffectiveTime); err == nil {
policy.EffectiveTime = t
}
return policy
}
func toSoftDeletePolicyFromProto(p *storagepb.Bucket_SoftDeletePolicy) *SoftDeletePolicy {
if p == nil {
return nil
}
return &SoftDeletePolicy{
EffectiveTime: p.GetEffectiveTime().AsTime(),
RetentionDuration: p.GetRetentionDuration().AsDuration(),
}
}
// Objects returns an iterator over the objects in the bucket that match the
// Query q. If q is nil, no filtering is done. Objects will be iterated over
// lexicographically by name.

View file

@ -59,8 +59,9 @@ type storageClient interface {
// Object metadata methods.
DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error
GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error)
UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error)
RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error)
// Default Object ACL methods.
@ -182,16 +183,6 @@ type storageOption interface {
Apply(s *settings)
}
func withGAXOptions(opts ...gax.CallOption) storageOption {
return &gaxOption{opts}
}
type gaxOption struct {
opts []gax.CallOption
}
func (o *gaxOption) Apply(s *settings) { s.gax = o.opts }
func withRetryConfig(rc *retryConfig) storageOption {
return &retryOption{rc}
}
@ -294,6 +285,14 @@ type newRangeReaderParams struct {
readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
}
type getObjectParams struct {
bucket, object string
gen int64
encryptionKey []byte
conds *Conditions
softDeleted bool
}
type updateObjectParams struct {
bucket, object string
uattrs *ObjectAttrsToUpdate
@ -303,6 +302,14 @@ type updateObjectParams struct {
overrideRetention *bool
}
type restoreObjectParams struct {
bucket, object string
gen int64
encryptionKey []byte
conds *Conditions
copySourceACL bool
}
type composeObjectRequest struct {
dstBucket string
dstObject destinationObject

View file

@ -350,7 +350,7 @@ To create a client which will use gRPC, use the alternate constructor:
// Use client as usual.
If the application is running within GCP, users may get better performance by
enabling Google Direct Access (enabling requests to skip some proxy steps). To enable,
enabling Direct Google Access (enabling requests to skip some proxy steps). To enable,
set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add
the following side-effect imports to your application:
@ -359,6 +359,13 @@ the following side-effect imports to your application:
_ "google.golang.org/grpc/xds/googledirectpath"
)
# Storage Control API
Certain control plane and long-running operations for Cloud Storage (including Folder
and Managed Folder operations) are supported via the autogenerated Storage Control
client, which is available as a subpackage in this module. See package docs at
[cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs.
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
@ -367,5 +374,6 @@ the following side-effect imports to your application:
[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata
[Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2
*/
package storage // import "cloud.google.com/go/storage"
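A hedged sketch of how the Storage Control subpackage mentioned above might be dialed; the constructor name is assumed from the usual generated-client naming and is not shown in this change:

package main

import (
	"context"
	"log"

	control "cloud.google.com/go/storage/control/apiv2"
)

func main() {
	ctx := context.Background()
	// NewStorageControlClient is assumed from the standard GAPIC naming convention.
	client, err := control.NewStorageControlClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	// Folder and managed folder operations would be issued through this client.
}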

View file

@ -28,7 +28,6 @@ import (
"cloud.google.com/go/internal/trace"
gapic "cloud.google.com/go/storage/internal/apiv2"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
"github.com/golang/protobuf/proto"
"github.com/googleapis/gax-go/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
@ -40,6 +39,7 @@ import (
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/proto"
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
)
@ -116,6 +116,8 @@ type grpcStorageClient struct {
func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) {
s := initSettings(opts...)
s.clientOption = append(defaultGRPCOptions(), s.clientOption...)
// Disable all gax-level retries in favor of retry logic in the veneer client.
s.gax = append(s.gax, gax.WithRetry(nil))
config := newStorageConfig(s.clientOption...)
if config.readAPIWasSet {
@ -365,6 +367,9 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
if uattrs.Autoclass != nil {
fieldMask.Paths = append(fieldMask.Paths, "autoclass")
}
if uattrs.SoftDeletePolicy != nil {
fieldMask.Paths = append(fieldMask.Paths, "soft_delete_policy")
}
for label := range uattrs.setLabels {
fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label))
@ -377,6 +382,13 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
req.UpdateMask = fieldMask
if len(fieldMask.Paths) < 1 {
// Nothing to update. Send a get request for current attrs instead. This
// maintains consistency with JSON bucket updates.
opts = append(opts, idempotent(true))
return c.GetBucket(ctx, bucket, conds, opts...)
}
var battrs *BucketAttrs
err := run(ctx, func(ctx context.Context) error {
res, err := c.raw.UpdateBucket(ctx, req, s.gax...)
@ -419,6 +431,7 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter,
MatchGlob: it.query.MatchGlob,
ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask
SoftDeleted: it.query.SoftDeleted,
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
@ -488,22 +501,25 @@ func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object str
return err
}
func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
func (c *grpcStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
s := callSettings(c.settings, opts...)
req := &storagepb.GetObjectRequest{
Bucket: bucketResourceName(globalProjectAlias, bucket),
Object: object,
Bucket: bucketResourceName(globalProjectAlias, params.bucket),
Object: params.object,
// ProjectionFull by default.
ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}},
}
if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil {
if err := applyCondsProto("grpcStorageClient.GetObject", params.gen, params.conds, req); err != nil {
return nil, err
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
if encryptionKey != nil {
req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey)
if params.encryptionKey != nil {
req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey)
}
if params.softDeleted {
req.SoftDeleted = &params.softDeleted
}
var attrs *ObjectAttrs
@ -593,6 +609,17 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObje
req.UpdateMask = fieldMask
if len(fieldMask.Paths) < 1 {
// Nothing to update. To maintain consistency with JSON, we must still
// update the object because metageneration and other fields are
// updated even on an empty update.
// gRPC will fail if the fieldmask is empty, so instead we add an
// output-only field to the update mask. Output-only fields are (and must
// be - see AIP 161) ignored, but allow us to send an empty update because
// any mask that is valid for read (as this one is) must be valid for write.
fieldMask.Paths = append(fieldMask.Paths, "create_time")
}
var attrs *ObjectAttrs
err := run(ctx, func(ctx context.Context) error {
res, err := c.raw.UpdateObject(ctx, req, s.gax...)
@ -606,6 +633,32 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObje
return attrs, err
}
func (c *grpcStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
s := callSettings(c.settings, opts...)
req := &storagepb.RestoreObjectRequest{
Bucket: bucketResourceName(globalProjectAlias, params.bucket),
Object: params.object,
CopySourceAcl: &params.copySourceACL,
}
if err := applyCondsProto("grpcStorageClient.RestoreObject", params.gen, params.conds, req); err != nil {
return nil, err
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
var attrs *ObjectAttrs
err := run(ctx, func(ctx context.Context) error {
res, err := c.raw.RestoreObject(ctx, req, s.gax...)
attrs = newObjectFromProto(res)
return err
}, s.retry, s.idempotent)
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return nil, ErrObjectNotExist
}
return attrs, err
}
// Default Object ACL methods.
func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
@ -735,7 +788,7 @@ func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string,
func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error {
// There is no separate API for PATCH in gRPC.
// Make a GET call first to retrieve ObjectAttrs.
attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...)
if err != nil {
return err
}
@ -768,7 +821,7 @@ func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object
// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object.
// Selecting a specific generation of this object is not currently supported by the client.
func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) {
o, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
o, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...)
if err != nil {
return nil, err
}
@ -778,7 +831,7 @@ func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object s
func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
// There is no separate API for PATCH in gRPC.
// Make a GET call first to retrieve ObjectAttrs.
attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...)
if err != nil {
return err
}

View file

@ -107,12 +107,12 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl
// Append the emulator host as default endpoint for the user
o = append([]option.ClientOption{option.WithoutAuthentication()}, o...)
o = append(o, internaloption.WithDefaultEndpoint(endpoint))
o = append(o, internaloption.WithDefaultEndpointTemplate(endpoint))
o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint))
}
s.clientOption = o
// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint.
hc, ep, err := htransport.NewClient(ctx, s.clientOption...)
if err != nil {
return nil, fmt.Errorf("dialing: %w", err)
@ -337,6 +337,9 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
}
fetch := func(pageSize int, pageToken string) (string, error) {
req := c.raw.Objects.List(bucket)
if it.query.SoftDeleted {
req.SoftDeleted(it.query.SoftDeleted)
}
setClientHeader(req.Header())
projection := it.query.Projection
if projection == ProjectionDefault {
@ -409,18 +412,22 @@ func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object str
return err
}
func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
func (c *httpStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
s := callSettings(c.settings, opts...)
req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx)
if err := applyConds("Attrs", gen, conds, req); err != nil {
req := c.raw.Objects.Get(params.bucket, params.object).Projection("full").Context(ctx)
if err := applyConds("Attrs", params.gen, params.conds, req); err != nil {
return nil, err
}
if s.userProject != "" {
req.UserProject(s.userProject)
}
if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil {
if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil {
return nil, err
}
if params.softDeleted {
req.SoftDeleted(params.softDeleted)
}
var obj *raw.Object
var err error
err = run(ctx, func(ctx context.Context) error {
@ -547,6 +554,33 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObje
return newObject(obj), nil
}
func (c *httpStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
s := callSettings(c.settings, opts...)
req := c.raw.Objects.Restore(params.bucket, params.object, params.gen).Context(ctx)
// Do not set the generation here since it's not an optional condition; it gets set above.
if err := applyConds("RestoreObject", defaultGen, params.conds, req); err != nil {
return nil, err
}
if s.userProject != "" {
req.UserProject(s.userProject)
}
if params.copySourceACL {
req.CopySourceAcl(params.copySourceACL)
}
if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
var err error
err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do(); return err }, s.retry, s.idempotent)
var e *googleapi.Error
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
}
return newObject(obj), err
}
// Default Object ACL methods.
func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {

View file

@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc v4.25.2
// protoc-gen-go v1.33.0
// protoc v4.25.3
// source: google/storage/v2/storage.proto
package storagepb

View file

@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "1.40.0"
const Version = "1.41.0"

View file

@ -70,8 +70,8 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry
return internal.Retry(ctx, bo, func() (stop bool, err error) {
ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts)
err = call(ctxWithHeaders)
if retry.maxAttempts != nil && attempts >= *retry.maxAttempts {
return true, err
if err != nil && retry.maxAttempts != nil && attempts >= *retry.maxAttempts {
return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err)
}
attempts++
return !errorFunc(err), err
@ -105,18 +105,16 @@ func ShouldRetry(err error) bool {
if errors.Is(err, io.ErrUnexpectedEOF) {
return true
}
if errors.Is(err, net.ErrClosed) {
return true
}
switch e := err.(type) {
case *net.OpError:
if strings.Contains(e.Error(), "use of closed network connection") {
// TODO: check against net.ErrClosed (go 1.16+) instead of string
return true
}
case *googleapi.Error:
// Retry on 408, 429, and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case *url.Error:
case *net.OpError, *url.Error:
// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
// Unfortunately the error type is unexported, so we resort to string
// matching.
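For callers, the attempt limit that produces the wrapped "retry failed after N attempts" error above is set through the package's public retry options; a minimal sketch, assuming the documented Retryer options (bucket and object names are placeholders):

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Allow at most 3 attempts and retry all eligible errors; after the third
	// failure the returned error reads
	// "storage: retry failed after 3 attempts; last error: ...".
	o := client.Bucket("example-bucket").Object("example-object").Retryer(
		storage.WithMaxAttempts(3),
		storage.WithPolicy(storage.RetryAlways),
	)
	if _, err := o.Attrs(ctx); err != nil {
		log.Println(err)
	}
}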

View file

@ -116,7 +116,7 @@ func toProtoNotification(n *Notification) *storagepb.NotificationConfig {
}
}
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`)
// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it returns

View file

@ -180,12 +180,12 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
opts = append([]option.ClientOption{
option.WithoutAuthentication(),
internaloption.SkipDialSettingsValidation(),
internaloption.WithDefaultEndpoint(endpoint),
internaloption.WithDefaultEndpointTemplate(endpoint),
internaloption.WithDefaultMTLSEndpoint(endpoint),
}, opts...)
}
// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint.
hc, ep, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("dialing: %w", err)
@ -232,7 +232,6 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
// You may configure the client by passing in options from the [google.golang.org/api/option]
// package.
func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
opts = append(defaultGRPCOptions(), opts...)
tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...))
if err != nil {
return nil, err
@ -898,6 +897,7 @@ type ObjectHandle struct {
readCompressed bool // Accept-Encoding: gzip
retry *retryConfig
overrideRetention *bool
softDeleted bool
}
// ACL provides access to the object's access control list.
@ -952,7 +952,7 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error
return nil, err
}
opts := makeStorageOpts(true, o.retry, o.userProject)
return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...)
return o.c.tc.GetObject(ctx, &getObjectParams{o.bucket, o.object, o.gen, o.encryptionKey, o.conds, o.softDeleted}, opts...)
}
// Update updates an object with the provided attributes. See
@ -1057,6 +1057,50 @@ func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle {
return &o2
}
// SoftDeleted returns an object handle that can be used to get an object that
// has been soft deleted. To get a soft deleted object, the generation must be
// set on the object using ObjectHandle.Generation.
// Note that an error will be returned if a live object is queried using this.
func (o *ObjectHandle) SoftDeleted() *ObjectHandle {
o2 := *o
o2.softDeleted = true
return &o2
}
// RestoreOptions allows you to set options when restoring an object.
type RestoreOptions struct {
// CopySourceACL indicates whether the restored object should copy the
// access controls of the source object. Only valid for buckets with
// fine-grained access. If uniform bucket-level access is enabled, setting
// CopySourceACL will cause an error.
CopySourceACL bool
}
// Restore will restore a soft-deleted object to a live object.
// Note that you must specify a generation to use this method.
func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*ObjectAttrs, error) {
if err := o.validate(); err != nil {
return nil, err
}
// Since the generation is required by restore calls, we set the default to
// 0 instead of a negative value, which returns a more descriptive error.
gen := o.gen
if o.gen == defaultGen {
gen = 0
}
// Restore is always idempotent because Generation is a required param.
sOpts := makeStorageOpts(true, o.retry, o.userProject)
return o.c.tc.RestoreObject(ctx, &restoreObjectParams{
bucket: o.bucket,
object: o.object,
gen: gen,
conds: o.conds,
copySourceACL: opts.CopySourceACL,
}, sOpts...)
}
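A minimal sketch of the intended call pattern for the two new handle methods; the bucket, object, and generation values are hypothetical placeholders:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Generation is required for both soft-deleted reads and restores.
	obj := client.Bucket("example-bucket").Object("example-object").Generation(1234567890)

	// Attrs on a SoftDeleted handle reads the soft-deleted generation instead of
	// returning ErrObjectNotExist.
	delAttrs, err := obj.SoftDeleted().Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("soft-deleted at %v, permanent deletion at %v", delAttrs.SoftDeleteTime, delAttrs.HardDeleteTime)

	// Restore turns the soft-deleted generation back into a live object.
	if _, err := obj.Restore(ctx, &storage.RestoreOptions{CopySourceACL: false}); err != nil {
		log.Fatal(err)
	}
}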
// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
@ -1390,6 +1434,21 @@ type ObjectAttrs struct {
// Retention contains the retention configuration for this object.
// ObjectRetention cannot be configured or reported through the gRPC API.
Retention *ObjectRetention
// SoftDeleteTime is the time when the object became soft-deleted.
// Soft-deleted objects are only accessible on an object handle returned by
// ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set,
// ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted.
// This field is read-only.
SoftDeleteTime time.Time
// HardDeleteTime is the time when the object will be permanently deleted.
// Only set when an object becomes soft-deleted with a soft delete policy.
// Soft-deleted objects are only accessible on an object handle returned by
// ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set,
// ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted.
// This field is read-only.
HardDeleteTime time.Time
}
// ObjectRetention contains the retention configuration for this object.
@ -1494,6 +1553,8 @@ func newObject(o *raw.Object) *ObjectAttrs {
CustomTime: convertTime(o.CustomTime),
ComponentCount: o.ComponentCount,
Retention: toObjectRetention(o.Retention),
SoftDeleteTime: convertTime(o.SoftDeleteTime),
HardDeleteTime: convertTime(o.HardDeleteTime),
}
}
@ -1529,6 +1590,8 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs {
Updated: convertProtoTime(o.GetUpdateTime()),
CustomTime: convertProtoTime(o.GetCustomTime()),
ComponentCount: int64(o.ComponentCount),
SoftDeleteTime: convertProtoTime(o.GetSoftDeleteTime()),
HardDeleteTime: convertProtoTime(o.GetHardDeleteTime()),
}
}
@ -1637,6 +1700,11 @@ type Query struct {
// prefixes returned by the query. Only applicable if Delimiter is set to /.
// IncludeFoldersAsPrefixes is not yet implemented in the gRPC API.
IncludeFoldersAsPrefixes bool
// SoftDeleted indicates whether to list soft-deleted objects.
// If true, only objects that have been soft-deleted will be listed.
// By default, soft-deleted objects are not listed.
SoftDeleted bool
}
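A short sketch of listing only soft-deleted objects with the new query flag; the bucket name is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Only soft-deleted objects are returned when SoftDeleted is true.
	it := client.Bucket("example-bucket").Objects(ctx, &storage.Query{SoftDeleted: true})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name, attrs.Generation, attrs.SoftDeleteTime)
	}
}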
// attrToFieldMap maps the field names of ObjectAttrs to the underlying field
@ -1672,6 +1740,8 @@ var attrToFieldMap = map[string]string{
"CustomTime": "customTime",
"ComponentCount": "componentCount",
"Retention": "retention",
"HardDeleteTime": "hardDeleteTime",
"SoftDeleteTime": "softDeleteTime",
}
// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field
@ -1704,6 +1774,8 @@ var attrToProtoFieldMap = map[string]string{
"CustomerKeySHA256": "customer_encryption",
"CustomTime": "custom_time",
"ComponentCount": "component_count",
"HardDeleteTime": "hard_delete_time",
"SoftDeleteTime": "soft_delete_time",
// MediaLink was explicitly excluded from the proto as it is an HTTP-ism.
// "MediaLink": "mediaLink",
// TODO: add object retention - b/308194853

View file

@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.26.1"
const goModuleVersion = "1.27.0"

View file

@ -112,6 +112,8 @@ type MetricData struct {
ResolveEndpointStartTime time.Time
ResolveEndpointEndTime time.Time
EndpointResolutionDuration time.Duration
GetIdentityStartTime time.Time
GetIdentityEndTime time.Time
InThroughput float64
OutThroughput float64
RetryCount int
@ -122,6 +124,7 @@ type MetricData struct {
OperationName string
PartitionID string
Region string
UserAgent string
RequestContentLength int64
Stream StreamMetrics
Attempts []AttemptMetrics
@ -144,8 +147,6 @@ type AttemptMetrics struct {
ConnRequestedTime time.Time
ConnObtainedTime time.Time
ConcurrencyAcquireDuration time.Duration
CredentialFetchStartTime time.Time
CredentialFetchEndTime time.Time
SignStartTime time.Time
SignEndTime time.Time
SigningDuration time.Duration

View file

@ -11,7 +11,6 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
@ -301,22 +300,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl
return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")}
}
mctx := metrics.Context(ctx)
if mctx != nil {
if attempt, err := mctx.Data().LatestAttempt(); err == nil {
attempt.CredentialFetchStartTime = sdk.NowTime()
}
}
credentials, err := s.credentialsProvider.Retrieve(ctx)
if mctx != nil {
if attempt, err := mctx.Data().LatestAttempt(); err == nil {
attempt.CredentialFetchEndTime = sdk.NowTime()
}
}
if err != nil {
return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
}
@ -337,20 +321,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl
})
}
if mctx != nil {
if attempt, err := mctx.Data().LatestAttempt(); err == nil {
attempt.SignStartTime = sdk.NowTime()
}
}
err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...)
if mctx != nil {
if attempt, err := mctx.Data().LatestAttempt(); err == nil {
attempt.SignEndTime = sdk.NowTime()
}
}
if err != nil {
return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
}

View file

@ -1,48 +1,41 @@
// Package v4 implements signing for AWS V4 signer
// Package v4 implements the AWS signature version 4 algorithm (commonly known
// as SigV4).
//
// Provides request signing for request that need to be signed with
// AWS V4 Signatures.
// For more information about SigV4, see [Signing AWS API requests] in the IAM
// user guide.
//
// # Standalone Signer
// While this implementation CAN work in an external context, it is developed
// primarily for SDK use and you may encounter fringe behaviors around header
// canonicalization.
//
// Generally using the signer outside of the SDK should not require any additional
// # Pre-escaping a request URI
//
// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires
// AWS v4 signature validation requires that the canonical string's URI path
// component must be the escaped form of the HTTP request's path.
//
// additional escaping you many need to use the URL.Opaque to define what the raw URI should be sent
// to the service as.
// The Go HTTP client will perform escaping automatically on the HTTP request.
// This may cause signature validation errors because the request differs from
// the URI path or query from which the signature was generated.
//
// The signer will first check the URL.Opaque field, and use its value if set.
// The signer does require the URL.Opaque field to be set in the form of:
// Because of this, we recommend that you explicitly escape the request when
// using this signer outside of the SDK to prevent possible signature mismatch.
// This can be done by setting URL.Opaque on the request. The signer will
// prefer that value, falling back to the return of URL.EscapedPath if unset.
//
// When setting URL.Opaque you must do so in the form of:
//
// "//<hostname>/<path>"
//
// // e.g.
// "//example.com/some/path"
//
// The leading "//" and hostname are required or the URL.Opaque escaping will
// not work correctly.
// The leading "//" and hostname are required or the escaping will not work
// correctly.
//
// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
// method and using the returned value.
// The TestStandaloneSign unit test provides a complete example of using the
// signer outside of the SDK and pre-escaping the URI path.
//
// AWS v4 signature validation requires that the canonical string's URI path
// element must be the URI escaped form of the HTTP request's path.
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
//
// The Go HTTP client will perform escaping automatically on the request. Some
// of these escaping may cause signature validation errors because the HTTP
// request differs from the URI path or query that the signature was generated.
// https://golang.org/pkg/net/url/#URL.EscapedPath
//
// Because of this, it is recommended that when using the signer outside of the
// SDK that explicitly escaping the request prior to being signed is preferable,
// and will help prevent signature validation errors. This can be done by setting
// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
// call URL.EscapedPath() if Opaque is not set.
//
// Test `TestStandaloneSign` provides a complete example of using the signer
// outside of the SDK and pre-escaping the URI path.
// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html
package v4
import (
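To make the pre-escaping guidance above concrete, a hedged standalone-signing sketch; the URL, service, region, and credentials are placeholders, and the empty-payload SHA-256 constant is written out only for illustration:

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	// Hypothetical request whose path contains pre-escaped slashes.
	req, err := http.NewRequest(http.MethodGet, "https://example.com/some/path%2Fwith%2Fslashes", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Pin the exact URI form used for signing so the HTTP client cannot
	// re-escape it differently; the leading "//" and hostname are required.
	req.URL.Opaque = "//example.com/some/path%2Fwith%2Fslashes"

	creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}                      // placeholder credentials
	const emptyPayloadHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" // SHA-256 of an empty body

	signer := v4.NewSigner()
	if err := signer.SignHTTP(context.Background(), creds, req, emptyPayloadHash, "execute-api", "us-east-1", time.Now()); err != nil {
		log.Fatal(err)
	}
	log.Println("Authorization:", req.Header.Get("Authorization"))
}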

View file

@ -1,3 +1,11 @@
# v1.27.15 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.14 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.13 (2024-05-10)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.27.13"
const goModuleVersion = "1.27.15"

View file

@ -1,3 +1,11 @@
# v1.17.15 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.14 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.13 (2024-05-10)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.17.13"
const goModuleVersion = "1.17.15"

View file

@ -1,3 +1,11 @@
# v1.16.3 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.2 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.1 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.16.1"
const goModuleVersion = "1.16.3"

View file

@ -1,3 +1,15 @@
# v1.16.20 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.19 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.18 (2024-05-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.17 (2024-05-10)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package manager
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.16.17"
const goModuleVersion = "1.16.20"

View file

@ -1,3 +1,11 @@
# v1.3.7 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.6 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.5 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.3.5"
const goModuleVersion = "1.3.7"

View file

@ -1,3 +1,11 @@
# v2.6.7 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.6.6 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.6.5 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
const goModuleVersion = "2.6.5"
const goModuleVersion = "2.6.7"

View file

@ -1,3 +1,11 @@
# v1.3.7 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.6 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.5 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package v4a
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.3.5"
const goModuleVersion = "1.3.7"

View file

@ -1,3 +1,11 @@
# v1.3.9 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.8 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.7 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package checksum
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.3.7"
const goModuleVersion = "1.3.9"

View file

@ -1,3 +1,11 @@
# v1.11.9 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.8 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.7 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.11.7"
const goModuleVersion = "1.11.9"

View file

@ -1,3 +1,11 @@
# v1.17.7 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.6 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.5 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package s3shared
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.17.5"
const goModuleVersion = "1.17.7"

View file

@ -1,3 +1,15 @@
# v1.54.2 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.54.1 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.54.0 (2024-05-14)
* **Feature**: Updated a few x-id in the http uri traits
# v1.53.2 (2024-05-08)
* **Bug Fix**: GoDoc improvement

View file

@ -3,4 +3,4 @@
package s3
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.53.2"
const goModuleVersion = "1.54.2"

View file

@ -118,7 +118,7 @@ func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx cont
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CompleteMultipartUpload")
opPath, opQuery := httpbinding.SplitURI("/{Key+}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
@ -640,7 +640,7 @@ func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx contex
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads&x-id=CreateMultipartUpload")
opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
@ -1803,7 +1803,7 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Contex
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/?delete&x-id=DeleteObjects")
opPath, opQuery := httpbinding.SplitURI("/?delete")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
@ -7758,7 +7758,7 @@ func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Contex
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore&x-id=RestoreObject")
opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
@ -7866,7 +7866,7 @@ func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2&x-id=SelectObjectContent")
opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
@ -8341,7 +8341,7 @@ func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx conte
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse?x-id=WriteGetObjectResponse")
opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"

View file

@ -1,3 +1,11 @@
# v1.20.8 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.20.7 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.20.6 (2024-05-08)
* **Bug Fix**: GoDoc improvement

View file

@ -3,4 +3,4 @@
package sso
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.20.6"
const goModuleVersion = "1.20.8"

View file

@ -1,3 +1,11 @@
# v1.24.2 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.24.1 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.24.0 (2024-05-10)
* **Feature**: Updated request parameters for PKCE support.

View file

@ -3,4 +3,4 @@
package ssooidc
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.24.0"
const goModuleVersion = "1.24.2"

View file

@ -1,3 +1,11 @@
# v1.28.9 (2024-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.28.8 (2024-05-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.28.7 (2024-05-08)
* **Bug Fix**: GoDoc improvement

View file

@ -3,4 +3,4 @@
package sts
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.28.7"
const goModuleVersion = "1.28.9"

View file

@ -822,6 +822,12 @@ var awsPartition = partition{
},
"airflow": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@ -846,6 +852,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -855,6 +864,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@ -864,6 +876,9 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@ -1060,6 +1075,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -1072,6 +1090,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
@ -27235,6 +27256,55 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "ca-west-1",
}: endpoint{
Hostname: "s3-control.ca-west-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ca-west-1",
},
},
endpointKey{
Region: "ca-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ca-west-1",
},
},
endpointKey{
Region: "ca-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "s3-control-fips.ca-west-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ca-west-1",
},
},
endpointKey{
Region: "ca-west-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ca-west-1",
},
},
endpointKey{
Region: "ca-west-1-fips",
}: endpoint{
Hostname: "s3-control-fips.ca-west-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "eu-central-1",
}: endpoint{
@ -33681,6 +33751,12 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.53.0"
const SDKVersion = "1.53.8"

View file

@ -122,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri
}
func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
// If it's empty, generate an empty value
if !value.IsNil() && value.Len() == 0 {
// If it's empty, and not ec2, generate an empty value
if !value.IsNil() && value.Len() == 0 && !q.isEC2 {
v.Set(prefix, "")
return nil
}

View file

@ -269,7 +269,7 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.wrap(fmt.Sprint(a...)))
return fmt.Fprintln(w, c.wrap(sprintln(a...)))
}
// Println formats using the default formats for its operands and writes to
@ -278,7 +278,7 @@ func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
return fmt.Fprintln(Output, c.wrap(fmt.Sprint(a...)))
return fmt.Fprintln(Output, c.wrap(sprintln(a...)))
}
// Sprint is just like Print, but returns a string instead of printing it.
@ -288,7 +288,7 @@ func (c *Color) Sprint(a ...interface{}) string {
// Sprintln is just like Println, but returns a string instead of printing it.
func (c *Color) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.Sprint(a...))
return c.wrap(sprintln(a...)) + "\n"
}
// Sprintf is just like Printf, but returns a string instead of printing it.
@ -370,7 +370,7 @@ func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
// string. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return fmt.Sprintln(c.Sprint(a...))
return c.wrap(sprintln(a...)) + "\n"
}
}
@ -648,3 +648,8 @@ func HiCyanString(format string, a ...interface{}) string { return colorString(f
func HiWhiteString(format string, a ...interface{}) string {
return colorString(format, FgHiWhite, a...)
}
// sprintln is a helper function to format a string with fmt.Sprintln and trim the trailing newline.
func sprintln(a ...interface{}) string {
return strings.TrimSuffix(fmt.Sprintln(a...), "\n")
}
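A small usage sketch of the affected Println/Sprintln path (colors chosen arbitrarily):

package main

import (
	"fmt"

	"github.com/fatih/color"
)

func main() {
	c := color.New(color.FgGreen, color.Bold)
	// The operands are joined with spaces inside the color wrap, and the
	// trailing newline is appended outside the escape sequence.
	c.Println("build", "succeeded")
	// Sprintln returns the same wrapped string plus a trailing newline.
	fmt.Print(c.Sprintln("build", "succeeded"))
}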

View file

@ -1,6 +1,7 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
logr offers an(other) opinion on how Go programs and libraries can do logging

View file

@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
outputFormat outputFormat
prefix string
values []any
valuesStr string
parentValuesStr string
depth int
opts *Options
group string // for slog groups
groupDepth int
outputFormat outputFormat
prefix string
values []any
valuesStr string
depth int
opts *Options
groupName string // for slog groups
groups []groupDef
}
// outputFormat indicates which outputFormat to use.
@ -257,6 +256,13 @@ const (
outputJSON
)
// groupDef represents a saved group. The values may be empty, but we don't
// know if we need to render the group until the final record is rendered.
type groupDef struct {
name string
values string
}
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []any
@ -264,76 +270,102 @@ type PseudoStruct []any
func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
buf.WriteByte('{') // for the whole line
buf.WriteByte('{') // for the whole record
}
// Render builtins
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
f.flatten(buf, vals, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
if f.parentValuesStr != "" {
// Turn the inner-most group into a string
argsStr := func() string {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, true) // escape user-provided keys
return buf.String()
}()
// Render the stack of groups from the inside out.
bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
for i := len(f.groups) - 1; i >= 0; i-- {
grp := &f.groups[i]
if grp.values == "" && bodyStr == "" {
// no contents, so we must elide the whole group
continue
}
bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
}
if bodyStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.parentValuesStr)
continuing = true
}
groupDepth := f.groupDepth
if f.group != "" {
if f.valuesStr != "" || len(args) != 0 {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
} else {
// The group was empty
groupDepth--
}
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
continuing = true
}
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, continuing, true) // escape user-provided keys
for i := 0; i < groupDepth; i++ {
buf.WriteByte('}') // for the groups
buf.WriteString(bodyStr)
}
if f.outputFormat == outputJSON {
buf.WriteByte('}') // for the whole line
buf.WriteByte('}') // for the whole record
}
return buf.String()
}
// flatten renders a list of key-value pairs into a buffer. If continuing is
// true, it assumes that the buffer has previous values and will emit a
// separator (which depends on the output format) before the first pair it
// writes. If escapeKeys is true, the keys are assumed to have
// non-JSON-compatible characters in them and must be evaluated for escapes.
// renderGroup returns a string representation of the named group with rendered
// values and args. If the name is empty, this will return the values and args,
// joined. If the name is not empty, this will return a single key-value pair,
// where the value is a grouping of the values and args. If the values and
// args are both empty, this will return an empty string, even if the name was
// specified.
func (f Formatter) renderGroup(name string, values string, args string) string {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
needClosingBrace := false
if name != "" && (values != "" || args != "") {
buf.WriteString(f.quoted(name, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{')
needClosingBrace = true
}
continuing := false
if values != "" {
buf.WriteString(values)
continuing = true
}
if args != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(args)
}
if needClosingBrace {
buf.WriteByte('}')
}
return buf.String()
}
// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
// true, the keys are assumed to have non-JSON-compatible characters in them
// and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
}
v := kvList[i+1]
if i > 0 || continuing {
if i > 0 {
if f.outputFormat == outputJSON {
buf.WriteByte(f.comma())
} else {
@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any {
// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
func (f *Formatter) startGroup(group string) {
func (f *Formatter) startGroup(name string) {
// Unnamed groups are just inlined.
if group == "" {
if name == "" {
return
}
// Any saved values can no longer be changed.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
continuing := false
if f.parentValuesStr != "" {
buf.WriteString(f.parentValuesStr)
continuing = true
}
if f.group != "" && f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
}
// NOTE: We don't close the scope here - that's done later, when a log line
// is actually rendered (because we have N scopes to close).
f.parentValuesStr = buf.String()
n := len(f.groups)
f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
// Start collecting new values.
f.group = group
f.groupDepth++
f.groupName = name
f.valuesStr = ""
f.values = nil
}
@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) {
// Pre-render values, so we don't have to do it on each Info/Error call.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
f.flatten(buf, vals, false, true) // escape user-provided keys
f.flatten(buf, vals, true) // escape user-provided keys
f.valuesStr = buf.String()
}
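
A minimal funcr usage sketch (module path and the output shown in the comment are assumptions, not taken from this diff); it illustrates where pre-rendered values from WithValues and per-call args meet in the rendered line:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr flattens key/value pairs into a string and hands it to this sink.
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})
	log = log.WithName("demo").WithValues("tenant", "a") // pre-rendered into valuesStr
	log.Info("hello", "answer", 42)                      // args flattened at call time
	// Output resembles: demo "level"=0 "msg"="hello" "tenant"="a" "answer"=42
}
```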

View file

@ -91,9 +91,7 @@ func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) {
b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits)
} else {
b.visited = b.visited[:visitedSize]
for i := range b.visited {
b.visited[i] = 0
}
clear(b.visited) // set to 0
}
if cap(b.cap) < ncap {
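
As a side note, the `clear` builtin adopted above (Go 1.21+) zeroes every element in place, matching the removed loop; a tiny sketch:

```go
package main

import "fmt"

func main() {
	visited := []uint32{7, 8, 9}
	clear(visited)       // sets each element to its zero value, like the removed for loop
	fmt.Println(visited) // [0 0 0]
}
```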

View file

@ -6,7 +6,7 @@ package regexp
import (
"regexp/syntax"
"sort"
"slices"
"strings"
"unicode"
"unicode/utf8"
@ -33,11 +33,11 @@ type onePassInst struct {
Next []uint32
}
// OnePassPrefix returns a literal string that all matches for the
// onePassPrefix returns a literal string that all matches for the
// regexp must start with. Complete is true if the prefix
// is the entire match. Pc is the index of the last rune instruction
// in the string. The OnePassPrefix skips over the mandatory
// EmptyBeginText
// in the string. The onePassPrefix skips over the mandatory
// EmptyBeginText.
func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) {
i := &p.Inst[p.Start]
if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 {
@ -68,7 +68,7 @@ func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) {
return buf.String(), complete, pc
}
// OnePassNext selects the next actionable state of the prog, based on the input character.
// onePassNext selects the next actionable state of the prog, based on the input character.
// It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine.
// One of the alternates may ultimately lead without input to end of line. If the instruction
// is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next.
@ -218,7 +218,7 @@ func cleanupOnePass(prog *onePassProg, original *syntax.Prog) {
}
}
// onePassCopy creates a copy of the original Prog, as we'll be modifying it
// onePassCopy creates a copy of the original Prog, as we'll be modifying it.
func onePassCopy(prog *syntax.Prog) *onePassProg {
p := &onePassProg{
Start: prog.Start,
@ -282,13 +282,6 @@ func onePassCopy(prog *syntax.Prog) *onePassProg {
return p
}
// runeSlice exists to permit sorting the case-folded rune sets.
type runeSlice []rune
func (p runeSlice) Len() int { return len(p) }
func (p runeSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p runeSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune}
var anyRune = []rune{0, unicode.MaxRune}
@ -383,7 +376,7 @@ func makeOnePass(p *onePassProg) *onePassProg {
for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) {
runes = append(runes, r1, r1)
}
sort.Sort(runeSlice(runes))
slices.Sort(runes)
} else {
runes = append(runes, inst.Rune...)
}
@ -407,7 +400,7 @@ func makeOnePass(p *onePassProg) *onePassProg {
for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) {
runes = append(runes, r1, r1)
}
sort.Sort(runeSlice(runes))
slices.Sort(runes)
} else {
runes = append(runes, inst.Rune[0], inst.Rune[0])
}
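
Replacing sort.Sort(runeSlice(...)) with slices.Sort (Go 1.21+) drops the helper type entirely; a minimal illustration:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	runes := []rune{'Z', 'a', 'A', 'z'}
	slices.Sort(runes)             // no Len/Less/Swap boilerplate for ordered element types
	fmt.Println(string(runes))     // "AZaz"
}
```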

View file

@ -8,9 +8,7 @@
// general syntax used by Perl, Python, and other languages.
// More precisely, it is the syntax accepted by RE2 and described at
// https://golang.org/s/re2syntax, except for \C.
// For an overview of the syntax, run
//
// go doc regexp/syntax
// For an overview of the syntax, see the [regexp/syntax] package.
//
// The regexp implementation provided by this package is
// guaranteed to run in time linear in the size of the input.
@ -23,10 +21,10 @@
// or any book about automata theory.
//
// All characters are UTF-8-encoded code points.
// Following utf8.DecodeRune, each byte of an invalid UTF-8 sequence
// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence
// is treated as if it encoded utf8.RuneError (U+FFFD).
//
// There are 16 methods of Regexp that match a regular expression and identify
// There are 16 methods of [Regexp] that match a regular expression and identify
// the matched text. Their names are matched by this regular expression:
//
// Find(All)?(String)?(Submatch)?(Index)?
@ -82,7 +80,7 @@ import (
// Regexp is the representation of a compiled regular expression.
// A Regexp is safe for concurrent use by multiple goroutines,
// except for configuration methods, such as Longest.
// except for configuration methods, such as [Regexp.Longest].
type Regexp struct {
expr string // as passed to Compile
prog *syntax.Prog // compiled program
@ -110,21 +108,21 @@ func (re *Regexp) String() string {
return re.expr
}
// Copy returns a new Regexp object copied from re.
// Calling Longest on one copy does not affect another.
// Copy returns a new [Regexp] object copied from re.
// Calling [Regexp.Longest] on one copy does not affect another.
//
// Deprecated: In earlier releases, when using a Regexp in multiple goroutines,
// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines,
// giving each goroutine its own copy helped to avoid lock contention.
// As of Go 1.12, using Copy is no longer necessary to avoid lock contention.
// Copy may still be appropriate if the reason for its use is to make
// two copies with different Longest settings.
// two copies with different [Regexp.Longest] settings.
func (re *Regexp) Copy() *Regexp {
re2 := *re
return &re2
}
// Compile parses a regular expression and returns, if successful,
// a Regexp object that can be used to match against text.
// a [Regexp] object that can be used to match against text.
//
// When matching against text, the regexp returns a match that
// begins as early as possible in the input (leftmost), and among those
@ -132,12 +130,12 @@ func (re *Regexp) Copy() *Regexp {
// This so-called leftmost-first matching is the same semantics
// that Perl, Python, and other implementations use, although this
// package implements it without the expense of backtracking.
// For POSIX leftmost-longest matching, see CompilePOSIX.
// For POSIX leftmost-longest matching, see [CompilePOSIX].
func Compile(expr string) (*Regexp, error) {
return compile(expr, syntax.Perl, false)
}
// CompilePOSIX is like Compile but restricts the regular expression
// CompilePOSIX is like [Compile] but restricts the regular expression
// to POSIX ERE (egrep) syntax and changes the match semantics to
// leftmost-longest.
//
@ -164,7 +162,7 @@ func CompilePOSIX(expr string) (*Regexp, error) {
// That is, when matching against text, the regexp returns a match that
// begins as early as possible in the input (leftmost), and among those
// it chooses a match that is as long as possible.
// This method modifies the Regexp and may not be called concurrently
// This method modifies the [Regexp] and may not be called concurrently
// with any other methods.
func (re *Regexp) Longest() {
re.longest = true
@ -270,7 +268,7 @@ func (re *Regexp) put(m *machine) {
matchPool[re.mpool].Put(m)
}
// minInputLen walks the regexp to find the minimum length of any matchable input
// minInputLen walks the regexp to find the minimum length of any matchable input.
func minInputLen(re *syntax.Regexp) int {
switch re.Op {
default:
@ -310,7 +308,7 @@ func minInputLen(re *syntax.Regexp) int {
}
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
// MustCompile is like [Compile] but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompile(str string) *Regexp {
@ -321,7 +319,7 @@ func MustCompile(str string) *Regexp {
return regexp
}
// MustCompilePOSIX is like CompilePOSIX but panics if the expression cannot be parsed.
// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompilePOSIX(str string) *Regexp {
@ -339,13 +337,13 @@ func quote(s string) string {
return strconv.Quote(s)
}
// NumSubexp returns the number of parenthesized subexpressions in this Regexp.
// NumSubexp returns the number of parenthesized subexpressions in this [Regexp].
func (re *Regexp) NumSubexp() int {
return re.numSubexp
}
// SubexpNames returns the names of the parenthesized subexpressions
// in this Regexp. The name for the first sub-expression is names[1],
// in this [Regexp]. The name for the first sub-expression is names[1],
// so that if m is a match slice, the name for m[i] is SubexpNames()[i].
// Since the Regexp as a whole cannot be named, names[0] is always
// the empty string. The slice should not be modified.
@ -521,7 +519,7 @@ func (re *Regexp) LiteralPrefix() (prefix string, complete bool) {
return re.prefix, re.prefixComplete
}
// MatchReader reports whether the text returned by the RuneReader
// MatchReader reports whether the text returned by the [io.RuneReader]
// contains any match of the regular expression re.
func (re *Regexp) MatchReader(r io.RuneReader) bool {
return re.doMatch(r, nil, "")
@ -541,7 +539,7 @@ func (re *Regexp) Match(b []byte) bool {
// MatchReader reports whether the text returned by the RuneReader
// contains any match of the regular expression pattern.
// More complicated queries need to use Compile and the full Regexp interface.
// More complicated queries need to use [Compile] and the full [Regexp] interface.
func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) {
re, err := Compile(pattern)
if err != nil {
@ -552,7 +550,7 @@ func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) {
// MatchString reports whether the string s
// contains any match of the regular expression pattern.
// More complicated queries need to use Compile and the full Regexp interface.
// More complicated queries need to use [Compile] and the full [Regexp] interface.
func MatchString(pattern string, s string) (matched bool, err error) {
re, err := Compile(pattern)
if err != nil {
@ -563,7 +561,7 @@ func MatchString(pattern string, s string) (matched bool, err error) {
// Match reports whether the byte slice b
// contains any match of the regular expression pattern.
// More complicated queries need to use Compile and the full Regexp interface.
// More complicated queries need to use [Compile] and the full [Regexp] interface.
func Match(pattern string, b []byte) (matched bool, err error) {
re, err := Compile(pattern)
if err != nil {
@ -572,9 +570,9 @@ func Match(pattern string, b []byte) (matched bool, err error) {
return re.Match(b), nil
}
// ReplaceAllString returns a copy of src, replacing matches of the Regexp
// with the replacement string repl. Inside repl, $ signs are interpreted as
// in Expand, so for instance $1 represents the text of the first submatch.
// ReplaceAllString returns a copy of src, replacing matches of the [Regexp]
// with the replacement string repl.
// Inside repl, $ signs are interpreted as in [Regexp.Expand].
func (re *Regexp) ReplaceAllString(src, repl string) string {
n := 2
if strings.Contains(repl, "$") {
@ -586,9 +584,9 @@ func (re *Regexp) ReplaceAllString(src, repl string) string {
return string(b)
}
// ReplaceAllLiteralString returns a copy of src, replacing matches of the Regexp
// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp]
// with the replacement string repl. The replacement repl is substituted directly,
// without using Expand.
// without using [Regexp.Expand].
func (re *Regexp) ReplaceAllLiteralString(src, repl string) string {
return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte {
return append(dst, repl...)
@ -596,9 +594,9 @@ func (re *Regexp) ReplaceAllLiteralString(src, repl string) string {
}
// ReplaceAllStringFunc returns a copy of src in which all matches of the
// Regexp have been replaced by the return value of function repl applied
// [Regexp] have been replaced by the return value of function repl applied
// to the matched substring. The replacement returned by repl is substituted
// directly, without using Expand.
// directly, without using [Regexp.Expand].
func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte {
return append(dst, repl(src[match[0]:match[1]])...)
@ -671,9 +669,9 @@ func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst
return buf
}
// ReplaceAll returns a copy of src, replacing matches of the Regexp
// with the replacement text repl. Inside repl, $ signs are interpreted as
// in Expand, so for instance $1 represents the text of the first submatch.
// ReplaceAll returns a copy of src, replacing matches of the [Regexp]
// with the replacement text repl.
// Inside repl, $ signs are interpreted as in [Regexp.Expand].
func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
n := 2
if bytes.IndexByte(repl, '$') >= 0 {
@ -689,9 +687,9 @@ func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
return b
}
// ReplaceAllLiteral returns a copy of src, replacing matches of the Regexp
// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp]
// with the replacement bytes repl. The replacement repl is substituted directly,
// without using Expand.
// without using [Regexp.Expand].
func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte {
return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte {
return append(dst, repl...)
@ -699,9 +697,9 @@ func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte {
}
// ReplaceAllFunc returns a copy of src in which all matches of the
// Regexp have been replaced by the return value of function repl applied
// [Regexp] have been replaced by the return value of function repl applied
// to the matched byte slice. The replacement returned by repl is substituted
// directly, without using Expand.
// directly, without using [Regexp.Expand].
func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte {
return append(dst, repl(src[match[0]:match[1]])...)
@ -845,7 +843,7 @@ func (re *Regexp) FindIndex(b []byte) (loc []int) {
// FindString returns a string holding the text of the leftmost match in s of the regular
// expression. If there is no match, the return value is an empty string,
// but it will also be empty if the regular expression successfully matches
// an empty string. Use FindStringIndex or FindStringSubmatch if it is
// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is
// necessary to distinguish these cases.
func (re *Regexp) FindString(s string) string {
var dstCap [2]int
@ -870,7 +868,7 @@ func (re *Regexp) FindStringIndex(s string) (loc []int) {
// FindReaderIndex returns a two-element slice of integers defining the
// location of the leftmost match of the regular expression in text read from
// the RuneReader. The match text was found in the input stream at
// the [io.RuneReader]. The match text was found in the input stream at
// byte offset loc[0] through loc[1]-1.
// A return value of nil indicates no match.
func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) {
@ -904,7 +902,7 @@ func (re *Regexp) FindSubmatch(b []byte) [][]byte {
// Expand appends template to dst and returns the result; during the
// append, Expand replaces variables in the template with corresponding
// matches drawn from src. The match slice should have been returned by
// FindSubmatchIndex.
// [Regexp.FindSubmatchIndex].
//
// In the template, a variable is denoted by a substring of the form
// $name or ${name}, where name is a non-empty sequence of letters,
@ -922,7 +920,7 @@ func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) [
return re.expand(dst, string(template), src, "", match)
}
// ExpandString is like Expand but the template and source are strings.
// ExpandString is like [Regexp.Expand] but the template and source are strings.
// It appends to and returns a byte slice in order to give the calling
// code control over allocation.
func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
@ -1067,7 +1065,7 @@ func (re *Regexp) FindStringSubmatchIndex(s string) []int {
// FindReaderSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression of text read by
// the RuneReader, and the matches, if any, of its subexpressions, as defined
// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined
// by the 'Submatch' and 'Index' descriptions in the package comment. A
// return value of nil indicates no match.
func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
@ -1076,7 +1074,7 @@ func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
const startSize = 10 // The size at which to start a slice in the 'All' routines.
// FindAll is the 'All' version of Find; it returns a slice of all successive
// FindAll is the 'All' version of [Regexp.Find]; it returns a slice of all successive
// matches of the expression, as defined by the 'All' description in the
// package comment.
// A return value of nil indicates no match.
@ -1094,7 +1092,7 @@ func (re *Regexp) FindAll(b []byte, n int) [][]byte {
return result
}
// FindAllIndex is the 'All' version of FindIndex; it returns a slice of all
// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
@ -1112,7 +1110,7 @@ func (re *Regexp) FindAllIndex(b []byte, n int) [][]int {
return result
}
// FindAllString is the 'All' version of FindString; it returns a slice of all
// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
@ -1130,7 +1128,7 @@ func (re *Regexp) FindAllString(s string, n int) []string {
return result
}
// FindAllStringIndex is the 'All' version of FindStringIndex; it returns a
// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a
// slice of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
@ -1148,7 +1146,7 @@ func (re *Regexp) FindAllStringIndex(s string, n int) [][]int {
return result
}
// FindAllSubmatch is the 'All' version of FindSubmatch; it returns a slice
// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice
// of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
@ -1172,7 +1170,7 @@ func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte {
return result
}
// FindAllSubmatchIndex is the 'All' version of FindSubmatchIndex; it returns
// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns
// a slice of all successive matches of the expression, as defined by the
// 'All' description in the package comment.
// A return value of nil indicates no match.
@ -1190,7 +1188,7 @@ func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int {
return result
}
// FindAllStringSubmatch is the 'All' version of FindStringSubmatch; it
// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it
// returns a slice of all successive matches of the expression, as defined by
// the 'All' description in the package comment.
// A return value of nil indicates no match.
@ -1215,7 +1213,7 @@ func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
}
// FindAllStringSubmatchIndex is the 'All' version of
// FindStringSubmatchIndex; it returns a slice of all successive matches of
// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of
// the expression, as defined by the 'All' description in the package
// comment.
// A return value of nil indicates no match.
@ -1237,8 +1235,8 @@ func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int {
// the substrings between those expression matches.
//
// The slice returned by this method consists of all the substrings of s
// not contained in the slice returned by FindAllString. When called on an expression
// that contains no metacharacters, it is equivalent to strings.SplitN.
// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression
// that contains no metacharacters, it is equivalent to [strings.SplitN].
//
// Example:
//
@ -1283,3 +1281,24 @@ func (re *Regexp) Split(s string, n int) []string {
return strings
}
// MarshalText implements [encoding.TextMarshaler]. The output
// matches that of calling the [Regexp.String] method.
//
// Note that the output is lossy in some cases: This method does not indicate
// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or
// those for which the [Regexp.Longest] method has been called.
func (re *Regexp) MarshalText() ([]byte, error) {
return []byte(re.String()), nil
}
// UnmarshalText implements [encoding.TextUnmarshaler] by calling
// [Compile] on the encoded value.
func (re *Regexp) UnmarshalText(text []byte) error {
newRE, err := Compile(string(text))
if err != nil {
return err
}
*re = *newRE
return nil
}
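
A short sketch of the new text (un)marshaling methods, assuming a Go release that ships them; the round trip goes through Regexp.String and Compile:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`[a-z]+\d*`)
	text, err := re.MarshalText() // same bytes as re.String()
	if err != nil {
		panic(err)
	}

	var decoded regexp.Regexp
	if err := decoded.UnmarshalText(text); err != nil { // recompiles via Compile
		panic(err)
	}
	fmt.Println(decoded.MatchString("abc123")) // true
}
```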

View file

@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// DO NOT EDIT. This file is generated by mksyntaxgo from the RE2 distribution.
// Code generated by mksyntaxgo from the RE2 distribution. DO NOT EDIT.
/*
Package syntax parses regular expressions into parse trees and compiles
parse trees into programs. Most clients of regular expressions will use the
facilities of package regexp (such as Compile and Match) instead of this package.
facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package.
# Syntax
The regular expression syntax understood by this package when parsing with the Perl flag is as follows.
Parts of the syntax can be disabled by passing alternate flags to Parse.
The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows.
Parts of the syntax can be disabled by passing alternate flags to [Parse].
Single characters:
@ -56,6 +56,7 @@ Grouping:
(re) numbered capturing group (submatch)
(?P<name>re) named & numbered capturing group (submatch)
(?<name>re) named & numbered capturing group (submatch)
(?:re) non-capturing group
(?flags) set flags within current group; non-capturing
(?flags:re) set flags during re; non-capturing
@ -136,6 +137,6 @@ ASCII character classes:
[[:word:]] word characters (== [0-9A-Za-z_])
[[:xdigit:]] hex digit (== [0-9A-Fa-f])
Unicode character classes are those in unicode.Categories and unicode.Scripts.
Unicode character classes are those in [unicode.Categories] and [unicode.Scripts].
*/
package syntax
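
A small sketch of the newly documented (?<name>re) spelling, assuming a toolchain that includes this syntax table; it behaves the same as (?P<name>re):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Both named-capture spellings are accepted; (?<...>) is the shorter one.
	re := regexp.MustCompile(`(?<year>\d{4})-(?<month>\d{2})`)
	m := re.FindStringSubmatch("2024-05")
	fmt.Println(m[re.SubexpIndex("year")], m[re.SubexpIndex("month")]) // 2024 05
}
```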

View file

@ -4,6 +4,32 @@ package syntax
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[OpNoMatch-1]
_ = x[OpEmptyMatch-2]
_ = x[OpLiteral-3]
_ = x[OpCharClass-4]
_ = x[OpAnyCharNotNL-5]
_ = x[OpAnyChar-6]
_ = x[OpBeginLine-7]
_ = x[OpEndLine-8]
_ = x[OpBeginText-9]
_ = x[OpEndText-10]
_ = x[OpWordBoundary-11]
_ = x[OpNoWordBoundary-12]
_ = x[OpCapture-13]
_ = x[OpStar-14]
_ = x[OpPlus-15]
_ = x[OpQuest-16]
_ = x[OpRepeat-17]
_ = x[OpConcat-18]
_ = x[OpAlternate-19]
_ = x[opPseudo-128]
}
const (
_Op_name_0 = "NoMatchEmptyMatchLiteralCharClassAnyCharNotNLAnyCharBeginLineEndLineBeginTextEndTextWordBoundaryNoWordBoundaryCaptureStarPlusQuestRepeatConcatAlternate"
_Op_name_1 = "opPseudo"

View file

@ -44,6 +44,7 @@ const (
ErrTrailingBackslash ErrorCode = "trailing backslash at end of expression"
ErrUnexpectedParen ErrorCode = "unexpected )"
ErrNestingDepth ErrorCode = "expression nests too deeply"
ErrLarge ErrorCode = "expression too large"
)
func (e ErrorCode) String() string {
@ -159,7 +160,7 @@ func (p *parser) reuse(re *Regexp) {
func (p *parser) checkLimits(re *Regexp) {
if p.numRunes > maxRunes {
panic(ErrInternalError)
panic(ErrLarge)
}
p.checkSize(re)
p.checkHeight(re)
@ -203,7 +204,7 @@ func (p *parser) checkSize(re *Regexp) {
}
if p.calcSize(re, true) > maxSize {
panic(ErrInternalError)
panic(ErrLarge)
}
}
@ -248,9 +249,7 @@ func (p *parser) calcSize(re *Regexp, force bool) int64 {
size = int64(re.Max)*sub + int64(re.Max-re.Min)
}
if size < 1 {
size = 1
}
size = max(1, size)
p.size[re] = size
return size
}
@ -381,14 +380,12 @@ func minFoldRune(r rune) rune {
if r < minFold || r > maxFold {
return r
}
min := r
m := r
r0 := r
for r = unicode.SimpleFold(r); r != r0; r = unicode.SimpleFold(r) {
if min > r {
min = r
}
m = min(m, r)
}
return min
return m
}
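
The hand-rolled comparisons above are replaced by the min/max builtins (Go 1.21+); for reference:

```go
package main

import "fmt"

func main() {
	size := int64(0)
	size = max(1, size) // was: if size < 1 { size = 1 }
	m := 'k'
	m = min(m, 'K')      // was: if min > r { min = r }, without shadowing the builtin
	fmt.Println(size, m) // 1 75
}
```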
// op pushes a regexp with the given op onto the stack
@ -897,8 +894,8 @@ func parse(s string, flags Flags) (_ *Regexp, err error) {
panic(r)
case nil:
// ok
case ErrInternalError: // too big
err = &Error{Code: ErrInternalError, Expr: s}
case ErrLarge: // too big
err = &Error{Code: ErrLarge, Expr: s}
case ErrNestingDepth:
err = &Error{Code: ErrNestingDepth, Expr: s}
}
@ -1158,9 +1155,18 @@ func (p *parser) parsePerlFlags(s string) (rest string, err error) {
// support all three as well. EcmaScript 4 uses only the Python form.
//
// In both the open source world (via Code Search) and the
// Google source tree, (?P<expr>name) is the dominant form,
// so that's the one we implement. One is enough.
if len(t) > 4 && t[2] == 'P' && t[3] == '<' {
// Google source tree, (?P<expr>name) and (?<expr>name) are the
// dominant forms of named captures and both are supported.
startsWithP := len(t) > 4 && t[2] == 'P' && t[3] == '<'
startsWithName := len(t) > 3 && t[2] == '<'
if startsWithP || startsWithName {
// position of expr start
exprStartPos := 4
if startsWithName {
exprStartPos = 3
}
// Pull out name.
end := strings.IndexRune(t, '>')
if end < 0 {
@ -1170,8 +1176,8 @@ func (p *parser) parsePerlFlags(s string) (rest string, err error) {
return "", &Error{ErrInvalidNamedCapture, s}
}
capture := t[:end+1] // "(?P<name>"
name := t[4:end] // "name"
capture := t[:end+1] // "(?P<name>" or "(?<name>"
name := t[exprStartPos:end] // "name"
if err = checkUTF8(name); err != nil {
return "", err
}
@ -1853,6 +1859,22 @@ func cleanClass(rp *[]rune) []rune {
return r[:w]
}
// inCharClass reports whether r is in the class.
// It assumes the class has been cleaned by cleanClass.
func inCharClass(r rune, class []rune) bool {
_, ok := sort.Find(len(class)/2, func(i int) int {
lo, hi := class[2*i], class[2*i+1]
if r > hi {
return +1
}
if r < lo {
return -1
}
return 0
})
return ok
}
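
inCharClass leans on sort.Find, a binary search driven by a three-way comparison; a standalone sketch over the same lo/hi pair layout:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// A cleaned class is stored as sorted lo/hi pairs: here a-f and k-p.
	class := []rune{'a', 'f', 'k', 'p'}
	r := 'm'
	_, ok := sort.Find(len(class)/2, func(i int) int {
		lo, hi := class[2*i], class[2*i+1]
		if r > hi {
			return +1
		}
		if r < lo {
			return -1
		}
		return 0
	})
	fmt.Println(ok) // true: 'm' falls inside k-p
}
```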
// appendLiteral returns the result of appending the literal x to the class r.
func appendLiteral(r []rune, x rune, flags Flags) []rune {
if flags&FoldCase != 0 {
@ -1937,7 +1959,7 @@ func appendClass(r []rune, x []rune) []rune {
return r
}
// appendFolded returns the result of appending the case folding of the class x to the class r.
// appendFoldedClass returns the result of appending the case folding of the class x to the class r.
func appendFoldedClass(r []rune, x []rune) []rune {
for i := 0; i < len(x); i += 2 {
r = appendFoldedRange(r, x[i], x[i+1])

View file

@ -106,7 +106,9 @@ func EmptyOpContext(r1, r2 rune) EmptyOp {
// during the evaluation of the \b and \B zero-width assertions.
// These assertions are ASCII-only: the word characters are [A-Za-z0-9_].
func IsWordChar(r rune) bool {
return 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' || '0' <= r && r <= '9' || r == '_'
// Test for lowercase letters first, as these occur more
// frequently than uppercase letters in common cases.
return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || r == '_'
}
// An Inst is a single instruction in a regular expression program.
@ -189,7 +191,7 @@ Loop:
const noMatch = -1
// MatchRune reports whether the instruction matches (and consumes) r.
// It should only be called when i.Op == InstRune.
// It should only be called when i.Op == [InstRune].
func (i *Inst) MatchRune(r rune) bool {
return i.MatchRunePos(r) != noMatch
}
@ -198,7 +200,7 @@ func (i *Inst) MatchRune(r rune) bool {
// If so, MatchRunePos returns the index of the matching rune pair
// (or, when len(i.Rune) == 1, rune singleton).
// If not, MatchRunePos returns -1.
// MatchRunePos should only be called when i.Op == InstRune.
// MatchRunePos should only be called when i.Op == [InstRune].
func (i *Inst) MatchRunePos(r rune) int {
rune := i.Rune
@ -245,7 +247,7 @@ func (i *Inst) MatchRunePos(r rune) int {
lo := 0
hi := len(rune) / 2
for lo < hi {
m := lo + (hi-lo)/2
m := int(uint(lo+hi) >> 1)
if c := rune[2*m]; c <= r {
if r <= rune[2*m+1] {
return m
@ -260,7 +262,7 @@ func (i *Inst) MatchRunePos(r rune) int {
// MatchEmptyWidth reports whether the instruction matches
// an empty string between the runes before and after.
// It should only be called when i.Op == InstEmptyWidth.
// It should only be called when i.Op == [InstEmptyWidth].
func (i *Inst) MatchEmptyWidth(before rune, after rune) bool {
switch EmptyOp(i.Arg) {
case EmptyBeginLine:
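
MatchRunePos returns the index of the lo/hi pair that matched (or -1); a hedged, standalone illustration via the exported syntax API, assuming the compiled program contains a single InstRune for this class:

```go
package main

import (
	"fmt"
	"regexp/syntax"
)

func main() {
	re, _ := syntax.Parse(`[a-cx-z]`, syntax.Perl)
	prog, _ := syntax.Compile(re.Simplify())
	for _, inst := range prog.Inst {
		if inst.Op == syntax.InstRune {
			// 'y' falls in the second pair (x-z), so the pair index is 1.
			fmt.Println(inst.MatchRunePos('y'), inst.MatchRune('y')) // 1 true
		}
	}
}
```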

View file

@ -8,6 +8,7 @@ package syntax
// In this package, re is always a *Regexp and r is always a rune.
import (
"slices"
"strconv"
"strings"
"unicode"
@ -75,24 +76,10 @@ func (x *Regexp) Equal(y *Regexp) bool {
}
case OpLiteral, OpCharClass:
if len(x.Rune) != len(y.Rune) {
return false
}
for i, r := range x.Rune {
if r != y.Rune[i] {
return false
}
}
return slices.Equal(x.Rune, y.Rune)
case OpAlternate, OpConcat:
if len(x.Sub) != len(y.Sub) {
return false
}
for i, sub := range x.Sub {
if !sub.Equal(y.Sub[i]) {
return false
}
}
return slices.EqualFunc(x.Sub, y.Sub, func(a, b *Regexp) bool { return a.Equal(b) })
case OpStar, OpPlus, OpQuest:
if x.Flags&NonGreedy != y.Flags&NonGreedy || !x.Sub[0].Equal(y.Sub[0]) {
@ -112,8 +99,165 @@ func (x *Regexp) Equal(y *Regexp) bool {
return true
}
// printFlags is a bit set indicating which flags (including non-capturing parens) to print around a regexp.
type printFlags uint8
const (
flagI printFlags = 1 << iota // (?i:
flagM // (?m:
flagS // (?s:
flagOff // )
flagPrec // (?: )
negShift = 5 // flagI<<negShift is (?-i:
)
// addSpan enables the flags f around start..last,
// by setting flags[start] = f and flags[last] = flagOff.
func addSpan(start, last *Regexp, f printFlags, flags *map[*Regexp]printFlags) {
if *flags == nil {
*flags = make(map[*Regexp]printFlags)
}
(*flags)[start] = f
(*flags)[last] |= flagOff // maybe start==last
}
// calcFlags calculates the flags to print around each subexpression in re,
// storing that information in (*flags)[sub] for each affected subexpression.
// The first time an entry needs to be written to *flags, calcFlags allocates the map.
// calcFlags also calculates the flags that must be active or can't be active
// around re and returns those flags.
func calcFlags(re *Regexp, flags *map[*Regexp]printFlags) (must, cant printFlags) {
switch re.Op {
default:
return 0, 0
case OpLiteral:
// If literal is fold-sensitive, return (flagI, 0) or (0, flagI)
// according to whether (?i) is active.
// If literal is not fold-sensitive, return 0, 0.
for _, r := range re.Rune {
if minFold <= r && r <= maxFold && unicode.SimpleFold(r) != r {
if re.Flags&FoldCase != 0 {
return flagI, 0
} else {
return 0, flagI
}
}
}
return 0, 0
case OpCharClass:
// If literal is fold-sensitive, return 0, flagI - (?i) has been compiled out.
// If literal is not fold-sensitive, return 0, 0.
for i := 0; i < len(re.Rune); i += 2 {
lo := max(minFold, re.Rune[i])
hi := min(maxFold, re.Rune[i+1])
for r := lo; r <= hi; r++ {
for f := unicode.SimpleFold(r); f != r; f = unicode.SimpleFold(f) {
if !(lo <= f && f <= hi) && !inCharClass(f, re.Rune) {
return 0, flagI
}
}
}
}
return 0, 0
case OpAnyCharNotNL: // (?-s).
return 0, flagS
case OpAnyChar: // (?s).
return flagS, 0
case OpBeginLine, OpEndLine: // (?m)^ (?m)$
return flagM, 0
case OpEndText:
if re.Flags&WasDollar != 0 { // (?-m)$
return 0, flagM
}
return 0, 0
case OpCapture, OpStar, OpPlus, OpQuest, OpRepeat:
return calcFlags(re.Sub[0], flags)
case OpConcat, OpAlternate:
// Gather the must and cant for each subexpression.
// When we find a conflicting subexpression, insert the necessary
// flags around the previously identified span and start over.
var must, cant, allCant printFlags
start := 0
last := 0
did := false
for i, sub := range re.Sub {
subMust, subCant := calcFlags(sub, flags)
if must&subCant != 0 || subMust&cant != 0 {
if must != 0 {
addSpan(re.Sub[start], re.Sub[last], must, flags)
}
must = 0
cant = 0
start = i
did = true
}
must |= subMust
cant |= subCant
allCant |= subCant
if subMust != 0 {
last = i
}
if must == 0 && start == i {
start++
}
}
if !did {
// No conflicts: pass the accumulated must and cant upward.
return must, cant
}
if must != 0 {
// Conflicts found; need to finish final span.
addSpan(re.Sub[start], re.Sub[last], must, flags)
}
return 0, allCant
}
}
// writeRegexp writes the Perl syntax for the regular expression re to b.
func writeRegexp(b *strings.Builder, re *Regexp) {
func writeRegexp(b *strings.Builder, re *Regexp, f printFlags, flags map[*Regexp]printFlags) {
f |= flags[re]
if f&flagPrec != 0 && f&^(flagOff|flagPrec) != 0 && f&flagOff != 0 {
// flagPrec is redundant with other flags being added and terminated
f &^= flagPrec
}
if f&^(flagOff|flagPrec) != 0 {
b.WriteString(`(?`)
if f&flagI != 0 {
b.WriteString(`i`)
}
if f&flagM != 0 {
b.WriteString(`m`)
}
if f&flagS != 0 {
b.WriteString(`s`)
}
if f&((flagM|flagS)<<negShift) != 0 {
b.WriteString(`-`)
if f&(flagM<<negShift) != 0 {
b.WriteString(`m`)
}
if f&(flagS<<negShift) != 0 {
b.WriteString(`s`)
}
}
b.WriteString(`:`)
}
if f&flagOff != 0 {
defer b.WriteString(`)`)
}
if f&flagPrec != 0 {
b.WriteString(`(?:`)
defer b.WriteString(`)`)
}
switch re.Op {
default:
b.WriteString("<invalid op" + strconv.Itoa(int(re.Op)) + ">")
@ -122,15 +266,9 @@ func writeRegexp(b *strings.Builder, re *Regexp) {
case OpEmptyMatch:
b.WriteString(`(?:)`)
case OpLiteral:
if re.Flags&FoldCase != 0 {
b.WriteString(`(?i:`)
}
for _, r := range re.Rune {
escape(b, r, false)
}
if re.Flags&FoldCase != 0 {
b.WriteString(`)`)
}
case OpCharClass:
if len(re.Rune)%2 != 0 {
b.WriteString(`[invalid char class]`)
@ -147,7 +285,9 @@ func writeRegexp(b *strings.Builder, re *Regexp) {
lo, hi := re.Rune[i]+1, re.Rune[i+1]-1
escape(b, lo, lo == '-')
if lo != hi {
b.WriteRune('-')
if hi != lo+1 {
b.WriteRune('-')
}
escape(b, hi, hi == '-')
}
}
@ -156,25 +296,25 @@ func writeRegexp(b *strings.Builder, re *Regexp) {
lo, hi := re.Rune[i], re.Rune[i+1]
escape(b, lo, lo == '-')
if lo != hi {
b.WriteRune('-')
if hi != lo+1 {
b.WriteRune('-')
}
escape(b, hi, hi == '-')
}
}
}
b.WriteRune(']')
case OpAnyCharNotNL:
b.WriteString(`(?-s:.)`)
case OpAnyChar:
b.WriteString(`(?s:.)`)
case OpAnyCharNotNL, OpAnyChar:
b.WriteString(`.`)
case OpBeginLine:
b.WriteString(`(?m:^)`)
b.WriteString(`^`)
case OpEndLine:
b.WriteString(`(?m:$)`)
b.WriteString(`$`)
case OpBeginText:
b.WriteString(`\A`)
case OpEndText:
if re.Flags&WasDollar != 0 {
b.WriteString(`(?-m:$)`)
b.WriteString(`$`)
} else {
b.WriteString(`\z`)
}
@ -191,17 +331,17 @@ func writeRegexp(b *strings.Builder, re *Regexp) {
b.WriteRune('(')
}
if re.Sub[0].Op != OpEmptyMatch {
writeRegexp(b, re.Sub[0])
writeRegexp(b, re.Sub[0], flags[re.Sub[0]], flags)
}
b.WriteRune(')')
case OpStar, OpPlus, OpQuest, OpRepeat:
if sub := re.Sub[0]; sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 {
b.WriteString(`(?:`)
writeRegexp(b, sub)
b.WriteString(`)`)
} else {
writeRegexp(b, sub)
p := printFlags(0)
sub := re.Sub[0]
if sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 {
p = flagPrec
}
writeRegexp(b, sub, p, flags)
switch re.Op {
case OpStar:
b.WriteRune('*')
@ -225,27 +365,31 @@ func writeRegexp(b *strings.Builder, re *Regexp) {
}
case OpConcat:
for _, sub := range re.Sub {
p := printFlags(0)
if sub.Op == OpAlternate {
b.WriteString(`(?:`)
writeRegexp(b, sub)
b.WriteString(`)`)
} else {
writeRegexp(b, sub)
p = flagPrec
}
writeRegexp(b, sub, p, flags)
}
case OpAlternate:
for i, sub := range re.Sub {
if i > 0 {
b.WriteRune('|')
}
writeRegexp(b, sub)
writeRegexp(b, sub, 0, flags)
}
}
}
func (re *Regexp) String() string {
var b strings.Builder
writeRegexp(&b, re)
var flags map[*Regexp]printFlags
must, cant := calcFlags(re, &flags)
must |= (cant &^ flagI) << negShift
if must != 0 {
must |= flagOff
}
writeRegexp(&b, re, must, flags)
return b.String()
}
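
A sketch of the rewritten String printer in use; the exact text is version-dependent, so the output is not asserted here:

```go
package main

import (
	"fmt"
	"regexp/syntax"
)

func main() {
	re, err := syntax.Parse(`(?i)abc(?m:^x$)`, syntax.Perl)
	if err != nil {
		panic(err)
	}
	// The printer now computes which (?ims) flags must wrap each subexpression
	// (calcFlags) instead of wrapping every operator individually.
	fmt.Println(re.String())
}
```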

View file

@ -55,7 +55,7 @@ ifneq ($(shell command -v gotestsum 2> /dev/null),)
endif
endif
PROMU_VERSION ?= 0.15.0
PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=

View file

@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
}
node := strings.TrimRight(parts[1], ",")
zone := strings.TrimRight(parts[3], ",")
node := strings.TrimSuffix(parts[1], ",")
zone := strings.TrimSuffix(parts[3], ",")
arraySize := len(parts[4:])
if bucketCount == -1 {
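
The TrimRight-to-TrimSuffix swaps here (and in the smaps-rollup and cmdline changes further down) matter because TrimRight takes a character set rather than a literal suffix; a small illustration with made-up values:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// TrimRight strips any trailing run of the given characters, so it can eat
	// into the value itself; TrimSuffix removes the exact suffix once, or nothing.
	fmt.Println(strings.TrimRight("20k kB", " kB"))  // "20"  (the value's own 'k' is lost)
	fmt.Println(strings.TrimSuffix("20k kB", " kB")) // "20k"
	fmt.Println(strings.TrimSuffix("Node 0,", ","))  // "Node 0"
}
```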

View file

@ -23,7 +23,7 @@ import (
var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
@ -50,6 +50,8 @@ type MDStat struct {
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
// Number of blocks on the device that need to be synced.
BlocksToBeSynced int64
// progress percentage of current sync
BlocksSyncedPct float64
// estimated finishing time for current sync (in minutes)
@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
blocksSynced := size
blocksToBeSynced := size
speed := float64(0)
finish := float64(0)
pct := float64(0)
@ -136,9 +139,9 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// Handle case when resync=PENDING or resync=DELAYED.
if strings.Contains(lines[syncLineIdx], "PENDING") ||
strings.Contains(lines[syncLineIdx], "DELAYED") {
syncedBlocks = 0
blocksSynced = 0
} else {
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
}
@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
BlocksSynced: blocksSynced,
BlocksToBeSynced: blocksToBeSynced,
BlocksSyncedPct: pct,
BlocksSyncedFinishTime: finish,
BlocksSyncedSpeed: speed,
@ -206,48 +210,54 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
return active, total, down, size, nil
}
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
blocks := strings.Split(matches[1], "/")
blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
}
blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
if err != nil {
return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err)
}
// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
}
// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
}
// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
}
return syncedBlocks, pct, finish, speed, nil
return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
}
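
A quick sketch of the updated recovery-line parsing, with an illustrative /proc/mdstat line (the numbers are made up):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// The pattern now captures both halves of "(synced/total)" in one group.
	re := regexp.MustCompile(`\((\d+/\d+)\)`)
	line := "[===>........]  recovery = 25.0% (1024/4096) finish=3.5min speed=100000K/sec"
	m := re.FindStringSubmatch(line)
	blocks := strings.Split(m[1], "/")
	fmt.Println(blocks[0], blocks[1]) // 1024 4096
}
```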
func evalComponentDevices(deviceFields []string) []string {

View file

@ -88,7 +88,7 @@ type MountStatsNFS struct {
// Statistics broken down by filesystem operation.
Operations []NFSOperationStats
// Statistics about the NFS RPC transport.
Transport NFSTransportStats
Transport []NFSTransportStats
}
// mountStats implements MountStats.
@ -432,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
return nil, err
}
stats.Transport = *tstats
stats.Transport = append(stats.Transport, *tstats)
}
// When encountering "per-operation statistics", we must break this

View file

@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) {
return []string{}, nil
}
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
}
// Wchan returns the wchan (wait channel) of a process.

View file

@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
}
v := strings.TrimSpace(kv[1])
v = strings.TrimRight(v, " kB")
v = strings.TrimSuffix(v, " kB")
vKBytes, err := strconv.ParseUint(v, 10, 64)
if err != nil {

View file

@ -3,7 +3,7 @@ GOARCH ?= $(shell go env GOARCH)
GOOS_GOARCH := $(GOOS)_$(GOARCH)
GOOS_GOARCH_NATIVE := $(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
LIBZSTD_NAME := libzstd_$(GOOS_GOARCH).a
ZSTD_VERSION ?= v1.5.5
ZSTD_VERSION ?= v1.5.6
ZIG_BUILDER_IMAGE=euantorano/zig:0.10.1
BUILDER_IMAGE := local/builder_musl:2.0.0-$(shell echo $(ZIG_BUILDER_IMAGE) | tr : _ | tr / _)-1
@ -82,7 +82,7 @@ update-zstd:
cp zstd/lib/zstd_errors.h .
test:
CGO_ENABLED=1 GODEBUG=cgocheck=2 go test -v
CGO_ENABLED=1 GOEXPERIMENT=cgocheck2 go test -v
bench:
CGO_ENABLED=1 go test -bench=.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Some files were not shown because too many files have changed in this diff.