Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
commit 96160000e0
118 changed files with 3181 additions and 1358 deletions

README.md
@ -323,7 +323,7 @@ See also [vmagent](https://docs.victoriametrics.com/vmagent.html), which can be

## How to send data from DataDog agent

VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD]() via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.
VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/) via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.

Run DataDog agent with `DD_DD_URL=http://victoriametrics-host:8428/datadog` environment variable in order to write data to VictoriaMetrics at `victoriametrics-host` host. Another option is to set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://victoriametrics-host:8428/datadog`.
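For a quick check of this ingestion path without running the agent, a sample payload in the DataDog "submit metrics" v1 format can be POSTed directly. A minimal sketch (the metric name, timestamp, host and tags below are arbitrary placeholders):

```console
echo '{"series":[{"metric":"dd.test.metric","points":[[1660000000,1.23]],"type":"gauge","host":"test-host","tags":["env:dev"]}]}' \
  | curl -sS -X POST -H 'Content-Type: application/json' --data-binary @- \
  'http://victoriametrics-host:8428/datadog/api/v1/series'
```

The imported sample should then be visible via the usual query APIs, for example `/api/v1/export` with `match[]=dd.test.metric`.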
@ -1213,6 +1213,8 @@ By default VictoriaMetrics is tuned for an optimal resource usage under typical
- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. A bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries and to suspend additional incoming queries once the concurrency limit is reached. VictoriaMetrics provides the `-search.maxQueueDuration` command-line flag for limiting the maximum wait time for suspended queries. See the example command after this list.
- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
- `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
- `-search.maxPointsSubqueryPerTimeseries` limits the number of calculated points, which can be generated per each matching time series during [subquery](https://docs.victoriametrics.com/MetricsQL.html#subqueries) evaluation.
- `-search.maxSeries` limits the number of time series, which may be returned from [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers). This endpoint is used mostly by Grafana for auto-completion of metric names, label names and label values. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxSeries` to a quite low value in order to limit CPU and memory usage.
- `-search.maxTagKeys` limits the number of items, which may be returned from [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names). This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagKeys` to a quite low value in order to limit CPU and memory usage.
- `-search.maxTagValues` limits the number of items, which may be returned from [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values). This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage.
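
Several of the limits above can be combined when starting VictoriaMetrics. A minimal sketch (the flag values below are arbitrary examples and must be tuned to the actual workload and available memory):

```console
/path/to/victoria-metrics-prod \
  -search.maxConcurrentRequests=8 \
  -search.maxQueueDuration=30s \
  -search.maxSamplesPerQuery=500000000 \
  -search.maxSeries=10000 \
  -search.maxTagKeys=10000 \
  -search.maxTagValues=10000
```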
@ -1547,8 +1549,27 @@ Both limits can be set simultaneously. If any of these limits is reached, then i

The exceeded limits can be [monitored](#monitoring) with the following metrics:

* `vm_hourly_series_limit_rows_dropped_total` - the number of metrics dropped due to exceeded hourly limit on the number of unique time series.

* `vm_hourly_series_limit_max_series` - the hourly series limit set via `-storage.maxHourlySeries` command-line flag.

* `vm_hourly_series_limit_current_series` - the current number of unique series during the last hour.

The following query can be useful for alerting when the number of unique series during the last hour exceeds 90% of the `-storage.maxHourlySeries`:

```metricsql
vm_hourly_series_limit_current_series / vm_hourly_series_limit_max_series > 0.9
```

* `vm_daily_series_limit_rows_dropped_total` - the number of metrics dropped due to exceeded daily limit on the number of unique time series.

* `vm_daily_series_limit_max_series` - the daily series limit set via `-storage.maxDailySeries` command-line flag.

* `vm_daily_series_limit_current_series` - the current number of unique series during the last day.

The following query can be useful for alerting when the number of unique series during the last day exceeds 90% of the `-storage.maxDailySeries`:

```metricsql
vm_daily_series_limit_current_series / vm_daily_series_limit_max_series > 0.9
```

These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).
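
The current values of these metrics can be inspected on the `/metrics` page exposed by VictoriaMetrics. For example (the hostname is a placeholder):

```console
curl -s http://victoriametrics-host:8428/metrics | grep -E 'vm_(hourly|daily)_series_limit'
```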
See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
@ -1803,6 +1824,7 @@ curl http://0.0.0.0:8428/debug/pprof/profile > cpu.pprof

The command for collecting CPU profile waits for 30 seconds before returning.

The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
It is safe to share the collected profiles from a security point of view, since they do not contain sensitive information.
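
For example, the collected CPU profile can be explored in an interactive web UI with the following command (the listen port is an arbitrary choice):

```console
go tool pprof -http=:8081 cpu.pprof
```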
## Integrations
@ -2153,7 +2175,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li

  -search.maxLookback duration
     Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
  -search.maxPointsPerTimeseries int
     The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
     The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
  -search.maxPointsSubqueryPerTimeseries int
     The maximum number of points per series, which can be generated by subquery. See https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3 (default 100000)
  -search.maxQueryDuration duration
     The maximum duration for query execution (default 30s)
  -search.maxQueryLen size

@ -2225,9 +2249,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li

     Overrides max size for storage/tsid cache. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning
     Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
  -storage.maxDailySeries int
     The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See also -storage.maxHourlySeries
     The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/#cardinality-limiter . See also -storage.maxHourlySeries
  -storage.maxHourlySeries int
     The maximum number of unique series can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See also -storage.maxDailySeries
     The maximum number of unique series can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/#cardinality-limiter . See also -storage.maxDailySeries
  -storage.minFreeDiskSpaceBytes size
     The minimum free disk space at -storageDataPath after which the storage stops accepting new data
     Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 10000000)
@ -893,6 +893,7 @@ curl http://0.0.0.0:8429/debug/pprof/profile > cpu.pprof

The command for collecting CPU profile waits for 30 seconds before returning.

The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
It is safe to share the collected profiles from a security point of view, since they do not contain sensitive information.

## Advanced usage

@ -114,10 +114,12 @@ func main() {
		graphiteServer = graphiteserver.MustStart(*graphiteListenAddr, graphite.InsertHandler)
	}
	if len(*opentsdbListenAddr) > 0 {
		opentsdbServer = opentsdbserver.MustStart(*opentsdbListenAddr, opentsdb.InsertHandler, opentsdbhttp.InsertHandler)
		httpInsertHandler := getOpenTSDBHTTPInsertHandler()
		opentsdbServer = opentsdbserver.MustStart(*opentsdbListenAddr, opentsdb.InsertHandler, httpInsertHandler)
	}
	if len(*opentsdbHTTPListenAddr) > 0 {
		opentsdbhttpServer = opentsdbhttpserver.MustStart(*opentsdbHTTPListenAddr, opentsdbhttp.InsertHandler)
		httpInsertHandler := getOpenTSDBHTTPInsertHandler()
		opentsdbhttpServer = opentsdbhttpserver.MustStart(*opentsdbHTTPListenAddr, httpInsertHandler)
	}

	promscrape.Init(remotewrite.Push)

@ -159,6 +161,40 @@ func main() {
	logger.Infof("successfully stopped vmagent in %.3f seconds", time.Since(startTime).Seconds())
}

func getOpenTSDBHTTPInsertHandler() func(req *http.Request) error {
	if !remotewrite.MultitenancyEnabled() {
		return func(req *http.Request) error {
			path := strings.Replace(req.URL.Path, "//", "/", -1)
			if path != "/api/put" {
				return fmt.Errorf("unsupported path requested: %q; expecting '/api/put'", path)
			}
			return opentsdbhttp.InsertHandler(nil, req)
		}
	}
	return func(req *http.Request) error {
		path := strings.Replace(req.URL.Path, "//", "/", -1)
		at, err := getAuthTokenFromPath(path)
		if err != nil {
			return fmt.Errorf("cannot obtain auth token from path %q: %w", path, err)
		}
		return opentsdbhttp.InsertHandler(at, req)
	}
}

func getAuthTokenFromPath(path string) (*auth.Token, error) {
	p, err := httpserver.ParsePath(path)
	if err != nil {
		return nil, fmt.Errorf("cannot parse multitenant path: %w", err)
	}
	if p.Prefix != "insert" {
		return nil, fmt.Errorf(`unsupported multitenant prefix: %q; expected "insert"`, p.Prefix)
	}
	if p.Suffix != "opentsdb/api/put" {
		return nil, fmt.Errorf("unsupported path requested: %q; expecting 'opentsdb/api/put'", p.Suffix)
	}
	return auth.NewToken(p.AuthToken)
}

func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	if r.URL.Path == "/" {
		if r.Method != "GET" {
@ -5,6 +5,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdbhttp"

@ -19,19 +20,19 @@ var (

// InsertHandler processes HTTP OpenTSDB put requests.
// See http://opentsdb.net/docs/build/html/api_http/put.html
func InsertHandler(req *http.Request) error {
func InsertHandler(at *auth.Token, req *http.Request) error {
	extraLabels, err := parserCommon.GetExtraLabels(req)
	if err != nil {
		return err
	}
	return writeconcurrencylimiter.Do(func() error {
		return parser.ParseStream(req, func(rows []parser.Row) error {
			return insertRows(rows, extraLabels)
			return insertRows(at, rows, extraLabels)
		})
	})
}

func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
func insertRows(at *auth.Token, rows []parser.Row, extraLabels []prompbmarshal.Label) error {
	ctx := common.GetPushCtx()
	defer common.PutPushCtx(ctx)

@ -65,7 +66,7 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
	ctx.WriteRequest.Timeseries = tssDst
	ctx.Labels = labels
	ctx.Samples = samples
	remotewrite.Push(nil, &ctx.WriteRequest)
	remotewrite.Push(at, &ctx.WriteRequest)
	rowsInserted.Add(len(rows))
	rowsPerInsert.Update(float64(len(rows)))
	return nil
@ -630,6 +630,35 @@ Use the official [Grafana dashboard](https://grafana.com/grafana/dashboards/1495

If you have suggestions for improvements or have found a bug - please open an issue on github or add
a review to the dashboard.

## Profiling

`vmalert` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):

* Memory profile. It can be collected with the following command (replace `0.0.0.0` with hostname if needed):

<div class="with-copy" markdown="1">

```console
curl http://0.0.0.0:8880/debug/pprof/heap > mem.pprof
```

</div>

* CPU profile. It can be collected with the following command (replace `0.0.0.0` with hostname if needed):

<div class="with-copy" markdown="1">

```console
curl http://0.0.0.0:8880/debug/pprof/profile > cpu.pprof
```

</div>

The command for collecting CPU profile waits for 30 seconds before returning.

The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
It is safe to share the collected profiles from a security point of view, since they do not contain sensitive information.

## Configuration

### Flags

@ -217,6 +217,7 @@ curl http://0.0.0.0:8427/debug/pprof/profile > cpu.pprof

The command for collecting CPU profile waits for 30 seconds before returning.

The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
It is safe to share the collected profiles from a security point of view, since they do not contain sensitive information.

## Advanced usage

@ -52,6 +52,7 @@ func (ip *influxProcessor) run(silent, verbose bool) error {
|
|||
if err := barpool.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer barpool.Stop()
|
||||
|
||||
seriesCh := make(chan *influx.Series)
|
||||
errCh := make(chan error)
|
||||
|
@ -96,7 +97,7 @@ func (ip *influxProcessor) run(silent, verbose bool) error {
|
|||
for err := range errCh {
|
||||
return fmt.Errorf("import process failed: %s", err)
|
||||
}
|
||||
barpool.Stop()
|
||||
|
||||
log.Println("Import finished!")
|
||||
log.Print(ip.im.Stats())
|
||||
return nil
|
||||
|
|
|
@ -82,6 +82,9 @@ func (op *otsdbProcessor) run(silent, verbose bool) error {
|
|||
errCh := make(chan error)
|
||||
// we're going to make serieslist * queryRanges queries, so we should represent that in the progress bar
|
||||
bar := pb.StartNew(len(serieslist) * queryRanges)
|
||||
defer func(bar *pb.ProgressBar) {
|
||||
bar.Finish()
|
||||
}(bar)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(op.otsdbcc)
|
||||
for i := 0; i < op.otsdbcc; i++ {
|
||||
|
|
|
@ -43,6 +43,7 @@ func (pp *prometheusProcessor) run(silent, verbose bool) error {
|
|||
if err := barpool.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer barpool.Stop()
|
||||
|
||||
blockReadersCh := make(chan tsdb.BlockReader)
|
||||
errCh := make(chan error, pp.cc)
|
||||
|
@ -89,7 +90,7 @@ func (pp *prometheusProcessor) run(silent, verbose bool) error {
|
|||
for err := range errCh {
|
||||
return fmt.Errorf("import process failed: %s", err)
|
||||
}
|
||||
barpool.Stop()
|
||||
|
||||
log.Println("Import finished!")
|
||||
log.Print(pp.im.Stats())
|
||||
return nil
|
||||
|
|
|
@ -89,6 +89,7 @@ func (p *vmNativeProcessor) run(ctx context.Context) error {
|
|||
log.Printf("error start process bars pool: %s", err)
|
||||
return err
|
||||
}
|
||||
defer barpool.Stop()
|
||||
|
||||
w := io.Writer(pw)
|
||||
if p.rateLimit > 0 {
|
||||
|
@ -106,7 +107,6 @@ func (p *vmNativeProcessor) run(ctx context.Context) error {
|
|||
}
|
||||
<-sync
|
||||
|
||||
barpool.Stop()
|
||||
log.Println("Import finished!")
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -44,11 +44,14 @@ var (
|
|||
maxStepForPointsAdjustment = flag.Duration("search.maxStepForPointsAdjustment", time.Minute, "The maximum step when /api/v1/query_range handler adjusts "+
|
||||
"points with timestamps closer than -search.latencyOffset to the current time. The adjustment is needed because such points may contain incomplete data")
|
||||
|
||||
maxUniqueTimeseries = flag.Int("search.maxUniqueTimeseries", 300e3, "The maximum number of unique time series, which can be selected during /api/v1/query and /api/v1/query_range queries. This option allows limiting memory usage")
|
||||
maxFederateSeries = flag.Int("search.maxFederateSeries", 1e6, "The maximum number of time series, which can be returned from /federate. This option allows limiting memory usage")
|
||||
maxExportSeries = flag.Int("search.maxExportSeries", 10e6, "The maximum number of time series, which can be returned from /api/v1/export* APIs. This option allows limiting memory usage")
|
||||
maxTSDBStatusSeries = flag.Int("search.maxTSDBStatusSeries", 10e6, "The maximum number of time series, which can be processed during the call to /api/v1/status/tsdb. This option allows limiting memory usage")
|
||||
maxSeriesLimit = flag.Int("search.maxSeries", 30e3, "The maximum number of time series, which can be returned from /api/v1/series. This option allows limiting memory usage")
|
||||
maxUniqueTimeseries = flag.Int("search.maxUniqueTimeseries", 300e3, "The maximum number of unique time series, which can be selected during /api/v1/query and /api/v1/query_range queries. This option allows limiting memory usage")
|
||||
maxFederateSeries = flag.Int("search.maxFederateSeries", 1e6, "The maximum number of time series, which can be returned from /federate. This option allows limiting memory usage")
|
||||
maxExportSeries = flag.Int("search.maxExportSeries", 10e6, "The maximum number of time series, which can be returned from /api/v1/export* APIs. This option allows limiting memory usage")
|
||||
maxTSDBStatusSeries = flag.Int("search.maxTSDBStatusSeries", 10e6, "The maximum number of time series, which can be processed during the call to /api/v1/status/tsdb. This option allows limiting memory usage")
|
||||
maxSeriesLimit = flag.Int("search.maxSeries", 30e3, "The maximum number of time series, which can be returned from /api/v1/series. This option allows limiting memory usage")
|
||||
maxPointsPerTimeseries = flag.Int("search.maxPointsPerTimeseries", 30e3, "The maximum points per a single timeseries returned from /api/v1/query_range. "+
|
||||
"This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points "+
|
||||
"returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph")
|
||||
)
|
||||
|
||||
// Default step used if not set.
|
||||
|
@ -613,7 +616,12 @@ func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseW
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxSeriesLimit)
|
||||
|
||||
minLimit := *maxSeriesLimit
|
||||
if limit > 0 && limit < *maxSeriesLimit {
|
||||
minLimit = limit
|
||||
}
|
||||
sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, minLimit)
|
||||
metricNames, err := netstorage.SearchMetricNames(qt, sq, cp.deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
|
||||
|
@ -733,6 +741,7 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWr
|
|||
Start: start,
|
||||
End: start,
|
||||
Step: step,
|
||||
MaxPointsPerSeries: *maxPointsPerTimeseries,
|
||||
MaxSeries: *maxUniqueTimeseries,
|
||||
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
|
||||
Deadline: deadline,
|
||||
|
@ -818,7 +827,7 @@ func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
|
|||
if start > end {
|
||||
end = start + defaultStep
|
||||
}
|
||||
if err := promql.ValidateMaxPointsPerTimeseries(start, end, step); err != nil {
|
||||
if err := promql.ValidateMaxPointsPerSeries(start, end, step, *maxPointsPerTimeseries); err != nil {
|
||||
return err
|
||||
}
|
||||
if mayCache {
|
||||
|
@ -829,6 +838,7 @@ func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
|
|||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
MaxPointsPerSeries: *maxPointsPerTimeseries,
|
||||
MaxSeries: *maxUniqueTimeseries,
|
||||
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
|
||||
Deadline: deadline,
|
||||
|
|
|
@ -24,10 +24,9 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
disableCache = flag.Bool("search.disableCache", false, "Whether to disable response caching. This may be useful during data backfilling")
|
||||
maxPointsPerTimeseries = flag.Int("search.maxPointsPerTimeseries", 30e3, "The maximum points per a single timeseries returned from /api/v1/query_range. "+
|
||||
"This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points "+
|
||||
"returned to graphing UI such as Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph")
|
||||
disableCache = flag.Bool("search.disableCache", false, "Whether to disable response caching. This may be useful during data backfilling")
|
||||
maxPointsSubqueryPerTimeseries = flag.Int("search.maxPointsSubqueryPerTimeseries", 100e3, "The maximum number of points per series, which can be generated by subquery. "+
|
||||
"See https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3")
|
||||
noStaleMarkers = flag.Bool("search.noStaleMarkers", false, "Set this flag to true if the database doesn't contain Prometheus stale markers, so there is no need in spending additional CPU time on its handling. Staleness markers may exist only in data obtained from Prometheus scrape targets")
|
||||
)
|
||||
|
||||
|
@ -36,15 +35,15 @@ var (
|
|||
// big time ranges.
|
||||
const minTimeseriesPointsForTimeRounding = 50
|
||||
|
||||
// ValidateMaxPointsPerTimeseries checks the maximum number of points that
|
||||
// may be returned per each time series.
|
||||
//
|
||||
// The number mustn't exceed -search.maxPointsPerTimeseries.
|
||||
func ValidateMaxPointsPerTimeseries(start, end, step int64) error {
|
||||
// ValidateMaxPointsPerSeries validates that the number of points for the given start, end and step do not exceed maxPoints.
|
||||
func ValidateMaxPointsPerSeries(start, end, step int64, maxPoints int) error {
|
||||
if step == 0 {
|
||||
return fmt.Errorf("step can't be equal to zero")
|
||||
}
|
||||
points := (end-start)/step + 1
|
||||
if uint64(points) > uint64(*maxPointsPerTimeseries) {
|
||||
return fmt.Errorf(`too many points for the given step=%d, start=%d and end=%d: %d; cannot exceed -search.maxPointsPerTimeseries=%d`,
|
||||
step, start, end, uint64(points), *maxPointsPerTimeseries)
|
||||
if points > int64(maxPoints) {
|
||||
return fmt.Errorf("too many points for the given start=%d, end=%d and step=%d: %d; the maximum number of points is %d (see -search.maxPoints* command-line flags)",
|
||||
start, end, step, points, maxPoints)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -99,6 +98,9 @@ type EvalConfig struct {
|
|||
// Zero means 'no limit'
|
||||
MaxSeries int
|
||||
|
||||
// MaxPointsPerSeries is the limit on the number of points, which can be generated per each returned time series.
|
||||
MaxPointsPerSeries int
|
||||
|
||||
// QuotedRemoteAddr contains quoted remote address.
|
||||
QuotedRemoteAddr string
|
||||
|
||||
|
@ -127,6 +129,7 @@ func copyEvalConfig(src *EvalConfig) *EvalConfig {
|
|||
ec.End = src.End
|
||||
ec.Step = src.Step
|
||||
ec.MaxSeries = src.MaxSeries
|
||||
ec.MaxPointsPerSeries = src.MaxPointsPerSeries
|
||||
ec.Deadline = src.Deadline
|
||||
ec.MayCache = src.MayCache
|
||||
ec.LookbackDelta = src.LookbackDelta
|
||||
|
@ -174,10 +177,10 @@ func (ec *EvalConfig) getSharedTimestamps() []int64 {
|
|||
}
|
||||
|
||||
func (ec *EvalConfig) timestampsInit() {
|
||||
ec.timestamps = getTimestamps(ec.Start, ec.End, ec.Step)
|
||||
ec.timestamps = getTimestamps(ec.Start, ec.End, ec.Step, ec.MaxPointsPerSeries)
|
||||
}
|
||||
|
||||
func getTimestamps(start, end, step int64) []int64 {
|
||||
func getTimestamps(start, end, step int64, maxPointsPerSeries int) []int64 {
|
||||
// Sanity checks.
|
||||
if step <= 0 {
|
||||
logger.Panicf("BUG: Step must be bigger than 0; got %d", step)
|
||||
|
@ -185,7 +188,7 @@ func getTimestamps(start, end, step int64) []int64 {
|
|||
if start > end {
|
||||
logger.Panicf("BUG: Start cannot exceed End; got %d vs %d", start, end)
|
||||
}
|
||||
if err := ValidateMaxPointsPerTimeseries(start, end, step); err != nil {
|
||||
if err := ValidateMaxPointsPerSeries(start, end, step, maxPointsPerSeries); err != nil {
|
||||
logger.Panicf("BUG: %s; this must be validated before the call to getTimestamps", err)
|
||||
}
|
||||
|
||||
|
@ -309,7 +312,9 @@ func evalTransformFunc(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.Fun
|
|||
}
|
||||
rv, err := tf(tfa)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
|
||||
return nil, &UserReadableError{
|
||||
Err: fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err),
|
||||
}
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
@ -806,7 +811,8 @@ func evalRollupFuncWithSubquery(qt *querytracer.Tracer, ec *EvalConfig, funcName
|
|||
ecSQ.Start -= window + maxSilenceInterval + step
|
||||
ecSQ.End += step
|
||||
ecSQ.Step = step
|
||||
if err := ValidateMaxPointsPerTimeseries(ecSQ.Start, ecSQ.End, ecSQ.Step); err != nil {
|
||||
ecSQ.MaxPointsPerSeries = *maxPointsSubqueryPerTimeseries
|
||||
if err := ValidateMaxPointsPerSeries(ecSQ.Start, ecSQ.End, ecSQ.Step, ecSQ.MaxPointsPerSeries); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// unconditionally align start and end args to step for subquery as Prometheus does.
|
||||
|
@ -818,8 +824,8 @@ func evalRollupFuncWithSubquery(qt *querytracer.Tracer, ec *EvalConfig, funcName
|
|||
if len(tssSQ) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
sharedTimestamps := getTimestamps(ec.Start, ec.End, ec.Step)
|
||||
preFunc, rcs, err := getRollupConfigs(funcName, rf, expr, ec.Start, ec.End, ec.Step, window, ec.LookbackDelta, sharedTimestamps)
|
||||
sharedTimestamps := getTimestamps(ec.Start, ec.End, ec.Step, ec.MaxPointsPerSeries)
|
||||
preFunc, rcs, err := getRollupConfigs(funcName, rf, expr, ec.Start, ec.End, ec.Step, ec.MaxPointsPerSeries, window, ec.LookbackDelta, sharedTimestamps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -956,8 +962,8 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
|
|||
|
||||
// Obtain rollup configs before fetching data from db,
|
||||
// so type errors can be caught earlier.
|
||||
sharedTimestamps := getTimestamps(start, ec.End, ec.Step)
|
||||
preFunc, rcs, err := getRollupConfigs(funcName, rf, expr, start, ec.End, ec.Step, window, ec.LookbackDelta, sharedTimestamps)
|
||||
sharedTimestamps := getTimestamps(start, ec.End, ec.Step, ec.MaxPointsPerSeries)
|
||||
preFunc, rcs, err := getRollupConfigs(funcName, rf, expr, start, ec.End, ec.Step, ec.MaxPointsPerSeries, window, ec.LookbackDelta, sharedTimestamps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -48,3 +48,31 @@ m2{b="bar"} 1`, `{}`)
|
|||
f(`m1{a="foo",b="bar"} 1
|
||||
m2{b="bar",c="x"} 1`, `{b="bar"}`)
|
||||
}
|
||||
|
||||
func TestValidateMaxPointsPerSeriesFailure(t *testing.T) {
|
||||
f := func(start, end, step int64, maxPoints int) {
|
||||
t.Helper()
|
||||
if err := ValidateMaxPointsPerSeries(start, end, step, maxPoints); err == nil {
|
||||
t.Fatalf("expecting non-nil error for ValidateMaxPointsPerSeries(start=%d, end=%d, step=%d, maxPoints=%d)", start, end, step, maxPoints)
|
||||
}
|
||||
}
|
||||
// zero step
|
||||
f(0, 0, 0, 0)
|
||||
f(0, 0, 0, 1)
|
||||
// the maxPoints is smaller than the generated points
|
||||
f(0, 1, 1, 0)
|
||||
f(0, 1, 1, 1)
|
||||
f(1659962171908, 1659966077742, 5000, 700)
|
||||
}
|
||||
|
||||
func TestValidateMaxPointsPerSeriesSuccess(t *testing.T) {
|
||||
f := func(start, end, step int64, maxPoints int) {
|
||||
t.Helper()
|
||||
if err := ValidateMaxPointsPerSeries(start, end, step, maxPoints); err != nil {
|
||||
t.Fatalf("unexpected error in ValidateMaxPointsPerSeries(start=%d, end=%d, step=%d, maxPoints=%d): %s", start, end, step, maxPoints, err)
|
||||
}
|
||||
}
|
||||
f(1, 1, 1, 2)
|
||||
f(1659962171908, 1659966077742, 5000, 800)
|
||||
f(1659962150000, 1659966070000, 10000, 393)
|
||||
}
|
||||
|
|
|
@ -58,12 +58,13 @@ func TestExecSuccess(t *testing.T) {
|
|||
f := func(q string, resultExpected []netstorage.Result) {
|
||||
t.Helper()
|
||||
ec := &EvalConfig{
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
MaxSeries: 1000,
|
||||
Deadline: searchutils.NewDeadline(time.Now(), time.Minute, ""),
|
||||
RoundDigits: 100,
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
MaxSeries: 1000,
|
||||
Deadline: searchutils.NewDeadline(time.Now(), time.Minute, ""),
|
||||
RoundDigits: 100,
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
result, err := Exec(nil, ec, q, false)
|
||||
|
@ -7743,12 +7744,13 @@ func TestExecError(t *testing.T) {
|
|||
f := func(q string) {
|
||||
t.Helper()
|
||||
ec := &EvalConfig{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Step: 100,
|
||||
MaxSeries: 1000,
|
||||
Deadline: searchutils.NewDeadline(time.Now(), time.Minute, ""),
|
||||
RoundDigits: 100,
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Step: 100,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
MaxSeries: 1000,
|
||||
Deadline: searchutils.NewDeadline(time.Now(), time.Minute, ""),
|
||||
RoundDigits: 100,
|
||||
}
|
||||
for i := 0; i < 4; i++ {
|
||||
rv, err := Exec(nil, ec, q, false)
|
||||
|
@ -7930,6 +7932,7 @@ func TestExecError(t *testing.T) {
|
|||
f(`limit_offet(1, (alias(1,"foo"),alias(2,"bar")), 10)`)
|
||||
f(`round(1, 1 or label_set(2, "xx", "foo"))`)
|
||||
f(`histogram_quantile(1 or label_set(2, "xx", "foo"), 1)`)
|
||||
f(`histogram_quantiles("foo", 1 or label_set(2, "xxx", "foo"), 2)`)
|
||||
f(`label_set(1, 2, 3)`)
|
||||
f(`label_set(1, "foo", (label_set(1, "foo", bar") or label_set(2, "xxx", "yy")))`)
|
||||
f(`label_set(1, "foo", 3)`)
|
||||
|
|
|
@ -248,7 +248,7 @@ func getRollupAggrFuncNames(expr metricsql.Expr) ([]string, error) {
|
|||
return aggrFuncNames, nil
|
||||
}
|
||||
|
||||
func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, end, step, window int64, lookbackDelta int64, sharedTimestamps []int64) (
|
||||
func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, end, step int64, maxPointsPerSeries int, window, lookbackDelta int64, sharedTimestamps []int64) (
|
||||
func(values []float64, timestamps []int64), []*rollupConfig, error) {
|
||||
preFunc := func(values []float64, timestamps []int64) {}
|
||||
if rollupFuncsRemoveCounterResets[name] {
|
||||
|
@ -258,12 +258,15 @@ func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, en
|
|||
}
|
||||
newRollupConfig := func(rf rollupFunc, tagValue string) *rollupConfig {
|
||||
return &rollupConfig{
|
||||
TagValue: tagValue,
|
||||
Func: rf,
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
Window: window,
|
||||
TagValue: tagValue,
|
||||
Func: rf,
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
Window: window,
|
||||
|
||||
MaxPointsPerSeries: maxPointsPerSeries,
|
||||
|
||||
MayAdjustWindow: rollupFuncsCanAdjustWindow[name],
|
||||
LookbackDelta: lookbackDelta,
|
||||
Timestamps: sharedTimestamps,
|
||||
|
@ -400,6 +403,9 @@ type rollupConfig struct {
|
|||
Step int64
|
||||
Window int64
|
||||
|
||||
// The maximum number of points, which can be generated per each series.
|
||||
MaxPointsPerSeries int
|
||||
|
||||
// Whether window may be adjusted to 2 x interval between data points.
|
||||
// This is needed for functions which have dt in the denominator
|
||||
// such as rate, deriv, etc.
|
||||
|
@ -416,6 +422,10 @@ type rollupConfig struct {
|
|||
isDefaultRollup bool
|
||||
}
|
||||
|
||||
func (rc *rollupConfig) getTimestamps() []int64 {
|
||||
return getTimestamps(rc.Start, rc.End, rc.Step, rc.MaxPointsPerSeries)
|
||||
}
|
||||
|
||||
func (rc *rollupConfig) String() string {
|
||||
start := storage.TimestampToHumanReadableFormat(rc.Start)
|
||||
end := storage.TimestampToHumanReadableFormat(rc.End)
|
||||
|
@ -513,7 +523,7 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
|
|||
if rc.Window < 0 {
|
||||
logger.Panicf("BUG: Window must be non-negative; got %d", rc.Window)
|
||||
}
|
||||
if err := ValidateMaxPointsPerTimeseries(rc.Start, rc.End, rc.Step); err != nil {
|
||||
if err := ValidateMaxPointsPerSeries(rc.Start, rc.End, rc.Step, rc.MaxPointsPerSeries); err != nil {
|
||||
logger.Panicf("BUG: %s; this must be validated before the call to rollupConfig.Do", err)
|
||||
}
|
||||
|
||||
|
|
|
@ -33,9 +33,10 @@ func TestRollupResultCache(t *testing.T) {
|
|||
ResetRollupResultCache()
|
||||
window := int64(456)
|
||||
ec := &EvalConfig{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Step: 200,
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Step: 200,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
|
||||
MayCache: true,
|
||||
}
|
||||
|
@ -291,9 +292,10 @@ func TestRollupResultCache(t *testing.T) {
|
|||
|
||||
func TestMergeTimeseries(t *testing.T) {
|
||||
ec := &EvalConfig{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Step: 200,
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Step: 200,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
bStart := int64(1400)
|
||||
|
||||
|
|
|
@ -578,13 +578,14 @@ func TestRollupNewRollupFuncError(t *testing.T) {
|
|||
func TestRollupNoWindowNoPoints(t *testing.T) {
|
||||
t.Run("beforeStart", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 4,
|
||||
Step: 1,
|
||||
Window: 0,
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 4,
|
||||
Step: 1,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned != 0 {
|
||||
t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
|
@ -595,13 +596,14 @@ func TestRollupNoWindowNoPoints(t *testing.T) {
|
|||
})
|
||||
t.Run("afterEnd", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupDelta,
|
||||
Start: 120,
|
||||
End: 148,
|
||||
Step: 4,
|
||||
Window: 0,
|
||||
Func: rollupDelta,
|
||||
Start: 120,
|
||||
End: 148,
|
||||
Step: 4,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -615,13 +617,14 @@ func TestRollupNoWindowNoPoints(t *testing.T) {
|
|||
func TestRollupWindowNoPoints(t *testing.T) {
|
||||
t.Run("beforeStart", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 4,
|
||||
Step: 1,
|
||||
Window: 3,
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 4,
|
||||
Step: 1,
|
||||
Window: 3,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned != 0 {
|
||||
t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
|
@ -632,13 +635,14 @@ func TestRollupWindowNoPoints(t *testing.T) {
|
|||
})
|
||||
t.Run("afterEnd", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 161,
|
||||
End: 191,
|
||||
Step: 10,
|
||||
Window: 3,
|
||||
Func: rollupFirst,
|
||||
Start: 161,
|
||||
End: 191,
|
||||
Step: 10,
|
||||
Window: 3,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned != 0 {
|
||||
t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
|
@ -652,13 +656,14 @@ func TestRollupWindowNoPoints(t *testing.T) {
|
|||
func TestRollupNoWindowPartialPoints(t *testing.T) {
|
||||
t.Run("beforeStart", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 25,
|
||||
Step: 5,
|
||||
Window: 0,
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 25,
|
||||
Step: 5,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -669,13 +674,14 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
|
|||
})
|
||||
t.Run("afterEnd", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 100,
|
||||
End: 160,
|
||||
Step: 20,
|
||||
Window: 0,
|
||||
Func: rollupFirst,
|
||||
Start: 100,
|
||||
End: 160,
|
||||
Step: 20,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -686,13 +692,14 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
|
|||
})
|
||||
t.Run("middle", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: -50,
|
||||
End: 150,
|
||||
Step: 50,
|
||||
Window: 0,
|
||||
Func: rollupFirst,
|
||||
Start: -50,
|
||||
End: 150,
|
||||
Step: 50,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -706,13 +713,14 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
|
|||
func TestRollupWindowPartialPoints(t *testing.T) {
|
||||
t.Run("beforeStart", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupLast,
|
||||
Start: 0,
|
||||
End: 20,
|
||||
Step: 5,
|
||||
Window: 8,
|
||||
Func: rollupLast,
|
||||
Start: 0,
|
||||
End: 20,
|
||||
Step: 5,
|
||||
Window: 8,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -723,13 +731,14 @@ func TestRollupWindowPartialPoints(t *testing.T) {
|
|||
})
|
||||
t.Run("afterEnd", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupLast,
|
||||
Start: 100,
|
||||
End: 160,
|
||||
Step: 20,
|
||||
Window: 18,
|
||||
Func: rollupLast,
|
||||
Start: 100,
|
||||
End: 160,
|
||||
Step: 20,
|
||||
Window: 18,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -740,13 +749,14 @@ func TestRollupWindowPartialPoints(t *testing.T) {
|
|||
})
|
||||
t.Run("middle", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupLast,
|
||||
Start: 0,
|
||||
End: 150,
|
||||
Step: 50,
|
||||
Window: 19,
|
||||
Func: rollupLast,
|
||||
Start: 0,
|
||||
End: 150,
|
||||
Step: 50,
|
||||
Window: 19,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -760,13 +770,14 @@ func TestRollupWindowPartialPoints(t *testing.T) {
|
|||
func TestRollupFuncsLookbackDelta(t *testing.T) {
|
||||
t.Run("1", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 80,
|
||||
End: 140,
|
||||
Step: 10,
|
||||
LookbackDelta: 1,
|
||||
Func: rollupFirst,
|
||||
Start: 80,
|
||||
End: 140,
|
||||
Step: 10,
|
||||
LookbackDelta: 1,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -777,13 +788,14 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
|
|||
})
|
||||
t.Run("7", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 80,
|
||||
End: 140,
|
||||
Step: 10,
|
||||
LookbackDelta: 7,
|
||||
Func: rollupFirst,
|
||||
Start: 80,
|
||||
End: 140,
|
||||
Step: 10,
|
||||
LookbackDelta: 7,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -794,13 +806,14 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
|
|||
})
|
||||
t.Run("0", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 80,
|
||||
End: 140,
|
||||
Step: 10,
|
||||
LookbackDelta: 0,
|
||||
Func: rollupFirst,
|
||||
Start: 80,
|
||||
End: 140,
|
||||
Step: 10,
|
||||
LookbackDelta: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -814,13 +827,14 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
|
|||
func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
t.Run("first", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -831,13 +845,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
|||
})
|
||||
t.Run("count", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupCount,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
Func: rollupCount,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -848,13 +863,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
|||
})
|
||||
t.Run("min", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupMin,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
Func: rollupMin,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -865,13 +881,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
|||
})
|
||||
t.Run("max", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupMax,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
Func: rollupMax,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -882,13 +899,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
|||
})
|
||||
t.Run("sum", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupSum,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
Func: rollupSum,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -899,13 +917,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
|||
})
|
||||
t.Run("delta", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupDelta,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
Func: rollupDelta,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -916,13 +935,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
|||
})
|
||||
t.Run("delta_prometheus", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupDeltaPrometheus,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
Func: rollupDeltaPrometheus,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -933,13 +953,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
|||
})
|
||||
t.Run("idelta", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupIdelta,
|
||||
Start: 10,
|
||||
End: 130,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
Func: rollupIdelta,
|
||||
Start: 10,
|
||||
End: 130,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -950,13 +971,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
|||
})
|
||||
t.Run("lag", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupLag,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
Func: rollupLag,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
|
@ -967,13 +989,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
|||
})
|
||||
t.Run("lifetime_1", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupLifetime,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
Func: rollupLifetime,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
MaxPointsPerSeries: 1e4,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
rc.Timestamps = rc.getTimestamps()
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -984,13 +1007,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("lifetime_2", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupLifetime,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 200,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1001,13 +1025,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("scrape_interval_1", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupScrapeInterval,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1018,13 +1043,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("scrape_interval_2", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupScrapeInterval,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 80,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1035,13 +1061,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("changes", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupChanges,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1052,13 +1079,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("changes_prometheus", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupChangesPrometheus,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1069,13 +1097,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("changes_small_window", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupChanges,
            Start: 0,
            End: 45,
            Step: 9,
            Window: 9,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1086,13 +1115,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("resets", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupResets,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1103,13 +1133,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("avg", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupAvg,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1120,13 +1151,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("deriv", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupDerivSlow,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1137,13 +1169,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("deriv_fast", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupDerivFast,
            Start: 0,
            End: 20,
            Step: 4,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1154,13 +1187,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("ideriv", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupIderiv,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1171,13 +1205,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("stddev", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupStddev,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1188,13 +1223,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("integrate", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupIntegrate,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1205,13 +1241,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("distinct_over_time_1", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupDistinct,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 0,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1222,13 +1259,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("distinct_over_time_2", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupDistinct,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 80,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1239,13 +1277,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("mode_over_time", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupModeOverTime,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 80,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1256,13 +1295,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("rate_over_sum", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupRateOverSum,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 80,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1273,13 +1313,14 @@ func TestRollupFuncsNoWindow(t *testing.T) {
    })
    t.Run("zscore_over_time", func(t *testing.T) {
        rc := rollupConfig{
            Func: rollupZScoreOverTime,
            Start: 0,
            End: 160,
            Step: 40,
            Window: 80,
+           MaxPointsPerSeries: 1e4,
        }
-       rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+       rc.Timestamps = rc.getTimestamps()
        values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
        if samplesScanned == 0 {
            t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")

@@ -1293,12 +1334,13 @@ func TestRollupFuncsNoWindow(t *testing.T) {
func TestRollupBigNumberOfValues(t *testing.T) {
    const srcValuesCount = 1e4
    rc := rollupConfig{
        Func: rollupDefault,
        End: srcValuesCount,
        Step: srcValuesCount / 5,
        Window: srcValuesCount / 4,
+       MaxPointsPerSeries: 1e4,
    }
-   rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+   rc.Timestamps = rc.getTimestamps()
    srcValues := make([]float64, srcValuesCount)
    srcTimestamps := make([]int64, srcValuesCount)
    for i := 0; i < srcValuesCount; i++ {
@@ -825,8 +825,12 @@ func transformHistogramQuantiles(tfa *transformFuncArg) ([]*timeseries, error) {
    tssOrig := args[len(args)-1]
    // Calculate quantile individually per each phi.
    var rvs []*timeseries
-   for _, phiArg := range phiArgs {
-       phiStr := fmt.Sprintf("%g", phiArg[0].Values[0])
+   for i, phiArg := range phiArgs {
+       phis, err := getScalar(phiArg, i)
+       if err != nil {
+           return nil, fmt.Errorf("cannot parse phi: %w", err)
+       }
+       phiStr := fmt.Sprintf("%g", phis[0])
        tss := copyTimeseries(tssOrig)
        tfaTmp := &transformFuncArg{
            ec: tfa.ec,
@@ -50,9 +50,11 @@ var (
        "When set, then /api/v1/query_range would return '503 Service Unavailable' error for queries with 'from' value outside -retentionPeriod. "+
        "This may be useful when multiple data sources with distinct retentions are hidden behind query-tee")
    maxHourlySeries = flag.Int("storage.maxHourlySeries", 0, "The maximum number of unique series can be added to the storage during the last hour. "+
-       "Excess series are logged and dropped. This can be useful for limiting series cardinality. See also -storage.maxDailySeries")
+       "Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/#cardinality-limiter . "+
+       "See also -storage.maxDailySeries")
    maxDailySeries = flag.Int("storage.maxDailySeries", 0, "The maximum number of unique series can be added to the storage during the last 24 hours. "+
-       "Excess series are logged and dropped. This can be useful for limiting series churn rate. See also -storage.maxHourlySeries")
+       "Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/#cardinality-limiter . "+
+       "See also -storage.maxHourlySeries")

    minFreeDiskSpaceBytes = flagutil.NewBytes("storage.minFreeDiskSpaceBytes", 10e6, "The minimum free disk space at -storageDataPath after which the storage stops accepting new data")
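For illustration, both limits controlled by the flags above can be enabled on a single-node instance. This is a minimal sketch: only the flag names come from the change above, while the binary path and the limit values are arbitrary placeholders:

```console
# drop excess series once more than 1M unique series are seen per hour
# or more than 5M unique series are seen per day (example values)
/path/to/victoria-metrics-prod -storage.maxHourlySeries=1000000 -storage.maxDailySeries=5000000
```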
@@ -630,12 +632,29 @@ func registerStorageMetrics(strg *storage.Storage) {
        return float64(m().SlowMetricNameLoads)
    })

-   metrics.NewGauge(`vm_hourly_series_limit_rows_dropped_total`, func() float64 {
-       return float64(m().HourlySeriesLimitRowsDropped)
-   })
-   metrics.NewGauge(`vm_daily_series_limit_rows_dropped_total`, func() float64 {
-       return float64(m().DailySeriesLimitRowsDropped)
-   })
+   if *maxHourlySeries > 0 {
+       metrics.NewGauge(`vm_hourly_series_limit_current_series`, func() float64 {
+           return float64(m().HourlySeriesLimitCurrentSeries)
+       })
+       metrics.NewGauge(`vm_hourly_series_limit_max_series`, func() float64 {
+           return float64(m().HourlySeriesLimitMaxSeries)
+       })
+       metrics.NewGauge(`vm_hourly_series_limit_rows_dropped_total`, func() float64 {
+           return float64(m().HourlySeriesLimitRowsDropped)
+       })
+   }
+
+   if *maxDailySeries > 0 {
+       metrics.NewGauge(`vm_daily_series_limit_current_series`, func() float64 {
+           return float64(m().DailySeriesLimitCurrentSeries)
+       })
+       metrics.NewGauge(`vm_daily_series_limit_max_series`, func() float64 {
+           return float64(m().DailySeriesLimitMaxSeries)
+       })
+       metrics.NewGauge(`vm_daily_series_limit_rows_dropped_total`, func() float64 {
+           return float64(m().DailySeriesLimitRowsDropped)
+       })
+   }

    metrics.NewGauge(`vm_timestamps_blocks_merged_total`, func() float64 {
        return float64(m().TimestampsBlocksMerged)
@@ -6,7 +6,7 @@ COPY web/ /build/
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
    GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/

-FROM alpine:3.16.1
+FROM alpine:3.16.2
USER root

COPY --from=build-web-stage /build/web-amd64 /app/web
@@ -2,8 +2,8 @@

DOCKER_NAMESPACE := victoriametrics

-ROOT_IMAGE ?= alpine:3.16.1
-CERTS_IMAGE := alpine:3.16.1
+ROOT_IMAGE ?= alpine:3.16.2
+CERTS_IMAGE := alpine:3.16.2
GO_BUILDER_IMAGE := golang:1.19.0-alpine
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
@@ -4,21 +4,16 @@

1. To build the snapshot in DigitalOcean account you will need an API Token and [packer](https://learn.hashicorp.com/tutorials/packer/get-started-install-cli).
2. API Token can be generated on [https://cloud.digitalocean.com/account/api/tokens](https://cloud.digitalocean.com/account/api/tokens) or use the one already generated in OnePassword.
-3. Set variable `DIGITALOCEAN_API_TOKEN` for environment:
+3. Choose the preferred version of VictoriaMetrics on the [Github releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page.
+4. Set the variables `DIGITALOCEAN_API_TOKEN` and `VM_VERSION` for the `packer` environment and run make as in the example below:

```console
-export DIGITALOCEAN_API_TOKEN="your_token_here"
-```
-
-or set it by with make:
-
-```console
-make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="your_token_here"
+make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="your_token_here" VM_VERSION="preferred_release_version"
```

## Release guide for DigitalOcean Kubernetes 1-Click App

-## Submit a pull request
+### Submit a pull request

1. Fork [https://github.com/digitalocean/marketplace-kubernetes](https://github.com/digitalocean/marketplace-kubernetes).
2. Apply changes to vmagent.yaml and vmcluster.yaml in https://github.com/digitalocean/marketplace-kubernetes/tree/master/stacks/victoria-metrics-cluster/yaml .
@@ -3,7 +3,7 @@

# Wait for cloud-init
cloud-init status --wait

-wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/${VM_VER}/victoria-metrics-amd64-${VM_VER}.tar.gz -O /tmp/victoria-metrics.tar.gz
+wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/${VM_VERSION}/victoria-metrics-linux-amd64-${VM_VERSION}.tar.gz -O /tmp/victoria-metrics.tar.gz
tar xvf /tmp/victoria-metrics.tar.gz -C /usr/bin
chmod +x /usr/bin/victoria-metrics-prod
chown root:root /usr/bin/victoria-metrics-prod
@@ -1,73 +0,0 @@
-{
-  "variables": {
-    "do_api_token": "{{env `DIGITALOCEAN_API_TOKEN`}}",
-    "image_name": "vm-single-20-04-snapshot-{{timestamp}}",
-    "apt_packages": "curl git wget software-properties-common net-tools",
-    "application_name": "vm-single",
-    "application_version": "{{ env `VM_VERSION` }}"
-  },
-  "sensitive-variables": ["do_api_token"],
-  "builders": [
-    {
-      "type": "digitalocean",
-      "api_token": "{{user `do_api_token`}}",
-      "image": "ubuntu-20-04-x64",
-      "region": "nyc3",
-      "size": "s-1vcpu-1gb",
-      "ssh_username": "root",
-      "snapshot_name": "{{user `image_name`}}"
-    }
-  ],
-  "provisioners": [
-    {
-      "type": "shell",
-      "inline": [
-        "cloud-init status --wait"
-      ]
-    },
-    {
-      "type": "file",
-      "source": "files/etc/",
-      "destination": "/etc/"
-    },
-    {
-      "type": "file",
-      "source": "files/var/",
-      "destination": "/var/"
-    },
-    {
-      "type": "shell",
-      "environment_vars": [
-        "DEBIAN_FRONTEND=noninteractive",
-        "LC_ALL=C",
-        "LANG=en_US.UTF-8",
-        "LC_CTYPE=en_US.UTF-8"
-      ],
-      "inline": [
-        "apt -qqy update",
-        "apt -qqy -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' install {{user `apt_packages`}}",
-        "apt-get -qqy clean"
-      ]
-    },
-    {
-      "type": "shell",
-      "environment_vars": [
-        "application_name={{user `application_name`}}",
-        "application_version={{user `application_version`}}",
-        "DEBIAN_FRONTEND=noninteractive",
-        "LC_ALL=C",
-        "LANG=en_US.UTF-8",
-        "LC_CTYPE=en_US.UTF-8"
-      ],
-      "scripts": [
-        "scripts/01-setup.sh",
-        "scripts/02-firewall.sh",
-        "scripts/04-install-victoriametrics.sh",
-        "scripts/89-cleanup-logs.sh",
-        "scripts/90-cleanup.sh",
-        "scripts/99-img-check.sh"
-      ]
-    }
-  ]
-}
@@ -58,7 +58,7 @@ build {

  # Install VictoriaMetrics
  provisioner "shell" {
    environment_vars = [
-     "VM_VER=${var.victoriametrics_version}",
+     "VM_VERSION=${var.victoriametrics_version}",
      "DEBIAN_FRONTEND=noninteractive"
    ]
    scripts = [
@@ -20,8 +20,14 @@ The following tip changes can be tested by building VictoriaMetrics components f
**Update note 2:** [vmalert](https://docs.victoriametrics.com/vmalert.html) by default points alert source url to `/vmalert/alert?...` aka [web UI](https://docs.victoriametrics.com/vmalert.html#web) instead of `/vmalert/api/v1/alert?...` aka JSON handler. The old behavior can be achieved by setting {% raw %}`-external.alert.source=vmalert/api/v1/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}`{% endraw %} command-line flag.

* SECURITY: [vmalert](https://docs.victoriametrics.com/vmalert.html): do not expose `-remoteWrite.url`, `-remoteRead.url` and `-datasource.url` command-line flag values in logs and at `http://vmalert:8880/flags` page by default, since they may contain sensitive data such as auth keys. This aligns `vmalert` behaviour with [vmagent](https://docs.victoriametrics.com/vmagent.html), which doesn't expose `-remoteWrite.url` command-line flag value in logs and at `http://vmagent:8429/flags` page by default. Specify `-remoteWrite.showURL`, `-remoteRead.showURL` and `-datasource.showURL` command-line flags for showing values for the corresponding `-*.url` flags in logs. Thanks to @mble for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2965).
+* SECURITY: upgrade base docker image (alpine) from 3.16.1 to 3.16.2. See [alpine 3.16.2 release notes](https://alpinelinux.org/posts/Alpine-3.13.12-3.14.8-3.15.6-3.16.2-released.html).

* FEATURE: return shorter error messages to Grafana and to other clients requesting [/api/v1/query](https://docs.victoriametrics.com/keyConcepts.html#instant-query) and [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) endpoints. This should simplify reading these errors by humans. The long error message with full context is still written to logs.
+* FEATURE: add the ability to fine-tune the number of points, which can be generated per each matching time series during [subquery](https://docs.victoriametrics.com/MetricsQL.html#subqueries) evaluation. This can be done with the `-search.maxPointsSubqueryPerTimeseries` command-line flag. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2922).
+* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): improve the performance for relabeling rules with commonly used regular expressions in `regex` and `if` fields such as `some_string`, `prefix.*`, `prefix.+`, `foo|bar|baz`, `.*foo.*` and `.+foo.+`.
+* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): reduce CPU usage when discovering big number of [Kubernetes targets](https://docs.victoriametrics.com/sd_configs.html#kubernetes_sd_configs) with big number of labels and annotations.
+* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add ability to accept [multitenant](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy) data via OpenTSDB `/api/put` protocol at `/insert/<tenantID>/opentsdb/api/put` http endpoint if [multitenant support](https://docs.victoriametrics.com/vmagent.html#multitenancy) is enabled at `vmagent`. Thanks to @chengjianyun for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3015).
+* FEATURE: [monitoring](https://docs.victoriametrics.com/#monitoring): expose `vm_hourly_series_limit_max_series`, `vm_hourly_series_limit_current_series`, `vm_daily_series_limit_max_series` and `vm_daily_series_limit_current_series` metrics when `-storage.maxHourlySeries` or `-storage.maxDailySeries` limits are set. This allows alerting when the number of unique series reaches the configured limits. See [these docs](https://docs.victoriametrics.com/#cardinality-limiter) for details.
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): reduce the amounts of logging at `vmstorage` when `vmselect` connects/disconnects to `vmstorage`.
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve performance for heavy queries on systems with many CPU cores.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add ability to use {% raw %}`{{label_name}}`{% endraw %} placeholders in the `replacement` option of relabeling rules. This simplifies constructing label values from multiple existing label values. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling-enhancements) for details.
@@ -33,12 +39,13 @@ The following tip changes can be tested by building VictoriaMetrics components f
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add a legend in the top right corner for shortcut keys. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2813).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `toTime()` template function in the same way as Prometheus 2.38 [does](https://github.com/prometheus/prometheus/pull/10993). See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/template_reference/#numbers).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `$alertID` and `$groupID` template variables. These variables may be used for templating annotations or `-external.alert.source` command-line flag. See the full list of supported variables [here](https://docs.victoriametrics.com/vmalert.html#templating).
-* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `$activeAt` template variable. See more details [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2999). See the full list of supported variables [here](https://docs.victoriametrics.com/vmalert.html#templating). Thanks to @laixintao.
+* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `$activeAt` template variable. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2999). See the full list of supported variables [here](https://docs.victoriametrics.com/vmalert.html#templating). Thanks to @laixintao for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3000).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): point alert source to [vmalert's UI](https://docs.victoriametrics.com/vmalert.html#web) at `/vmalert/alert?...` instead of JSON handler at `/vmalert/api/v1/alert?...`. This improves user experience. The old behavior can be achieved by setting {% raw %}`-external.alert.source=vmalert/api/v1/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}`{% endraw %} command-line flag.

* BUGFIX: prevent from excess CPU usage when the storage enters [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
* BUGFIX: improve performance for requests to [/api/v1/labels](https://docs.victoriametrics.com/url-examples.html#apiv1labels) and [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples.html#apiv1labelvalues) when the filter in the `match[]` query arg matches small number of time series. The performance for this case has been reduced in [v1.78.0](https://docs.victoriametrics.com/CHANGELOG.html#v1780). See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1533) issues.
* BUGFIX: increase the default limit on the number of concurrent merges for small parts from 8 to 16. This should help resolving potential issues with heavy data ingestion. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2673#issuecomment-1218185978) from @lukepalmer .
+* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): fix panic when incorrect arg is passed as `phi` into [histogram_quantiles](https://docs.victoriametrics.com/MetricsQL.html#histogram_quantiles) function. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3026).

## [v1.80.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.80.0)
@@ -193,6 +193,17 @@ or [an alternative dashboard for VictoriaMetrics cluster](https://grafana.com/gr
It is recommended setting up alerts in [vmalert](https://docs.victoriametrics.com/vmalert.html) or in Prometheus from [this config](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/cluster/deployment/docker/alerts.yml).

+## Cardinality limiter
+
+`vmstorage` nodes can be configured with limits on the number of unique time series across all the tenants with the following command-line flags:
+
+- `-storage.maxHourlySeries` is the limit on the number of [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series) during the last hour.
+- `-storage.maxDailySeries` is the limit on the number of unique time series during the day. This limit can be used for limiting daily [time series churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+
+Note that these limits are set and applied individually per each `vmstorage` node in the cluster. So, if the cluster has `N` `vmstorage` nodes, then the cluster-level limits will be `N` times bigger than the per-`vmstorage` limits.
+
+See more details about cardinality limiter in [these docs](https://docs.victoriametrics.com/#cardinality-limiter).
+
## Troubleshooting

See [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html).
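As a rough illustration of the per-node limits described above, each `vmstorage` node could be started with its own limit values; the binary path and the numbers below are placeholders, only the flag names come from the docs:

```console
# With N vmstorage nodes, the effective cluster-wide limits are roughly
# N times these per-node values.
/path/to/vmstorage-prod -storage.maxHourlySeries=500000 -storage.maxDailySeries=2000000
```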
@@ -302,45 +313,47 @@ with new configs.

The following cluster update / upgrade approaches exist:

-* `No downtime` strategy. Gracefully restart every node in the cluster one-by-one with the updated config / upgraded binary.
+### No downtime strategy
+
+Gracefully restart every node in the cluster one-by-one with the updated config / upgraded binary.

It is recommended restarting the nodes in the following order:

1. Restart `vmstorage` nodes.
2. Restart `vminsert` nodes.
3. Restart `vmselect` nodes.

This strategy allows upgrading the cluster without downtime if the following conditions are met:

- The cluster has at least a pair of nodes of each type - `vminsert`, `vmselect` and `vmstorage`,
  so it can continue accepting new data and serving incoming requests when a single node is temporarily unavailable
  during its restart. See [cluster availability docs](#cluster-availability) for details.
- The cluster has enough compute resources (CPU, RAM, network bandwidth, disk IO) for processing
  the current workload when a single node of any type (`vminsert`, `vmselect` or `vmstorage`)
  is temporarily unavailable during its restart.
- The updated config / upgraded binary is compatible with the remaining components in the cluster.
  See the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) for compatibility notes between different releases.

If at least a single condition isn't met, then the rolling restart may result in cluster unavailability
during the config update / version upgrade. In this case the following strategy is recommended.

-* `Minimum downtime` strategy:
+### Minimum downtime strategy

1. Gracefully stop all the `vminsert` and `vmselect` nodes in parallel.
2. Gracefully restart all the `vmstorage` nodes in parallel.
3. Start all the `vminsert` and `vmselect` nodes in parallel.

The cluster is unavailable for data ingestion and querying when performing the steps above.
The downtime is minimized by restarting cluster nodes in parallel at every step above.
The `minimum downtime` strategy has the following benefits comparing to the `no downtime` strategy:

- It allows performing config update / version upgrade with minimum disruption
  when the previous config / version is incompatible with the new config / version.
- It allows performing config update / version upgrade with minimum disruption
  when the cluster doesn't have enough compute resources (CPU, RAM, disk IO, network bandwidth)
  for rolling upgrade.
- It allows minimizing the duration of config update / version upgrade for clusters with big number of nodes
  or for clusters with big `vmstorage` nodes, which may take long time for graceful restart.

## Cluster availability
@@ -429,11 +442,13 @@ By default cluster components of VictoriaMetrics are tuned for an optimal resour
- `-search.maxConcurrentRequests` at `vmselect` limits the number of concurrent requests a single `vmselect` node can process. Bigger number of concurrent requests usually means bigger memory usage at both `vmselect` and `vmstorage`. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. `vmselect` provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries.
- `-search.maxSamplesPerSeries` at `vmselect` limits the number of raw samples the query can process per each time series. `vmselect` sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage at `vmselect` in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
- `-search.maxSamplesPerQuery` at `vmselect` limits the number of raw samples a single query can process. This allows limiting CPU usage at `vmselect` for heavy queries.
+- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
+- `-search.maxPointsSubqueryPerTimeseries` limits the number of calculated points, which can be generated per each matching time series during [subquery](https://docs.victoriametrics.com/MetricsQL.html#subqueries) evaluation.
- `-search.maxSeries` at `vmselect` limits the number of time series, which may be returned from [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers). This endpoint is used mostly by Grafana for auto-completion of metric names, label names and label values. Queries to this endpoint may take big amounts of CPU time and memory at `vmstorage` and `vmselect` when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxSeries` to quite low value in order limit CPU and memory usage.
- `-search.maxTagKeys` at `vmstorage` limits the number of items, which may be returned from [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names). This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory at `vmstorage` and `vmselect` when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagKeys` to quite low value in order to limit CPU and memory usage.
- `-search.maxTagValues` at `vmstorage` limits the number of items, which may be returned from [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values). This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory at `vmstorage` and `vmselect` when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage.
-- `-storage.maxDailySeries` at `vmstorage` can be used for limiting the number of time series seen per day.
-- `-storage.maxHourlySeries` at `vmstorage` can be used for limiting the number of [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series).
+- `-storage.maxDailySeries` at `vmstorage` can be used for limiting the number of time series seen per day aka [time series churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). See [cardinality limiter docs](#cardinality-limiter).
+- `-storage.maxHourlySeries` at `vmstorage` can be used for limiting the number of [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series). See [cardinality limiter docs](#cardinality-limiter).

See also [capacity planning docs](#capacity-planning) and [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
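For illustration, the two per-series point limits added above could be tightened on `vmselect`; this is only a sketch with placeholder binary path and values, the flag names come from the list above:

```console
# cap range-query output at 10k points per series and subquery evaluation
# at 50k generated points per series (example values)
/path/to/vmselect-prod -search.maxPointsPerTimeseries=10000 -search.maxPointsSubqueryPerTimeseries=50000
```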
@@ -552,6 +567,8 @@ Example command for collecting memory profile from `vminsert` (replace `0.0.0.0`
curl http://0.0.0.0:8480/debug/pprof/heap > mem.pprof
```

+It is safe sharing the collected profiles from security point of view, since they do not contain sensitive information.

</div>

## vmalert
@@ -901,7 +918,9 @@ Below is the output for `/path/to/vmselect -help`:
  -search.maxLookback duration
     Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaining due to historical reasons
  -search.maxPointsPerTimeseries int
-    The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
+    The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
+  -search.maxPointsSubqueryPerTimeseries int
+    The maximum number of points per series, which can be generated by subquery. See https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3 (default 100000)
  -search.maxQueryDuration duration
     The maximum duration for query execution (default 30s)
  -search.maxQueryLen size
@@ -1099,9 +1118,9 @@ Below is the output for `/path/to/vmstorage -help`:
     Overrides max size for storage/tsid cache. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning
     Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
  -storage.maxDailySeries int
-    The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See also -storage.maxHourlySeries
+    The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/#cardinality-limiter . See also -storage.maxHourlySeries
  -storage.maxHourlySeries int
-    The maximum number of unique series can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See also -storage.maxDailySeries
+    The maximum number of unique series can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/#cardinality-limiter . See also -storage.maxDailySeries
  -storage.minFreeDiskSpaceBytes size
     The minimum free disk space at -storageDataPath after which the storage stops accepting new data
     Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 10000000)
@@ -491,7 +491,7 @@ The list of supported transform functions:

#### histogram_quantiles

-`histogram_quantiles("phiLabel", phi1, ..., phiN, buckets)` calculates the given `phi*`-quantiles over the given [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). `phi*` must be in the range `[0...1]`. Each calculated quantile is returned in a separate time series with the corresponding `{phiLabel="phi*"}` label. See also [histogram_quantile](#histogram_quantile).
+`histogram_quantiles("phiLabel", phi1, ..., phiN, buckets)` calculates the given `phi*`-quantiles over the given [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). Argument `phi*` must be in the range `[0...1]`. For example, `histogram_quantiles('le', 0.3, 0.5, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))`. Each calculated quantile is returned in a separate time series with the corresponding `{phiLabel="phi*"}` label. See also [histogram_quantile](#histogram_quantile).

#### histogram_share
@@ -323,7 +323,7 @@ See also [vmagent](https://docs.victoriametrics.com/vmagent.html), which can be

## How to send data from DataDog agent

-VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD]() via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.
+VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/) via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.

Run DataDog agent with `DD_DD_URL=http://victoriametrics-host:8428/datadog` environment variable in order to write data to VictoriaMetrics at `victoriametrics-host` host. Another option is to set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://victoriametrics-host:8428/datadog`.
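A single series can also be pushed to this endpoint directly. The snippet below is only a sketch of the payload shape defined by the DataDog "submit metrics" v1 API; the metric name, timestamp, host and tag values are arbitrary examples:

```console
curl -X POST -H 'Content-Type: application/json' \
  --data-binary '{"series":[{"metric":"system.load.1","type":"gauge","host":"test.example.com","points":[[1660000000,0.5]],"tags":["environment:test"]}]}' \
  http://victoriametrics-host:8428/datadog/api/v1/series
```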
@@ -1213,6 +1213,8 @@ By default VictoriaMetrics is tuned for an optimal resource usage under typical
- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries.
- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
- `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
+- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
+- `-search.maxPointsSubqueryPerTimeseries` limits the number of calculated points, which can be generated per each matching time series during [subquery](https://docs.victoriametrics.com/MetricsQL.html#subqueries) evaluation.
- `-search.maxSeries` limits the number of time series, which may be returned from [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers). This endpoint is used mostly by Grafana for auto-completion of metric names, label names and label values. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxSeries` to quite low value in order limit CPU and memory usage.
- `-search.maxTagKeys` limits the number of items, which may be returned from [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names). This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagKeys` to quite low value in order to limit CPU and memory usage.
- `-search.maxTagValues` limits the number of items, which may be returned from [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values). This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage.
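For a rough sense of how `-search.maxPointsPerTimeseries` interacts with [range queries](https://docs.victoriametrics.com/keyConcepts.html#range-query): the number of returned points per series is approximately `(end - start) / step + 1`. The request below uses arbitrary example values and asks for 61 points per series, far below the default limit of 30000:

```console
# (1660003600 - 1660000000) / 60 + 1 = 61 points per matching series
curl 'http://victoriametrics-host:8428/api/v1/query_range?query=up&start=1660000000&end=1660003600&step=60'
```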
@@ -1547,8 +1549,27 @@ Both limits can be set simultaneously. If any of these limits is reached, then i
The exceeded limits can be [monitored](#monitoring) with the following metrics:

* `vm_hourly_series_limit_rows_dropped_total` - the number of metrics dropped due to exceeded hourly limit on the number of unique time series.
+
+* `vm_hourly_series_limit_max_series` - the hourly series limit set via `-storage.maxHourlySeries` command-line flag.
+
+* `vm_hourly_series_limit_current_series` - the current number of unique series during the last hour.
+  The following query can be useful for alerting when the number of unique series during the last hour exceeds 90% of the `-storage.maxHourlySeries`:
+
+  ```metricsql
+  vm_hourly_series_limit_current_series / vm_hourly_series_limit_max_series > 0.9
+  ```
+
* `vm_daily_series_limit_rows_dropped_total` - the number of metrics dropped due to exceeded daily limit on the number of unique time series.
+
+* `vm_daily_series_limit_max_series` - the daily series limit set via `-storage.maxDailySeries` command-line flag.
+
+* `vm_daily_series_limit_current_series` - the current number of unique series during the last day.
+  The following query can be useful for alerting when the number of unique series during the last day exceeds 90% of the `-storage.maxDailySeries`:
+
+  ```metricsql
+  vm_daily_series_limit_current_series / vm_daily_series_limit_max_series > 0.9
+  ```

These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).

See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
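The current values of these gauges can also be inspected directly on the `/metrics` page; the host name below is a placeholder:

```console
curl -s http://victoriametrics-host:8428/metrics | grep series_limit
```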
@@ -1803,6 +1824,7 @@ curl http://0.0.0.0:8428/debug/pprof/profile > cpu.pprof
The command for collecting CPU profile waits for 30 seconds before returning.

+The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
It is safe sharing the collected profiles from security point of view, since they do not contain sensitive information.

## Integrations
@@ -2153,7 +2175,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
  -search.maxLookback duration
     Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaining due to historical reasons
  -search.maxPointsPerTimeseries int
-    The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
+    The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
+  -search.maxPointsSubqueryPerTimeseries int
+    The maximum number of points per series, which can be generated by subquery. See https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3 (default 100000)
  -search.maxQueryDuration duration
     The maximum duration for query execution (default 30s)
  -search.maxQueryLen size

@@ -2225,9 +2249,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     Overrides max size for storage/tsid cache. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning
     Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
  -storage.maxDailySeries int
-    The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See also -storage.maxHourlySeries
+    The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/#cardinality-limiter . See also -storage.maxHourlySeries
  -storage.maxHourlySeries int
-    The maximum number of unique series can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See also -storage.maxDailySeries
+    The maximum number of unique series can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/#cardinality-limiter . See also -storage.maxDailySeries
  -storage.minFreeDiskSpaceBytes size
     The minimum free disk space at -storageDataPath after which the storage stops accepting new data
     Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 10000000)
@@ -327,7 +327,7 @@ See also [vmagent](https://docs.victoriametrics.com/vmagent.html), which can be

## How to send data from DataDog agent

-VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD]() via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.
+VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/) via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.

Run DataDog agent with `DD_DD_URL=http://victoriametrics-host:8428/datadog` environment variable in order to write data to VictoriaMetrics at `victoriametrics-host` host. Another option is to set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://victoriametrics-host:8428/datadog`.
@ -1217,6 +1217,8 @@ By default VictoriaMetrics is tuned for an optimal resource usage under typical
|
|||
- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries.
|
||||
- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
|
||||
- `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
|
||||
- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
- `-search.maxPointsSubqueryPerTimeseries` limits the number of calculated points, which can be generated per each matching time series during [subquery](https://docs.victoriametrics.com/MetricsQL.html#subqueries) evaluation.
|
||||
- `-search.maxSeries` limits the number of time series, which may be returned from [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers). This endpoint is used mostly by Grafana for auto-completion of metric names, label names and label values. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxSeries` to quite low value in order to limit CPU and memory usage.
|
||||
- `-search.maxTagKeys` limits the number of items, which may be returned from [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names). This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagKeys` to quite low value in order to limit CPU and memory usage.
|
||||
- `-search.maxTagValues` limits the number of items, which may be returned from [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values). This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage. A combined example of these flags is shown after this list.
|
||||
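As an illustration only (the values below are arbitrary and must be tuned for the concrete workload), several of the flags above can be combined when starting single-node VictoriaMetrics:

```console
/path/to/victoria-metrics-prod \
  -search.maxConcurrentRequests=8 \
  -search.maxQueueDuration=30s \
  -search.maxSamplesPerQuery=500000000 \
  -search.maxSeries=10000
```

Lower values trade query throughput for more predictable CPU and memory usage.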
|
@ -1551,8 +1553,27 @@ Both limits can be set simultaneously. If any of these limits is reached, then i
|
|||
The exceeded limits can be [monitored](#monitoring) with the following metrics:
|
||||
|
||||
* `vm_hourly_series_limit_rows_dropped_total` - the number of metrics dropped due to exceeded hourly limit on the number of unique time series.
|
||||
|
||||
* `vm_hourly_series_limit_max_series` - the hourly series limit set via `-storage.maxHourlySeries` command-line flag.
|
||||
|
||||
* `vm_hourly_series_limit_current_series` - the current number of unique series during the last hour.
|
||||
The following query can be useful for alerting when the number of unique series during the last hour exceeds 90% of the `-storage.maxHourlySeries`:
|
||||
|
||||
```metricsql
|
||||
vm_hourly_series_limit_current_series / vm_hourly_series_limit_max_series > 0.9
|
||||
```
|
||||
|
||||
* `vm_daily_series_limit_rows_dropped_total` - the number of metrics dropped due to exceeded daily limit on the number of unique time series.
|
||||
|
||||
* `vm_daily_series_limit_max_series` - the daily series limit set via `-storage.maxDailySeries` command-line flag.
|
||||
|
||||
* `vm_daily_series_limit_current_series` - the current number of unique series during the last day.
|
||||
The following query can be useful for alerting when the number of unique series during the last day exceeds 90% of the `-storage.maxDailySeries`:
|
||||
|
||||
```metricsql
|
||||
vm_daily_series_limit_current_series / vm_daily_series_limit_max_series > 0.9
|
||||
```
|
||||
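Both queries can be wired into alerting. Below is a hypothetical [vmalert](https://docs.victoriametrics.com/vmalert.html) rule file using Prometheus-compatible syntax; the alert names, `for` duration and `severity` label are illustrative:

```yaml
groups:
  - name: cardinality-limits
    rules:
      - alert: TooManyHourlySeries
        expr: vm_hourly_series_limit_current_series / vm_hourly_series_limit_max_series > 0.9
        for: 15m
        labels:
          severity: warning
        annotations:
          summary: "More than 90% of -storage.maxHourlySeries is used"
      - alert: TooManyDailySeries
        expr: vm_daily_series_limit_current_series / vm_daily_series_limit_max_series > 0.9
        for: 15m
        labels:
          severity: warning
        annotations:
          summary: "More than 90% of -storage.maxDailySeries is used"
```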
|
||||
These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).
|
||||
|
||||
See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
|
||||
|
@ -1807,6 +1828,7 @@ curl http://0.0.0.0:8428/debug/pprof/profile > cpu.pprof
|
|||
The command for collecting CPU profile waits for 30 seconds before returning.
|
||||
|
||||
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
||||
It is safe to share the collected profiles from a security point of view, since they do not contain sensitive information.
|
||||
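For example, the CPU profile collected above can be inspected in the interactive pprof web UI (the local port is arbitrary):

```console
go tool pprof -http=localhost:8081 cpu.pprof
```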
|
||||
## Integrations
|
||||
|
||||
|
@ -2157,7 +2179,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
-search.maxLookback duration
|
||||
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
|
||||
-search.maxPointsPerTimeseries int
|
||||
The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
|
||||
The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
|
||||
-search.maxPointsSubqueryPerTimeseries int
|
||||
The maximum number of points per series, which can be generated by subquery. See https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3 (default 100000)
|
||||
-search.maxQueryDuration duration
|
||||
The maximum duration for query execution (default 30s)
|
||||
-search.maxQueryLen size
|
||||
|
@ -2229,9 +2253,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
|||
Overrides max size for storage/tsid cache. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||
-storage.maxDailySeries int
|
||||
The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See also -storage.maxHourlySeries
|
||||
The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/#cardinality-limiter . See also -storage.maxHourlySeries
|
||||
-storage.maxHourlySeries int
|
||||
The maximum number of unique series can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See also -storage.maxDailySeries
|
||||
The maximum number of unique series can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/#cardinality-limiter . See also -storage.maxDailySeries
|
||||
-storage.minFreeDiskSpaceBytes size
|
||||
The minimum free disk space at -storageDataPath after which the storage stops accepting new data
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 10000000)
|
||||
|
|
|
@ -10,7 +10,7 @@ readme_index:
|
|||
with_frontmatter: true
|
||||
|
||||
google:
|
||||
gtag: UA-129683199-1
|
||||
gtag: G-N9SVT8S3HK
|
||||
|
||||
plugins:
|
||||
- github-pages
|
||||
|
|
|
@ -42,11 +42,14 @@ Here is a Quickstart guide for [vmagent](https://docs.victoriametrics.com/vmagen
|
|||
|
||||
You can use one of the following options:
|
||||
|
||||
1. Regional endpoints - use one regional endpoint as default and switch to another if there is an issue.
|
||||
2. Load balancer - sends queries to a particular region. Its simplicity is both the benefit and the drawback of this setup.
|
||||
3. Promxy - proxy that reads data from multiple Prometheus-like sources. It allows reading data more intelligently to cover the region's unavailability out of the box. It doesn't support MetricsQL yet (please check this issue).
|
||||
4. Global vmselect in cluster setup - you can set up an additional subset of vmselects that knows about all storages in all regions.
|
||||
* The deduplication in 1ms on the vmselect side must be turned on. This setup allows you to query data using MetricsQL.
|
||||
1. Multi-level [vmselect setup](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup) in cluster setup: top-level vmselect(s) read data from cluster-level vmselects
|
||||
* Returns data even if one of the clusters is unavailable
|
||||
* Merges data from both sources. You need to turn on [deduplication](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#deduplication) to remove duplicates
|
||||
2. Regional endpoints - use one regional endpoint as default and switch to another if there is an issue.
|
||||
3. Load balancer - sends queries to a particular region. Its simplicity is both the benefit and the drawback of this setup.
|
||||
4. Promxy - proxy that reads data from multiple Prometheus-like sources. It allows reading data more intelligently to cover the region's unavailability out of the box. It doesn't support MetricsQL yet (please check this issue).
|
||||
5. Global vmselect in cluster setup - you can set up an additional subset of vmselects that knows about all storages in all regions.
|
||||
* The [deduplication](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#deduplication) in 1ms on the vmselect side must be turned on (see the sketch after this list). This setup allows you to query data using MetricsQL.
|
||||
* The downside is that vmselect waits for a response from all storages in all regions.
|
||||
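A minimal sketch of the global vmselect option: a top-level vmselect reads from regional vmselects and deduplicates the merged data. The `-storageNode` and `-dedup.minScrapeInterval` flags are taken from the cluster documentation, while the hostnames and the cluster-native port `8401` are assumptions to verify against your setup:

```console
/path/to/vmselect-prod \
  -storageNode=vmselect-region1:8401,vmselect-region2:8401 \
  -dedup.minScrapeInterval=1ms
```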
|
||||
|
||||
|
|
|
@ -897,6 +897,7 @@ curl http://0.0.0.0:8429/debug/pprof/profile > cpu.pprof
|
|||
The command for collecting CPU profile waits for 30 seconds before returning.
|
||||
|
||||
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
||||
It is safe to share the collected profiles from a security point of view, since they do not contain sensitive information.
|
||||
|
||||
## Advanced usage
|
||||
|
||||
|
|
|
@ -634,6 +634,35 @@ Use the official [Grafana dashboard](https://grafana.com/grafana/dashboards/1495
|
|||
If you have suggestions for improvements or have found a bug - please open an issue on github or add
|
||||
a review to the dashboard.
|
||||
|
||||
## Profiling
|
||||
|
||||
`vmalert` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
|
||||
|
||||
* Memory profile. It can be collected with the following command (replace `0.0.0.0` with hostname if needed):
|
||||
|
||||
<div class="with-copy" markdown="1">
|
||||
|
||||
```console
|
||||
curl http://0.0.0.0:8880/debug/pprof/heap > mem.pprof
|
||||
```
|
||||
|
||||
</div>
|
||||
|
||||
* CPU profile. It can be collected with the following command (replace `0.0.0.0` with hostname if needed):
|
||||
|
||||
<div class="with-copy" markdown="1">
|
||||
|
||||
```console
|
||||
curl http://0.0.0.0:8880/debug/pprof/profile > cpu.pprof
|
||||
```
|
||||
|
||||
</div>
|
||||
|
||||
The command for collecting CPU profile waits for 30 seconds before returning.
|
||||
|
||||
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
||||
It is safe to share the collected profiles from a security point of view, since they do not contain sensitive information.
|
||||
|
||||
## Configuration
|
||||
|
||||
### Flags
|
||||
|
|
|
@ -221,6 +221,7 @@ curl http://0.0.0.0:8427/debug/pprof/profile > cpu.pprof
|
|||
The command for collecting CPU profile waits for 30 seconds before returning.
|
||||
|
||||
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
||||
It is safe to share the collected profiles from a security point of view, since they do not contain sensitive information.
|
||||
|
||||
## Advanced usage
|
||||
|
||||
|
|
18
go.mod
18
go.mod
|
@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics
|
|||
go 1.18
|
||||
|
||||
require (
|
||||
cloud.google.com/go/storage v1.25.0
|
||||
cloud.google.com/go/storage v1.26.0
|
||||
github.com/VictoriaMetrics/fastcache v1.10.0
|
||||
|
||||
// Do not use the original github.com/valyala/fasthttp because of issues
|
||||
|
@ -11,7 +11,7 @@ require (
|
|||
github.com/VictoriaMetrics/fasthttp v1.1.0
|
||||
github.com/VictoriaMetrics/metrics v1.22.2
|
||||
github.com/VictoriaMetrics/metricsql v0.44.1
|
||||
github.com/aws/aws-sdk-go v1.44.81
|
||||
github.com/aws/aws-sdk-go v1.44.87
|
||||
github.com/cespare/xxhash/v2 v2.1.2
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
|
||||
|
@ -35,15 +35,15 @@ require (
|
|||
github.com/valyala/fasttemplate v1.2.1
|
||||
github.com/valyala/gozstd v1.17.0
|
||||
github.com/valyala/quicktemplate v1.7.0
|
||||
golang.org/x/net v0.0.0-20220812174116-3211cb980234
|
||||
golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7
|
||||
golang.org/x/sys v0.0.0-20220818161305-2296e01440c6
|
||||
google.golang.org/api v0.93.0
|
||||
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b
|
||||
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094
|
||||
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261
|
||||
google.golang.org/api v0.94.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.103.0 // indirect
|
||||
cloud.google.com/go v0.104.0 // indirect
|
||||
cloud.google.com/go/compute v1.9.0 // indirect
|
||||
cloud.google.com/go/iam v0.3.0 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
|
@ -75,7 +75,7 @@ require (
|
|||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220819174105-e9f053255caa // indirect
|
||||
google.golang.org/grpc v1.48.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf // indirect
|
||||
google.golang.org/grpc v1.49.0 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
)
|
||||
|
|
45
go.sum
45
go.sum
|
@ -30,9 +30,8 @@ cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Ud
|
|||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
|
||||
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
|
||||
cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
|
||||
cloud.google.com/go v0.103.0 h1:YXtxp9ymmZjlGzxV7VrYQ8aaQuAgcqxSy6YhDX4I458=
|
||||
cloud.google.com/go v0.103.0/go.mod h1:vwLx1nqLrzLX/fpwSMOXmFIqBOyHsvHbnAdbGSJ+mKk=
|
||||
cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8=
|
||||
cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
@ -62,9 +61,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
|
|||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
|
||||
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
|
||||
cloud.google.com/go/storage v1.25.0 h1:D2Dn0PslpK7Z3B2AvuUHyIC762bDbGJdlmQlCBR71os=
|
||||
cloud.google.com/go/storage v1.25.0/go.mod h1:Qys4JU+jeup3QnuKKAosWuxrD95C4MSqxfVDnSirDsI=
|
||||
cloud.google.com/go/storage v1.26.0 h1:lYAGjknyDJirSzfwUlkv4Nsnj7od7foxQNH/fqZqles=
|
||||
cloud.google.com/go/storage v1.26.0/go.mod h1:mk/N7YwIKEWyTvXAWQCIeiCTdLoRH6Pd5xmSnolQLTI=
|
||||
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
|
@ -149,8 +147,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
|
|||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.44.81 h1:C8oBZ+a+ka0qk3Q24MohQIFq0tkbO8IAu5tfpAMKVWE=
|
||||
github.com/aws/aws-sdk-go v1.44.81/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go v1.44.87 h1:u/1sm8MNUSQHt8MGLEQHAj4r3lns3w0B1IXelPKbpn4=
|
||||
github.com/aws/aws-sdk-go v1.44.87/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
|
@ -1006,10 +1004,9 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su
|
|||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E=
|
||||
golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY=
|
||||
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1030,9 +1027,8 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j
|
|||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 h1:dtndE8FcEta75/4kHF3AbpuWzV6f1LjnLrM4pe2SZrw=
|
||||
golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 h1:2o1E+E8TpNLklK9nHiPiK1uzIYrIHt+cQx3ynCwq9V8=
|
||||
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -1141,11 +1137,10 @@ golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 h1:Sx/u41w+OwrInGdEckYmEuU5gHoGSL4QbDz3S9s6j4U=
|
||||
golang.org/x/sys v0.0.0-20220818161305-2296e01440c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY=
|
||||
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1291,10 +1286,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69
|
|||
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
|
||||
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
|
||||
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
|
||||
google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
|
||||
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
|
||||
google.golang.org/api v0.93.0 h1:T2xt9gi0gHdxdnRkVQhT8mIvPaXKNsDNWz+L696M66M=
|
||||
google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
|
||||
google.golang.org/api v0.94.0 h1:KtKM9ru3nzQioV1HLlUf1cR7vMYJIpgls5VhAYQXIwA=
|
||||
google.golang.org/api v0.94.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1385,11 +1378,9 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP
|
|||
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220819174105-e9f053255caa h1:Ux9yJCyf598uEniFPSyp8g1jtGTt77m+lzYyVgrWQaQ=
|
||||
google.golang.org/genproto v0.0.0-20220819174105-e9f053255caa/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
|
||||
google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf h1:Q5xNKbTSFwkuaaGaR7CMcXEM5sy19KYdUU8iF8/iRC0=
|
||||
google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@ -1426,8 +1417,8 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11
|
|||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
|
||||
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
|
||||
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
|
|
|
@ -8,6 +8,8 @@ import (
|
|||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
|
@ -188,6 +190,13 @@ func ParseRelabelConfigs(rcs []RelabelConfig, relabelDebug bool) (*ParsedConfigs
|
|||
var (
|
||||
defaultOriginalRegexForRelabelConfig = regexp.MustCompile(".*")
|
||||
defaultRegexForRelabelConfig = regexp.MustCompile("^(.*)$")
|
||||
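// defaultPromRegex is a pre-built PromRegex matching any string. It is shared by relabel configs without an explicitly set non-default `regex`.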
defaultPromRegex = func() *regexutil.PromRegex {
|
||||
pr, err := regexutil.NewPromRegex(".*")
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("BUG: unexpected error: %s", err))
|
||||
}
|
||||
return pr
|
||||
}()
|
||||
)
|
||||
|
||||
func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
|
||||
|
@ -196,25 +205,36 @@ func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
|
|||
if rc.Separator != nil {
|
||||
separator = *rc.Separator
|
||||
}
|
||||
action := strings.ToLower(rc.Action)
|
||||
if action == "" {
|
||||
action = "replace"
|
||||
}
|
||||
targetLabel := rc.TargetLabel
|
||||
regexCompiled := defaultRegexForRelabelConfig
|
||||
regexAnchored := defaultRegexForRelabelConfig
|
||||
regexOriginalCompiled := defaultOriginalRegexForRelabelConfig
|
||||
if rc.Regex != nil {
|
||||
promRegex := defaultPromRegex
|
||||
if rc.Regex != nil && !isDefaultRegex(rc.Regex.S) {
|
||||
regex := rc.Regex.S
|
||||
regexOrig := regex
|
||||
if rc.Action != "replace_all" && rc.Action != "labelmap_all" {
|
||||
regex = regexutil.RemoveStartEndAnchors(regex)
|
||||
regexOrig = regex
|
||||
regex = "^(?:" + regex + ")$"
|
||||
}
|
||||
re, err := regexp.Compile(regex)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse `regex` %q: %w", regex, err)
|
||||
}
|
||||
regexCompiled = re
|
||||
regexAnchored = re
|
||||
reOriginal, err := regexp.Compile(regexOrig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse `regex` %q: %w", regexOrig, err)
|
||||
}
|
||||
regexOriginalCompiled = reOriginal
|
||||
promRegex, err = regexutil.NewPromRegex(regexOrig)
|
||||
if err != nil {
|
||||
logger.Panicf("BUG: cannot parse already parsed regex %q: %s", regexOrig, err)
|
||||
}
|
||||
}
|
||||
modulus := rc.Modulus
|
||||
replacement := "$1"
|
||||
|
@ -229,10 +249,6 @@ func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
|
|||
if rc.Labels != nil {
|
||||
graphiteLabelRules = newGraphiteLabelRules(rc.Labels)
|
||||
}
|
||||
action := rc.Action
|
||||
if action == "" {
|
||||
action = "replace"
|
||||
}
|
||||
switch action {
|
||||
case "graphite":
|
||||
if graphiteMatchTemplate == nil {
|
||||
|
@ -331,18 +347,19 @@ func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
|
|||
}
|
||||
}
|
||||
return &parsedRelabelConfig{
|
||||
SourceLabels: sourceLabels,
|
||||
Separator: separator,
|
||||
TargetLabel: targetLabel,
|
||||
Regex: regexCompiled,
|
||||
Modulus: modulus,
|
||||
Replacement: replacement,
|
||||
Action: action,
|
||||
If: rc.If,
|
||||
SourceLabels: sourceLabels,
|
||||
Separator: separator,
|
||||
TargetLabel: targetLabel,
|
||||
RegexAnchored: regexAnchored,
|
||||
Modulus: modulus,
|
||||
Replacement: replacement,
|
||||
Action: action,
|
||||
If: rc.If,
|
||||
|
||||
graphiteMatchTemplate: graphiteMatchTemplate,
|
||||
graphiteLabelRules: graphiteLabelRules,
|
||||
|
||||
regex: promRegex,
|
||||
regexOriginal: regexOriginalCompiled,
|
||||
|
||||
hasCaptureGroupInTargetLabel: strings.Contains(targetLabel, "$"),
|
||||
|
@ -350,3 +367,11 @@ func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
|
|||
hasLabelReferenceInReplacement: strings.Contains(replacement, "{{"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
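// isDefaultRegex reports whether expr is equivalent to the default `.*` regex, i.e. whether it matches any string.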
func isDefaultRegex(expr string) bool {
|
||||
prefix, suffix := regexutil.Simplify(expr)
|
||||
if prefix != "" {
|
||||
return false
|
||||
}
|
||||
return suffix == ".*"
|
||||
}
|
||||
|
|
|
@ -126,7 +126,7 @@ func TestParsedConfigsString(t *testing.T) {
|
|||
TargetLabel: "foo",
|
||||
SourceLabels: []string{"aaa"},
|
||||
},
|
||||
}, "[SourceLabels=[aaa], Separator=;, TargetLabel=foo, Regex=^(.*)$, Modulus=0, Replacement=$1, Action=replace, If=, "+
|
||||
}, "[SourceLabels=[aaa], Separator=;, TargetLabel=foo, Regex=.*, Modulus=0, Replacement=$1, Action=replace, If=, "+
|
||||
"graphiteMatchTemplate=<nil>, graphiteLabelRules=[]], relabelDebug=false")
|
||||
var ie IfExpression
|
||||
if err := ie.Parse("{foo=~'bar'}"); err != nil {
|
||||
|
@ -141,7 +141,7 @@ func TestParsedConfigsString(t *testing.T) {
|
|||
},
|
||||
If: &ie,
|
||||
},
|
||||
}, "[SourceLabels=[], Separator=;, TargetLabel=, Regex=^(.*)$, Modulus=0, Replacement=$1, Action=graphite, If={foo=~'bar'}, "+
|
||||
}, "[SourceLabels=[], Separator=;, TargetLabel=, Regex=.*, Modulus=0, Replacement=$1, Action=graphite, If={foo=~'bar'}, "+
|
||||
"graphiteMatchTemplate=foo.*.bar, graphiteLabelRules=[replaceTemplate=$1-zz, targetLabel=job]], relabelDebug=false")
|
||||
f([]RelabelConfig{
|
||||
{
|
||||
|
@ -150,7 +150,7 @@ func TestParsedConfigsString(t *testing.T) {
|
|||
TargetLabel: "x",
|
||||
If: &ie,
|
||||
},
|
||||
}, "[SourceLabels=[foo bar], Separator=;, TargetLabel=x, Regex=^(.*)$, Modulus=0, Replacement=$1, Action=replace, If={foo=~'bar'}, "+
|
||||
}, "[SourceLabels=[foo bar], Separator=;, TargetLabel=x, Regex=.*, Modulus=0, Replacement=$1, Action=replace, If={foo=~'bar'}, "+
|
||||
"graphiteMatchTemplate=<nil>, graphiteLabelRules=[]], relabelDebug=false")
|
||||
}
|
||||
|
||||
|
@ -174,13 +174,14 @@ func TestParseRelabelConfigsSuccess(t *testing.T) {
|
|||
}, &ParsedConfigs{
|
||||
prcs: []*parsedRelabelConfig{
|
||||
{
|
||||
SourceLabels: []string{"foo", "bar"},
|
||||
Separator: ";",
|
||||
TargetLabel: "xxx",
|
||||
Regex: defaultRegexForRelabelConfig,
|
||||
Replacement: "$1",
|
||||
Action: "replace",
|
||||
SourceLabels: []string{"foo", "bar"},
|
||||
Separator: ";",
|
||||
TargetLabel: "xxx",
|
||||
RegexAnchored: defaultRegexForRelabelConfig,
|
||||
Replacement: "$1",
|
||||
Action: "replace",
|
||||
|
||||
regex: defaultPromRegex,
|
||||
regexOriginal: defaultOriginalRegexForRelabelConfig,
|
||||
hasCaptureGroupInReplacement: true,
|
||||
},
|
||||
|
@ -455,3 +456,21 @@ func TestParseRelabelConfigsFailure(t *testing.T) {
|
|||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDefaultRegex(t *testing.T) {
|
||||
f := func(s string, resultExpected bool) {
|
||||
t.Helper()
|
||||
result := isDefaultRegex(s)
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result for isDefaultRegex(%q); got %v; want %v", s, result, resultExpected)
|
||||
}
|
||||
}
|
||||
f("", false)
|
||||
f("foo", false)
|
||||
f(".+", false)
|
||||
f("a.*", false)
|
||||
f(".*", true)
|
||||
f("(.*)", true)
|
||||
f("^.*$", true)
|
||||
f("(?:.*)", true)
|
||||
}
|
||||
|
|
|
@ -3,10 +3,10 @@ package promrelabel
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil"
|
||||
"github.com/VictoriaMetrics/metricsql"
|
||||
)
|
||||
|
||||
|
@ -105,7 +105,7 @@ type labelFilter struct {
|
|||
value string
|
||||
|
||||
// re contains compiled regexp for `=~` and `!~` op.
|
||||
re *regexp.Regexp
|
||||
re *regexutil.PromRegex
|
||||
}
|
||||
|
||||
func newLabelFilter(mlf *metricsql.LabelFilter) (*labelFilter, error) {
|
||||
|
@ -115,10 +115,7 @@ func newLabelFilter(mlf *metricsql.LabelFilter) (*labelFilter, error) {
|
|||
value: mlf.Value,
|
||||
}
|
||||
if lf.op == "=~" || lf.op == "!~" {
|
||||
// PromQL regexps are anchored by default.
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors
|
||||
reString := "^(?:" + lf.value + ")$"
|
||||
re, err := regexp.Compile(reString)
|
||||
re, err := regexutil.NewPromRegex(lf.value)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse regexp for %s: %w", mlf.AppendString(nil), err)
|
||||
}
|
||||
|
@ -134,9 +131,9 @@ func (lf *labelFilter) match(labels []prompbmarshal.Label) bool {
|
|||
case "!=":
|
||||
return !lf.equalValue(labels)
|
||||
case "=~":
|
||||
return lf.equalRegexp(labels)
|
||||
return lf.matchRegexp(labels)
|
||||
case "!~":
|
||||
return !lf.equalRegexp(labels)
|
||||
return !lf.matchRegexp(labels)
|
||||
default:
|
||||
logger.Panicf("BUG: unexpected operation for label filter: %s", lf.op)
|
||||
}
|
||||
|
@ -161,7 +158,7 @@ func (lf *labelFilter) equalValue(labels []prompbmarshal.Label) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func (lf *labelFilter) equalRegexp(labels []prompbmarshal.Label) bool {
|
||||
func (lf *labelFilter) matchRegexp(labels []prompbmarshal.Label) bool {
|
||||
labelNameMatches := 0
|
||||
for _, label := range labels {
|
||||
if toCanonicalLabelName(label.Name) != lf.label {
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil"
|
||||
"github.com/cespare/xxhash/v2"
|
||||
)
|
||||
|
||||
|
@ -16,18 +17,19 @@ import (
|
|||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
|
||||
type parsedRelabelConfig struct {
|
||||
SourceLabels []string
|
||||
Separator string
|
||||
TargetLabel string
|
||||
Regex *regexp.Regexp
|
||||
Modulus uint64
|
||||
Replacement string
|
||||
Action string
|
||||
If *IfExpression
|
||||
SourceLabels []string
|
||||
Separator string
|
||||
TargetLabel string
|
||||
RegexAnchored *regexp.Regexp
|
||||
Modulus uint64
|
||||
Replacement string
|
||||
Action string
|
||||
If *IfExpression
|
||||
|
||||
graphiteMatchTemplate *graphiteMatchTemplate
|
||||
graphiteLabelRules []graphiteLabelRule
|
||||
|
||||
regex *regexutil.PromRegex
|
||||
regexOriginal *regexp.Regexp
|
||||
|
||||
hasCaptureGroupInTargetLabel bool
|
||||
|
@ -38,7 +40,8 @@ type parsedRelabelConfig struct {
|
|||
// String returns human-readable representation for prc.
|
||||
func (prc *parsedRelabelConfig) String() string {
|
||||
return fmt.Sprintf("SourceLabels=%s, Separator=%s, TargetLabel=%s, Regex=%s, Modulus=%d, Replacement=%s, Action=%s, If=%s, graphiteMatchTemplate=%s, graphiteLabelRules=%s",
|
||||
prc.SourceLabels, prc.Separator, prc.TargetLabel, prc.Regex, prc.Modulus, prc.Replacement, prc.Action, prc.If, prc.graphiteMatchTemplate, prc.graphiteLabelRules)
|
||||
prc.SourceLabels, prc.Separator, prc.TargetLabel, prc.regexOriginal, prc.Modulus, prc.Replacement,
|
||||
prc.Action, prc.If, prc.graphiteMatchTemplate, prc.graphiteLabelRules)
|
||||
}
|
||||
|
||||
// Apply applies pcs to labels starting from the labelsOffset.
|
||||
|
@ -182,7 +185,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
|
|||
replacement = string(bb.B)
|
||||
}
|
||||
bb.B = concatLabelValues(bb.B[:0], src, prc.SourceLabels, prc.Separator)
|
||||
if prc.Regex == defaultRegexForRelabelConfig && !prc.hasCaptureGroupInTargetLabel {
|
||||
if prc.RegexAnchored == defaultRegexForRelabelConfig && !prc.hasCaptureGroupInTargetLabel {
|
||||
if replacement == "$1" {
|
||||
// Fast path for the rule that copies source label values to destination:
|
||||
// - source_labels: [...]
|
||||
|
@ -200,7 +203,12 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
|
|||
return labels
|
||||
}
|
||||
}
|
||||
match := prc.Regex.FindSubmatchIndex(bb.B)
|
||||
if re := prc.regex; re.HasPrefix() && !re.MatchString(bytesutil.ToUnsafeString(bb.B)) {
|
||||
// Fast path - regexp mismatch.
|
||||
relabelBufPool.Put(bb)
|
||||
return labels
|
||||
}
|
||||
match := prc.RegexAnchored.FindSubmatchIndex(bb.B)
|
||||
if match == nil {
|
||||
// Fast path - nothing to replace.
|
||||
relabelBufPool.Put(bb)
|
||||
|
@ -252,7 +260,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
|
|||
return labels
|
||||
case "keep":
|
||||
// Keep the target if `source_labels` joined with `separator` match the `regex`.
|
||||
if prc.Regex == defaultRegexForRelabelConfig {
|
||||
if prc.RegexAnchored == defaultRegexForRelabelConfig {
|
||||
// Fast path for the case with `if` and without explicitly set `regex`:
|
||||
//
|
||||
// - action: keep
|
||||
|
@ -262,7 +270,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
|
|||
}
|
||||
bb := relabelBufPool.Get()
|
||||
bb.B = concatLabelValues(bb.B[:0], src, prc.SourceLabels, prc.Separator)
|
||||
keep := prc.matchString(bytesutil.ToUnsafeString(bb.B))
|
||||
keep := prc.regex.MatchString(bytesutil.ToUnsafeString(bb.B))
|
||||
relabelBufPool.Put(bb)
|
||||
if !keep {
|
||||
return labels[:labelsOffset]
|
||||
|
@ -270,7 +278,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
|
|||
return labels
|
||||
case "drop":
|
||||
// Drop the target if `source_labels` joined with `separator` don't match the `regex`.
|
||||
if prc.Regex == defaultRegexForRelabelConfig {
|
||||
if prc.RegexAnchored == defaultRegexForRelabelConfig {
|
||||
// Fast path for the case with `if` and without explicitly set `regex`:
|
||||
//
|
||||
// - action: drop
|
||||
|
@ -280,7 +288,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
|
|||
}
|
||||
bb := relabelBufPool.Get()
|
||||
bb.B = concatLabelValues(bb.B[:0], src, prc.SourceLabels, prc.Separator)
|
||||
drop := prc.matchString(bytesutil.ToUnsafeString(bb.B))
|
||||
drop := prc.regex.MatchString(bytesutil.ToUnsafeString(bb.B))
|
||||
relabelBufPool.Put(bb)
|
||||
if drop {
|
||||
return labels[:labelsOffset]
|
||||
|
@ -296,8 +304,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
|
|||
return setLabelValue(labels, labelsOffset, prc.TargetLabel, value)
|
||||
case "labelmap":
|
||||
// Replace label names with the `replacement` if they match `regex`
|
||||
for i := range src {
|
||||
label := &src[i]
|
||||
for _, label := range src {
|
||||
labelName, ok := prc.replaceFullString(label.Name, prc.Replacement, prc.hasCaptureGroupInReplacement)
|
||||
if ok {
|
||||
labels = setLabelValue(labels, labelsOffset, labelName, label.Value)
|
||||
|
@ -314,20 +321,20 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
|
|||
case "labeldrop":
|
||||
// Drop labels with names matching the `regex`
|
||||
dst := labels[:labelsOffset]
|
||||
for i := range src {
|
||||
label := &src[i]
|
||||
if !prc.matchString(label.Name) {
|
||||
dst = append(dst, *label)
|
||||
re := prc.regex
|
||||
for _, label := range src {
|
||||
if !re.MatchString(label.Name) {
|
||||
dst = append(dst, label)
|
||||
}
|
||||
}
|
||||
return dst
|
||||
case "labelkeep":
|
||||
// Keep labels with names matching the `regex`
|
||||
dst := labels[:labelsOffset]
|
||||
for i := range src {
|
||||
label := &src[i]
|
||||
if prc.matchString(label.Name) {
|
||||
dst = append(dst, *label)
|
||||
re := prc.regex
|
||||
for _, label := range src {
|
||||
if re.MatchString(label.Name) {
|
||||
dst = append(dst, label)
|
||||
}
|
||||
}
|
||||
return dst
|
||||
|
@ -385,13 +392,17 @@ func (prc *parsedRelabelConfig) replaceFullString(s, replacement string, hasCapt
|
|||
}
|
||||
}
|
||||
}
|
||||
if re := prc.regex; re.HasPrefix() && !re.MatchString(s) {
|
||||
// Fast path - regex mismatch
|
||||
return s, false
|
||||
}
|
||||
// Slow path - regexp processing
|
||||
match := prc.Regex.FindStringSubmatchIndex(s)
|
||||
match := prc.RegexAnchored.FindStringSubmatchIndex(s)
|
||||
if match == nil {
|
||||
return s, false
|
||||
}
|
||||
bb := relabelBufPool.Get()
|
||||
bb.B = prc.Regex.ExpandString(bb.B[:0], replacement, s, match)
|
||||
bb.B = prc.RegexAnchored.ExpandString(bb.B[:0], replacement, s, match)
|
||||
result := string(bb.B)
|
||||
relabelBufPool.Put(bb)
|
||||
return result, true
|
||||
|
@ -412,31 +423,9 @@ func (prc *parsedRelabelConfig) replaceStringSubmatches(s, replacement string, h
|
|||
return re.ReplaceAllString(s, replacement), true
|
||||
}
|
||||
|
||||
func (prc *parsedRelabelConfig) matchString(s string) bool {
|
||||
prefix, complete := prc.regexOriginal.LiteralPrefix()
|
||||
if complete {
|
||||
return prefix == s
|
||||
}
|
||||
if !strings.HasPrefix(s, prefix) {
|
||||
return false
|
||||
}
|
||||
reStr := prc.regexOriginal.String()
|
||||
if strings.HasPrefix(reStr, prefix) {
|
||||
// Fast path for `foo.*` and `bar.+` regexps
|
||||
reSuffix := reStr[len(prefix):]
|
||||
switch reSuffix {
|
||||
case ".+", "(.+)":
|
||||
return len(s) > len(prefix)
|
||||
case ".*", "(.*)":
|
||||
return true
|
||||
}
|
||||
}
|
||||
return prc.Regex.MatchString(s)
|
||||
}
|
||||
|
||||
func (prc *parsedRelabelConfig) expandCaptureGroups(template, source string, match []int) string {
|
||||
bb := relabelBufPool.Get()
|
||||
bb.B = prc.Regex.ExpandString(bb.B[:0], template, source, match)
|
||||
bb.B = prc.RegexAnchored.ExpandString(bb.B[:0], template, source, match)
|
||||
s := string(bb.B)
|
||||
relabelBufPool.Put(bb)
|
||||
return s
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package promrelabel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
|
@ -726,3 +727,57 @@ func TestFillLabelReferences(t *testing.T) {
|
|||
f(`{{bar}}-aa`, `foo{bar="baz"}`, `baz-aa`)
|
||||
f(`{{bar}}-aa{{__name__}}.{{bar}}{{non-existing-label}}`, `foo{bar="baz"}`, `baz-aafoo.baz`)
|
||||
}
|
||||
|
||||
func TestRegexMatchStringSuccess(t *testing.T) {
|
||||
f := func(pattern, s string) {
|
||||
t.Helper()
|
||||
prc := newTestRegexRelabelConfig(pattern)
|
||||
if !prc.regex.MatchString(s) {
|
||||
t.Fatalf("unexpected MatchString(%q) result; got false; want true", s)
|
||||
}
|
||||
}
|
||||
f("", "")
|
||||
f("foo", "foo")
|
||||
f(".*", "")
|
||||
f(".*", "foo")
|
||||
f("foo.*", "foobar")
|
||||
f("foo.+", "foobar")
|
||||
f("f.+o", "foo")
|
||||
f("foo|bar", "bar")
|
||||
f("^(foo|bar)$", "foo")
|
||||
f("foo.+", "foobar")
|
||||
f("^foo$", "foo")
|
||||
}
|
||||
|
||||
func TestRegexpMatchStringFailure(t *testing.T) {
|
||||
f := func(pattern, s string) {
|
||||
t.Helper()
|
||||
prc := newTestRegexRelabelConfig(pattern)
|
||||
if prc.regex.MatchString(s) {
|
||||
t.Fatalf("unexpected MatchString(%q) result; got true; want false", s)
|
||||
}
|
||||
}
|
||||
f("", "foo")
|
||||
f("foo", "")
|
||||
f("foo.*", "foa")
|
||||
f("foo.+", "foo")
|
||||
f("f.+o", "foor")
|
||||
f("foo|bar", "barz")
|
||||
f("^(foo|bar)$", "xfoo")
|
||||
f("foo.+", "foo")
|
||||
f("^foo$", "foobar")
|
||||
}
|
||||
|
||||
func newTestRegexRelabelConfig(pattern string) *parsedRelabelConfig {
|
||||
rc := &RelabelConfig{
|
||||
Action: "labeldrop",
|
||||
Regex: &MultiLineRegex{
|
||||
S: pattern,
|
||||
},
|
||||
}
|
||||
prc, err := parseRelabelConfig(rc)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error in parseRelabelConfig: %s", err))
|
||||
}
|
||||
return prc
|
||||
}
|
||||
|
|
|
@ -2,11 +2,252 @@ package promrelabel
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
||||
func BenchmarkMatchRegexPrefixDotPlusMatchOptimized(b *testing.B) {
|
||||
const pattern = "^foo.+$"
|
||||
const s = "foobar"
|
||||
prc := newTestRegexRelabelConfig(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if !prc.regex.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string mismatch for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexPrefixDotPlusMatchUnoptimized(b *testing.B) {
|
||||
const pattern = "^foo.+$"
|
||||
const s = "foobar"
|
||||
re := regexp.MustCompile(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if !re.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string mismatch for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexPrefixDotPlusMismatchOptimized(b *testing.B) {
|
||||
const pattern = "^foo.+$"
|
||||
const s = "xfoobar"
|
||||
prc := newTestRegexRelabelConfig(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if prc.regex.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string match for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexPrefixDotPlusMismatchUnoptimized(b *testing.B) {
|
||||
const pattern = "^foo.+$"
|
||||
const s = "xfoobar"
|
||||
re := regexp.MustCompile(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if re.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string match for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexPrefixDotStarMatchOptimized(b *testing.B) {
|
||||
const pattern = "^foo.*$"
|
||||
const s = "foobar"
|
||||
prc := newTestRegexRelabelConfig(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if !prc.regex.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string mismatch for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexPrefixDotStarMatchUnoptimized(b *testing.B) {
|
||||
const pattern = "^foo.*$"
|
||||
const s = "foobar"
|
||||
re := regexp.MustCompile(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if !re.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string mismatch for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexPrefixDotStarMismatchOptimized(b *testing.B) {
|
||||
const pattern = "^foo.*$"
|
||||
const s = "xfoobar"
|
||||
prc := newTestRegexRelabelConfig(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if prc.regex.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string match for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexPrefixDotStarMismatchUnoptimized(b *testing.B) {
|
||||
const pattern = "^foo.*$"
|
||||
const s = "xfoobar"
|
||||
re := regexp.MustCompile(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if re.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string match for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexSingleValueMatchOptimized(b *testing.B) {
|
||||
const pattern = "^foo$"
|
||||
const s = "foo"
|
||||
prc := newTestRegexRelabelConfig(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if !prc.regex.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string mismatch for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexSingleValueMatchUnoptimized(b *testing.B) {
|
||||
const pattern = "^foo$"
|
||||
const s = "foo"
|
||||
re := regexp.MustCompile(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if !re.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string mismatch for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexSingleValueMismatchOptimized(b *testing.B) {
|
||||
const pattern = "^foo$"
|
||||
const s = "bar"
|
||||
prc := newTestRegexRelabelConfig(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if prc.regex.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string match for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexSingleValueMismatchUnoptimized(b *testing.B) {
|
||||
const pattern = "^foo$"
|
||||
const s = "bar"
|
||||
re := regexp.MustCompile(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if re.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string match for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexOrValuesMatchOptimized(b *testing.B) {
|
||||
const pattern = "^(foo|bar|baz|abc)$"
|
||||
const s = "foo"
|
||||
prc := newTestRegexRelabelConfig(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if !prc.regex.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string mismatch for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexOrValuesMatchUnoptimized(b *testing.B) {
|
||||
const pattern = "^(foo|bar|baz|abc)$"
|
||||
const s = "foo"
|
||||
re := regexp.MustCompile(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if !re.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string mismatch for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexOrValuesMismatchOptimized(b *testing.B) {
|
||||
const pattern = "^(foo|bar|baz|abc)"
|
||||
const s = "qwert"
|
||||
prc := newTestRegexRelabelConfig(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if prc.regex.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string match for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMatchRegexOrValuesMismatchUnoptimized(b *testing.B) {
|
||||
const pattern = "^(foo|bar|baz|abc)$"
|
||||
const s = "qwert"
|
||||
re := regexp.MustCompile(pattern)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if re.MatchString(s) {
|
||||
panic(fmt.Errorf("unexpected string match for pattern=%q, s=%q", pattern, s))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||
b.Run("replace-label-copy", func(b *testing.B) {
|
||||
pcs := mustParseRelabelConfigs(`
|
||||
|
@ -256,7 +497,7 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
|||
pcs := mustParseRelabelConfigs(`
|
||||
- action: drop
|
||||
source_labels: [id]
|
||||
regex: yes
|
||||
regex: "yes"
|
||||
`)
|
||||
labelsOrig := []prompbmarshal.Label{
|
||||
{
|
||||
|
@ -343,7 +584,7 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
|||
pcs := mustParseRelabelConfigs(`
|
||||
- action: keep
|
||||
source_labels: [id]
|
||||
regex: yes
|
||||
regex: "yes"
|
||||
`)
|
||||
labelsOrig := []prompbmarshal.Label{
|
||||
{
|
||||
|
|
|
@ -4,16 +4,15 @@ import (
|
|||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
||||
|
@ -35,6 +34,7 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/kubernetes"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/openstack"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/yandexcloud"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
@ -904,7 +904,7 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
|
|||
if metricsPath == "" {
|
||||
metricsPath = "/metrics"
|
||||
}
|
||||
scheme := sc.Scheme
|
||||
scheme := strings.ToLower(sc.Scheme)
|
||||
if scheme == "" {
|
||||
scheme = "http"
|
||||
}
|
||||
|
@ -1330,37 +1330,11 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
|
|||
func internLabelStrings(labels []prompbmarshal.Label) {
|
||||
for i := range labels {
|
||||
label := &labels[i]
|
||||
label.Name = internString(label.Name)
|
||||
label.Value = internString(label.Value)
|
||||
label.Name = discoveryutils.InternString(label.Name)
|
||||
label.Value = discoveryutils.InternString(label.Value)
|
||||
}
|
||||
}
|
||||
|
||||
func internString(s string) string {
|
||||
m := internStringsMap.Load().(*sync.Map)
|
||||
if v, ok := m.Load(s); ok {
|
||||
sp := v.(*string)
|
||||
return *sp
|
||||
}
|
||||
// Make a new copy for s in order to remove references from possible bigger string s refers to.
|
||||
sCopy := string(append([]byte{}, s...))
|
||||
m.Store(sCopy, &sCopy)
|
||||
n := atomic.AddUint64(&internStringsMapLen, 1)
|
||||
if n > 100e3 {
|
||||
atomic.StoreUint64(&internStringsMapLen, 0)
|
||||
internStringsMap.Store(&sync.Map{})
|
||||
}
|
||||
return sCopy
|
||||
}
|
||||
|
||||
var (
|
||||
internStringsMap atomic.Value
|
||||
internStringsMapLen uint64
|
||||
)
|
||||
|
||||
func init() {
|
||||
internStringsMap.Store(&sync.Map{})
|
||||
}
|
||||
|
||||
func getParamsFromLabels(labels []prompbmarshal.Label, paramsOrig map[string][]string) map[string][]string {
|
||||
// See https://www.robustperception.io/life-of-a-label
|
||||
m := make(map[string][]string)
|
||||
|
|
|
@ -17,44 +17,6 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
|
||||
)
|
||||
|
||||
func TestInternStringSerial(t *testing.T) {
|
||||
if err := testInternString(t); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternStringConcurrent(t *testing.T) {
|
||||
concurrency := 5
|
||||
resultCh := make(chan error, concurrency)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
go func() {
|
||||
resultCh <- testInternString(t)
|
||||
}()
|
||||
}
|
||||
timer := time.NewTimer(5 * time.Second)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
select {
|
||||
case err := <-resultCh:
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
case <-timer.C:
|
||||
t.Fatalf("timeout")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testInternString(t *testing.T) error {
|
||||
for i := 0; i < 1000; i++ {
|
||||
s := fmt.Sprintf("foo_%d", i)
|
||||
s1 := internString(s)
|
||||
if s != s1 {
|
||||
return fmt.Errorf("unexpected string returned from internString; got %q; want %q", s1, s)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestMergeLabels(t *testing.T) {
|
||||
f := func(swc *scrapeWorkConfig, target string, extraLabels, metaLabels map[string]string, resultExpected string) {
|
||||
t.Helper()
|
||||
|
|
|
@ -94,16 +94,13 @@ func (sn *ServiceNode) appendTargetLabels(ms []map[string]string, serviceName, t
|
|||
m["__meta_consul_tags"] = tagSeparator + strings.Join(sn.Service.Tags, tagSeparator) + tagSeparator
|
||||
|
||||
for k, v := range sn.Node.Meta {
|
||||
key := discoveryutils.SanitizeLabelName(k)
|
||||
m["__meta_consul_metadata_"+key] = v
|
||||
m[discoveryutils.SanitizeLabelName("__meta_consul_metadata_"+k)] = v
|
||||
}
|
||||
for k, v := range sn.Service.Meta {
|
||||
key := discoveryutils.SanitizeLabelName(k)
|
||||
m["__meta_consul_service_metadata_"+key] = v
|
||||
m[discoveryutils.SanitizeLabelName("__meta_consul_service_metadata_"+k)] = v
|
||||
}
|
||||
for k, v := range sn.Node.TaggedAddresses {
|
||||
key := discoveryutils.SanitizeLabelName(k)
|
||||
m["__meta_consul_tagged_address_"+key] = v
|
||||
m[discoveryutils.SanitizeLabelName("__meta_consul_tagged_address_"+k)] = v
|
||||
}
|
||||
ms = append(ms, m)
|
||||
return ms
|
||||
|
|
|
@ -107,7 +107,7 @@ func addCommonLabels(m map[string]string, c *container, networkLabels map[string
|
|||
m["__meta_docker_container_name"] = c.Names[0]
|
||||
m["__meta_docker_container_network_mode"] = c.HostConfig.NetworkMode
|
||||
for k, v := range c.Labels {
|
||||
m["__meta_docker_container_label_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
m[discoveryutils.SanitizeLabelName("__meta_docker_container_label_"+k)] = v
|
||||
}
|
||||
for k, v := range networkLabels {
|
||||
m[k] = v
|
||||
|
|
|
@ -53,7 +53,7 @@ func getNetworkLabelsByNetworkID(networks []network) map[string]map[string]strin
|
|||
"__meta_docker_network_scope": network.Scope,
|
||||
}
|
||||
for k, v := range network.Labels {
|
||||
m["__meta_docker_network_label_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
m[discoveryutils.SanitizeLabelName("__meta_docker_network_label_"+k)] = v
|
||||
}
|
||||
ms[network.ID] = m
|
||||
}
|
||||
|
|
|
@ -53,7 +53,7 @@ func getNetworkLabelsByNetworkID(networks []network) map[string]map[string]strin
|
|||
"__meta_dockerswarm_network_scope": network.Scope,
|
||||
}
|
||||
for k, v := range network.Labels {
|
||||
m["__meta_dockerswarm_network_label_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
m[discoveryutils.SanitizeLabelName("__meta_dockerswarm_network_label_"+k)] = v
|
||||
}
|
||||
ms[network.ID] = m
|
||||
}
|
||||
|
|
|
@ -80,7 +80,7 @@ func addNodeLabels(nodes []node, port int) []map[string]string {
|
|||
"__meta_dockerswarm_node_status": node.Status.State,
|
||||
}
|
||||
for k, v := range node.Spec.Labels {
|
||||
m["__meta_dockerswarm_node_label_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
m[discoveryutils.SanitizeLabelName("__meta_dockerswarm_node_label_"+k)] = v
|
||||
}
|
||||
ms = append(ms, m)
|
||||
}
|
||||
|
|
|
@ -96,7 +96,7 @@ func addServicesLabels(services []service, networksLabels map[string]map[string]
|
|||
"__meta_dockerswarm_service_updating_status": service.UpdateStatus.State,
|
||||
}
|
||||
for k, v := range service.Spec.Labels {
|
||||
commonLabels["__meta_dockerswarm_service_label_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
commonLabels[discoveryutils.SanitizeLabelName("__meta_dockerswarm_service_label_"+k)] = v
|
||||
}
|
||||
for _, vip := range service.Endpoint.VirtualIPs {
|
||||
// skip services without virtual address.
|
||||
|
|
|
@ -87,7 +87,7 @@ func addTasksLabels(tasks []task, nodesLabels, servicesLabels []map[string]strin
|
|||
"__meta_dockerswarm_task_state": task.Status.State,
|
||||
}
|
||||
for k, v := range task.Spec.ContainerSpec.Labels {
|
||||
commonLabels["__meta_dockerswarm_container_label_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
commonLabels[discoveryutils.SanitizeLabelName("__meta_dockerswarm_container_label_"+k)] = v
|
||||
}
|
||||
var svcPorts []portConfig
|
||||
for i, v := range services {
|
||||
|
|
|
@ -186,8 +186,7 @@ func (inst *Instance) appendTargetLabels(ms []map[string]string, ownerID string,
|
|||
if len(t.Key) == 0 || len(t.Value) == 0 {
|
||||
continue
|
||||
}
|
||||
name := discoveryutils.SanitizeLabelName(t.Key)
|
||||
m["__meta_ec2_tag_"+name] = t.Value
|
||||
m[discoveryutils.SanitizeLabelName("__meta_ec2_tag_"+t.Key)] = t.Value
|
||||
}
|
||||
ms = append(ms, m)
|
||||
return ms
|
||||
|
|
|
@ -139,11 +139,11 @@ func addInstanceLabels(apps *applications) []map[string]string {
|
|||
if len(instance.DataCenterInfo.Name) > 0 {
|
||||
m["__meta_eureka_app_instance_datacenterinfo_name"] = instance.DataCenterInfo.Name
|
||||
for _, tag := range instance.DataCenterInfo.Metadata.Items {
|
||||
m["__meta_eureka_app_instance_datacenterinfo_metadata_"+discoveryutils.SanitizeLabelName(tag.XMLName.Local)] = tag.Content
|
||||
m[discoveryutils.SanitizeLabelName("__meta_eureka_app_instance_datacenterinfo_metadata_"+tag.XMLName.Local)] = tag.Content
|
||||
}
|
||||
}
|
||||
for _, tag := range instance.Metadata.Items {
|
||||
m["__meta_eureka_app_instance_metadata_"+discoveryutils.SanitizeLabelName(tag.XMLName.Local)] = tag.Content
|
||||
m[discoveryutils.SanitizeLabelName("__meta_eureka_app_instance_metadata_"+tag.XMLName.Local)] = tag.Content
|
||||
}
|
||||
ms = append(ms, m)
|
||||
}
|
||||
|
|
|
@ -150,8 +150,7 @@ func (inst *Instance) appendTargetLabels(ms []map[string]string, project, tagSep
|
|||
"__meta_gce_zone": inst.Zone,
|
||||
}
|
||||
for _, iface := range inst.NetworkInterfaces {
|
||||
ifaceName := discoveryutils.SanitizeLabelName(iface.Name)
|
||||
m["__meta_gce_interface_ipv4_"+ifaceName] = iface.NetworkIP
|
||||
m[discoveryutils.SanitizeLabelName("__meta_gce_interface_ipv4_"+iface.Name)] = iface.NetworkIP
|
||||
}
|
||||
if len(inst.Tags.Items) > 0 {
|
||||
// We surround the separated list with the separator as well. This way regular expressions
|
||||
|
@ -159,12 +158,10 @@ func (inst *Instance) appendTargetLabels(ms []map[string]string, project, tagSep
|
|||
m["__meta_gce_tags"] = tagSeparator + strings.Join(inst.Tags.Items, tagSeparator) + tagSeparator
|
||||
}
|
||||
for _, item := range inst.Metadata.Items {
|
||||
key := discoveryutils.SanitizeLabelName(item.Key)
|
||||
m["__meta_gce_metadata_"+key] = item.Value
|
||||
m[discoveryutils.SanitizeLabelName("__meta_gce_metadata_"+item.Key)] = item.Value
|
||||
}
|
||||
for _, label := range inst.Labels {
|
||||
name := discoveryutils.SanitizeLabelName(label.Name)
|
||||
m["__meta_gce_label_"+name] = label.Value
|
||||
m[discoveryutils.SanitizeLabelName("__meta_gce_label_"+label.Name)] = label.Value
|
||||
}
|
||||
if len(iface.AccessConfigs) > 0 {
|
||||
ac := iface.AccessConfigs[0]
|
||||
|
|
|
@ -28,14 +28,12 @@ type ListMeta struct {
|
|||
|
||||
func (om *ObjectMeta) registerLabelsAndAnnotations(prefix string, m map[string]string) {
|
||||
for _, lb := range om.Labels {
|
||||
ln := discoveryutils.SanitizeLabelName(lb.Name)
|
||||
m[prefix+"_label_"+ln] = lb.Value
|
||||
m[prefix+"_labelpresent_"+ln] = "true"
|
||||
m[discoveryutils.SanitizeLabelName(prefix+"_label_"+lb.Name)] = lb.Value
|
||||
m[discoveryutils.SanitizeLabelName(prefix+"_labelpresent_"+lb.Name)] = "true"
|
||||
}
|
||||
for _, a := range om.Annotations {
|
||||
an := discoveryutils.SanitizeLabelName(a.Name)
|
||||
m[prefix+"_annotation_"+an] = a.Value
|
||||
m[prefix+"_annotationpresent_"+an] = "true"
|
||||
m[discoveryutils.SanitizeLabelName(prefix+"_annotation_"+a.Name)] = a.Value
|
||||
m[discoveryutils.SanitizeLabelName(prefix+"_annotationpresent_"+a.Name)] = "true"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -151,8 +151,8 @@ func getEndpointSliceLabels(eps *EndpointSlice, addr string, ea Endpoint, epp En
|
|||
m["__meta_kubernetes_endpointslice_endpoint_hostname"] = ea.Hostname
|
||||
}
|
||||
for k, v := range ea.Topology {
|
||||
m["__meta_kubernetes_endpointslice_endpoint_topology_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
m["__meta_kubernetes_endpointslice_endpoint_topology_present_"+discoveryutils.SanitizeLabelName(k)] = "true"
|
||||
m[discoveryutils.SanitizeLabelName("__meta_kubernetes_endpointslice_endpoint_topology_"+k)] = v
|
||||
m[discoveryutils.SanitizeLabelName("__meta_kubernetes_endpointslice_endpoint_topology_present_"+k)] = "true"
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
|
|
@ -104,8 +104,7 @@ func (n *Node) getTargetLabels(gw *groupWatcher) []map[string]string {
|
|||
continue
|
||||
}
|
||||
addrTypesUsed[a.Type] = true
|
||||
ln := discoveryutils.SanitizeLabelName(a.Type)
|
||||
m["__meta_kubernetes_node_address_"+ln] = a.Address
|
||||
m[discoveryutils.SanitizeLabelName("__meta_kubernetes_node_address_"+a.Type)] = a.Address
|
||||
}
|
||||
return []map[string]string{m}
|
||||
}
|
||||
|
|
|
@ -57,7 +57,7 @@ func addInstanceLabels(servers []server, port int) []map[string]string {
|
|||
"__meta_openstack_instance_flavor": server.Flavor.ID,
|
||||
}
|
||||
for k, v := range server.Metadata {
|
||||
m["__meta_openstack_tag_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
m[discoveryutils.SanitizeLabelName("__meta_openstack_tag_"+k)] = v
|
||||
}
|
||||
// Traverse server.Addresses in alphabetical order of pool name
|
||||
// in order to return targets in deterministic order.
|
||||
|
|
|
@ -51,7 +51,7 @@ func addInstanceLabels(instances []instance) []map[string]string {
|
|||
"__meta_yandexcloud_folder_id": server.FolderID,
|
||||
}
|
||||
for k, v := range server.Labels {
|
||||
m["__meta_yandexcloud_instance_label_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
m[discoveryutils.SanitizeLabelName("__meta_yandexcloud_instance_label_"+k)] = v
|
||||
}
|
||||
|
||||
for _, ni := range server.NetworkInterfaces {
|
||||
|
|
35
lib/promscrape/discoveryutils/internstring.go
Normal file
|
@ -0,0 +1,35 @@
|
|||
package discoveryutils
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// InternString returns interned s.
|
||||
//
|
||||
// This may be needed for reducing the amounts of allocated memory.
|
||||
func InternString(s string) string {
|
||||
m := internStringsMap.Load().(*sync.Map)
|
||||
if v, ok := m.Load(s); ok {
|
||||
sp := v.(*string)
|
||||
return *sp
|
||||
}
|
||||
// Make a new copy for s in order to remove references from possible bigger string s refers to.
|
||||
sCopy := string(append([]byte{}, s...))
|
||||
m.Store(sCopy, &sCopy)
|
||||
n := atomic.AddUint64(&internStringsMapLen, 1)
|
||||
if n > 100e3 {
|
||||
atomic.StoreUint64(&internStringsMapLen, 0)
|
||||
internStringsMap.Store(&sync.Map{})
|
||||
}
|
||||
return sCopy
|
||||
}
|
||||
|
||||
var (
|
||||
internStringsMap atomic.Value
|
||||
internStringsMapLen uint64
|
||||
)
|
||||
|
||||
func init() {
|
||||
internStringsMap.Store(&sync.Map{})
|
||||
}
|
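A rough usage sketch of the new discoveryutils.InternString helper that internLabelStrings now delegates to: repeated label names and values collapse to a single shared copy, which is what keeps memory usage down when many targets expose the same labels. The labels below are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
)

func main() {
	labels := []prompbmarshal.Label{
		{Name: "job", Value: "node-exporter"},
		{Name: "instance", Value: "10.0.0.1:9100"},
	}
	// Intern both names and values so identical strings coming from
	// different targets share the same backing memory.
	for i := range labels {
		labels[i].Name = discoveryutils.InternString(labels[i].Name)
		labels[i].Value = discoveryutils.InternString(labels[i].Value)
	}
	fmt.Println(labels)
}
```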
45
lib/promscrape/discoveryutils/internstring_test.go
Normal file
|
@ -0,0 +1,45 @@
|
|||
package discoveryutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestInternStringSerial(t *testing.T) {
|
||||
if err := testInternString(t); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternStringConcurrent(t *testing.T) {
|
||||
concurrency := 5
|
||||
resultCh := make(chan error, concurrency)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
go func() {
|
||||
resultCh <- testInternString(t)
|
||||
}()
|
||||
}
|
||||
timer := time.NewTimer(5 * time.Second)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
select {
|
||||
case err := <-resultCh:
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
case <-timer.C:
|
||||
t.Fatalf("timeout")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testInternString(t *testing.T) error {
|
||||
for i := 0; i < 1000; i++ {
|
||||
s := fmt.Sprintf("foo_%d", i)
|
||||
s1 := InternString(s)
|
||||
if s != s1 {
|
||||
return fmt.Errorf("unexpected string returned from internString; got %q; want %q", s1, s)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package promscrape
|
||||
package discoveryutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@ -15,7 +15,7 @@ func BenchmarkInternString(b *testing.B) {
|
|||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
for _, s := range a {
|
||||
sResult := internString(s)
|
||||
sResult := InternString(s)
|
||||
if sResult != s {
|
||||
panic(fmt.Sprintf("unexpected string obtained; got %q; want %q", sResult, s))
|
||||
}
|
|
@ -6,6 +6,8 @@ import (
|
|||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
@ -15,13 +17,44 @@ import (
|
|||
//
|
||||
// This has been copied from Prometheus sources at util/strutil/strconv.go
|
||||
func SanitizeLabelName(name string) string {
|
||||
return invalidLabelCharRE.ReplaceAllString(name, "_")
|
||||
m := sanitizedLabelNames.Load().(*sync.Map)
|
||||
v, ok := m.Load(name)
|
||||
if ok {
|
||||
// Fast path - the sanitized label name is found in the cache.
|
||||
sp := v.(*string)
|
||||
return *sp
|
||||
}
|
||||
// Slow path - sanitize name and store it in the cache.
|
||||
sanitizedName := invalidLabelCharRE.ReplaceAllString(name, "_")
|
||||
// Make a copy of name in order to limit memory usage to the name length,
|
||||
// since the name may point to bigger string.
|
||||
s := string(append([]byte{}, name...))
|
||||
if sanitizedName == name {
|
||||
// point sanitizedName to just allocated s, since it may point to name,
|
||||
// which, in turn, can point to bigger string.
|
||||
sanitizedName = s
|
||||
}
|
||||
sp := &sanitizedName
|
||||
m.Store(s, sp)
|
||||
n := atomic.AddUint64(&sanitizedLabelNamesLen, 1)
|
||||
if n > 100e3 {
|
||||
atomic.StoreUint64(&sanitizedLabelNamesLen, 0)
|
||||
sanitizedLabelNames.Store(&sync.Map{})
|
||||
}
|
||||
return sanitizedName
|
||||
}
|
||||
|
||||
var (
|
||||
sanitizedLabelNames atomic.Value
|
||||
sanitizedLabelNamesLen uint64
|
||||
|
||||
invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
|
||||
)
|
||||
|
||||
func init() {
|
||||
sanitizedLabelNames.Store(&sync.Map{})
|
||||
}
|
||||
|
||||
// JoinHostPort returns host:port.
|
||||
//
|
||||
// Host may be dns name, ipv4 or ipv6 address.
|
||||
|
|
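The service-discovery hunks above all switch from sanitizing only the dynamic key to sanitizing the whole concatenated label name, and SanitizeLabelName itself now caches results. Since the hard-coded `__meta_...` prefixes contain only valid label characters, both spellings produce the same label name; a small sketch with hypothetical keys:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
)

func main() {
	// Hypothetical metadata keys as they might arrive from a discovery response.
	for _, k := range []string{"beta.kubernetes.io/arch", "rack-id", "region"} {
		oldStyle := "__meta_consul_metadata_" + discoveryutils.SanitizeLabelName(k)
		newStyle := discoveryutils.SanitizeLabelName("__meta_consul_metadata_" + k)
		fmt.Println(oldStyle == newStyle, newStyle) // always true, e.g. __meta_consul_metadata_rack_id
	}
}
```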
56
lib/promscrape/discoveryutils/utils_test.go
Normal file
|
@ -0,0 +1,56 @@
|
|||
package discoveryutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestSanitizeLabelNameSerial(t *testing.T) {
|
||||
if err := testSanitizeLabelName(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizeLabelNameParallel(t *testing.T) {
|
||||
goroutines := 5
|
||||
ch := make(chan error, goroutines)
|
||||
for i := 0; i < goroutines; i++ {
|
||||
go func() {
|
||||
ch <- testSanitizeLabelName()
|
||||
}()
|
||||
}
|
||||
tch := time.After(5 * time.Second)
|
||||
for i := 0; i < goroutines; i++ {
|
||||
select {
|
||||
case <-tch:
|
||||
t.Fatalf("timeout!")
|
||||
case err := <-ch:
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testSanitizeLabelName() error {
|
||||
f := func(name, expectedSanitizedName string) error {
|
||||
for i := 0; i < 5; i++ {
|
||||
sanitizedName := SanitizeLabelName(name)
|
||||
if sanitizedName != expectedSanitizedName {
|
||||
return fmt.Errorf("unexpected sanitized label name %q; got %q; want %q", name, sanitizedName, expectedSanitizedName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := f("", ""); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f("foo", "foo"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f("foo-bar/baz", "foo_bar_baz"); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
21
lib/promscrape/discoveryutils/utils_timing_test.go
Normal file
|
@ -0,0 +1,21 @@
|
|||
package discoveryutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkSanitizeLabelName(b *testing.B) {
|
||||
labelName := "foo-bar/baz/aaaa+bbb"
|
||||
expectedLabelNameSanitized := "foo_bar_baz_aaaa_bbb"
|
||||
b.SetBytes(1)
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
labelNameSanitized := SanitizeLabelName(labelName)
|
||||
if labelNameSanitized != expectedLabelNameSanitized {
|
||||
panic(fmt.Errorf("unexpected sanitized label name; got %q; want %q", labelNameSanitized, expectedLabelNameSanitized))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
126
lib/regexutil/promregex.go
Normal file
|
@ -0,0 +1,126 @@
|
|||
package regexutil
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PromRegex implements an optimized string matching for Prometheus-like regex.
|
||||
//
|
||||
// The following regexs are optimized:
|
||||
//
|
||||
// - plain string such as "foobar"
|
||||
// - alternate strings such as "foo|bar|baz"
|
||||
// - prefix match such as "foo.*" or "foo.+"
|
||||
// - substring match such as ".*foo.*" or ".+bar.+"
|
||||
type PromRegex struct {
|
||||
// prefix contains literal prefix for regex.
|
||||
// For example, prefix="foo" for regex="foo(a|b)"
|
||||
prefix string
|
||||
|
||||
// Suffix contains regex suffix left after removing the prefix.
|
||||
// For example, suffix="a|b" for regex="foo(a|b)"
|
||||
suffix string
|
||||
|
||||
// substrDotStar contains literal string for regex suffix=".*string.*"
|
||||
substrDotStar string
|
||||
|
||||
// substrDotPlus contains literal string for regex suffix=".+string.+"
|
||||
substrDotPlus string
|
||||
|
||||
// orValues contains or values for the suffix regex.
|
||||
// For example, orValues contain ["foo","bar","baz"] for regex suffix="foo|bar|baz"
|
||||
orValues []string
|
||||
|
||||
// reSuffix contains an anchored regexp built from suffix:
|
||||
// "^(?:suffix)$"
|
||||
reSuffix *regexp.Regexp
|
||||
}
|
||||
|
||||
// NewPromRegex returns PromRegex for the given expr.
|
||||
func NewPromRegex(expr string) (*PromRegex, error) {
|
||||
if _, err := regexp.Compile(expr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prefix, suffix := Simplify(expr)
|
||||
orValues := GetOrValues(suffix)
|
||||
substrDotStar := getSubstringLiteral(suffix, ".*")
|
||||
substrDotPlus := getSubstringLiteral(suffix, ".+")
|
||||
// It is expected that Simplify returns valid regexp in suffix, so use MustCompile here.
|
||||
// Anchor suffix to the beginning and the end of the matching string.
|
||||
suffixExpr := "^(?:" + suffix + ")$"
|
||||
reSuffix := regexp.MustCompile(suffixExpr)
|
||||
pr := &PromRegex{
|
||||
prefix: prefix,
|
||||
suffix: suffix,
|
||||
substrDotStar: substrDotStar,
|
||||
substrDotPlus: substrDotPlus,
|
||||
orValues: orValues,
|
||||
reSuffix: reSuffix,
|
||||
}
|
||||
return pr, nil
|
||||
}
|
||||
|
||||
// HasPrefix returns true if pr contains non-empty literal prefix.
|
||||
//
|
||||
// For example, if pr is "foo(bar|baz)", then the prefix is "foo",
|
||||
// so HasPrefix() returns true.
|
||||
func (pr *PromRegex) HasPrefix() bool {
|
||||
return len(pr.prefix) > 0
|
||||
}
|
||||
|
||||
// MatchString returns true if s matches pr.
|
||||
//
|
||||
// The pr is automatically anchored to the beginning and to the end
|
||||
// of the matching string with '^' and '$'.
|
||||
func (pr *PromRegex) MatchString(s string) bool {
|
||||
if !strings.HasPrefix(s, pr.prefix) {
|
||||
// Fast path - s has another prefix than pr.
|
||||
return false
|
||||
}
|
||||
s = s[len(pr.prefix):]
|
||||
if len(pr.orValues) > 0 {
|
||||
// Fast path - pr contains only alternate strings such as 'foo|bar|baz'
|
||||
for _, v := range pr.orValues {
|
||||
if s == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
if pr.substrDotStar != "" {
|
||||
// Fast path - pr contains ".*someText.*"
|
||||
return strings.Contains(s, pr.substrDotStar)
|
||||
}
|
||||
if pr.substrDotPlus != "" {
|
||||
// Fast path - pr contains ".+someText.+"
|
||||
n := strings.Index(s, pr.substrDotPlus)
|
||||
return n > 0 && n+len(pr.substrDotPlus) < len(s)
|
||||
}
|
||||
switch pr.suffix {
|
||||
case ".*":
|
||||
// Fast path - the pr contains "prefix.*"
|
||||
return true
|
||||
case ".+":
|
||||
// Fast path - the pr contains "prefix.+"
|
||||
return len(s) > 0
|
||||
}
|
||||
// Fall back to slow path by matching the original regexp.
|
||||
return pr.reSuffix.MatchString(s)
|
||||
}
|
||||
|
||||
func getSubstringLiteral(expr, prefixSuffix string) string {
|
||||
if !strings.HasPrefix(expr, prefixSuffix) {
|
||||
return ""
|
||||
}
|
||||
expr = expr[len(prefixSuffix):]
|
||||
if !strings.HasSuffix(expr, prefixSuffix) {
|
||||
return ""
|
||||
}
|
||||
expr = expr[:len(expr)-len(prefixSuffix)]
|
||||
prefix, suffix := Simplify(expr)
|
||||
if suffix != "" {
|
||||
return ""
|
||||
}
|
||||
return prefix
|
||||
}
|
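A minimal usage sketch for the new PromRegex type; the expression and inputs here are illustrative:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil"
)

func main() {
	// "foo(bar|baz)" has the literal prefix "foo" and the or-values
	// "bar" and "baz", so MatchString can avoid regexp execution entirely.
	pr, err := regexutil.NewPromRegex("foo(bar|baz)")
	if err != nil {
		panic(err)
	}
	fmt.Println(pr.HasPrefix())            // true
	fmt.Println(pr.MatchString("foobar"))  // true
	fmt.Println(pr.MatchString("foobarx")) // false: the match is anchored
}
```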
92
lib/regexutil/promregex_test.go
Normal file
|
@ -0,0 +1,92 @@
|
|||
package regexutil
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPromRegexParseFailure(t *testing.T) {
|
||||
f := func(expr string) {
|
||||
t.Helper()
|
||||
pr, err := NewPromRegex(expr)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error for expr=%s", expr)
|
||||
}
|
||||
if pr != nil {
|
||||
t.Fatalf("expecting nil pr for expr=%s", expr)
|
||||
}
|
||||
}
|
||||
f("fo[bar")
|
||||
f("foo(bar")
|
||||
}
|
||||
|
||||
func TestPromRegex(t *testing.T) {
|
||||
f := func(expr, s string, resultExpected bool) {
|
||||
t.Helper()
|
||||
pr, err := NewPromRegex(expr)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
result := pr.MatchString(s)
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result when matching %s against %s; got %v; want %v", expr, s, result, resultExpected)
|
||||
}
|
||||
|
||||
// Make sure the result is the same for regular regexp
|
||||
exprAnchored := "^(?:" + expr + ")$"
|
||||
re := regexp.MustCompile(exprAnchored)
|
||||
result = re.MatchString(s)
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result when matching %s against %s during sanity check; got %v; want %v", exprAnchored, s, result, resultExpected)
|
||||
}
|
||||
}
|
||||
f("", "", true)
|
||||
f("", "foo", false)
|
||||
f("foo", "", false)
|
||||
f(".*", "", true)
|
||||
f(".*", "foo", true)
|
||||
f(".+", "", false)
|
||||
f(".+", "foo", true)
|
||||
f("foo.*", "bar", false)
|
||||
f("foo.*", "foo", true)
|
||||
f("foo.*", "foobar", true)
|
||||
f("foo.+", "bar", false)
|
||||
f("foo.+", "foo", false)
|
||||
f("foo.+", "foobar", true)
|
||||
f("foo|bar", "", false)
|
||||
f("foo|bar", "a", false)
|
||||
f("foo|bar", "foo", true)
|
||||
f("foo|bar", "bar", true)
|
||||
f("foo|bar", "foobar", false)
|
||||
f("foo(bar|baz)", "a", false)
|
||||
f("foo(bar|baz)", "foobar", true)
|
||||
f("foo(bar|baz)", "foobaz", true)
|
||||
f("foo(bar|baz)", "foobaza", false)
|
||||
f("foo(bar|baz)", "foobal", false)
|
||||
f("^foo|b(ar)$", "foo", true)
|
||||
f("^foo|b(ar)$", "bar", true)
|
||||
f("^foo|b(ar)$", "ar", false)
|
||||
f(".*foo.*", "foo", true)
|
||||
f(".*foo.*", "afoobar", true)
|
||||
f(".*foo.*", "abc", false)
|
||||
f("foo.*bar.*", "foobar", true)
|
||||
f("foo.*bar.*", "foo_bar_", true)
|
||||
f("foo.*bar.*", "foobaz", false)
|
||||
f(".+foo.+", "foo", false)
|
||||
f(".+foo.+", "afoobar", true)
|
||||
f(".+foo.+", "afoo", false)
|
||||
f(".+foo.+", "abc", false)
|
||||
f("foo.+bar.+", "foobar", false)
|
||||
f("foo.+bar.+", "foo_bar_", true)
|
||||
f("foo.+bar.+", "foobaz", false)
|
||||
f(".+foo.*", "foo", false)
|
||||
f(".+foo.*", "afoo", true)
|
||||
f(".+foo.*", "afoobar", true)
|
||||
f(".*(a|b).*", "a", true)
|
||||
f(".*(a|b).*", "ax", true)
|
||||
f(".*(a|b).*", "xa", true)
|
||||
f(".*(a|b).*", "xay", true)
|
||||
f(".*(a|b).*", "xzy", false)
|
||||
f("^(?:true)$", "true", true)
|
||||
f("^(?:true)$", "false", false)
|
||||
}
|
111
lib/regexutil/promregex_timing_test.go
Normal file
|
@ -0,0 +1,111 @@
|
|||
package regexutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkPromRegexMatchString(b *testing.B) {
|
||||
b.Run("unpotimized-noprefix-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "xbar.*|baz", "xbarz", true)
|
||||
})
|
||||
b.Run("unpotimized-noprefix-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "xbar.*|baz", "zfoobarz", false)
|
||||
})
|
||||
b.Run("unpotimized-prefix-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "foo(bar.*|baz)", "foobarz", true)
|
||||
})
|
||||
b.Run("unpotimized-prefix-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "foo(bar.*|baz)", "zfoobarz", false)
|
||||
})
|
||||
b.Run("dot-star-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, ".*", "foo", true)
|
||||
})
|
||||
b.Run("dot-plus-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, ".+", "foo", true)
|
||||
})
|
||||
b.Run("dot-plus-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, ".+", "", false)
|
||||
})
|
||||
b.Run("literal-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "foo", "foo", true)
|
||||
})
|
||||
b.Run("literal-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "foo", "bar", false)
|
||||
})
|
||||
b.Run("prefix-dot-star-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "foo.*", "foobar", true)
|
||||
})
|
||||
b.Run("prefix-dot-star-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "foo.*", "afoobar", false)
|
||||
})
|
||||
b.Run("prefix-dot-plus-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "foo.+", "foobar", true)
|
||||
})
|
||||
b.Run("prefix-dot-plus-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "foo.+", "afoobar", false)
|
||||
})
|
||||
b.Run("or-values-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "foo|bar|baz", "baz", true)
|
||||
})
|
||||
b.Run("or-values-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "foo|bar|baz", "abaz", false)
|
||||
})
|
||||
b.Run("prefix-or-values-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "x(foo|bar|baz)", "xbaz", true)
|
||||
})
|
||||
b.Run("prefix-or-values-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "x(foo|bar|baz)", "abaz", false)
|
||||
})
|
||||
b.Run("substring-dot-star-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, ".*foo.*", "afoobar", true)
|
||||
})
|
||||
b.Run("substring-dot-star-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, ".*foo.*", "abarbaz", false)
|
||||
})
|
||||
b.Run("substring-dot-plus-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, ".+foo.+", "afoobar", true)
|
||||
})
|
||||
b.Run("substring-dot-plus-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, ".+foo.+", "abarbaz", false)
|
||||
})
|
||||
b.Run("prefix-substring-dot-star-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "a.*foo.*", "afoobar", true)
|
||||
})
|
||||
b.Run("prefix-substring-dot-star-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "a.*foo.*", "abarbaz", false)
|
||||
})
|
||||
b.Run("prefix-substring-dot-plus-match", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "a.+foo.+", "abfoobar", true)
|
||||
})
|
||||
b.Run("prefix-substring-dot-plus-mismatch", func(b *testing.B) {
|
||||
benchmarkPromRegexMatchString(b, "a.+foo.+", "abarbaz", false)
|
||||
})
|
||||
}
|
||||
|
||||
func benchmarkPromRegexMatchString(b *testing.B, expr, s string, resultExpected bool) {
|
||||
pr, err := NewPromRegex(expr)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %s", err))
|
||||
}
|
||||
re := regexp.MustCompile("^(?:" + expr + ")$")
|
||||
f := func(b *testing.B, matchString func(s string) bool) {
|
||||
b.SetBytes(1)
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
result := matchString(s)
|
||||
if result != resultExpected {
|
||||
panic(fmt.Errorf("unexpected result when matching %s against %s; got %v; want %v", s, expr, result, resultExpected))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
b.Run("PromRegex", func(b *testing.B) {
|
||||
f(b, pr.MatchString)
|
||||
})
|
||||
b.Run("StandardRegex", func(b *testing.B) {
|
||||
f(b, re.MatchString)
|
||||
})
|
||||
}
|
259
lib/regexutil/regexutil.go
Normal file
|
@ -0,0 +1,259 @@
|
|||
package regexutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp/syntax"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// RemoveStartEndAnchors removes '^' at the start of expr and '$' at the end of the expr.
|
||||
func RemoveStartEndAnchors(expr string) string {
|
||||
for strings.HasPrefix(expr, "^") {
|
||||
expr = expr[1:]
|
||||
}
|
||||
for strings.HasSuffix(expr, "$") {
|
||||
expr = expr[:len(expr)-1]
|
||||
}
|
||||
return expr
|
||||
}
|
||||
|
||||
// GetOrValues returns "or" values from the given regexp expr.
|
||||
//
|
||||
// It ignores start and end anchors ('^') and ('$') at the start and the end of expr.
|
||||
// It returns ["foo", "bar"] for "foo|bar" regexp.
|
||||
// It returns ["foo"] for "foo" regexp.
|
||||
// It returns [""] for "" regexp.
|
||||
// It returns an empty list if it is impossible to extract "or" values from the regexp.
|
||||
func GetOrValues(expr string) []string {
|
||||
expr = RemoveStartEndAnchors(expr)
|
||||
prefix, tailExpr := Simplify(expr)
|
||||
if tailExpr == "" {
|
||||
return []string{prefix}
|
||||
}
|
||||
sre, err := syntax.Parse(tailExpr, syntax.Perl)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("BUG: unexpected error when parsing verified tailExpr=%q: %w", tailExpr, err))
|
||||
}
|
||||
orValues := getOrValuesExt(sre)
|
||||
|
||||
// Sort orValues for faster index seek later
|
||||
sort.Strings(orValues)
|
||||
|
||||
if len(prefix) > 0 {
|
||||
// Add prefix to orValues
|
||||
for i, orValue := range orValues {
|
||||
orValues[i] = prefix + orValue
|
||||
}
|
||||
}
|
||||
|
||||
return orValues
|
||||
}
|
||||
|
||||
func getOrValuesExt(sre *syntax.Regexp) []string {
|
||||
switch sre.Op {
|
||||
case syntax.OpCapture:
|
||||
return getOrValuesExt(sre.Sub[0])
|
||||
case syntax.OpLiteral:
|
||||
if !isLiteral(sre) {
|
||||
return nil
|
||||
}
|
||||
return []string{string(sre.Rune)}
|
||||
case syntax.OpEmptyMatch:
|
||||
return []string{""}
|
||||
case syntax.OpAlternate:
|
||||
a := make([]string, 0, len(sre.Sub))
|
||||
for _, reSub := range sre.Sub {
|
||||
ca := getOrValuesExt(reSub)
|
||||
if len(ca) == 0 {
|
||||
return nil
|
||||
}
|
||||
a = append(a, ca...)
|
||||
if len(a) > maxOrValues {
|
||||
// It is cheaper to use regexp here.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return a
|
||||
case syntax.OpCharClass:
|
||||
a := make([]string, 0, len(sre.Rune)/2)
|
||||
for i := 0; i < len(sre.Rune); i += 2 {
|
||||
start := sre.Rune[i]
|
||||
end := sre.Rune[i+1]
|
||||
for start <= end {
|
||||
a = append(a, string(start))
|
||||
start++
|
||||
if len(a) > maxOrValues {
|
||||
// It is cheaper to use regexp here.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return a
|
||||
case syntax.OpConcat:
|
||||
if len(sre.Sub) < 1 {
|
||||
return []string{""}
|
||||
}
|
||||
prefixes := getOrValuesExt(sre.Sub[0])
|
||||
if len(prefixes) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(sre.Sub) == 1 {
|
||||
return prefixes
|
||||
}
|
||||
sre.Sub = sre.Sub[1:]
|
||||
suffixes := getOrValuesExt(sre)
|
||||
if len(suffixes) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(prefixes)*len(suffixes) > maxOrValues {
|
||||
// It is cheaper to use regexp here.
|
||||
return nil
|
||||
}
|
||||
a := make([]string, 0, len(prefixes)*len(suffixes))
|
||||
for _, prefix := range prefixes {
|
||||
for _, suffix := range suffixes {
|
||||
s := prefix + suffix
|
||||
a = append(a, s)
|
||||
}
|
||||
}
|
||||
return a
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func isLiteral(sre *syntax.Regexp) bool {
|
||||
if sre.Op == syntax.OpCapture {
|
||||
return isLiteral(sre.Sub[0])
|
||||
}
|
||||
return sre.Op == syntax.OpLiteral && sre.Flags&syntax.FoldCase == 0
|
||||
}
|
||||
|
||||
const maxOrValues = 100
|
||||
|
||||
// Simplify simplifies the given expr.
|
||||
//
|
||||
// It returns plaintext prefix and the remaining regular expression
|
||||
// with dropped '^' and '$' anchors at the beginning and the end
|
||||
// of the regular expression.
|
||||
//
|
||||
// The function removes capturing parens from the expr,
|
||||
// so it cannot be used when capturing parens are necessary.
|
||||
func Simplify(expr string) (string, string) {
|
||||
sre, err := syntax.Parse(expr, syntax.Perl)
|
||||
if err != nil {
|
||||
// Cannot parse the regexp. Return it all as prefix.
|
||||
return expr, ""
|
||||
}
|
||||
sre = simplifyRegexp(sre, false)
|
||||
if sre == emptyRegexp {
|
||||
return "", ""
|
||||
}
|
||||
if isLiteral(sre) {
|
||||
return string(sre.Rune), ""
|
||||
}
|
||||
var prefix string
|
||||
if sre.Op == syntax.OpConcat {
|
||||
sub0 := sre.Sub[0]
|
||||
if isLiteral(sub0) {
|
||||
prefix = string(sub0.Rune)
|
||||
sre.Sub = sre.Sub[1:]
|
||||
if len(sre.Sub) == 0 {
|
||||
return prefix, ""
|
||||
}
|
||||
sre = simplifyRegexp(sre, true)
|
||||
}
|
||||
}
|
||||
if _, err := syntax.Compile(sre); err != nil {
|
||||
// Cannot compile the regexp. Return it all as prefix.
|
||||
return expr, ""
|
||||
}
|
||||
s := sre.String()
|
||||
s = strings.ReplaceAll(s, "(?:)", "")
|
||||
s = strings.ReplaceAll(s, "(?-s:.)", ".")
|
||||
s = strings.ReplaceAll(s, "(?-m:$)", "$")
|
||||
return prefix, s
|
||||
}
|
||||
|
||||
func simplifyRegexp(sre *syntax.Regexp, hasPrefix bool) *syntax.Regexp {
|
||||
s := sre.String()
|
||||
for {
|
||||
sre = simplifyRegexpExt(sre, hasPrefix, false)
|
||||
sre = sre.Simplify()
|
||||
if sre.Op == syntax.OpBeginText || sre.Op == syntax.OpEndText {
|
||||
sre = emptyRegexp
|
||||
}
|
||||
sNew := sre.String()
|
||||
if sNew == s {
|
||||
return sre
|
||||
}
|
||||
var err error
|
||||
sre, err = syntax.Parse(sNew, syntax.Perl)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("BUG: cannot parse simplified regexp %q: %w", sNew, err))
|
||||
}
|
||||
s = sNew
|
||||
}
|
||||
}
|
||||
|
||||
func simplifyRegexpExt(sre *syntax.Regexp, hasPrefix, hasSuffix bool) *syntax.Regexp {
|
||||
switch sre.Op {
|
||||
case syntax.OpCapture:
|
||||
// Substitute all the capture regexps with non-capture regexps.
|
||||
sre.Op = syntax.OpAlternate
|
||||
sre.Sub[0] = simplifyRegexpExt(sre.Sub[0], hasPrefix, hasSuffix)
|
||||
if sre.Sub[0] == emptyRegexp {
|
||||
return emptyRegexp
|
||||
}
|
||||
return sre
|
||||
case syntax.OpStar, syntax.OpPlus, syntax.OpQuest, syntax.OpRepeat:
|
||||
sre.Sub[0] = simplifyRegexpExt(sre.Sub[0], hasPrefix, hasSuffix)
|
||||
if sre.Sub[0] == emptyRegexp {
|
||||
return emptyRegexp
|
||||
}
|
||||
return sre
|
||||
case syntax.OpAlternate:
|
||||
// Do not remove empty captures from OpAlternate, since this may break regexp.
|
||||
for i, sub := range sre.Sub {
|
||||
sre.Sub[i] = simplifyRegexpExt(sub, hasPrefix, hasSuffix)
|
||||
}
|
||||
return sre
|
||||
case syntax.OpConcat:
|
||||
subs := sre.Sub[:0]
|
||||
for i, sub := range sre.Sub {
|
||||
sub = simplifyRegexpExt(sub, hasPrefix || len(subs) > 0, hasSuffix || i+1 < len(sre.Sub))
|
||||
if sub != emptyRegexp {
|
||||
subs = append(subs, sub)
|
||||
}
|
||||
}
|
||||
sre.Sub = subs
|
||||
// Remove anchors from the beginning and the end of regexp, since they
|
||||
// will be added later.
|
||||
if !hasPrefix {
|
||||
for len(sre.Sub) > 0 && sre.Sub[0].Op == syntax.OpBeginText {
|
||||
sre.Sub = sre.Sub[1:]
|
||||
}
|
||||
}
|
||||
if !hasSuffix {
|
||||
for len(sre.Sub) > 0 && sre.Sub[len(sre.Sub)-1].Op == syntax.OpEndText {
|
||||
sre.Sub = sre.Sub[:len(sre.Sub)-1]
|
||||
}
|
||||
}
|
||||
if len(sre.Sub) == 0 {
|
||||
return emptyRegexp
|
||||
}
|
||||
if len(sre.Sub) == 1 {
|
||||
return sre.Sub[0]
|
||||
}
|
||||
return sre
|
||||
case syntax.OpEmptyMatch:
|
||||
return emptyRegexp
|
||||
default:
|
||||
return sre
|
||||
}
|
||||
}
|
||||
|
||||
var emptyRegexp = &syntax.Regexp{
|
||||
Op: syntax.OpEmptyMatch,
|
||||
}
|
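To make the contracts of Simplify and GetOrValues concrete, here is a short sketch whose expected outputs match the test cases in regexutil_test.go below:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil"
)

func main() {
	// Simplify splits a regexp into a literal prefix and a leftover suffix.
	prefix, suffix := regexutil.Simplify("foo(bar|baz|bana)")
	fmt.Println(prefix, suffix) // "fooba" "[rz]|na"

	// GetOrValues extracts the full set of alternate literals when possible
	// and returns an empty list when the regexp cannot be enumerated.
	fmt.Println(regexutil.GetOrValues("foo(bar|baz)")) // [foobar foobaz]
	fmt.Println(regexutil.GetOrValues("foo.*"))        // []
}
```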
112
lib/regexutil/regexutil_test.go
Normal file
|
@ -0,0 +1,112 @@
|
|||
package regexutil
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetOrValues(t *testing.T) {
|
||||
f := func(s string, valuesExpected []string) {
|
||||
t.Helper()
|
||||
values := GetOrValues(s)
|
||||
if !reflect.DeepEqual(values, valuesExpected) {
|
||||
t.Fatalf("unexpected values for s=%q; got %q; want %q", s, values, valuesExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", []string{""})
|
||||
f("foo", []string{"foo"})
|
||||
f("^foo$", []string{"foo"})
|
||||
f("|foo", []string{"", "foo"})
|
||||
f("|foo|", []string{"", "", "foo"})
|
||||
f("foo.+", nil)
|
||||
f("foo.*", nil)
|
||||
f(".*", nil)
|
||||
f("foo|.*", nil)
|
||||
f("(fo((o)))|(bar)", []string{"bar", "foo"})
|
||||
f("foobar", []string{"foobar"})
|
||||
f("z|x|c", []string{"c", "x", "z"})
|
||||
f("foo|bar", []string{"bar", "foo"})
|
||||
f("(foo|bar)", []string{"bar", "foo"})
|
||||
f("(foo|bar)baz", []string{"barbaz", "foobaz"})
|
||||
f("[a-z][a-z]", nil)
|
||||
f("[a-d]", []string{"a", "b", "c", "d"})
|
||||
f("x[a-d]we", []string{"xawe", "xbwe", "xcwe", "xdwe"})
|
||||
f("foo(bar|baz)", []string{"foobar", "foobaz"})
|
||||
f("foo(ba[rz]|(xx|o))", []string{"foobar", "foobaz", "fooo", "fooxx"})
|
||||
f("foo(?:bar|baz)x(qwe|rt)", []string{"foobarxqwe", "foobarxrt", "foobazxqwe", "foobazxrt"})
|
||||
f("foo(bar||baz)", []string{"foo", "foobar", "foobaz"})
|
||||
f("(a|b|c)(d|e|f|0|1|2)(g|h|k|x|y|z)", nil)
|
||||
f("(?i)foo", nil)
|
||||
f("(?i)(foo|bar)", nil)
|
||||
f("^foo|bar$", []string{"bar", "foo"})
|
||||
f("^(foo|bar)$", []string{"bar", "foo"})
|
||||
f("^a(foo|b(?:a|r))$", []string{"aba", "abr", "afoo"})
|
||||
f("^a(foo$|b(?:a$|r))$", []string{"aba", "abr", "afoo"})
|
||||
f("^a(^foo|bar$)z$", nil)
|
||||
}
|
||||
|
||||
func TestSimplify(t *testing.T) {
|
||||
f := func(s, expectedPrefix, expectedSuffix string) {
|
||||
t.Helper()
|
||||
prefix, suffix := Simplify(s)
|
||||
if prefix != expectedPrefix {
|
||||
t.Fatalf("unexpected prefix for s=%q; got %q; want %q", s, prefix, expectedPrefix)
|
||||
}
|
||||
if suffix != expectedSuffix {
|
||||
t.Fatalf("unexpected suffix for s=%q; got %q; want %q", s, suffix, expectedSuffix)
|
||||
}
|
||||
}
|
||||
|
||||
f("", "", "")
|
||||
f("^", "", "")
|
||||
f("$", "", "")
|
||||
f("^()$", "", "")
|
||||
f("^(?:)$", "", "")
|
||||
f("^foo|^bar$|baz", "", "foo|ba[rz]")
|
||||
f("^(foo$|^bar)$", "", "foo|bar")
|
||||
f("^a(foo$|bar)$", "a", "foo|bar")
|
||||
f("^a(^foo|bar$)z$", "a", "(?:\\Afoo|bar$)z")
|
||||
f("foobar", "foobar", "")
|
||||
f("foo$|^foobar", "foo", "|bar")
|
||||
f("^(foo$|^foobar)$", "foo", "|bar")
|
||||
f("foobar|foobaz", "fooba", "[rz]")
|
||||
f("(fo|(zar|bazz)|x)", "", "fo|zar|bazz|x")
|
||||
f("(тестЧЧ|тест)", "тест", "ЧЧ|")
|
||||
f("foo(bar|baz|bana)", "fooba", "[rz]|na")
|
||||
f("^foobar|foobaz", "fooba", "[rz]")
|
||||
f("^foobar|^foobaz$", "fooba", "[rz]")
|
||||
f("foobar|foobaz", "fooba", "[rz]")
|
||||
f("(?:^foobar|^foobaz)aa.*", "fooba", "[rz]aa.*")
|
||||
f("foo[bar]+", "foo", "[a-br]+")
|
||||
f("foo[a-z]+", "foo", "[a-z]+")
|
||||
f("foo[bar]*", "foo", "[a-br]*")
|
||||
f("foo[a-z]*", "foo", "[a-z]*")
|
||||
f("foo[x]+", "foo", "x+")
|
||||
f("foo[^x]+", "foo", "[^x]+")
|
||||
f("foo[x]*", "foo", "x*")
|
||||
f("foo[^x]*", "foo", "[^x]*")
|
||||
f("foo[x]*bar", "foo", "x*bar")
|
||||
f("fo\\Bo[x]*bar?", "fo", "\\Box*bar?")
|
||||
f("foo.+bar", "foo", ".+bar")
|
||||
f("a(b|c.*).+", "a", "(?:b|c.*).+")
|
||||
f("ab|ac", "a", "[b-c]")
|
||||
f("(?i)xyz", "", "(?i:XYZ)")
|
||||
f("(?i)foo|bar", "", "(?i:FOO)|(?i:BAR)")
|
||||
f("(?i)up.+x", "", "(?i:UP).+(?i:X)")
|
||||
f("(?smi)xy.*z$", "", "(?i:XY)(?s:.)*(?i:Z)(?m:$)")
|
||||
|
||||
// test invalid regexps
|
||||
f("a(", "a(", "")
|
||||
f("a[", "a[", "")
|
||||
f("a[]", "a[]", "")
|
||||
f("a{", "a{", "")
|
||||
f("a{}", "a{}", "")
|
||||
f("invalid(regexp", "invalid(regexp", "")
|
||||
|
||||
// The transformed regexp mustn't match aba
|
||||
f("a?(^ba|c)", "", "a?(?:\\Aba|c)")
|
||||
|
||||
// The transformed regexp mustn't match barx
|
||||
f("(foo|bar$)x*", "", "(?:foo|bar$)x*")
|
||||
}
|
|
@ -66,8 +66,8 @@ func (tag *Tag) copyFrom(src *Tag) {
|
|||
tag.Value = append(tag.Value[:0], src.Value...)
|
||||
}
|
||||
|
||||
func marshalTagValueNoTrailingTagSeparator(dst, src []byte) []byte {
|
||||
dst = marshalTagValue(dst, src)
|
||||
func marshalTagValueNoTrailingTagSeparator(dst []byte, src string) []byte {
|
||||
dst = marshalTagValue(dst, bytesutil.ToUnsafeBytes(src))
|
||||
// Remove trailing tagSeparatorChar
|
||||
return dst[:len(dst)-1]
|
||||
}
|
||||
|
|
|
@ -469,8 +469,13 @@ type Metrics struct {
|
|||
SlowPerDayIndexInserts uint64
|
||||
SlowMetricNameLoads uint64
|
||||
|
||||
HourlySeriesLimitRowsDropped uint64
|
||||
DailySeriesLimitRowsDropped uint64
|
||||
HourlySeriesLimitRowsDropped uint64
|
||||
HourlySeriesLimitMaxSeries uint64
|
||||
HourlySeriesLimitCurrentSeries uint64
|
||||
|
||||
DailySeriesLimitRowsDropped uint64
|
||||
DailySeriesLimitMaxSeries uint64
|
||||
DailySeriesLimitCurrentSeries uint64
|
||||
|
||||
TimestampsBlocksMerged uint64
|
||||
TimestampsBytesSaved uint64
|
||||
|
@ -546,8 +551,17 @@ func (s *Storage) UpdateMetrics(m *Metrics) {
|
|||
m.SlowPerDayIndexInserts += atomic.LoadUint64(&s.slowPerDayIndexInserts)
|
||||
m.SlowMetricNameLoads += atomic.LoadUint64(&s.slowMetricNameLoads)
|
||||
|
||||
m.HourlySeriesLimitRowsDropped += atomic.LoadUint64(&s.hourlySeriesLimitRowsDropped)
|
||||
m.DailySeriesLimitRowsDropped += atomic.LoadUint64(&s.dailySeriesLimitRowsDropped)
|
||||
if sl := s.hourlySeriesLimiter; sl != nil {
|
||||
m.HourlySeriesLimitRowsDropped += atomic.LoadUint64(&s.hourlySeriesLimitRowsDropped)
|
||||
m.HourlySeriesLimitMaxSeries += uint64(sl.MaxItems())
|
||||
m.HourlySeriesLimitCurrentSeries += uint64(sl.CurrentItems())
|
||||
}
|
||||
|
||||
if sl := s.dailySeriesLimiter; sl != nil {
|
||||
m.DailySeriesLimitRowsDropped += atomic.LoadUint64(&s.dailySeriesLimitRowsDropped)
|
||||
m.DailySeriesLimitMaxSeries += uint64(sl.MaxItems())
|
||||
m.DailySeriesLimitCurrentSeries += uint64(sl.CurrentItems())
|
||||
}
|
||||
|
||||
m.TimestampsBlocksMerged = atomic.LoadUint64(&timestampsBlocksMerged)
|
||||
m.TimestampsBytesSaved = atomic.LoadUint64(&timestampsBytesSaved)
|
||||
|
|
|
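A rough sketch of how a caller could use the new per-limiter Metrics fields, for example to report how close the hourly series limit is to exhaustion. Only the field and method names come from the hunk above; the import path, package name and the presence of an opened Storage are assumptions:

```go
package storageexample

import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

// HourlySeriesLimitUsage returns the fraction of the hourly series limit
// currently in use. It returns 0 when no hourly limit is configured,
// since MaxSeries stays zero in that case.
func HourlySeriesLimitUsage(s *storage.Storage) float64 {
	var m storage.Metrics
	s.UpdateMetrics(&m)
	if m.HourlySeriesLimitMaxSeries == 0 {
		return 0
	}
	return float64(m.HourlySeriesLimitCurrentSeries) / float64(m.HourlySeriesLimitMaxSeries)
}
```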
@ -15,6 +15,7 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/lrucache"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil"
|
||||
)
|
||||
|
||||
// convertToCompositeTagFilterss converts tfss to composite filters.
|
||||
|
@ -362,7 +363,7 @@ func (tf *tagFilter) InitFromGraphiteQuery(commonPrefix, query []byte, paths []s
|
|||
tf.regexpPrefix = prefix
|
||||
tf.prefix = append(tf.prefix[:0], commonPrefix...)
|
||||
tf.prefix = marshalTagValue(tf.prefix, nil)
|
||||
tf.prefix = marshalTagValueNoTrailingTagSeparator(tf.prefix, []byte(prefix))
|
||||
tf.prefix = marshalTagValueNoTrailingTagSeparator(tf.prefix, prefix)
|
||||
tf.orSuffixes = append(tf.orSuffixes[:0], orSuffixes...)
|
||||
tf.reSuffixMatch, tf.matchCost = newMatchFuncForOrSuffixes(orSuffixes)
|
||||
}
|
||||
|
@ -418,15 +419,15 @@ func (tf *tagFilter) Init(commonPrefix, key, value []byte, isNegative, isRegexp
|
|||
tf.prefix = append(tf.prefix, commonPrefix...)
|
||||
tf.prefix = marshalTagValue(tf.prefix, key)
|
||||
|
||||
var expr []byte
|
||||
prefix := tf.value
|
||||
var expr string
|
||||
prefix := bytesutil.ToUnsafeString(tf.value)
|
||||
if tf.isRegexp {
|
||||
prefix, expr = getRegexpPrefix(tf.value)
|
||||
prefix, expr = simplifyRegexp(prefix)
|
||||
if len(expr) == 0 {
|
||||
tf.value = append(tf.value[:0], prefix...)
|
||||
tf.isRegexp = false
|
||||
} else {
|
||||
tf.regexpPrefix = string(prefix)
|
||||
tf.regexpPrefix = prefix
|
||||
}
|
||||
}
|
||||
tf.prefix = marshalTagValueNoTrailingTagSeparator(tf.prefix, prefix)
|
||||
|
@ -507,23 +508,23 @@ func RegexpCacheMisses() uint64 {
|
|||
return regexpCache.Misses()
|
||||
}
|
||||
|
||||
func getRegexpFromCache(expr []byte) (*regexpCacheValue, error) {
|
||||
if rcv := regexpCache.GetEntry(bytesutil.ToUnsafeString(expr)); rcv != nil {
|
||||
func getRegexpFromCache(expr string) (*regexpCacheValue, error) {
|
||||
if rcv := regexpCache.GetEntry(expr); rcv != nil {
|
||||
// Fast path - the regexp found in the cache.
|
||||
return rcv.(*regexpCacheValue), nil
|
||||
}
|
||||
// Slow path - build the regexp.
|
||||
exprOrig := string(expr)
|
||||
exprOrig := expr
|
||||
|
||||
expr = []byte(tagCharsRegexpEscaper.Replace(exprOrig))
|
||||
expr = tagCharsRegexpEscaper.Replace(exprOrig)
|
||||
exprStr := fmt.Sprintf("^(%s)$", expr)
|
||||
re, err := regexp.Compile(exprStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid regexp %q: %w", exprStr, err)
|
||||
}
|
||||
|
||||
sExpr := string(expr)
|
||||
orValues := getOrValues(sExpr)
|
||||
sExpr := expr
|
||||
orValues := regexutil.GetOrValues(sExpr)
|
||||
var reMatch func(b []byte) bool
|
||||
var reCost uint64
|
||||
var literalSuffix string
|
||||
|
@ -787,91 +788,6 @@ func isLiteral(sre *syntax.Regexp) bool {
|
|||
return sre.Op == syntax.OpLiteral && sre.Flags&syntax.FoldCase == 0
|
||||
}
|
||||
|
||||
func getOrValues(expr string) []string {
|
||||
sre, err := syntax.Parse(expr, syntax.Perl)
|
||||
if err != nil {
|
||||
logger.Panicf("BUG: unexpected error when parsing verified expr=%q: %s", expr, err)
|
||||
}
|
||||
orValues := getOrValuesExt(sre)
|
||||
|
||||
// Sort orValues for faster index seek later
|
||||
sort.Strings(orValues)
|
||||
|
||||
return orValues
|
||||
}
|
||||
|
||||
func getOrValuesExt(sre *syntax.Regexp) []string {
|
||||
switch sre.Op {
|
||||
case syntax.OpCapture:
|
||||
return getOrValuesExt(sre.Sub[0])
|
||||
case syntax.OpLiteral:
|
||||
if !isLiteral(sre) {
|
||||
return nil
|
||||
}
|
||||
return []string{string(sre.Rune)}
|
||||
case syntax.OpEmptyMatch:
|
||||
return []string{""}
|
||||
case syntax.OpAlternate:
|
||||
a := make([]string, 0, len(sre.Sub))
|
||||
for _, reSub := range sre.Sub {
|
||||
ca := getOrValuesExt(reSub)
|
||||
if len(ca) == 0 {
|
||||
return nil
|
||||
}
|
||||
a = append(a, ca...)
|
||||
if len(a) > maxOrValues {
|
||||
// It is cheaper to use regexp here.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return a
|
||||
case syntax.OpCharClass:
|
||||
a := make([]string, 0, len(sre.Rune)/2)
|
||||
for i := 0; i < len(sre.Rune); i += 2 {
|
||||
start := sre.Rune[i]
|
||||
end := sre.Rune[i+1]
|
||||
for start <= end {
|
||||
a = append(a, string(start))
|
||||
start++
|
||||
if len(a) > maxOrValues {
|
||||
// It is cheaper to use regexp here.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return a
|
||||
case syntax.OpConcat:
|
||||
if len(sre.Sub) < 1 {
|
||||
return []string{""}
|
||||
}
|
||||
prefixes := getOrValuesExt(sre.Sub[0])
|
||||
if len(prefixes) == 0 {
|
||||
return nil
|
||||
}
|
||||
sre.Sub = sre.Sub[1:]
|
||||
suffixes := getOrValuesExt(sre)
|
||||
if len(suffixes) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(prefixes)*len(suffixes) > maxOrValues {
|
||||
// It is cheaper to use regexp here.
|
||||
return nil
|
||||
}
|
||||
a := make([]string, 0, len(prefixes)*len(suffixes))
|
||||
for _, prefix := range prefixes {
|
||||
for _, suffix := range suffixes {
|
||||
s := prefix + suffix
|
||||
a = append(a, s)
|
||||
}
|
||||
}
|
||||
return a
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
const maxOrValues = 20
|
||||
|
||||
var tagCharsRegexpEscaper = strings.NewReplacer(
|
||||
"\\x00", "\\x000", // escapeChar
|
||||
"\x00", "\\x000", // escapeChar
|
||||
|
@ -919,22 +835,28 @@ func (rcv *regexpCacheValue) SizeBytes() int {
|
|||
return rcv.sizeBytes
|
||||
}
|
||||
|
||||
func getRegexpPrefix(b []byte) ([]byte, []byte) {
|
||||
// Fast path - search the prefix in the cache.
|
||||
if ps := prefixesCache.GetEntry(bytesutil.ToUnsafeString(b)); ps != nil {
|
||||
func simplifyRegexp(expr string) (string, string) {
|
||||
// It is safe to pass the expr constructed via bytesutil.ToUnsafeString()
|
||||
// to GetEntry() here.
|
||||
if ps := prefixesCache.GetEntry(expr); ps != nil {
|
||||
// Fast path - the simplified expr is found in the cache.
|
||||
ps := ps.(*prefixSuffix)
|
||||
return ps.prefix, ps.suffix
|
||||
}
|
||||
|
||||
// Slow path - extract the regexp prefix from b.
|
||||
prefix, suffix := extractRegexpPrefix(b)
|
||||
// Slow path - simplify the expr.
|
||||
|
||||
// Make a copy of expr before using it,
|
||||
// since it may be constructed via bytesutil.ToUnsafeString()
|
||||
expr = string(append([]byte{}, expr...))
|
||||
prefix, suffix := regexutil.Simplify(expr)
|
||||
|
||||
// Put the prefix and the suffix to the cache.
|
||||
ps := &prefixSuffix{
|
||||
prefix: prefix,
|
||||
suffix: suffix,
|
||||
}
|
||||
prefixesCache.PutEntry(string(b), ps)
|
||||
prefixesCache.PutEntry(expr, ps)
|
||||
|
||||
return prefix, suffix
|
||||
}
|
||||
|
@ -981,120 +903,11 @@ func RegexpPrefixesCacheMisses() uint64 {
|
|||
}
|
||||
|
||||
type prefixSuffix struct {
|
||||
prefix []byte
|
||||
suffix []byte
|
||||
prefix string
|
||||
suffix string
|
||||
}
|
||||
|
||||
// SizeBytes implements lrucache.Entry interface
|
||||
func (ps *prefixSuffix) SizeBytes() int {
|
||||
return cap(ps.prefix) + cap(ps.suffix) + int(unsafe.Sizeof(*ps))
|
||||
}
|
||||
|
||||
func extractRegexpPrefix(b []byte) ([]byte, []byte) {
|
||||
sre, err := syntax.Parse(string(b), syntax.Perl)
|
||||
if err != nil {
|
||||
// Cannot parse the regexp. Return it all as prefix.
|
||||
return b, nil
|
||||
}
|
||||
sre = simplifyRegexp(sre)
|
||||
if sre == emptyRegexp {
|
||||
return nil, nil
|
||||
}
|
||||
if isLiteral(sre) {
|
||||
return []byte(string(sre.Rune)), nil
|
||||
}
|
||||
var prefix []byte
|
||||
if sre.Op == syntax.OpConcat {
|
||||
sub0 := sre.Sub[0]
|
||||
if isLiteral(sub0) {
|
||||
prefix = []byte(string(sub0.Rune))
|
||||
sre.Sub = sre.Sub[1:]
|
||||
if len(sre.Sub) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, err := syntax.Compile(sre); err != nil {
|
||||
// Cannot compile the regexp. Return it all as prefix.
|
||||
return b, nil
|
||||
}
|
||||
return prefix, []byte(sre.String())
|
||||
}
|
||||
|
||||
func simplifyRegexp(sre *syntax.Regexp) *syntax.Regexp {
|
||||
s := sre.String()
|
||||
for {
|
||||
sre = simplifyRegexpExt(sre, false, false)
|
||||
sre = sre.Simplify()
|
||||
if sre.Op == syntax.OpBeginText || sre.Op == syntax.OpEndText {
|
||||
sre = emptyRegexp
|
||||
}
|
||||
sNew := sre.String()
|
||||
if sNew == s {
|
||||
return sre
|
||||
}
|
||||
var err error
|
||||
sre, err = syntax.Parse(sNew, syntax.Perl)
|
||||
if err != nil {
|
||||
logger.Panicf("BUG: cannot parse simplified regexp %q: %s", sNew, err)
|
||||
}
|
||||
s = sNew
|
||||
}
|
||||
}
|
||||
|
||||
func simplifyRegexpExt(sre *syntax.Regexp, hasPrefix, hasSuffix bool) *syntax.Regexp {
|
||||
switch sre.Op {
|
||||
case syntax.OpCapture:
|
||||
// Substitute all the capture regexps with non-capture regexps.
|
||||
sre.Op = syntax.OpAlternate
|
||||
sre.Sub[0] = simplifyRegexpExt(sre.Sub[0], hasPrefix, hasSuffix)
|
||||
if sre.Sub[0] == emptyRegexp {
|
||||
return emptyRegexp
|
||||
}
|
||||
return sre
|
||||
case syntax.OpStar, syntax.OpPlus, syntax.OpQuest, syntax.OpRepeat:
|
||||
sre.Sub[0] = simplifyRegexpExt(sre.Sub[0], hasPrefix, hasSuffix)
|
||||
if sre.Sub[0] == emptyRegexp {
|
||||
return emptyRegexp
|
||||
}
|
||||
return sre
|
||||
case syntax.OpAlternate:
|
||||
// Do not remove empty captures from OpAlternate, since this may break regexp.
|
||||
for i, sub := range sre.Sub {
|
||||
sre.Sub[i] = simplifyRegexpExt(sub, hasPrefix, hasSuffix)
|
||||
}
|
||||
return sre
|
||||
case syntax.OpConcat:
|
||||
subs := sre.Sub[:0]
|
||||
for i, sub := range sre.Sub {
|
||||
if sub = simplifyRegexpExt(sub, i > 0, i+1 < len(sre.Sub)); sub != emptyRegexp {
|
||||
subs = append(subs, sub)
|
||||
}
|
||||
}
|
||||
sre.Sub = subs
|
||||
// Remove anchros from the beginning and the end of regexp, since they
|
||||
// will be added later.
|
||||
if !hasPrefix {
|
||||
for len(sre.Sub) > 0 && sre.Sub[0].Op == syntax.OpBeginText {
|
||||
sre.Sub = sre.Sub[1:]
|
||||
}
|
||||
}
|
||||
if !hasSuffix {
|
||||
for len(sre.Sub) > 0 && sre.Sub[len(sre.Sub)-1].Op == syntax.OpEndText {
|
||||
sre.Sub = sre.Sub[:len(sre.Sub)-1]
|
||||
}
|
||||
}
|
||||
if len(sre.Sub) == 0 {
|
||||
return emptyRegexp
|
||||
}
|
||||
return sre
|
||||
case syntax.OpEmptyMatch:
|
||||
return emptyRegexp
|
||||
default:
|
||||
return sre
|
||||
}
|
||||
}
|
||||
|
||||
var emptyRegexp = &syntax.Regexp{
|
||||
Op: syntax.OpEmptyMatch,
|
||||
return len(ps.prefix) + len(ps.suffix) + int(unsafe.Sizeof(*ps))
|
||||
}
|
||||
|
|
|
@ -675,26 +675,11 @@ func TestGetCommonPrefix(t *testing.T) {
|
|||
f([]string{"foo1", "foo2", "foo34"}, "foo")
|
||||
}
|
||||
|
||||
func TestExtractRegexpPrefix(t *testing.T) {
|
||||
f := func(s string, expectedPrefix, expectedSuffix string) {
|
||||
t.Helper()
|
||||
prefix, suffix := extractRegexpPrefix([]byte(s))
|
||||
if string(prefix) != expectedPrefix {
|
||||
t.Fatalf("unexpected prefix for %q; got %q; want %q", s, prefix, expectedPrefix)
|
||||
}
|
||||
if string(suffix) != expectedSuffix {
|
||||
t.Fatalf("unexpected suffix for %q; got %q; want %q", s, suffix, expectedSuffix)
|
||||
}
|
||||
}
|
||||
f("", "", "")
|
||||
f("foobar", "foobar", "")
|
||||
}
|
||||
|
||||
func TestGetRegexpFromCache(t *testing.T) {
|
||||
f := func(s string, orValuesExpected, expectedMatches, expectedMismatches []string, suffixExpected string) {
|
||||
t.Helper()
|
||||
for i := 0; i < 3; i++ {
|
||||
rcv, err := getRegexpFromCache([]byte(s))
|
||||
rcv, err := getRegexpFromCache(s)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error for s=%q: %s", s, err)
|
||||
}
|
||||
|
@ -764,7 +749,7 @@ func TestTagFilterMatchSuffix(t *testing.T) {
|
|||
var tf tagFilter
|
||||
|
||||
tvNoTrailingTagSeparator := func(s string) string {
|
||||
return string(marshalTagValueNoTrailingTagSeparator(nil, []byte(s)))
|
||||
return string(marshalTagValueNoTrailingTagSeparator(nil, s))
|
||||
}
|
||||
init := func(value string, isNegative, isRegexp bool, expectedPrefix string) {
|
||||
t.Helper()
|
||||
|
@ -1145,108 +1130,75 @@ func TestTagFilterMatchSuffix(t *testing.T) {
		})
	}

func TestGetOrValues(t *testing.T) {
	f := func(s string, valuesExpected []string) {
		t.Helper()
		values := getOrValues(s)
		if !reflect.DeepEqual(values, valuesExpected) {
			t.Fatalf("unexpected values for s=%q; got %q; want %q", s, values, valuesExpected)
		}
	}

	f("", []string{""})
	f("|foo", []string{"", "foo"})
	f("|foo|", []string{"", "", "foo"})
	f("foo.+", nil)
	f("foo.*", nil)
	f(".*", nil)
	f("foo|.*", nil)
	f("foobar", []string{"foobar"})
	f("z|x|c", []string{"c", "x", "z"})
	f("foo|bar", []string{"bar", "foo"})
	f("(foo|bar)", []string{"bar", "foo"})
	f("(foo|bar)baz", []string{"barbaz", "foobaz"})
	f("[a-z]", nil)
	f("[a-d]", []string{"a", "b", "c", "d"})
	f("x[a-d]we", []string{"xawe", "xbwe", "xcwe", "xdwe"})
	f("foo(bar|baz)", []string{"foobar", "foobaz"})
	f("foo(ba[rz]|(xx|o))", []string{"foobar", "foobaz", "fooo", "fooxx"})
	f("foo(?:bar|baz)x(qwe|rt)", []string{"foobarxqwe", "foobarxrt", "foobazxqwe", "foobazxrt"})
	f("foo(bar||baz)", []string{"foo", "foobar", "foobaz"})
	f("(a|b|c)(d|e|f)(g|h|k)", nil)
	f("(?i)foo", nil)
	f("(?i)(foo|bar)", nil)
}

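The expectations above also show when getOrValues gives up (nil) versus when a regexp collapses into a small set of literal values. A hedged sketch (illustration only, not the VictoriaMetrics matcher) of why such an expansion pays off: once the literals are known, tag values can be checked with exact lookups instead of running a regexp engine:

```go
package main

import "fmt"

func main() {
	// "foo(bar|baz)" expands to exactly these literals per the test above.
	orValues := map[string]struct{}{"foobar": {}, "foobaz": {}}
	for _, v := range []string{"foobar", "foobaz", "foobax"} {
		_, ok := orValues[v] // constant-time lookup, no regexp engine involved
		fmt.Println(v, ok)   // foobar true, foobaz true, foobax false
	}
}
```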
func TestGetRegexpPrefix(t *testing.T) {
	f := func(t *testing.T, s, expectedPrefix, expectedSuffix string) {
func TestSimplifyRegexp(t *testing.T) {
	f := func(s, expectedPrefix, expectedSuffix string) {
		t.Helper()

		prefix, suffix := getRegexpPrefix([]byte(s))
		if string(prefix) != expectedPrefix {
		prefix, suffix := simplifyRegexp(s)
		if prefix != expectedPrefix {
			t.Fatalf("unexpected prefix for s=%q; got %q; want %q", s, prefix, expectedPrefix)
		}
		if string(suffix) != expectedSuffix {
		if suffix != expectedSuffix {
			t.Fatalf("unexpected suffix for s=%q; got %q; want %q", s, suffix, expectedSuffix)
		}

		// Get the prefix from cache.
		prefix, suffix = getRegexpPrefix([]byte(s))
		if string(prefix) != expectedPrefix {
		prefix, suffix = simplifyRegexp(s)
		if prefix != expectedPrefix {
			t.Fatalf("unexpected prefix for s=%q; got %q; want %q", s, prefix, expectedPrefix)
		}
		if string(suffix) != expectedSuffix {
		if suffix != expectedSuffix {
			t.Fatalf("unexpected suffix for s=%q; got %q; want %q", s, suffix, expectedSuffix)
		}
	}

f(t, "", "", "")
|
||||
f(t, "^", "", "")
|
||||
f(t, "$", "", "")
|
||||
f(t, "^()$", "", "")
|
||||
f(t, "^(?:)$", "", "")
|
||||
f(t, "foobar", "foobar", "")
|
||||
f(t, "foo$|^foobar", "foo", "(?:(?:)|bar)")
|
||||
f(t, "^(foo$|^foobar)$", "foo", "(?:(?:)|bar)")
|
||||
f(t, "foobar|foobaz", "fooba", "[rz]")
|
||||
f(t, "(fo|(zar|bazz)|x)", "", "fo|zar|bazz|x")
|
||||
f(t, "(тестЧЧ|тест)", "тест", "(?:ЧЧ|(?:))")
|
||||
f(t, "foo(bar|baz|bana)", "fooba", "(?:[rz]|na)")
|
||||
f(t, "^foobar|foobaz", "fooba", "[rz]")
|
||||
f(t, "^foobar|^foobaz$", "fooba", "[rz]")
|
||||
f(t, "foobar|foobaz", "fooba", "[rz]")
|
||||
f(t, "(?:^foobar|^foobaz)aa.*", "fooba", "[rz]aa(?-s:.)*")
|
||||
f(t, "foo[bar]+", "foo", "[a-br]+")
|
||||
f(t, "foo[a-z]+", "foo", "[a-z]+")
|
||||
f(t, "foo[bar]*", "foo", "[a-br]*")
|
||||
f(t, "foo[a-z]*", "foo", "[a-z]*")
|
||||
f(t, "foo[x]+", "foo", "x+")
|
||||
f(t, "foo[^x]+", "foo", "[^x]+")
|
||||
f(t, "foo[x]*", "foo", "x*")
|
||||
f(t, "foo[^x]*", "foo", "[^x]*")
|
||||
f(t, "foo[x]*bar", "foo", "x*bar")
|
||||
f(t, "fo\\Bo[x]*bar?", "fo", "\\Box*bar?")
|
||||
f(t, "foo.+bar", "foo", "(?-s:.)+bar")
|
||||
f(t, "a(b|c.*).+", "a", "(?:b|c(?-s:.)*)(?-s:.)+")
|
||||
f(t, "ab|ac", "a", "[b-c]")
|
||||
f(t, "(?i)xyz", "", "(?i:XYZ)")
|
||||
f(t, "(?i)foo|bar", "", "(?i:FOO)|(?i:BAR)")
|
||||
f(t, "(?i)up.+x", "", "(?i:UP)(?-s:.)+(?i:X)")
|
||||
f(t, "(?smi)xy.*z$", "", "(?i:XY)(?s:.)*(?i:Z)(?m:$)")
|
||||
f("", "", "")
|
||||
f("^", "", "")
|
||||
f("$", "", "")
|
||||
f("^()$", "", "")
|
||||
f("^(?:)$", "", "")
|
||||
f("foobar", "foobar", "")
|
||||
f("foo$|^foobar", "foo", "|bar")
|
||||
f("^(foo$|^foobar)$", "foo", "|bar")
|
||||
f("foobar|foobaz", "fooba", "[rz]")
|
||||
f("(fo|(zar|bazz)|x)", "", "fo|zar|bazz|x")
|
||||
f("(тестЧЧ|тест)", "тест", "ЧЧ|")
|
||||
f("foo(bar|baz|bana)", "fooba", "[rz]|na")
|
||||
f("^foobar|foobaz", "fooba", "[rz]")
|
||||
f("^foobar|^foobaz$", "fooba", "[rz]")
|
||||
f("foobar|foobaz", "fooba", "[rz]")
|
||||
f("(?:^foobar|^foobaz)aa.*", "fooba", "[rz]aa.*")
|
||||
f("foo[bar]+", "foo", "[a-br]+")
|
||||
f("foo[a-z]+", "foo", "[a-z]+")
|
||||
f("foo[bar]*", "foo", "[a-br]*")
|
||||
f("foo[a-z]*", "foo", "[a-z]*")
|
||||
f("foo[x]+", "foo", "x+")
|
||||
f("foo[^x]+", "foo", "[^x]+")
|
||||
f("foo[x]*", "foo", "x*")
|
||||
f("foo[^x]*", "foo", "[^x]*")
|
||||
f("foo[x]*bar", "foo", "x*bar")
|
||||
f("fo\\Bo[x]*bar?", "fo", "\\Box*bar?")
|
||||
f("foo.+bar", "foo", ".+bar")
|
||||
f("a(b|c.*).+", "a", "(?:b|c.*).+")
|
||||
f("ab|ac", "a", "[b-c]")
|
||||
f("(?i)xyz", "", "(?i:XYZ)")
|
||||
f("(?i)foo|bar", "", "(?i:FOO)|(?i:BAR)")
|
||||
f("(?i)up.+x", "", "(?i:UP).+(?i:X)")
|
||||
f("(?smi)xy.*z$", "", "(?i:XY)(?s:.)*(?i:Z)(?m:$)")
|
||||
|
||||
	// test invalid regexps
	f(t, "a(", "a(", "")
	f(t, "a[", "a[", "")
	f(t, "a[]", "a[]", "")
	f(t, "a{", "a{", "")
	f(t, "a{}", "a{}", "")
	f(t, "invalid(regexp", "invalid(regexp", "")
	f("a(", "a(", "")
	f("a[", "a[", "")
	f("a[]", "a[]", "")
	f("a{", "a{", "")
	f("a{}", "a{}", "")
	f("invalid(regexp", "invalid(regexp", "")

	// The transformed regexp mustn't match aba
	f(t, "a?(^ba|c)", "", "a?(?:\Aba|c)")
	f("a?(^ba|c)", "", "a?(?:\Aba|c)")

	// The transformed regexp mustn't match barx
	f(t, "(foo|bar$)x*", "", "(?:foo|bar(?-m:$))x*")
	f("(foo|bar$)x*", "", "(?:foo|bar$)x*")
}

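A hedged sketch (illustration only, not the VictoriaMetrics matcher) of how a prefix/suffix split like the one exercised above can be applied: the test expects simplifyRegexp to turn "foobar|foobaz" into prefix "fooba" and suffix "[rz]", so a candidate value can be checked with a cheap prefix comparison plus a fully anchored regexp on the remainder:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Expected output of simplifyRegexp("foobar|foobaz") per the test above.
	prefix, suffix := "fooba", "[rz]"
	re := regexp.MustCompile("^(?:" + suffix + ")$")
	for _, v := range []string{"foobar", "foobaz", "foobax"} {
		ok := strings.HasPrefix(v, prefix) && re.MatchString(v[len(prefix):])
		fmt.Println(v, ok) // foobar true, foobaz true, foobax false
	}
}
```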
func TestTagFiltersString(t *testing.T) {

@ -32,6 +32,29 @@ func BenchmarkTagFilterMatchSuffix(b *testing.B) {
			}
		})
	})
	b.Run("regexp-any-suffix-match-anchored", func(b *testing.B) {
		key := []byte("^foo.*$")
		isNegative := false
		isRegexp := true
		suffix := marshalTagValue(nil, []byte("ojksdfds"))
		b.ReportAllocs()
		b.SetBytes(int64(1))
		b.RunParallel(func(pb *testing.PB) {
			var tf tagFilter
			if err := tf.Init(nil, nil, key, isNegative, isRegexp); err != nil {
				logger.Panicf("BUG: unexpected error: %s", err)
			}
			for pb.Next() {
				ok, err := tf.matchSuffix(suffix)
				if err != nil {
					logger.Panicf("BUG: unexpected error: %s", err)
				}
				if !ok {
					logger.Panicf("BUG: unexpected suffix mismatch")
				}
			}
		})
	})
	b.Run("regexp-any-nonzero-suffix-match", func(b *testing.B) {
		key := []byte("foo.+")
		isNegative := false
@ -55,6 +78,29 @@ func BenchmarkTagFilterMatchSuffix(b *testing.B) {
			}
		})
	})
	b.Run("regexp-any-nonzero-suffix-match-anchored", func(b *testing.B) {
		key := []byte("^foo.+$")
		isNegative := false
		isRegexp := true
		suffix := marshalTagValue(nil, []byte("ojksdfds"))
		b.ReportAllocs()
		b.SetBytes(int64(1))
		b.RunParallel(func(pb *testing.PB) {
			var tf tagFilter
			if err := tf.Init(nil, nil, key, isNegative, isRegexp); err != nil {
				logger.Panicf("BUG: unexpected error: %s", err)
			}
			for pb.Next() {
				ok, err := tf.matchSuffix(suffix)
				if err != nil {
					logger.Panicf("BUG: unexpected error: %s", err)
				}
				if !ok {
					logger.Panicf("BUG: unexpected suffix mismatch")
				}
			}
		})
	})
	b.Run("regexp-any-nonzero-suffix-mismatch", func(b *testing.B) {
		key := []byte("foo.+")
		isNegative := false
@ -5,7 +5,7 @@ icon: logo.png
summary: VictoriaMetrics is fast, cost-effective and scalable time-series database.
description: |
  * VictoriaMetrics can be used as long-term storage for Prometheus or for vmagent.
    See [these docs](#prometheus-setup) for details.
    See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-setup for details.
  * Supports Prometheus querying API, so it can be used as Prometheus drop-in replacement in Grafana.
    VictoriaMetrics implements MetricsQL, https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/MetricsQL query language, which is inspired by PromQL.
  * Supports global query view. Multiple Prometheus instances may write data into VictoriaMetrics. Later this data may be used in a single query.

@ -18,7 +18,7 @@ description: |
  * All the configuration is done via explicit command-line flags with reasonable defaults.
  * All the data is stored in a single directory pointed by `-storageDataPath` flag.
  * Easy and fast backups from
    to S3 or GCS with [vmbackup] / [vmrestore].
    to S3 or GCS with https://docs.victoriametrics.com/vmbackup.html / https://docs.victoriametrics.com/vmrestore.html.
  * Storage is protected from corruption on unclean shutdown (i.e. OOM, hardware reset or `kill -9`) thanks to the storage architecture.
  * Supports metrics' scraping, ingestion and backfilling via the following protocols:
    * [Metrics from Prometheus exporters]

@ -33,7 +33,7 @@ description: |
    * Native binary format.
    * Prometheus exposition format.
    * Arbitrary CSV data.
  * Supports metrics' relabeling. See [these docs](#relabeling) for details.
  * Supports metrics' relabeling. See https://docs.victoriametrics.com/#relabeling for details.
  * Ideally works with big amounts of time series data from Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various Enterprise workloads.
  * Has open source cluster version (https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

58
vendor/cloud.google.com/go/.release-please-manifest-submodules.json
generated
vendored
|
@ -1,47 +1,51 @@
|
|||
{
|
||||
"accessapproval": "1.3.0",
|
||||
"accesscontextmanager": "1.2.0",
|
||||
"aiplatform": "1.14.0",
|
||||
"analytics": "0.8.0",
|
||||
"aiplatform": "1.17.0",
|
||||
"analytics": "0.9.0",
|
||||
"apigateway": "1.2.0",
|
||||
"apigeeconnect": "1.2.0",
|
||||
"apigeeregistry": "0.2.0",
|
||||
"apikeys": "0.1.0",
|
||||
"appengine": "1.3.0",
|
||||
"area120": "0.4.0",
|
||||
"artifactregistry": "1.3.0",
|
||||
"asset": "1.3.0",
|
||||
"assuredworkloads": "1.0.0",
|
||||
"artifactregistry": "1.4.0",
|
||||
"asset": "1.4.0",
|
||||
"assuredworkloads": "1.2.0",
|
||||
"automl": "1.4.0",
|
||||
"baremetalsolution": "0.2.0",
|
||||
"batch": "0.1.0",
|
||||
"beyondcorp": "0.1.0",
|
||||
"billing": "1.2.0",
|
||||
"binaryauthorization": "1.0.0",
|
||||
"certificatemanager": "0.2.0",
|
||||
"certificatemanager": "0.2.1",
|
||||
"channel": "1.7.0",
|
||||
"cloudbuild": "1.2.0",
|
||||
"clouddms": "1.2.0",
|
||||
"cloudtasks": "1.4.0",
|
||||
"compute": "1.7.0",
|
||||
"contactcenterinsights": "1.2.0",
|
||||
"container": "1.2.0",
|
||||
"compute": "1.9.0",
|
||||
"contactcenterinsights": "1.2.3",
|
||||
"container": "1.3.1",
|
||||
"containeranalysis": "0.4.0",
|
||||
"datacatalog": "1.3.0",
|
||||
"dataflow": "0.5.0",
|
||||
"datacatalog": "1.3.1",
|
||||
"dataflow": "0.5.1",
|
||||
"dataform": "0.2.0",
|
||||
"datafusion": "1.3.0",
|
||||
"datalabeling": "0.3.0",
|
||||
"dataplex": "1.0.0",
|
||||
"dataplex": "1.1.0",
|
||||
"dataproc": "1.5.0",
|
||||
"dataqna": "0.4.0",
|
||||
"datastream": "1.0.0",
|
||||
"deploy": "1.2.0",
|
||||
"dialogflow": "1.11.0",
|
||||
"deploy": "1.2.1",
|
||||
"dialogflow": "1.12.1",
|
||||
"dlp": "1.4.0",
|
||||
"documentai": "1.4.0",
|
||||
"documentai": "1.5.0",
|
||||
"domains": "0.5.0",
|
||||
"essentialcontacts": "1.2.0",
|
||||
"eventarc": "1.6.0",
|
||||
"filestore": "1.2.0",
|
||||
"functions": "1.4.0",
|
||||
"gaming": "1.3.0",
|
||||
"functions": "1.5.0",
|
||||
"gaming": "1.3.1",
|
||||
"gkebackup": "0.1.0",
|
||||
"gkeconnect": "0.3.0",
|
||||
"gkehub": "0.8.0",
|
||||
|
@ -59,10 +63,10 @@
|
|||
"mediatranslation": "0.3.0",
|
||||
"memcache": "1.3.0",
|
||||
"metastore": "1.3.0",
|
||||
"monitoring": "1.5.0",
|
||||
"monitoring": "1.6.0",
|
||||
"networkconnectivity": "1.2.0",
|
||||
"networkmanagement": "1.3.0",
|
||||
"networksecurity": "0.3.0",
|
||||
"networksecurity": "0.3.1",
|
||||
"notebooks": "1.0.0",
|
||||
"optimization": "1.0.0",
|
||||
"orchestration": "1.2.0",
|
||||
|
@ -73,33 +77,33 @@
|
|||
"policytroubleshooter": "1.2.0",
|
||||
"privatecatalog": "0.4.0",
|
||||
"recaptchaenterprise/v2": "2.0.1",
|
||||
"recommendationengine": "0.2.0",
|
||||
"recommendationengine": "0.3.0",
|
||||
"recommender": "1.4.0",
|
||||
"redis": "1.6.0",
|
||||
"resourcemanager": "1.2.0",
|
||||
"resourcesettings": "1.2.0",
|
||||
"retail": "1.4.0",
|
||||
"retail": "1.5.0",
|
||||
"run": "0.1.1",
|
||||
"scheduler": "1.3.0",
|
||||
"secretmanager": "1.5.0",
|
||||
"security": "1.4.0",
|
||||
"securitycenter": "1.8.0",
|
||||
"security": "1.4.1",
|
||||
"securitycenter": "1.10.0",
|
||||
"servicecontrol": "1.3.0",
|
||||
"servicedirectory": "1.3.0",
|
||||
"servicemanagement": "1.3.0",
|
||||
"servicemanagement": "1.3.1",
|
||||
"serviceusage": "1.2.0",
|
||||
"shell": "1.2.0",
|
||||
"speech": "1.5.0",
|
||||
"storagetransfer": "1.3.0",
|
||||
"talent": "0.9.0",
|
||||
"talent": "1.0.0",
|
||||
"texttospeech": "1.3.0",
|
||||
"tpu": "1.2.0",
|
||||
"trace": "1.2.0",
|
||||
"translate": "1.2.0",
|
||||
"video": "1.7.0",
|
||||
"videointelligence": "1.4.0",
|
||||
"vision/v2": "2.0.0",
|
||||
"vmmigration": "1.0.0",
|
||||
"vision/v2": "2.1.0",
|
||||
"vmmigration": "1.1.0",
|
||||
"vpcaccess": "1.2.0",
|
||||
"webrisk": "1.3.0",
|
||||
"websecurityscanner": "1.2.0",
|
||||
|
|
2
vendor/cloud.google.com/go/.release-please-manifest.json
generated
vendored
|
@ -1,3 +1,3 @@
|
|||
{
|
||||
".": "0.103.0"
|
||||
".": "0.104.0"
|
||||
}
|
||||
|
|
7
vendor/cloud.google.com/go/CHANGES.md
generated
vendored
|
@ -1,5 +1,12 @@
|
|||
# Changes
|
||||
|
||||
## [0.104.0](https://github.com/googleapis/google-cloud-go/compare/v0.103.0...v0.104.0) (2022-08-24)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **godocfx:** add friendlyAPIName ([#6447](https://github.com/googleapis/google-cloud-go/issues/6447)) ([c6d3ba4](https://github.com/googleapis/google-cloud-go/commit/c6d3ba401b7b3ae9b710a8850c6ec5d49c4c1490))
|
||||
|
||||
## [0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29)
|
||||
|
||||
|
||||
|
|
1
vendor/cloud.google.com/go/README.md
generated
vendored
|
@ -35,6 +35,7 @@ For an updated list of all of our released APIs please see our
|
|||
Our libraries are compatible with at least the three most recent, major Go
|
||||
releases. They are currently compatible with:
|
||||
|
||||
- Go 1.19
|
||||
- Go 1.18
|
||||
- Go 1.17
|
||||
- Go 1.16
|
||||
|
|
173
vendor/cloud.google.com/go/doc.go
generated
vendored
|
@ -17,14 +17,12 @@ Package cloud is the root of the packages used to access Google Cloud
|
|||
Services. See https://godoc.org/cloud.google.com/go for a full list
|
||||
of sub-packages.
|
||||
|
||||
|
||||
Client Options
|
||||
# Client Options
|
||||
|
||||
All clients in sub-packages are configurable via client options. These options are
|
||||
described here: https://godoc.org/google.golang.org/api/option.
|
||||
|
||||
|
||||
Authentication and Authorization
|
||||
# Authentication and Authorization
|
||||
|
||||
All the clients in sub-packages support authentication via Google Application Default
|
||||
Credentials (see https://cloud.google.com/docs/authentication/production), or
|
||||
|
@ -35,11 +33,12 @@ and authenticate clients. For information on how to create and obtain
|
|||
Application Default Credentials, see
|
||||
https://cloud.google.com/docs/authentication/production. Here is an example
|
||||
of a client using ADC to authenticate:
|
||||
client, err := secretmanager.NewClient(context.Background())
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
client, err := secretmanager.NewClient(context.Background())
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
You can use a file with credentials to authenticate and authorize, such as a JSON
|
||||
key file associated with a Google service account. Service Account keys can be
|
||||
|
@ -47,12 +46,13 @@ created and downloaded from
|
|||
https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses
|
||||
the Secret Manger client, but the same steps apply to the other client libraries
|
||||
underneath this package. Example:
|
||||
client, err := secretmanager.NewClient(context.Background(),
|
||||
option.WithCredentialsFile("/path/to/service-account-key.json"))
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
client, err := secretmanager.NewClient(context.Background(),
|
||||
option.WithCredentialsFile("/path/to/service-account-key.json"))
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
In some cases (for instance, you don't want to store secrets on disk), you can
|
||||
create credentials from in-memory JSON and use the WithCredentials option.
|
||||
|
@ -62,19 +62,19 @@ the other client libraries underneath this package. Note that scopes can be
|
|||
found at https://developers.google.com/identity/protocols/oauth2/scopes, and
|
||||
are also provided in all auto-generated libraries: for example,
|
||||
cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example:
|
||||
ctx := context.Background()
|
||||
creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
ctx := context.Background()
|
||||
creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
Timeouts and Cancellation
|
||||
# Timeouts and Cancellation
|
||||
|
||||
By default, non-streaming methods, like Create or Get, will have a default deadline applied to the
|
||||
context provided at call time, unless a context deadline is already set. Streaming
|
||||
|
@ -83,40 +83,42 @@ arrange for cancellation, use contexts. Transient
|
|||
errors will be retried when correctness allows.
|
||||
|
||||
Here is an example of how to set a timeout for an RPC, use context.WithTimeout:
|
||||
ctx := context.Background()
|
||||
// Do not set a timeout on the context passed to NewClient: dialing happens
|
||||
// asynchronously, and the context is used to refresh credentials in the
|
||||
// background.
|
||||
client, err := secretmanager.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// Time out if it takes more than 10 seconds to create a dataset.
|
||||
tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel() // Always call cancel.
|
||||
|
||||
req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
|
||||
if err := client.DeleteSecret(tctx, req); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
ctx := context.Background()
|
||||
// Do not set a timeout on the context passed to NewClient: dialing happens
|
||||
// asynchronously, and the context is used to refresh credentials in the
|
||||
// background.
|
||||
client, err := secretmanager.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// Time out if it takes more than 10 seconds to create a dataset.
|
||||
tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel() // Always call cancel.
|
||||
|
||||
req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
|
||||
if err := client.DeleteSecret(tctx, req); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
|
||||
Here is an example of how to arrange for an RPC to be canceled, use context.WithCancel:
|
||||
ctx := context.Background()
|
||||
// Do not cancel the context passed to NewClient: dialing happens asynchronously,
|
||||
// and the context is used to refresh credentials in the background.
|
||||
client, err := secretmanager.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel() // Always call cancel.
|
||||
|
||||
// TODO: Make the cancel function available to whatever might want to cancel the
|
||||
// call--perhaps a GUI button.
|
||||
req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"}
|
||||
if err := client.DeleteSecret(cctx, req); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
ctx := context.Background()
|
||||
// Do not cancel the context passed to NewClient: dialing happens asynchronously,
|
||||
// and the context is used to refresh credentials in the background.
|
||||
client, err := secretmanager.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel() // Always call cancel.
|
||||
|
||||
// TODO: Make the cancel function available to whatever might want to cancel the
|
||||
// call--perhaps a GUI button.
|
||||
req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"}
|
||||
if err := client.DeleteSecret(cctx, req); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
|
||||
To opt out of default deadlines, set the temporary environment variable
|
||||
GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client
|
||||
|
@ -130,8 +132,7 @@ timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts
|
|||
would be ineffective and would only interfere with credential refreshing, which uses
|
||||
the same context.
|
||||
|
||||
|
||||
Connection Pooling
|
||||
# Connection Pooling
|
||||
|
||||
Connection pooling differs in clients based on their transport. Cloud
|
||||
clients either rely on HTTP or gRPC transports to communicate
|
||||
|
@ -147,23 +148,20 @@ of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a clie
|
|||
option to NewClient calls. This configures the underlying gRPC connections to be
|
||||
pooled and addressed in a round robin fashion.
|
||||
|
||||
|
||||
Using the Libraries with Docker
|
||||
# Using the Libraries with Docker
|
||||
|
||||
Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to
|
||||
hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928
|
||||
for more information.
|
||||
|
||||
|
||||
Debugging
|
||||
# Debugging
|
||||
|
||||
To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See
|
||||
https://godoc.org/google.golang.org/grpc/grpclog for more information.
|
||||
|
||||
For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".
|
||||
|
||||
|
||||
Inspecting errors
|
||||
# Inspecting errors
|
||||
|
||||
Most of the errors returned by the generated clients are wrapped in an
|
||||
`apierror.APIError` (https://pkg.go.dev/github.com/googleapis/gax-go/v2/apierror)
|
||||
|
@ -175,35 +173,38 @@ while debugging.
|
|||
`apierror.APIError` gives access to specific details in the
|
||||
error. The transport-specific errors can still be unwrapped using the
|
||||
`apierror.APIError`.
|
||||
if err != nil {
|
||||
var ae *apierror.APIError
|
||||
if errors.As(err, &ae) {
|
||||
log.Println(ae.Reason())
|
||||
log.Println(ae.Details().Help.GetLinks())
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
var ae *apierror.APIError
|
||||
if errors.As(err, &ae) {
|
||||
log.Println(ae.Reason())
|
||||
log.Println(ae.Details().Help.GetLinks())
|
||||
}
|
||||
}
|
||||
|
||||
If the gRPC transport was used, the `grpc.Status` can still be parsed using the
|
||||
`status.FromError` function.
|
||||
if err != nil {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
log.Println(s.Message())
|
||||
for _, d := range s.Proto().Details {
|
||||
log.Println(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
log.Println(s.Message())
|
||||
for _, d := range s.Proto().Details {
|
||||
log.Println(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
If the REST transport was used, the `googleapi.Error` can be parsed in a similar
|
||||
way.
|
||||
if err != nil {
|
||||
var gerr *googleapi.Error
|
||||
if errors.As(err, &gerr) {
|
||||
log.Println(gerr.Message)
|
||||
}
|
||||
}
|
||||
|
||||
Client Stability
|
||||
if err != nil {
|
||||
var gerr *googleapi.Error
|
||||
if errors.As(err, &gerr) {
|
||||
log.Println(gerr.Message)
|
||||
}
|
||||
}
|
||||
|
||||
# Client Stability
|
||||
|
||||
Clients in this repository are considered alpha or beta unless otherwise
|
||||
marked as stable in the README.md. Semver is not used to communicate stability
|
||||
|
|
103
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
|
@ -62,6 +62,24 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/apigeeregistry/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/apigeeregistry/apiv1",
|
||||
"description": "Apigee Registry API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/apikeys/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/apikeys/apiv2",
|
||||
"description": "API Keys API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/appengine/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/appengine/apiv1",
|
||||
"description": "App Engine Admin API",
|
||||
|
@ -80,6 +98,15 @@
|
|||
"release_level": "alpha",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/artifactregistry/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/artifactregistry/apiv1",
|
||||
"description": "Artifact Registry API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/artifactregistry/apiv1beta2": {
|
||||
"distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2",
|
||||
"description": "Artifact Registry API",
|
||||
|
@ -170,6 +197,51 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/appconnections/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/appconnectors/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/appgateways/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/clientgateways/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery",
|
||||
"description": "BigQuery",
|
||||
|
@ -467,6 +539,15 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/dataform/apiv1alpha2": {
|
||||
"distribution_name": "cloud.google.com/go/dataform/apiv1alpha2",
|
||||
"description": "Dataform API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2",
|
||||
"release_level": "alpha",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/datafusion/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/datafusion/apiv1",
|
||||
"description": "Cloud Data Fusion API",
|
||||
|
@ -719,6 +800,24 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/functions/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/functions/apiv2",
|
||||
"description": "Cloud Functions API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/functions/apiv2beta": {
|
||||
"distribution_name": "cloud.google.com/go/functions/apiv2beta",
|
||||
"description": "Cloud Functions API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/functions/metadata": {
|
||||
"distribution_name": "cloud.google.com/go/functions/metadata",
|
||||
"description": "Cloud Functions",
|
||||
|
@ -1495,7 +1594,7 @@
|
|||
},
|
||||
"cloud.google.com/go/spanner/admin/database/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1",
|
||||
"description": "Cloud Spanner Database Admin API",
|
||||
"description": "Cloud Spanner API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1",
|
||||
|
@ -1571,7 +1670,7 @@
|
|||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4",
|
||||
"release_level": "beta",
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/talent/apiv4beta1": {
|
||||
|
|
3
vendor/cloud.google.com/go/internal/annotate.go
generated
vendored
|
@ -31,7 +31,8 @@ import (
|
|||
// - "google.golang.org/api/googleapi".Error
|
||||
// If the error is not one of these types, Annotate behaves
|
||||
// like
|
||||
// fmt.Errorf("%s: %v", msg, err)
|
||||
//
|
||||
// fmt.Errorf("%s: %v", msg, err)
|
||||
func Annotate(err error, msg string) error {
|
||||
if err == nil {
|
||||
panic("Annotate called with nil")
|
||||
|
|
12
vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
generated
vendored
|
@ -21,6 +21,12 @@
|
|||
"apigeeconnect": {
|
||||
"component": "apigeeconnect"
|
||||
},
|
||||
"apigeeregistry": {
|
||||
"component": "apigeeregistry"
|
||||
},
|
||||
"apikeys": {
|
||||
"component": "apikeys"
|
||||
},
|
||||
"appengine": {
|
||||
"component": "appengine"
|
||||
},
|
||||
|
@ -45,6 +51,9 @@
|
|||
"batch": {
|
||||
"component": "batch"
|
||||
},
|
||||
"beyondcorp": {
|
||||
"component": "beyondcorp"
|
||||
},
|
||||
"billing": {
|
||||
"component": "billing"
|
||||
},
|
||||
|
@ -84,6 +93,9 @@
|
|||
"dataflow": {
|
||||
"component": "dataflow"
|
||||
},
|
||||
"dataform": {
|
||||
"component": "dataform"
|
||||
},
|
||||
"datafusion": {
|
||||
"component": "datafusion"
|
||||
},
|
||||
|
|
2
vendor/cloud.google.com/go/storage/.release-please-manifest.json
generated
vendored
|
@ -1,3 +1,3 @@
|
|||
{
|
||||
"storage": "1.25.0"
|
||||
"storage": "1.26.0"
|
||||
}
|
12
vendor/cloud.google.com/go/storage/CHANGES.md
generated
vendored
|
@ -1,6 +1,18 @@
|
|||
# Changes
|
||||
|
||||
|
||||
## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e))
|
||||
|
||||
## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11)
|
||||
|
||||
|
||||
|
|
63
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
|
@ -610,7 +612,12 @@ const (
//
// All configured conditions must be met for the associated action to be taken.
type LifecycleCondition struct {
	// AllObjects is used to select all objects in a bucket by
	// setting AgeInDays to 0.
	AllObjects bool

	// AgeInDays is the age of the object in days.
	// If you want to set AgeInDays to `0` use AllObjects set to `true`.
	AgeInDays int64

	// CreatedBefore is the time the object was created.

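The AllObjects field above is new in this vendored storage release. A hedged sketch of how it is meant to be used (the bucket name is a placeholder; this code is illustrative, not part of the commit):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// AllObjects: true makes the manual client send Age=0 (via ForceSendFields),
	// so the delete rule applies to every object in the bucket.
	_, err = client.Bucket("example-bucket").Update(ctx, storage.BucketAttrsToUpdate{
		Lifecycle: &storage.Lifecycle{Rules: []storage.LifecycleRule{{
			Action:    storage.LifecycleAction{Type: storage.DeleteAction},
			Condition: storage.LifecycleCondition{AllObjects: true},
		}}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```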
@ -628,10 +633,12 @@ type LifecycleCondition struct {
|
|||
|
||||
// DaysSinceCustomTime is the days elapsed since the CustomTime date of the
|
||||
// object. This condition can only be satisfied if CustomTime has been set.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
DaysSinceCustomTime int64
|
||||
|
||||
// DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp
|
||||
// of the object. This condition is relevant only for versioned objects.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
DaysSinceNoncurrentTime int64
|
||||
|
||||
// Liveness specifies the object's liveness. Relevant only for versioned objects
|
||||
|
@ -663,6 +670,7 @@ type LifecycleCondition struct {
|
|||
// If the value is N, this condition is satisfied when there are at least N
|
||||
// versions (including the live version) newer than this version of the
|
||||
// object.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
NumNewerVersions int64
|
||||
}
|
||||
|
||||
|
@ -1421,19 +1429,6 @@ func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS {
|
|||
return out
|
||||
}
|
||||
|
||||
// Used to handle breaking change in Autogen Storage client OLM Age field
|
||||
// from int64 to *int64 gracefully in the manual client
|
||||
// TODO(#6240): Method should be removed once breaking change is made and introduced to this client
|
||||
func setAgeCondition(age int64, ageField interface{}) {
|
||||
c := reflect.ValueOf(ageField).Elem()
|
||||
switch c.Kind() {
|
||||
case reflect.Int64:
|
||||
c.SetInt(age)
|
||||
case reflect.Ptr:
|
||||
c.Set(reflect.ValueOf(&age))
|
||||
}
|
||||
}
|
||||
|
||||
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
|
||||
var rl raw.BucketLifecycle
|
||||
if len(l.Rules) == 0 {
|
||||
|
@ -1455,7 +1450,15 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
			},
		}

		setAgeCondition(r.Condition.AgeInDays, &rr.Condition.Age)
		// AllObjects takes precedent when both AllObjects and AgeInDays are set
		// Rationale: If you've opted into using AllObjects, it makes sense that you
		// understand the implications of how this option works with AgeInDays.
		if r.Condition.AllObjects {
			rr.Condition.Age = googleapi.Int64(0)
			rr.Condition.ForceSendFields = []string{"Age"}
		} else if r.Condition.AgeInDays > 0 {
			rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays)
		}

		switch r.Condition.Liveness {
		case LiveAndArchived:

@ -1504,6 +1507,11 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
|
|||
},
|
||||
}
|
||||
|
||||
// TODO(#6205): This may not be needed for gRPC
|
||||
if r.Condition.AllObjects {
|
||||
rr.Condition.AgeDays = proto.Int32(0)
|
||||
}
|
||||
|
||||
switch r.Condition.Liveness {
|
||||
case LiveAndArchived:
|
||||
rr.Condition.IsLive = nil
|
||||
|
@ -1527,21 +1535,6 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
|
|||
return &rl
|
||||
}
|
||||
|
||||
// Used to handle breaking change in Autogen Storage client OLM Age field
|
||||
// from int64 to *int64 gracefully in the manual client
|
||||
// TODO(#6240): Method should be removed once breaking change is made and introduced to this client
|
||||
func getAgeCondition(ageField interface{}) int64 {
|
||||
v := reflect.ValueOf(ageField)
|
||||
if v.Kind() == reflect.Int64 {
|
||||
return v.Interface().(int64)
|
||||
} else if v.Kind() == reflect.Ptr {
|
||||
if val, ok := v.Interface().(*int64); ok {
|
||||
return *val
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
|
||||
var l Lifecycle
|
||||
if rl == nil {
|
||||
|
@ -1562,7 +1555,12 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
|
|||
NumNewerVersions: rr.Condition.NumNewerVersions,
|
||||
},
|
||||
}
|
||||
r.Condition.AgeInDays = getAgeCondition(rr.Condition.Age)
|
||||
if rr.Condition.Age != nil {
|
||||
r.Condition.AgeInDays = *rr.Condition.Age
|
||||
if *rr.Condition.Age == 0 {
|
||||
r.Condition.AllObjects = true
|
||||
}
|
||||
}
|
||||
|
||||
if rr.Condition.IsLive == nil {
|
||||
r.Condition.Liveness = LiveAndArchived
|
||||
|
@ -1608,6 +1606,11 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle {
|
|||
},
|
||||
}
|
||||
|
||||
// TODO(#6205): This may not be needed for gRPC
|
||||
if rr.GetCondition().GetAgeDays() == 0 {
|
||||
r.Condition.AllObjects = true
|
||||
}
|
||||
|
||||
if rr.GetCondition().IsLive == nil {
|
||||
r.Condition.Liveness = LiveAndArchived
|
||||
} else if rr.GetCondition().GetIsLive() {
|
||||
|
|
10
vendor/cloud.google.com/go/storage/grpc_client.go
generated
vendored
|
@ -27,6 +27,7 @@ import (
|
|||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
@ -88,6 +89,9 @@ func defaultGRPCOptions() []option.ClientOption {
|
|||
option.WithGRPCDialOption(grpc.WithInsecure()),
|
||||
option.WithoutAuthentication(),
|
||||
)
|
||||
} else {
|
||||
// Only enable DirectPath when the emulator is not being targeted.
|
||||
defaults = append(defaults, internaloption.EnableDirectPath(true))
|
||||
}
|
||||
|
||||
return defaults
|
||||
|
@ -1379,7 +1383,7 @@ func (r *gRPCReader) Close() error {
|
|||
// an attempt to reopen the stream.
|
||||
func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) {
|
||||
msg, err := r.stream.Recv()
|
||||
if err != nil && shouldRetry(err) {
|
||||
if err != nil && ShouldRetry(err) {
|
||||
// This will "close" the existing stream and immediately attempt to
|
||||
// reopen the stream, but will backoff if further attempts are necessary.
|
||||
// Reopening the stream Recvs the first message, so if retrying is
|
||||
|
@ -1559,7 +1563,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
|
|||
// resend the entire buffer via a new stream.
|
||||
// If not retriable, falling through will return the error received
|
||||
// from closing the stream.
|
||||
if shouldRetry(err) {
|
||||
if ShouldRetry(err) {
|
||||
sent = 0
|
||||
finishWrite = false
|
||||
// TODO: Add test case for failure modes of querying progress.
|
||||
|
@ -1590,7 +1594,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
|
|||
// resend the entire buffer via a new stream.
|
||||
// If not retriable, falling through will return the error received
|
||||
// from closing the stream.
|
||||
if shouldRetry(err) {
|
||||
if ShouldRetry(err) {
|
||||
sent = 0
|
||||
finishWrite = false
|
||||
offset, err = w.determineOffset(start)
|
||||
|
|
4
vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go
generated
vendored
|
@ -1865,6 +1865,7 @@ type WriteObjectRequest struct {
|
|||
// The first message of each stream should set one of the following.
|
||||
//
|
||||
// Types that are assignable to FirstMessage:
|
||||
//
|
||||
// *WriteObjectRequest_UploadId
|
||||
// *WriteObjectRequest_WriteObjectSpec
|
||||
FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
|
||||
|
@ -1885,6 +1886,7 @@ type WriteObjectRequest struct {
|
|||
// A portion of the data for the object.
|
||||
//
|
||||
// Types that are assignable to Data:
|
||||
//
|
||||
// *WriteObjectRequest_ChecksummedData
|
||||
Data isWriteObjectRequest_Data `protobuf_oneof:"data"`
|
||||
// Checksums for the complete object. If the checksums computed by the service
|
||||
|
@ -2039,6 +2041,7 @@ type WriteObjectResponse struct {
|
|||
// The response will set one of the following.
|
||||
//
|
||||
// Types that are assignable to WriteStatus:
|
||||
//
|
||||
// *WriteObjectResponse_PersistedSize
|
||||
// *WriteObjectResponse_Resource
|
||||
WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
|
||||
|
@ -2338,6 +2341,7 @@ type QueryWriteStatusResponse struct {
|
|||
// The response will set one of the following.
|
||||
//
|
||||
// Types that are assignable to WriteStatus:
|
||||
//
|
||||
// *QueryWriteStatusResponse_PersistedSize
|
||||
// *QueryWriteStatusResponse_Resource
|
||||
WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"`
|
||||
|
|
2
vendor/cloud.google.com/go/storage/internal/version.go
generated
vendored
|
@ -15,4 +15,4 @@
|
|||
package internal
|
||||
|
||||
// Version is the current tagged release of the library.
|
||||
const Version = "1.25.0"
|
||||
const Version = "1.26.0"
|
||||
|
|
15
vendor/cloud.google.com/go/storage/invoke.go
generated
vendored

@ -57,7 +57,7 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten
		bo.Initial = retry.backoff.Initial
		bo.Max = retry.backoff.Max
	}
	var errorFunc func(err error) bool = shouldRetry
	var errorFunc func(err error) bool = ShouldRetry
	if retry.shouldRetry != nil {
		errorFunc = retry.shouldRetry
	}

@ -89,7 +89,16 @@ func setRetryHeaderGRPC(_ context.Context) func(string, int) {
	}
}

func shouldRetry(err error) bool {
// ShouldRetry returns true if an error is retryable, based on best practice
// guidance from GCS. See
// https://cloud.google.com/storage/docs/retry-strategy#go for more information
// on what errors are considered retryable.
//
// If you would like to customize retryable errors, use the WithErrorFunc to
// supply a RetryOption to your library calls. For example, to retry additional
// errors, you can write a custom func that wraps ShouldRetry and also specifies
// additional errors that should return true.
func ShouldRetry(err error) bool {
	if err == nil {
		return false
	}

@ -131,7 +140,7 @@ func shouldRetry(err error) bool {
	}
	// Unwrap is only supported in go1.13.x+
	if e, ok := err.(interface{ Unwrap() error }); ok {
		return shouldRetry(e.Unwrap())
		return ShouldRetry(e.Unwrap())
	}
	return false
}

7
vendor/cloud.google.com/go/storage/storage.go
generated
vendored

@ -1875,8 +1875,8 @@ func (ws *withPolicy) apply(config *retryConfig) {

// WithErrorFunc allows users to pass a custom function to the retryer. Errors
// will be retried if and only if `shouldRetry(err)` returns true.
// By default, the following errors are retried (see invoke.go for the default
// shouldRetry function):
// By default, the following errors are retried (see ShouldRetry for the default
// function):
//
// - HTTP responses with codes 408, 429, 502, 503, and 504.
//

@ -1887,7 +1887,8 @@ func (ws *withPolicy) apply(config *retryConfig) {
// - Wrapped versions of these errors.
//
// This option can be used to retry on a different set of errors than the
// default.
// default. Users can use the default ShouldRetry function inside their custom
// function if they only want to make minor modifications to default behavior.
func WithErrorFunc(shouldRetry func(err error) bool) RetryOption {
	return &withErrorFunc{
		shouldRetry: shouldRetry,

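The newly exported ShouldRetry is designed to be wrapped, as the comment above suggests. A hedged sketch (placeholder bucket name, not code from this commit) of combining it with WithErrorFunc so one extra error class is retried on top of the defaults:

```go
package main

import (
	"context"
	"errors"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Keep the library defaults via ShouldRetry, and additionally retry
	// unexpected EOFs for all operations on this bucket handle.
	bkt := client.Bucket("example-bucket").Retryer(
		storage.WithErrorFunc(func(err error) bool {
			return storage.ShouldRetry(err) || errors.Is(err, io.ErrUnexpectedEOF)
		}),
	)
	_ = bkt // use bkt for subsequent reads and writes
}
```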
434
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
|
@ -1677,6 +1677,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
|
@ -3459,6 +3462,94 @@ var awsPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"cassandra": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "ap-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "fips-us-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "cassandra-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-west-2",
|
||||
}: endpoint{
|
||||
Hostname: "cassandra-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "sa-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "cassandra-fips.us-east-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "cassandra-fips.us-west-2.amazonaws.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
"catalog.marketplace": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
@ -10689,6 +10780,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
|
@ -11705,6 +11799,12 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "iotevents-fips.ca-central-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
|
@ -11714,15 +11814,69 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "fips-ca-central-1",
|
||||
}: endpoint{
|
||||
Hostname: "iotevents-fips.ca-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ca-central-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "iotevents-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-east-2",
|
||||
}: endpoint{
|
||||
Hostname: "iotevents-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-west-2",
|
||||
}: endpoint{
|
||||
Hostname: "iotevents-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "iotevents-fips.us-east-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "iotevents-fips.us-east-2.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "iotevents-fips.us-west-2.amazonaws.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
"ioteventsdata": service{
|
||||
|
@ -11775,6 +11929,15 @@ var awsPartition = partition{
|
|||
Region: "ca-central-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ca-central-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{
|
||||
|
@ -11799,6 +11962,42 @@ var awsPartition = partition{
|
|||
Region: "eu-west-2",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-ca-central-1",
|
||||
}: endpoint{
|
||||
Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ca-central-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "data.iotevents-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-east-2",
|
||||
}: endpoint{
|
||||
Hostname: "data.iotevents-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-west-2",
|
||||
}: endpoint{
|
||||
Hostname: "data.iotevents-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{
|
||||
|
@ -11807,6 +12006,15 @@ var awsPartition = partition{
|
|||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "data.iotevents-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
}: endpoint{
|
||||
|
@ -11815,6 +12023,15 @@ var awsPartition = partition{
|
|||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "data.iotevents-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
}: endpoint{
|
||||
|
@ -11823,6 +12040,15 @@ var awsPartition = partition{
|
|||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "data.iotevents-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"iotsecuredtunneling": service{
|
||||
|
@ -12927,6 +13153,15 @@ var awsPartition = partition{
|
|||
                    },
                    Deprecated: boxedTrue,
                },
                endpointKey{
                    Region: "me-central-1-fips",
                }: endpoint{
                    Hostname: "kms-fips.me-central-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "me-central-1",
                    },
                    Deprecated: boxedTrue,
                },
                endpointKey{
                    Region: "me-south-1",
                }: endpoint{},
@@ -14863,6 +15098,9 @@ var awsPartition = partition{
                endpointKey{
                    Region: "ap-southeast-2",
                }: endpoint{},
                endpointKey{
                    Region: "ap-southeast-3",
                }: endpoint{},
                endpointKey{
                    Region: "ca-central-1",
                }: endpoint{},
@@ -17253,6 +17491,112 @@ var awsPartition = partition{
                },
            },
        },
        "rds-data": service{
            Endpoints: serviceEndpoints{
                endpointKey{
                    Region: "ap-northeast-1",
                }: endpoint{},
                endpointKey{
                    Region: "ap-northeast-2",
                }: endpoint{},
                endpointKey{
                    Region: "ap-south-1",
                }: endpoint{},
                endpointKey{
                    Region: "ap-southeast-1",
                }: endpoint{},
                endpointKey{
                    Region: "ap-southeast-2",
                }: endpoint{},
                endpointKey{
                    Region: "ca-central-1",
                }: endpoint{},
                endpointKey{
                    Region: "eu-central-1",
                }: endpoint{},
                endpointKey{
                    Region: "eu-west-1",
                }: endpoint{},
                endpointKey{
                    Region: "eu-west-2",
                }: endpoint{},
                endpointKey{
                    Region: "eu-west-3",
                }: endpoint{},
                endpointKey{
                    Region: "fips-us-east-1",
                }: endpoint{
                    Hostname: "rds-data-fips.us-east-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-east-1",
                    },
                    Deprecated: boxedTrue,
                },
                endpointKey{
                    Region: "fips-us-east-2",
                }: endpoint{
                    Hostname: "rds-data-fips.us-east-2.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-east-2",
                    },
                    Deprecated: boxedTrue,
                },
                endpointKey{
                    Region: "fips-us-west-1",
                }: endpoint{
                    Hostname: "rds-data-fips.us-west-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-west-1",
                    },
                    Deprecated: boxedTrue,
                },
                endpointKey{
                    Region: "fips-us-west-2",
                }: endpoint{
                    Hostname: "rds-data-fips.us-west-2.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-west-2",
                    },
                    Deprecated: boxedTrue,
                },
                endpointKey{
                    Region: "us-east-1",
                }: endpoint{},
                endpointKey{
                    Region: "us-east-1",
                    Variant: fipsVariant,
                }: endpoint{
                    Hostname: "rds-data-fips.us-east-1.amazonaws.com",
                },
                endpointKey{
                    Region: "us-east-2",
                }: endpoint{},
                endpointKey{
                    Region: "us-east-2",
                    Variant: fipsVariant,
                }: endpoint{
                    Hostname: "rds-data-fips.us-east-2.amazonaws.com",
                },
                endpointKey{
                    Region: "us-west-1",
                }: endpoint{},
                endpointKey{
                    Region: "us-west-1",
                    Variant: fipsVariant,
                }: endpoint{
                    Hostname: "rds-data-fips.us-west-1.amazonaws.com",
                },
                endpointKey{
                    Region: "us-west-2",
                }: endpoint{},
                endpointKey{
                    Region: "us-west-2",
                    Variant: fipsVariant,
                }: endpoint{
                    Hostname: "rds-data-fips.us-west-2.amazonaws.com",
                },
            },
        },
        "redshift": service{
            Endpoints: serviceEndpoints{
                endpointKey{
@@ -22017,6 +22361,19 @@ var awsPartition = partition{
                },
            },
        },
        "supportapp": service{
            Endpoints: serviceEndpoints{
                endpointKey{
                    Region: "eu-west-1",
                }: endpoint{},
                endpointKey{
                    Region: "us-east-1",
                }: endpoint{},
                endpointKey{
                    Region: "us-west-2",
                }: endpoint{},
            },
        },
        "swf": service{
            Endpoints: serviceEndpoints{
                endpointKey{
@@ -24914,6 +25271,16 @@ var awscnPartition = partition{
                },
            },
        },
        "cassandra": service{
            Endpoints: serviceEndpoints{
                endpointKey{
                    Region: "cn-north-1",
                }: endpoint{},
                endpointKey{
                    Region: "cn-northwest-1",
                }: endpoint{},
            },
        },
        "ce": service{
            PartitionEndpoint: "aws-cn-global",
            IsRegionalized: boxedFalse,
@@ -27046,6 +27413,26 @@ var awsusgovPartition = partition{
                },
            },
        },
        "cassandra": service{
            Endpoints: serviceEndpoints{
                endpointKey{
                    Region: "us-gov-east-1",
                }: endpoint{
                    Hostname: "cassandra.us-gov-east-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-gov-east-1",
                    },
                },
                endpointKey{
                    Region: "us-gov-west-1",
                }: endpoint{
                    Hostname: "cassandra.us-gov-west-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-gov-west-1",
                    },
                },
            },
        },
        "cloudcontrolapi": service{
            Endpoints: serviceEndpoints{
                endpointKey{
@@ -28839,13 +29226,37 @@ var awsusgovPartition = partition{
        },
        "iotevents": service{
            Endpoints: serviceEndpoints{
                endpointKey{
                    Region: "fips-us-gov-west-1",
                }: endpoint{
                    Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-gov-west-1",
                    },
                    Deprecated: boxedTrue,
                },
                endpointKey{
                    Region: "us-gov-west-1",
                }: endpoint{},
                endpointKey{
                    Region: "us-gov-west-1",
                    Variant: fipsVariant,
                }: endpoint{
                    Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com",
                },
            },
        },
        "ioteventsdata": service{
            Endpoints: serviceEndpoints{
                endpointKey{
                    Region: "fips-us-gov-west-1",
                }: endpoint{
                    Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com",
                    CredentialScope: credentialScope{
                        Region: "us-gov-west-1",
                    },
                    Deprecated: boxedTrue,
                },
                endpointKey{
                    Region: "us-gov-west-1",
                }: endpoint{
@@ -28854,6 +29265,15 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"iotsecuredtunneling": service{
|
||||
|
@ -32529,6 +32949,20 @@ var awsisobPartition = partition{
|
|||
                }: endpoint{},
            },
        },
        "metering.marketplace": service{
            Defaults: endpointDefaults{
                defaultKey{}: endpoint{
                    CredentialScope: credentialScope{
                        Service: "aws-marketplace",
                    },
                },
            },
            Endpoints: serviceEndpoints{
                endpointKey{
                    Region: "us-isob-east-1",
                }: endpoint{},
            },
        },
        "monitoring": service{
            Endpoints: serviceEndpoints{
                endpointKey{
vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored, 2 changed lines)
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.44.81"
const SDKVersion = "1.44.87"
vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go (generated, vendored, 3 changed lines)
@@ -11,6 +11,7 @@ import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "os/exec"
    "regexp"
@@ -253,7 +254,7 @@ func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err
    }
    defer file.Close()

    data, err := io.ReadAll(io.LimitReader(file, 1<<20))
    data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20))
    if err != nil || len(data) == 0 {
        // Cachefile exists, but no data found. Get new credential.
        return "", nil
vendor/golang.org/x/sys/unix/ioctl_linux.go (generated, vendored, 20 changed lines)
@@ -4,9 +4,7 @@

package unix

import (
    "unsafe"
)
import "unsafe"

// IoctlRetInt performs an ioctl operation specified by req on a device
// associated with opened file descriptor fd, and returns a non-negative
@@ -217,3 +215,19 @@ func IoctlKCMAttach(fd int, info KCMAttach) error {
func IoctlKCMUnattach(fd int, info KCMUnattach) error {
    return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info))
}

// IoctlLoopGetStatus64 gets the status of the loop device associated with the
// file descriptor fd using the LOOP_GET_STATUS64 operation.
func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) {
    var value LoopInfo64
    if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil {
        return nil, err
    }
    return &value, nil
}

// IoctlLoopSetStatus64 sets the status of the loop device associated with the
// file descriptor fd using the LOOP_SET_STATUS64 operation.
func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error {
    return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value))
}
vendor/golang.org/x/sys/unix/str.go (generated, vendored, 27 changed lines)
@@ -1,27 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris

package unix

func itoa(val int) string { // do it here rather than with fmt to avoid dependency
    if val < 0 {
        return "-" + uitoa(uint(-val))
    }
    return uitoa(uint(val))
}

func uitoa(val uint) string {
    var buf [32]byte // big enough for int64
    i := len(buf) - 1
    for val >= 10 {
        buf[i] = byte(val%10 + '0')
        i--
        val /= 10
    }
    buf[i] = byte(val + '0')
    return string(buf[i:])
}
vendor/golang.org/x/sys/unix/syscall_linux.go (generated, vendored, 3 changed lines)
@@ -13,6 +13,7 @@ package unix

import (
    "encoding/binary"
    "strconv"
    "syscall"
    "time"
    "unsafe"
@@ -233,7 +234,7 @@ func Futimesat(dirfd int, path string, tv []Timeval) error {
func Futimes(fd int, tv []Timeval) (err error) {
    // Believe it or not, this is the best we can do on Linux
    // (and is what glibc does).
    return Utimes("/proc/self/fd/"+itoa(fd), tv)
    return Utimes("/proc/self/fd/"+strconv.Itoa(fd), tv)
}

const ImplementsGetwd = true
vendor/golang.org/x/sys/unix/syscall_solaris.go (generated, vendored, 2 changed lines)
@@ -956,7 +956,7 @@ func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) {
        // the unsafe version would be (*fileObj)(unsafe.Pointer(uintptr(peInt.Object)))
        peExt.fobj = fCookie.fobj
    } else {
        panic("mismanaged memory")
        panic("unexpected event port address; may be due to kernel bug; see https://go.dev/issue/54254")
    }
    delete(e.cookies, cookie)
    peExt.Path = BytePtrToString((*byte)(unsafe.Pointer(peExt.fobj.Name)))
Some files were not shown because too many files have changed in this diff.