mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-12-01 14:47:38 +00:00
Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
This commit is contained in:
commit
20414b3038
127 changed files with 13632 additions and 1193 deletions
2
.github/workflows/check-licenses.yml
vendored
2
.github/workflows/check-licenses.yml
vendored
|
@ -17,7 +17,7 @@ jobs:
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@main
|
uses: actions/setup-go@main
|
||||||
with:
|
with:
|
||||||
go-version: 1.19.1
|
go-version: 1.19.2
|
||||||
id: go
|
id: go
|
||||||
- name: Code checkout
|
- name: Code checkout
|
||||||
uses: actions/checkout@master
|
uses: actions/checkout@master
|
||||||
|
|
2
.github/workflows/main.yml
vendored
2
.github/workflows/main.yml
vendored
|
@ -19,7 +19,7 @@ jobs:
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@main
|
uses: actions/setup-go@main
|
||||||
with:
|
with:
|
||||||
go-version: 1.19.1
|
go-version: 1.19.2
|
||||||
id: go
|
id: go
|
||||||
- name: Code checkout
|
- name: Code checkout
|
||||||
uses: actions/checkout@master
|
uses: actions/checkout@master
|
||||||
|
|
16
README.md
16
README.md
|
@ -772,7 +772,7 @@ to your needs or when testing bugfixes.
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `victoria-metrics` binary and puts it into the `bin` folder.
|
It builds `victoria-metrics` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -788,7 +788,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
### Development ARM build
|
### Development ARM build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.
|
It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -802,7 +802,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
`Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
|
`Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
|
It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -1263,10 +1263,11 @@ See also [resource usage limits docs](#resource-usage-limits).
|
||||||
|
|
||||||
By default VictoriaMetrics is tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
|
By default VictoriaMetrics is tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
|
||||||
|
|
||||||
- `-memory.allowedPercent` and `-search.allowedBytes` limit the amounts of memory, which may be used for various internal caches at VictoriaMetrics. Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
|
- `-memory.allowedPercent` and `-memory.allowedBytes` limit the amounts of memory, which may be used for various internal caches at VictoriaMetrics. Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
|
||||||
|
- `-search.maxMemoryPerQuery` limits the amounts of memory, which can be used for processing a single query. Queries, which need more memory, are rejected. Heavy queries, which select big number of time series, may exceed the per-query memory limit by a small percent. The total memory limit for concurrently executed queries can be estimated as `-search.maxMemoryPerQuery` multiplied by `-search.maxConcurrentRequests`.
|
||||||
- `-search.maxUniqueTimeseries` limits the number of unique time series a single query can find and process. VictoriaMetrics keeps in memory some metainformation about the time series located by each query and spends some CPU time for processing the found time series. This means that the maximum memory usage and CPU usage a single query can use is proportional to `-search.maxUniqueTimeseries`.
|
- `-search.maxUniqueTimeseries` limits the number of unique time series a single query can find and process. VictoriaMetrics keeps in memory some metainformation about the time series located by each query and spends some CPU time for processing the found time series. This means that the maximum memory usage and CPU usage a single query can use is proportional to `-search.maxUniqueTimeseries`.
|
||||||
- `-search.maxQueryDuration` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled. This allows saving CPU and RAM when executing unexpected heavy queries.
|
- `-search.maxQueryDuration` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled. This allows saving CPU and RAM when executing unexpected heavy queries.
|
||||||
- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries.
|
- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries. See also `-search.maxMemoryPerQuery` command-line flag.
|
||||||
- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
|
- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
|
||||||
- `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
|
- `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
|
||||||
- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||||
|
@ -2225,7 +2226,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-search.logSlowQueryDuration duration
|
-search.logSlowQueryDuration duration
|
||||||
Log queries with execution time exceeding this value. Zero disables slow query logging (default 5s)
|
Log queries with execution time exceeding this value. Zero disables slow query logging (default 5s)
|
||||||
-search.maxConcurrentRequests int
|
-search.maxConcurrentRequests int
|
||||||
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores. See also -search.maxQueueDuration (default 8)
|
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores, while many concurrently executed requests may require high amounts of memory. See also -search.maxQueueDuration and -search.maxMemoryPerQuery (default 8)
|
||||||
-search.maxExportDuration duration
|
-search.maxExportDuration duration
|
||||||
The maximum duration for /api/v1/export call (default 720h0m0s)
|
The maximum duration for /api/v1/export call (default 720h0m0s)
|
||||||
-search.maxExportSeries int
|
-search.maxExportSeries int
|
||||||
|
@ -2236,6 +2237,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage . This flag is available only in enterprise version of VictoriaMetrics (default 300000)
|
The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage . This flag is available only in enterprise version of VictoriaMetrics (default 300000)
|
||||||
-search.maxLookback duration
|
-search.maxLookback duration
|
||||||
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaining due to historical reasons
|
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaining due to historical reasons
|
||||||
|
-search.maxMemoryPerQuery size
|
||||||
|
The maximum amounts of memory a single query may consume. Queries requiring more memory are rejected. The total memory limit for concurrently executed queries can be estimated as -search.maxMemoryPerQuery multiplied by -search.maxConcurrentRequests
|
||||||
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||||
-search.maxPointsPerTimeseries int
|
-search.maxPointsPerTimeseries int
|
||||||
The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
|
The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
|
||||||
-search.maxPointsSubqueryPerTimeseries int
|
-search.maxPointsSubqueryPerTimeseries int
|
||||||
|
|
|
@ -382,7 +382,7 @@ Extra labels can be added to metrics collected by `vmagent` via the following me
|
||||||
```
|
```
|
||||||
|
|
||||||
`vmagent` sets `scrape_series_added` to zero when it runs with `-promscrape.noStaleMarkers` command-line option
|
`vmagent` sets `scrape_series_added` to zero when it runs with `-promscrape.noStaleMarkers` command-line option
|
||||||
(e.g. when [staleness markers](#prometheus-staleness-markers) are disabled).
|
or when it scrapes target with `no_stale_markers: true` option, e.g. when [staleness markers](#prometheus-staleness-markers) are disabled.
|
||||||
|
|
||||||
* `scrape_series_limit` - the limit on the number of unique time series the given target can expose according to [these docs](#cardinality-limiter).
|
* `scrape_series_limit` - the limit on the number of unique time series the given target can expose according to [these docs](#cardinality-limiter).
|
||||||
This metric is exposed only if the series limit is set.
|
This metric is exposed only if the series limit is set.
|
||||||
|
@ -604,9 +604,13 @@ Additionally, the `action: graphite` relabeling rules usually work much faster t
|
||||||
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.
|
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.
|
||||||
|
|
||||||
Prometheus staleness markers' tracking needs additional memory, since it must store the previous response body per each scrape target
|
Prometheus staleness markers' tracking needs additional memory, since it must store the previous response body per each scrape target
|
||||||
in order to compare it to the current response body. The memory usage may be reduced by passing `-promscrape.noStaleMarkers`
|
in order to compare it to the current response body. The memory usage may be reduced by disabling staleness tracking in the following ways:
|
||||||
command-line flag to `vmagent`. This disables staleness tracking. This also disables tracking the number of new time series
|
|
||||||
per each scrape with the auto-generated `scrape_series_added` metric. See [these docs](#automatically-generated-metrics) for details.
|
* By passing `-promscrape.noStaleMarkers` command-line flag to `vmagent`. This disables staleness tracking across all the targets.
|
||||||
|
* By specifying `no_stale_markers: true` option in the [scrape_config](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) for the corresponding target.
|
||||||
|
|
||||||
|
When staleness tracking is disabled, then `vmagent` doesn't track the number of new time series per each scrape,
|
||||||
|
e.g. it sets `scrape_series_added` metric to zero. See [these docs](#automatically-generated-metrics) for details.
|
||||||
|
|
||||||
## Stream parsing mode
|
## Stream parsing mode
|
||||||
|
|
||||||
|
@ -1023,7 +1027,7 @@ It may be needed to build `vmagent` from source code when developing or testing
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmagent` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmagent` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds the `vmagent` binary and puts it into the `bin` folder.
|
It builds the `vmagent` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -1052,7 +1056,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
### Development ARM build
|
### Development ARM build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmagent-linux-arm` or `make vmagent-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
2. Run `make vmagent-linux-arm` or `make vmagent-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||||
It builds `vmagent-linux-arm` or `vmagent-linux-arm64` binary respectively and puts it into the `bin` folder.
|
It builds `vmagent-linux-arm` or `vmagent-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -123,7 +123,8 @@ func (rctx *relabelCtx) applyRelabeling(tss []prompbmarshal.TimeSeries, extraLab
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
labels = pcs.Apply(labels, labelsLen, true)
|
labels = pcs.Apply(labels, labelsLen)
|
||||||
|
labels = promrelabel.FinalizeLabels(labels[:labelsLen], labels[labelsLen:])
|
||||||
if len(labels) == labelsLen {
|
if len(labels) == labelsLen {
|
||||||
// Drop the current time series, since relabeling removed all the labels.
|
// Drop the current time series, since relabeling removed all the labels.
|
||||||
continue
|
continue
|
||||||
|
|
|
@ -665,9 +665,10 @@ Try the following recommendations in such cases:
|
||||||
are delivered to the datasource;
|
are delivered to the datasource;
|
||||||
* If you know in advance, that data in datasource is delayed - try changing vmalert's `-datasource.lookback`
|
* If you know in advance, that data in datasource is delayed - try changing vmalert's `-datasource.lookback`
|
||||||
command-line flag to add a time shift for evaluations;
|
command-line flag to add a time shift for evaluations;
|
||||||
* If time intervals between datapoints in datasource are irregular - try changing vmalert's `-datasource.queryStep`
|
* If time intervals between datapoints in datasource are irregular or `>=5min` - try changing vmalert's
|
||||||
command-line flag to specify how far search query can lookback for the recent datapoint. By default, this value
|
`-datasource.queryStep` command-line flag to specify how far search query can lookback for the recent datapoint.
|
||||||
is equal to group's evaluation interval.
|
The recommendation is to have the step at least two times bigger than `scrape_interval`, since
|
||||||
|
there are no guarantees that scrape will not fail.
|
||||||
|
|
||||||
Sometimes, it is not clear why some specific alert fired or didn't fire. It is very important to remember, that
|
Sometimes, it is not clear why some specific alert fired or didn't fire. It is very important to remember, that
|
||||||
alerts with `for: 0` fire immediately when their expression becomes true. And alerts with `for > 0` will fire only
|
alerts with `for: 0` fire immediately when their expression becomes true. And alerts with `for > 0` will fire only
|
||||||
|
@ -1275,7 +1276,7 @@ spec:
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmalert` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmalert` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmalert` binary and puts it into the `bin` folder.
|
It builds `vmalert` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -1291,7 +1292,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
### Development ARM build
|
### Development ARM build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmalert-linux-arm` or `make vmalert-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmalert-linux-arm` or `make vmalert-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmalert-linux-arm` or `vmalert-linux-arm64` binary respectively and puts it into the `bin` folder.
|
It builds `vmalert-linux-arm` or `vmalert-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -183,9 +183,9 @@ func (a Alert) toPromLabels(relabelCfg *promrelabel.ParsedConfigs) []prompbmarsh
|
||||||
Value: v,
|
Value: v,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
promrelabel.SortLabels(labels)
|
|
||||||
if relabelCfg != nil {
|
if relabelCfg != nil {
|
||||||
return relabelCfg.Apply(labels, 0, false)
|
labels = relabelCfg.Apply(labels, 0)
|
||||||
}
|
}
|
||||||
|
promrelabel.SortLabels(labels)
|
||||||
return labels
|
return labels
|
||||||
}
|
}
|
||||||
|
|
|
@ -15,10 +15,10 @@
|
||||||
"endsAt":{%q= alert.End.Format(time.RFC3339Nano) %},
|
"endsAt":{%q= alert.End.Format(time.RFC3339Nano) %},
|
||||||
{% endif %}
|
{% endif %}
|
||||||
"labels": {
|
"labels": {
|
||||||
"alertname":{%q= alert.Name %}
|
|
||||||
{% code lbls := alert.toPromLabels(relabelCfg) %}
|
{% code lbls := alert.toPromLabels(relabelCfg) %}
|
||||||
{% for _, l := range lbls %}
|
{% code ll := len(lbls) %}
|
||||||
,{%q= l.Name %}:{%q= l.Value %}
|
{% for idx, l := range lbls %}
|
||||||
|
{%q= l.Name %}:{%q= l.Value %}{% if idx != ll-1 %}, {% endif %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
},
|
},
|
||||||
"annotations": {
|
"annotations": {
|
||||||
|
|
|
@ -51,22 +51,27 @@ func streamamRequest(qw422016 *qt422016.Writer, alerts []Alert, generatorURL fun
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:16
|
//line app/vmalert/notifier/alertmanager_request.qtpl:16
|
||||||
}
|
}
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:16
|
//line app/vmalert/notifier/alertmanager_request.qtpl:16
|
||||||
qw422016.N().S(`"labels": {"alertname":`)
|
qw422016.N().S(`"labels": {`)
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:18
|
//line app/vmalert/notifier/alertmanager_request.qtpl:18
|
||||||
qw422016.N().Q(alert.Name)
|
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:19
|
|
||||||
lbls := alert.toPromLabels(relabelCfg)
|
lbls := alert.toPromLabels(relabelCfg)
|
||||||
|
|
||||||
|
//line app/vmalert/notifier/alertmanager_request.qtpl:19
|
||||||
|
ll := len(lbls)
|
||||||
|
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:20
|
//line app/vmalert/notifier/alertmanager_request.qtpl:20
|
||||||
for _, l := range lbls {
|
for idx, l := range lbls {
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:20
|
|
||||||
qw422016.N().S(`,`)
|
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||||
qw422016.N().Q(l.Name)
|
qw422016.N().Q(l.Name)
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||||
qw422016.N().S(`:`)
|
qw422016.N().S(`:`)
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||||
qw422016.N().Q(l.Value)
|
qw422016.N().Q(l.Value)
|
||||||
|
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||||
|
if idx != ll-1 {
|
||||||
|
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||||
|
qw422016.N().S(`,`)
|
||||||
|
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||||
|
}
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
||||||
}
|
}
|
||||||
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
||||||
|
|
|
@ -67,9 +67,6 @@ func TestAlertManager_Send(t *testing.T) {
|
||||||
if a[0].GeneratorURL != "0/0" {
|
if a[0].GeneratorURL != "0/0" {
|
||||||
t.Errorf("expected 0/0 as generatorURL got %s", a[0].GeneratorURL)
|
t.Errorf("expected 0/0 as generatorURL got %s", a[0].GeneratorURL)
|
||||||
}
|
}
|
||||||
if a[0].Labels["alertname"] != "alert0" {
|
|
||||||
t.Errorf("expected alert0 as alert name got %s", a[0].Labels["alertname"])
|
|
||||||
}
|
|
||||||
if a[0].StartsAt.IsZero() {
|
if a[0].StartsAt.IsZero() {
|
||||||
t.Errorf("expected non-zero start time")
|
t.Errorf("expected non-zero start time")
|
||||||
}
|
}
|
||||||
|
|
|
@ -132,8 +132,9 @@ func parseConfig(path string) (*Config, error) {
|
||||||
|
|
||||||
func parseLabels(target string, metaLabels map[string]string, cfg *Config) (string, []prompbmarshal.Label, error) {
|
func parseLabels(target string, metaLabels map[string]string, cfg *Config) (string, []prompbmarshal.Label, error) {
|
||||||
labels := mergeLabels(target, metaLabels, cfg)
|
labels := mergeLabels(target, metaLabels, cfg)
|
||||||
labels = cfg.parsedRelabelConfigs.Apply(labels, 0, false)
|
labels = cfg.parsedRelabelConfigs.Apply(labels, 0)
|
||||||
labels = promrelabel.RemoveMetaLabels(labels[:0], labels)
|
labels = promrelabel.RemoveMetaLabels(labels[:0], labels)
|
||||||
|
promrelabel.SortLabels(labels)
|
||||||
// Remove references to already deleted labels, so GC could clean strings for label name and label value past len(labels).
|
// Remove references to already deleted labels, so GC could clean strings for label name and label value past len(labels).
|
||||||
// This should reduce memory usage when relabeling creates big number of temporary labels with long names and/or values.
|
// This should reduce memory usage when relabeling creates big number of temporary labels with long names and/or values.
|
||||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
|
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
|
||||||
|
|
|
@ -5,11 +5,11 @@ import (
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// APIAlert represents a notifier.AlertingRule ruleState
|
// APIAlert represents a notifier.AlertingRule state
|
||||||
// for WEB view
|
// for WEB view
|
||||||
// https://github.com/prometheus/compliance/blob/main/alert_generator/specification.md#get-apiv1rules
|
// https://github.com/prometheus/compliance/blob/main/alert_generator/specification.md#get-apiv1rules
|
||||||
type APIAlert struct {
|
type APIAlert struct {
|
||||||
State string `json:"ruleState"`
|
State string `json:"state"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Value string `json:"value"`
|
Value string `json:"value"`
|
||||||
Labels map[string]string `json:"labels,omitempty"`
|
Labels map[string]string `json:"labels,omitempty"`
|
||||||
|
@ -30,7 +30,7 @@ type APIAlert struct {
|
||||||
// SourceLink contains a link to a system which should show
|
// SourceLink contains a link to a system which should show
|
||||||
// why Alert was generated
|
// why Alert was generated
|
||||||
SourceLink string `json:"source"`
|
SourceLink string `json:"source"`
|
||||||
// Restored shows whether Alert's ruleState was restored on restart
|
// Restored shows whether Alert's state was restored on restart
|
||||||
Restored bool `json:"restored"`
|
Restored bool `json:"restored"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -86,10 +86,10 @@ type GroupAlerts struct {
|
||||||
// see https://github.com/prometheus/compliance/blob/main/alert_generator/specification.md#get-apiv1rules
|
// see https://github.com/prometheus/compliance/blob/main/alert_generator/specification.md#get-apiv1rules
|
||||||
type APIRule struct {
|
type APIRule struct {
|
||||||
// State must be one of these under following scenarios
|
// State must be one of these under following scenarios
|
||||||
// "pending": at least 1 alert in the rule in pending ruleState and no other alert in firing ruleState.
|
// "pending": at least 1 alert in the rule in pending state and no other alert in firing ruleState.
|
||||||
// "firing": at least 1 alert in the rule in firing ruleState.
|
// "firing": at least 1 alert in the rule in firing state.
|
||||||
// "inactive": no alert in the rule in firing or pending ruleState.
|
// "inactive": no alert in the rule in firing or pending state.
|
||||||
State string `json:"ruleState"`
|
State string `json:"state"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
// Query represents Rule's `expression` field
|
// Query represents Rule's `expression` field
|
||||||
Query string `json:"query"`
|
Query string `json:"query"`
|
||||||
|
@ -121,7 +121,7 @@ type APIRule struct {
|
||||||
// GroupID is an unique Group's ID
|
// GroupID is an unique Group's ID
|
||||||
GroupID string `json:"group_id"`
|
GroupID string `json:"group_id"`
|
||||||
|
|
||||||
// TODO:
|
// Updates contains the ordered list of recorded ruleStateEntry objects
|
||||||
Updates []ruleStateEntry `json:"updates"`
|
Updates []ruleStateEntry `json:"updates"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -167,7 +167,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmauth` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmauth` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmauth` binary and puts it into the `bin` folder.
|
It builds `vmauth` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -2,14 +2,6 @@
|
||||||
|
|
||||||
`vmbackup` creates VictoriaMetrics data backups from [instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots).
|
`vmbackup` creates VictoriaMetrics data backups from [instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots).
|
||||||
|
|
||||||
Supported storage systems for backups:
|
|
||||||
|
|
||||||
* [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
|
|
||||||
* [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
|
|
||||||
* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<bucket>/<path/to/backup>`
|
|
||||||
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
|
|
||||||
* Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents from storing the backup into the directory pointed by `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.
|
|
||||||
|
|
||||||
`vmbackup` supports incremental and full backups. Incremental backups are created automatically if the destination path already contains data from the previous backup.
|
`vmbackup` supports incremental and full backups. Incremental backups are created automatically if the destination path already contains data from the previous backup.
|
||||||
Full backups can be sped up with `-origin` pointing to an already existing backup on the same remote storage. In this case `vmbackup` makes server-side copy for the shared
|
Full backups can be sped up with `-origin` pointing to an already existing backup on the same remote storage. In this case `vmbackup` makes server-side copy for the shared
|
||||||
data between the existing backup and new backup. It saves time and costs on data transfer.
|
data between the existing backup and new backup. It saves time and costs on data transfer.
|
||||||
|
@ -23,6 +15,16 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
|
||||||
See also [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html) tool built on top of `vmbackup`. This tool simplifies
|
See also [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html) tool built on top of `vmbackup`. This tool simplifies
|
||||||
creation of hourly, daily, weekly and monthly backups.
|
creation of hourly, daily, weekly and monthly backups.
|
||||||
|
|
||||||
|
## Supported storage types
|
||||||
|
|
||||||
|
`vmbackup` supports the following `-dst` storage types:
|
||||||
|
|
||||||
|
* [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
|
||||||
|
* [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
|
||||||
|
* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<bucket>/<path/to/backup>`
|
||||||
|
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
|
||||||
|
* Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents from storing the backup into the directory pointed by `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.
|
||||||
|
|
||||||
## Use cases
|
## Use cases
|
||||||
|
|
||||||
### Regular backups
|
### Regular backups
|
||||||
|
@ -30,7 +32,7 @@ creation of hourly, daily, weekly and monthly backups.
|
||||||
Regular backup can be performed with the following command:
|
Regular backup can be performed with the following command:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshot.createURL=http://localhost:8428/snapshot/create -dst=gs://<bucket>/<path/to/new/backup>
|
./vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshot.createURL=http://localhost:8428/snapshot/create -dst=gs://<bucket>/<path/to/new/backup>
|
||||||
```
|
```
|
||||||
|
|
||||||
* `</path/to/victoria-metrics-data>` - path to VictoriaMetrics data pointed by `-storageDataPath` command-line flag in single-node VictoriaMetrics or in cluster `vmstorage`.
|
* `</path/to/victoria-metrics-data>` - path to VictoriaMetrics data pointed by `-storageDataPath` command-line flag in single-node VictoriaMetrics or in cluster `vmstorage`.
|
||||||
|
@ -75,7 +77,7 @@ The command will upload only changed data to `gs://<bucket>/latest`.
|
||||||
* Run the following command once a day:
|
* Run the following command once a day:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshot.createURL=http://localhost:8428/snapshot/create -dst=gs://<bucket>/<YYYYMMDD> -origin=gs://<bucket>/latest
|
./vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshot.createURL=http://localhost:8428/snapshot/create -dst=gs://<bucket>/<YYYYMMDD> -origin=gs://<bucket>/latest
|
||||||
```
|
```
|
||||||
|
|
||||||
Where `<daily-snapshot>` is the snapshot for the last day `<YYYYMMDD>`.
|
Where `<daily-snapshot>` is the snapshot for the last day `<YYYYMMDD>`.
|
||||||
|
@ -152,6 +154,11 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
|
||||||
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email"
|
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
* Obtaining credentials from env variables.
|
||||||
|
- For AWS S3 compatible storages set env variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
|
||||||
|
Also you can set env variable `AWS_SHARED_CREDENTIALS_FILE` with path to credentials file.
|
||||||
|
- For GCE cloud storage set env variable `GOOGLE_APPLICATION_CREDENTIALS` with path to credentials file.
|
||||||
|
- For Azure storage either set env variables `AZURE_STORAGE_ACCOUNT_NAME` and `AZURE_STORAGE_ACCOUNT_KEY`, or `AZURE_STORAGE_ACCOUNT_CONNECTION_STRING`.
|
||||||
|
|
||||||
* Usage with s3 custom url endpoint. It is possible to use `vmbackup` with s3 compatible storages like minio, cloudian, etc.
|
* Usage with s3 custom url endpoint. It is possible to use `vmbackup` with s3 compatible storages like minio, cloudian, etc.
|
||||||
You have to add a custom url endpoint via flag:
|
You have to add a custom url endpoint via flag:
|
||||||
|
@ -279,7 +286,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmbackup` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmbackup` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmbackup` binary and puts it into the `bin` folder.
|
It builds `vmbackup` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -2,16 +2,22 @@
|
||||||
|
|
||||||
***vmbackupmanager is a part of [enterprise package](https://victoriametrics.com/products/enterprise/). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
***vmbackupmanager is a part of [enterprise package](https://victoriametrics.com/products/enterprise/). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
||||||
|
|
||||||
The VictoriaMetrics backup manager automates regular backup procedures. It supports the following backup intervals: **hourly**, **daily**, **weekly** and **monthly**. Multiple backup intervals may be configured simultaneously. I.e. the backup manager creates hourly backups every hour, while it creates daily backups every day, etc. Backup manager must have read access to the storage data, so best practice is to install it on the same machine (or as a sidecar) where the storage node is installed.
|
The VictoriaMetrics backup manager automates regular backup procedures. It supports the following backup intervals: **hourly**, **daily**, **weekly** and **monthly**.
|
||||||
The backup service makes a backup every hour and puts it to the latest folder and then copies data to the folders which represent the backup intervals (hourly, daily, weekly and monthly)
|
Multiple backup intervals may be configured simultaneously. I.e. the backup manager creates hourly backups every hour, while it creates daily backups every day, etc.
|
||||||
|
Backup manager must have read access to the storage data, so best practice is to install it on the same machine (or as a sidecar) where the storage node is installed.
|
||||||
|
The backup service makes a backup every hour and puts it to the latest folder and then copies data to the folders
|
||||||
|
which represent the backup intervals (hourly, daily, weekly and monthly)
|
||||||
|
|
||||||
The required flags for running the service are as follows:
|
The required flags for running the service are as follows:
|
||||||
|
|
||||||
* -eula - should be true and means that you have the legal right to run a backup manager. That can either be a signed contract or an email with confirmation to run the service in a trial period
|
* -eula - should be true and means that you have the legal right to run a backup manager. That can either be a signed contract or an email
|
||||||
* -storageDataPath - path to VictoriaMetrics or vmstorage data path to make backup from
|
with confirmation to run the service in a trial period.
|
||||||
|
* -storageDataPath - path to VictoriaMetrics or vmstorage data path to make backup from.
|
||||||
* -snapshot.createURL - VictoriaMetrics creates snapshot URL which will automatically be created during backup. Example: <http://victoriametrics:8428/snapshot/create>
|
* -snapshot.createURL - VictoriaMetrics creates snapshot URL which will automatically be created during backup. Example: <http://victoriametrics:8428/snapshot/create>
|
||||||
* -dst - backup destination at s3, gcs or local filesystem
|
* -dst - backup destination at [the supported storage types](https://docs.victoriametrics.com/vmbackup.html#supported-storage-types).
|
||||||
* -credsFilePath - path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set. See [https://cloud.google.com/iam/docs/creating-managing-service-account-keys](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) and [https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html](https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)
|
* -credsFilePath - path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set.
|
||||||
|
See [https://cloud.google.com/iam/docs/creating-managing-service-account-keys](https://cloud.google.com/iam/docs/creating-managing-service-account-keys)
|
||||||
|
and [https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html](https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html).
|
||||||
|
|
||||||
Backup schedule is controlled by the following flags:
|
Backup schedule is controlled by the following flags:
|
||||||
|
|
||||||
|
@ -36,7 +42,11 @@ To get the full list of supported flags please run the following command:
|
||||||
./vmbackupmanager --help
|
./vmbackupmanager --help
|
||||||
```
|
```
|
||||||
|
|
||||||
The service creates a **full** backup each run. This means that the system can be restored fully from any particular backup using vmrestore. Backup manager uploads only the data that has been changed or created since the most recent backup (incremental backup).
|
The service creates a **full** backup each run. This means that the system can be restored fully
|
||||||
|
from any particular backup using [vmrestore](https://docs.victoriametrics.com/vmrestore.html).
|
||||||
|
Backup manager uploads only the data that has been changed or created since the most recent backup (incremental backup).
|
||||||
|
This reduces the consumed network traffic and the time needed for performing the backup.
|
||||||
|
See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for details.
|
||||||
|
|
||||||
*Please take into account that the first backup upload could take a significant amount of time as it needs to upload all of the data.*
|
*Please take into account that the first backup upload could take a significant amount of time as it needs to upload all of the data.*
|
||||||
|
|
||||||
|
@ -47,7 +57,7 @@ There are two flags which could help with performance tuning:
|
||||||
|
|
||||||
## Example of Usage
|
## Example of Usage
|
||||||
|
|
||||||
GCS and cluster version. You need to have a credentials file in json format with following structure
|
GCS and cluster version. You need to have a credentials file in json format with following structure:
|
||||||
|
|
||||||
credentials.json
|
credentials.json
|
||||||
|
|
||||||
|
|
|
@ -700,7 +700,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmctl` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmctl` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmctl` binary and puts it into the `bin` folder.
|
It builds `vmctl` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -729,7 +729,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
#### Development ARM build
|
#### Development ARM build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmctl-linux-arm` or `make vmctl-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmctl-linux-arm` or `make vmctl-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmctl-linux-arm` or `vmctl-linux-arm64` binary respectively and puts it into the `bin` folder.
|
It builds `vmctl-linux-arm` or `vmctl-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -123,7 +123,8 @@ func (ctx *Ctx) ApplyRelabeling(labels []prompb.Label) []prompb.Label {
|
||||||
|
|
||||||
if pcs.Len() > 0 {
|
if pcs.Len() > 0 {
|
||||||
// Apply relabeling
|
// Apply relabeling
|
||||||
tmpLabels = pcs.Apply(tmpLabels, 0, true)
|
tmpLabels = pcs.Apply(tmpLabels, 0)
|
||||||
|
tmpLabels = promrelabel.FinalizeLabels(tmpLabels[:0], tmpLabels)
|
||||||
if len(tmpLabels) == 0 {
|
if len(tmpLabels) == 0 {
|
||||||
metricsDropped.Inc()
|
metricsDropped.Inc()
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,7 +12,7 @@ VictoriaMetrics must be stopped during the restore process.
|
||||||
Run the following command to restore backup from the given `-src` into the given `-storageDataPath`:
|
Run the following command to restore backup from the given `-src` into the given `-storageDataPath`:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
vmrestore -src=<storageType>://<path/to/backup> -storageDataPath=<local/path/to/restore>
|
./vmrestore -src=<storageType>://<path/to/backup> -storageDataPath=<local/path/to/restore>
|
||||||
```
|
```
|
||||||
|
|
||||||
* `<storageType>://<path/to/backup>` is the path to backup made with [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
|
* `<storageType>://<path/to/backup>` is the path to backup made with [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
|
||||||
|
@ -186,7 +186,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmrestore` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmrestore` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmrestore` binary and puts it into the `bin` folder.
|
It builds `vmrestore` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -29,7 +29,8 @@ import (
|
||||||
var (
|
var (
|
||||||
deleteAuthKey = flag.String("deleteAuthKey", "", "authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries")
|
deleteAuthKey = flag.String("deleteAuthKey", "", "authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries")
|
||||||
maxConcurrentRequests = flag.Int("search.maxConcurrentRequests", getDefaultMaxConcurrentRequests(), "The maximum number of concurrent search requests. "+
|
maxConcurrentRequests = flag.Int("search.maxConcurrentRequests", getDefaultMaxConcurrentRequests(), "The maximum number of concurrent search requests. "+
|
||||||
"It shouldn't be high, since a single request can saturate all the CPU cores. See also -search.maxQueueDuration")
|
"It shouldn't be high, since a single request can saturate all the CPU cores, while many concurrently executed requests may require high amounts of memory. "+
|
||||||
|
"See also -search.maxQueueDuration and -search.maxMemoryPerQuery")
|
||||||
maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the request waits for execution when -search.maxConcurrentRequests "+
|
maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the request waits for execution when -search.maxConcurrentRequests "+
|
||||||
"limit is reached; see also -search.maxQueryDuration")
|
"limit is reached; see also -search.maxQueryDuration")
|
||||||
resetCacheAuthKey = flag.String("search.resetCacheAuthKey", "", "Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call")
|
resetCacheAuthKey = flag.String("search.resetCacheAuthKey", "", "Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call")
|
||||||
|
|
|
@ -15,6 +15,7 @@ import (
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
|
||||||
|
@ -27,7 +28,11 @@ var (
|
||||||
disableCache = flag.Bool("search.disableCache", false, "Whether to disable response caching. This may be useful during data backfilling")
|
disableCache = flag.Bool("search.disableCache", false, "Whether to disable response caching. This may be useful during data backfilling")
|
||||||
maxPointsSubqueryPerTimeseries = flag.Int("search.maxPointsSubqueryPerTimeseries", 100e3, "The maximum number of points per series, which can be generated by subquery. "+
|
maxPointsSubqueryPerTimeseries = flag.Int("search.maxPointsSubqueryPerTimeseries", 100e3, "The maximum number of points per series, which can be generated by subquery. "+
|
||||||
"See https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3")
|
"See https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3")
|
||||||
noStaleMarkers = flag.Bool("search.noStaleMarkers", false, "Set this flag to true if the database doesn't contain Prometheus stale markers, so there is no need in spending additional CPU time on its handling. Staleness markers may exist only in data obtained from Prometheus scrape targets")
|
maxMemoryPerQuery = flagutil.NewBytes("search.maxMemoryPerQuery", 0, "The maximum amounts of memory a single query may consume. "+
|
||||||
|
"Queries requiring more memory are rejected. The total memory limit for concurrently executed queries can be estimated "+
|
||||||
|
"as -search.maxMemoryPerQuery multiplied by -search.maxConcurrentRequests")
|
||||||
|
noStaleMarkers = flag.Bool("search.noStaleMarkers", false, "Set this flag to true if the database doesn't contain Prometheus stale markers, "+
|
||||||
|
"so there is no need in spending additional CPU time on its handling. Staleness markers may exist only in data obtained from Prometheus scrape targets")
|
||||||
)
|
)
|
||||||
|
|
||||||
// The minimum number of points per timeseries for enabling time rounding.
|
// The minimum number of points per timeseries for enabling time rounding.
|
||||||
|
@ -1051,7 +1056,17 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
rollupPoints := mulNoOverflow(pointsPerTimeseries, int64(timeseriesLen*len(rcs)))
|
rollupPoints := mulNoOverflow(pointsPerTimeseries, int64(timeseriesLen*len(rcs)))
|
||||||
rollupMemorySize = mulNoOverflow(rollupPoints, 16)
|
rollupMemorySize = sumNoOverflow(mulNoOverflow(int64(rssLen), 1000), mulNoOverflow(rollupPoints, 16))
|
||||||
|
if maxMemory := int64(maxMemoryPerQuery.N); maxMemory > 0 && rollupMemorySize > maxMemory {
|
||||||
|
rss.Cancel()
|
||||||
|
return nil, &UserReadableError{
|
||||||
|
Err: fmt.Errorf("not enough memory for processing %d data points across %d time series with %d points in each time series "+
|
||||||
|
"according to -search.maxMemoryPerQuery=%d; requested memory: %d bytes; "+
|
||||||
|
"possible solutions are: reducing the number of matching time series; increasing `step` query arg (step=%gs); "+
|
||||||
|
"increasing -search.maxMemoryPerQuery",
|
||||||
|
rollupPoints, timeseriesLen*len(rcs), pointsPerTimeseries, maxMemory, rollupMemorySize, float64(ec.Step)/1e3),
|
||||||
|
}
|
||||||
|
}
|
||||||
rml := getRollupMemoryLimiter()
|
rml := getRollupMemoryLimiter()
|
||||||
if !rml.Get(uint64(rollupMemorySize)) {
|
if !rml.Get(uint64(rollupMemorySize)) {
|
||||||
rss.Cancel()
|
rss.Cancel()
|
||||||
|
@ -1059,8 +1074,8 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
|
||||||
Err: fmt.Errorf("not enough memory for processing %d data points across %d time series with %d points in each time series; "+
|
Err: fmt.Errorf("not enough memory for processing %d data points across %d time series with %d points in each time series; "+
|
||||||
"total available memory for concurrent requests: %d bytes; "+
|
"total available memory for concurrent requests: %d bytes; "+
|
||||||
"requested memory: %d bytes; "+
|
"requested memory: %d bytes; "+
|
||||||
"possible solutions are: reducing the number of matching time series; switching to node with more RAM; "+
|
"possible solutions are: reducing the number of matching time series; increasing `step` query arg (step=%gs); "+
|
||||||
"increasing -memory.allowedPercent; increasing `step` query arg (%gs)",
|
"switching to node with more RAM; increasing -memory.allowedPercent",
|
||||||
rollupPoints, timeseriesLen*len(rcs), pointsPerTimeseries, rml.MaxSize, uint64(rollupMemorySize), float64(ec.Step)/1e3),
|
rollupPoints, timeseriesLen*len(rcs), pointsPerTimeseries, rml.MaxSize, uint64(rollupMemorySize), float64(ec.Step)/1e3),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1227,6 +1242,14 @@ func mulNoOverflow(a, b int64) int64 {
|
||||||
return a * b
|
return a * b
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func sumNoOverflow(a, b int64) int64 {
|
||||||
|
if math.MaxInt64-a < b {
|
||||||
|
// Overflow
|
||||||
|
return math.MaxInt64
|
||||||
|
}
|
||||||
|
return a + b
|
||||||
|
}
|
||||||
|
|
||||||
func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]float64, []int64) {
|
func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]float64, []int64) {
|
||||||
if *noStaleMarkers || funcName == "default_rollup" || funcName == "stale_samples_over_time" {
|
if *noStaleMarkers || funcName == "default_rollup" || funcName == "stale_samples_over_time" {
|
||||||
// Do not drop Prometheus staleness marks (aka stale NaNs) for default_rollup() function,
|
// Do not drop Prometheus staleness marks (aka stale NaNs) for default_rollup() function,
|
||||||
|
|
|
@ -3789,6 +3789,27 @@ func TestExecSuccess(t *testing.T) {
|
||||||
resultExpected := []netstorage.Result{r}
|
resultExpected := []netstorage.Result{r}
|
||||||
f(q, resultExpected)
|
f(q, resultExpected)
|
||||||
})
|
})
|
||||||
|
t.Run(`histogram_quantile(duplicate-le)`, func(t *testing.T) {
|
||||||
|
// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3225
|
||||||
|
t.Parallel()
|
||||||
|
q := `round(sort(histogram_quantile(0.6,
|
||||||
|
label_set(90, "foo", "bar", "le", "5")
|
||||||
|
or label_set(100, "foo", "bar", "le", "5.0")
|
||||||
|
or label_set(200, "foo", "bar", "le", "6.0")
|
||||||
|
or label_set(300, "foo", "bar", "le", "+Inf")
|
||||||
|
)), 0.1)`
|
||||||
|
r1 := netstorage.Result{
|
||||||
|
MetricName: metricNameExpected,
|
||||||
|
Values: []float64{4.7, 4.7, 4.7, 4.7, 4.7, 4.7},
|
||||||
|
Timestamps: timestampsExpected,
|
||||||
|
}
|
||||||
|
r1.MetricName.Tags = []storage.Tag{{
|
||||||
|
Key: []byte("foo"),
|
||||||
|
Value: []byte("bar"),
|
||||||
|
}}
|
||||||
|
resultExpected := []netstorage.Result{r1}
|
||||||
|
f(q, resultExpected)
|
||||||
|
})
|
||||||
t.Run(`histogram_quantile(valid)`, func(t *testing.T) {
|
t.Run(`histogram_quantile(valid)`, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
q := `sort(histogram_quantile(0.6,
|
q := `sort(histogram_quantile(0.6,
|
||||||
|
|
|
@ -665,6 +665,7 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
sort.Slice(xss, func(i, j int) bool {
|
sort.Slice(xss, func(i, j int) bool {
|
||||||
return xss[i].le < xss[j].le
|
return xss[i].le < xss[j].le
|
||||||
})
|
})
|
||||||
|
xss = mergeSameLE(xss)
|
||||||
dst := xss[0].ts
|
dst := xss[0].ts
|
||||||
var tsLower, tsUpper *timeseries
|
var tsLower, tsUpper *timeseries
|
||||||
if len(boundsLabel) > 0 {
|
if len(boundsLabel) > 0 {
|
||||||
|
@ -945,6 +946,7 @@ func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
sort.Slice(xss, func(i, j int) bool {
|
sort.Slice(xss, func(i, j int) bool {
|
||||||
return xss[i].le < xss[j].le
|
return xss[i].le < xss[j].le
|
||||||
})
|
})
|
||||||
|
xss = mergeSameLE(xss)
|
||||||
dst := xss[0].ts
|
dst := xss[0].ts
|
||||||
var tsLower, tsUpper *timeseries
|
var tsLower, tsUpper *timeseries
|
||||||
if len(boundsLabel) > 0 {
|
if len(boundsLabel) > 0 {
|
||||||
|
@ -1012,6 +1014,7 @@ func fixBrokenBuckets(i int, xss []leTimeseries) {
|
||||||
if len(xss) < 2 {
|
if len(xss) < 2 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// Fill NaN in upper buckets with the first non-NaN value found in lower buckets.
|
||||||
for j := len(xss) - 1; j >= 0; j-- {
|
for j := len(xss) - 1; j >= 0; j-- {
|
||||||
v := xss[j].ts.Values[i]
|
v := xss[j].ts.Values[i]
|
||||||
if !math.IsNaN(v) {
|
if !math.IsNaN(v) {
|
||||||
|
@ -1023,6 +1026,8 @@ func fixBrokenBuckets(i int, xss []leTimeseries) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Substitute lower bucket values with upper values if the lower values are NaN
|
||||||
|
// or are bigger than the upper bucket values.
|
||||||
vNext := xss[len(xss)-1].ts.Values[i]
|
vNext := xss[len(xss)-1].ts.Values[i]
|
||||||
for j := len(xss) - 2; j >= 0; j-- {
|
for j := len(xss) - 2; j >= 0; j-- {
|
||||||
v := xss[j].ts.Values[i]
|
v := xss[j].ts.Values[i]
|
||||||
|
@ -1034,6 +1039,26 @@ func fixBrokenBuckets(i int, xss []leTimeseries) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func mergeSameLE(xss []leTimeseries) []leTimeseries {
|
||||||
|
// Merge buckets with identical le values.
|
||||||
|
// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3225
|
||||||
|
xsDst := xss[0]
|
||||||
|
dst := xss[:1]
|
||||||
|
for j := 1; j < len(xss); j++ {
|
||||||
|
xs := xss[j]
|
||||||
|
if xs.le != xsDst.le {
|
||||||
|
dst = append(dst, xs)
|
||||||
|
xsDst = xs
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dstValues := xsDst.ts.Values
|
||||||
|
for k, v := range xs.ts.Values {
|
||||||
|
dstValues[k] += v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
func transformHour(t time.Time) int {
|
func transformHour(t time.Time) int {
|
||||||
return t.Hour()
|
return t.Hour()
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,12 +1,12 @@
|
||||||
{
|
{
|
||||||
"files": {
|
"files": {
|
||||||
"main.css": "./static/css/main.ba692000.css",
|
"main.css": "./static/css/main.ba692000.css",
|
||||||
"main.js": "./static/js/main.623b88d4.js",
|
"main.js": "./static/js/main.c0e3dc67.js",
|
||||||
"static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js",
|
"static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js",
|
||||||
"index.html": "./index.html"
|
"index.html": "./index.html"
|
||||||
},
|
},
|
||||||
"entrypoints": [
|
"entrypoints": [
|
||||||
"static/css/main.ba692000.css",
|
"static/css/main.ba692000.css",
|
||||||
"static/js/main.623b88d4.js"
|
"static/js/main.c0e3dc67.js"
|
||||||
]
|
]
|
||||||
}
|
}
|
|
@ -1 +1 @@
|
||||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script src="./dashboards/index.js" type="module"></script><script defer="defer" src="./static/js/main.623b88d4.js"></script><link href="./static/css/main.ba692000.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script src="./dashboards/index.js" type="module"></script><script defer="defer" src="./static/js/main.c0e3dc67.js"></script><link href="./static/css/main.ba692000.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
File diff suppressed because one or more lines are too long
2
app/vmselect/vmui/static/js/main.c0e3dc67.js
Normal file
2
app/vmselect/vmui/static/js/main.c0e3dc67.js
Normal file
File diff suppressed because one or more lines are too long
|
@ -1,4 +1,4 @@
|
||||||
FROM golang:1.19.1 as build-web-stage
|
FROM golang:1.19.2 as build-web-stage
|
||||||
COPY build /build
|
COPY build /build
|
||||||
|
|
||||||
WORKDIR /build
|
WORKDIR /build
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
import React, {FC, useState} from "preact/compat";
|
import React, {FC, useState, useEffect} from "preact/compat";
|
||||||
import Box from "@mui/material/Box";
|
import Box from "@mui/material/Box";
|
||||||
import IconButton from "@mui/material/IconButton";
|
import IconButton from "@mui/material/IconButton";
|
||||||
import Tooltip from "@mui/material/Tooltip";
|
import Tooltip from "@mui/material/Tooltip";
|
||||||
|
@ -11,18 +11,20 @@ import AdditionalSettings from "./AdditionalSettings";
|
||||||
import {ErrorTypes} from "../../../../types";
|
import {ErrorTypes} from "../../../../types";
|
||||||
import Button from "@mui/material/Button";
|
import Button from "@mui/material/Button";
|
||||||
import Typography from "@mui/material/Typography";
|
import Typography from "@mui/material/Typography";
|
||||||
|
import usePrevious from "../../../../hooks/usePrevious";
|
||||||
|
import {MAX_QUERY_FIELDS} from "../../../../config";
|
||||||
|
|
||||||
export interface QueryConfiguratorProps {
|
export interface QueryConfiguratorProps {
|
||||||
error?: ErrorTypes | string;
|
error?: ErrorTypes | string;
|
||||||
queryOptions: string[]
|
queryOptions: string[]
|
||||||
}
|
}
|
||||||
|
|
||||||
export const MAX_QUERY_FIELDS = 4;
|
|
||||||
|
|
||||||
const QueryConfigurator: FC<QueryConfiguratorProps> = ({error, queryOptions}) => {
|
const QueryConfigurator: FC<QueryConfiguratorProps> = ({error, queryOptions}) => {
|
||||||
|
|
||||||
const {query, queryHistory, queryControls: {autocomplete}} = useAppState();
|
const {query, queryHistory, queryControls: {autocomplete}} = useAppState();
|
||||||
const [stateQuery, setStateQuery] = useState(query || []);
|
const [stateQuery, setStateQuery] = useState(query || []);
|
||||||
|
const prevStateQuery = usePrevious(stateQuery) as (undefined | string[]);
|
||||||
const dispatch = useAppDispatch();
|
const dispatch = useAppDispatch();
|
||||||
|
|
||||||
const updateHistory = () => {
|
const updateHistory = () => {
|
||||||
|
@ -66,6 +68,13 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({error, queryOptions}) =>
|
||||||
payload: {value: {values, index: newIndexHistory}, queryNumber: indexQuery}
|
payload: {value: {values, index: newIndexHistory}, queryNumber: indexQuery}
|
||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (prevStateQuery && (stateQuery.length < prevStateQuery.filter(q => q).length)) {
|
||||||
|
onRunQuery();
|
||||||
|
}
|
||||||
|
}, [stateQuery]);
|
||||||
|
|
||||||
return <Box boxShadow="rgba(99, 99, 99, 0.2) 0px 2px 8px 0px;" p={4} pb={2} m={-4} mb={2}>
|
return <Box boxShadow="rgba(99, 99, 99, 0.2) 0px 2px 8px 0px;" p={4} pb={2} m={-4} mb={2}>
|
||||||
<Box>
|
<Box>
|
||||||
{stateQuery.map((q, i) =>
|
{stateQuery.map((q, i) =>
|
||||||
|
|
|
@ -40,7 +40,7 @@ const CustomPanel: FC = () => {
|
||||||
};
|
};
|
||||||
|
|
||||||
const {queryOptions} = useFetchQueryOptions();
|
const {queryOptions} = useFetchQueryOptions();
|
||||||
const {isLoading, liveData, graphData, error, traces} = useFetchQuery({
|
const {isLoading, liveData, graphData, error, warning, traces} = useFetchQuery({
|
||||||
visible: true,
|
visible: true,
|
||||||
customStep
|
customStep
|
||||||
});
|
});
|
||||||
|
@ -83,6 +83,7 @@ const CustomPanel: FC = () => {
|
||||||
</Box>
|
</Box>
|
||||||
</Box>
|
</Box>
|
||||||
{error && <Alert color="error" severity="error" sx={{whiteSpace: "pre-wrap", mt: 2}}>{error}</Alert>}
|
{error && <Alert color="error" severity="error" sx={{whiteSpace: "pre-wrap", mt: 2}}>{error}</Alert>}
|
||||||
|
{warning && <Alert color="warning" severity="warning" sx={{whiteSpace: "pre-wrap", my: 2}}>{warning}</Alert>}
|
||||||
{graphData && period && (displayType === "chart") && <>
|
{graphData && period && (displayType === "chart") && <>
|
||||||
{isTracingEnabled && <TracingsView
|
{isTracingEnabled && <TracingsView
|
||||||
traces={tracesState}
|
traces={tracesState}
|
||||||
|
|
|
@ -46,7 +46,7 @@ const PredefinedPanels: FC<PredefinedPanelsProps> = ({
|
||||||
|
|
||||||
const validExpr = useMemo(() => Array.isArray(expr) && expr.every(q => q), [expr]);
|
const validExpr = useMemo(() => Array.isArray(expr) && expr.every(q => q), [expr]);
|
||||||
|
|
||||||
const {isLoading, graphData, error} = useFetchQuery({
|
const {isLoading, graphData, error, warning} = useFetchQuery({
|
||||||
predefinedQuery: validExpr ? expr : [],
|
predefinedQuery: validExpr ? expr : [],
|
||||||
display: "chart",
|
display: "chart",
|
||||||
visible,
|
visible,
|
||||||
|
@ -115,6 +115,7 @@ const PredefinedPanels: FC<PredefinedPanelsProps> = ({
|
||||||
<Box px={2} pb={2}>
|
<Box px={2} pb={2}>
|
||||||
{isLoading && <Spinner isLoading={true} height={"500px"}/>}
|
{isLoading && <Spinner isLoading={true} height={"500px"}/>}
|
||||||
{error && <Alert color="error" severity="error" sx={{whiteSpace: "pre-wrap", mt: 2}}>{error}</Alert>}
|
{error && <Alert color="error" severity="error" sx={{whiteSpace: "pre-wrap", mt: 2}}>{error}</Alert>}
|
||||||
|
{warning && <Alert color="warning" severity="warning" sx={{whiteSpace: "pre-wrap", my: 2}}>{warning}</Alert>}
|
||||||
{graphData && <GraphView
|
{graphData && <GraphView
|
||||||
data={graphData}
|
data={graphData}
|
||||||
period={period}
|
period={period}
|
||||||
|
|
6
app/vmui/packages/vmui/src/config.tsx
Normal file
6
app/vmui/packages/vmui/src/config.tsx
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
export const MAX_QUERY_FIELDS = 4;
|
||||||
|
export const MAX_SERIES = {
|
||||||
|
table: 100,
|
||||||
|
chart: 20,
|
||||||
|
code: Infinity,
|
||||||
|
};
|
|
@ -11,6 +11,7 @@ import {CustomStep} from "../state/graph/reducer";
|
||||||
import usePrevious from "./usePrevious";
|
import usePrevious from "./usePrevious";
|
||||||
import {arrayEquals} from "../utils/array";
|
import {arrayEquals} from "../utils/array";
|
||||||
import Trace from "../components/CustomPanel/Trace/Trace";
|
import Trace from "../components/CustomPanel/Trace/Trace";
|
||||||
|
import {MAX_SERIES} from "../config";
|
||||||
|
|
||||||
interface FetchQueryParams {
|
interface FetchQueryParams {
|
||||||
predefinedQuery?: string[]
|
predefinedQuery?: string[]
|
||||||
|
@ -28,6 +29,7 @@ export const useFetchQuery = ({predefinedQuery, visible, display, customStep}: F
|
||||||
graphData?: MetricResult[],
|
graphData?: MetricResult[],
|
||||||
liveData?: InstantMetricResult[],
|
liveData?: InstantMetricResult[],
|
||||||
error?: ErrorTypes | string,
|
error?: ErrorTypes | string,
|
||||||
|
warning?: string,
|
||||||
traces?: Trace[],
|
traces?: Trace[],
|
||||||
} => {
|
} => {
|
||||||
const {query, displayType, serverUrl, time: {period}, queryControls: {nocache, isTracingEnabled}} = useAppState();
|
const {query, displayType, serverUrl, time: {period}, queryControls: {nocache, isTracingEnabled}} = useAppState();
|
||||||
|
@ -37,6 +39,7 @@ export const useFetchQuery = ({predefinedQuery, visible, display, customStep}: F
|
||||||
const [liveData, setLiveData] = useState<InstantMetricResult[]>();
|
const [liveData, setLiveData] = useState<InstantMetricResult[]>();
|
||||||
const [traces, setTraces] = useState<Trace[]>();
|
const [traces, setTraces] = useState<Trace[]>();
|
||||||
const [error, setError] = useState<ErrorTypes | string>();
|
const [error, setError] = useState<ErrorTypes | string>();
|
||||||
|
const [warning, setWarning] = useState<string>();
|
||||||
const [fetchQueue, setFetchQueue] = useState<AbortController[]>([]);
|
const [fetchQueue, setFetchQueue] = useState<AbortController[]>([]);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
|
@ -56,6 +59,7 @@ export const useFetchQuery = ({predefinedQuery, visible, display, customStep}: F
|
||||||
const tempData: MetricBase[] = [];
|
const tempData: MetricBase[] = [];
|
||||||
const tempTraces: Trace[] = [];
|
const tempTraces: Trace[] = [];
|
||||||
let counter = 1;
|
let counter = 1;
|
||||||
|
|
||||||
for await (const response of responses) {
|
for await (const response of responses) {
|
||||||
const resp = await response.json();
|
const resp = await response.json();
|
||||||
if (response.ok) {
|
if (response.ok) {
|
||||||
|
@ -73,7 +77,14 @@ export const useFetchQuery = ({predefinedQuery, visible, display, customStep}: F
|
||||||
setError(`${resp.errorType}\r\n${resp?.error}`);
|
setError(`${resp.errorType}\r\n${resp?.error}`);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
isDisplayChart ? setGraphData(tempData as MetricResult[]) : setLiveData(tempData as InstantMetricResult[]);
|
|
||||||
|
const length = tempData.length;
|
||||||
|
const seriesLimit = MAX_SERIES[displayType];
|
||||||
|
const result = tempData.slice(0, seriesLimit);
|
||||||
|
const limitText = `Showing ${seriesLimit} series out of ${length} series due to performance reasons. Please narrow down the query, so it returns less series`;
|
||||||
|
setWarning(length > seriesLimit ? limitText : "");
|
||||||
|
|
||||||
|
isDisplayChart ? setGraphData(result as MetricResult[]) : setLiveData(result as InstantMetricResult[]);
|
||||||
setTraces(tempTraces);
|
setTraces(tempTraces);
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
if (e instanceof Error && e.name !== "AbortError") {
|
if (e instanceof Error && e.name !== "AbortError") {
|
||||||
|
@ -107,9 +118,12 @@ export const useFetchQuery = ({predefinedQuery, visible, display, customStep}: F
|
||||||
[serverUrl, period, displayType, customStep]);
|
[serverUrl, period, displayType, customStep]);
|
||||||
|
|
||||||
const prevFetchUrl = usePrevious(fetchUrl);
|
const prevFetchUrl = usePrevious(fetchUrl);
|
||||||
|
const prevDisplayType = usePrevious(displayType);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (!visible || (fetchUrl && prevFetchUrl && arrayEquals(fetchUrl, prevFetchUrl)) || !fetchUrl?.length) return;
|
const equalFetchUrl = fetchUrl && prevFetchUrl && arrayEquals(fetchUrl, prevFetchUrl);
|
||||||
|
const changedDisplayType = displayType !== prevDisplayType;
|
||||||
|
if (!visible || (equalFetchUrl && !changedDisplayType) || !fetchUrl?.length) return;
|
||||||
setIsLoading(true);
|
setIsLoading(true);
|
||||||
const expr = predefinedQuery ?? query;
|
const expr = predefinedQuery ?? query;
|
||||||
throttledFetchData(fetchUrl, fetchQueue, (display || displayType), expr);
|
throttledFetchData(fetchUrl, fetchQueue, (display || displayType), expr);
|
||||||
|
@ -122,5 +136,5 @@ export const useFetchQuery = ({predefinedQuery, visible, display, customStep}: F
|
||||||
setFetchQueue(fetchQueue.filter(f => !f.signal.aborted));
|
setFetchQueue(fetchQueue.filter(f => !f.signal.aborted));
|
||||||
}, [fetchQueue]);
|
}, [fetchQueue]);
|
||||||
|
|
||||||
return {fetchUrl, isLoading, graphData, liveData, error, traces};
|
return {fetchUrl, isLoading, graphData, liveData, error, warning, traces};
|
||||||
};
|
};
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
import qs from "qs";
|
import qs from "qs";
|
||||||
import get from "lodash.get";
|
import get from "lodash.get";
|
||||||
import router from "../router";
|
import router from "../router";
|
||||||
import {MAX_QUERY_FIELDS} from "../components/CustomPanel/Configurator/Query/QueryConfigurator";
|
import {MAX_QUERY_FIELDS} from "../config";
|
||||||
|
|
||||||
const graphStateToUrlParams = {
|
const graphStateToUrlParams = {
|
||||||
"time.duration": "range_input",
|
"time.duration": "range_input",
|
||||||
|
|
|
@ -15,11 +15,52 @@ The following tip changes can be tested by building VictoriaMetrics components f
|
||||||
|
|
||||||
## tip
|
## tip
|
||||||
|
|
||||||
|
* FEATURE: allow limiting memory usage on a per-query basis with `-search.maxMemoryPerQuery` command-line flag. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3203).
|
||||||
|
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): drop all the labels with `__` prefix from discovered targets in the same way as Prometheus does according to [this article](https://www.robustperception.io/life-of-a-label/). Previously the following labels were available during [metric-level relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs): `__address__`, `__scheme__`, `__metrics_path__`, `__scrape_interval__`, `__scrape_timeout__`, `__param_*`. Now these labels are available only during [target-level relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config). This should reduce CPU usage and memory usage for `vmagent` setups, which scrape big number of targets.
|
||||||
|
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): improve the performance for metric-level [relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling), which can be applied via `metric_relabel_configs` section at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs), via `-remoteWrite.relabelConfig` or via `-remoteWrite.urlRelabelConfig` command-line options.
|
||||||
|
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow specifying full url in scrape target addresses (aka `__address__` label). This makes valid the following `-promscrape.config`:
|
||||||
|
|
||||||
|
```yml
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: abc
|
||||||
|
metrics_path: /foo/bar
|
||||||
|
scheme: https
|
||||||
|
static_configs:
|
||||||
|
- targets:
|
||||||
|
# the following targets are scraped by the provided full urls
|
||||||
|
- 'http://host1/metric/path1'
|
||||||
|
- 'https://host2/metric/path2'
|
||||||
|
- 'http://host3:1234/metric/path3?arg1=value1'
|
||||||
|
# the following target is scraped by <scheme>://host4:1234<metrics_path>
|
||||||
|
- host4:1234
|
||||||
|
```
|
||||||
|
|
||||||
|
See [the corresponding issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3208).
|
||||||
|
|
||||||
|
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow controlling staleness tracking on a per-[scrape_config](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) basis by specifying `no_stale_markers: true` or `no_stale_markers: false` option in the corresponding [scrape_config](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
|
||||||
|
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): limit the number of plotted series. This should prevent from browser crashes or hangs when the query returns big number of time series. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3155).
|
||||||
|
|
||||||
|
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly merge buckets with identical `le` values, but with different string representation of these values when calculating [histogram_quantile](https://docs.victoriametrics.com/MetricsQL.html#histogram_quantile) and [histogram_share](https://docs.victoriametrics.com/MetricsQL.html#histogram_share). For example, `http_request_duration_seconds_bucket{le="5"}` and `http_requests_duration_seconds_bucket{le="5.0"}`. Such buckets may be returned from distinct targets. Thanks to @647-coder for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3225).
|
||||||
|
|
||||||
|
## [v1.82.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.82.1)
|
||||||
|
|
||||||
|
Released at 14-10-2022
|
||||||
|
|
||||||
|
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): automatically update graph, legend and url after the removal of query field. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3169) and [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3196#issuecomment-1269765205).
|
||||||
|
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): remove duplicate `alertname` JSON entry from generated alerts. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3053). Thanks to @Howie59 for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3182)!
|
||||||
|
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): fix integration with Grafana via `-vmalert.proxyURL`, which has been broken in [v1.82.0](https://docs.victoriametrics.com/CHANGELOG.html#v1820). See [this issue](https://github.com/VictoriaMetrics/helm-charts/issues/391).
|
||||||
|
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): set default region to `us-east-1` if `AWS_REGION` environment variable isn't set. The issue was introduced in [vmbackup v1.82.0](https://docs.victoriametrics.com/CHANGELOG.html#v1820). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3224).
|
||||||
|
* BUGFIX: [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html): fix deletion of old backups at [Azure blob storage](https://azure.microsoft.com/en-us/products/storage/blobs/).
|
||||||
|
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly apply regex filters when searching for time series. Previously unexpected time series could be returned from regex filter. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3227). The issue was introduced in [v1.82.0](https://docs.victoriametrics.com/CHANGELOG.html#v1820).
|
||||||
|
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly apply `if` section with regex filters. Previously unexpected metrics could be returned from `if` section. The issue was introduced in [v1.82.0](https://docs.victoriametrics.com/CHANGELOG.html#v1820).
|
||||||
|
|
||||||
|
|
||||||
## [v1.82.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.82.0)
|
## [v1.82.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.82.0)
|
||||||
|
|
||||||
Released at 07-10-2022
|
Released at 07-10-2022
|
||||||
|
|
||||||
|
**It isn't recommended to use VictoriaMetrics and vmagent v1.82.0 because of [the bug](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3227), which may result in incorrect query results and [relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling) results. Upgrade to [v1.82.1](https://docs.victoriametrics.com/CHANGELOG.html#v1821) instead.**
|
||||||
|
|
||||||
**Update note 1:** this release changes data format for [/api/v1/export/native](https://docs.victoriametrics.com/#how-to-export-data-in-native-format) in incompatible way, so it cannot be imported into older version of VictoriaMetrics via [/api/v1/import/native](https://docs.victoriametrics.com/#how-to-import-data-in-native-format).
|
**Update note 1:** this release changes data format for [/api/v1/export/native](https://docs.victoriametrics.com/#how-to-export-data-in-native-format) in incompatible way, so it cannot be imported into older version of VictoriaMetrics via [/api/v1/import/native](https://docs.victoriametrics.com/#how-to-import-data-in-native-format).
|
||||||
|
|
||||||
**Update note 2:** [vmalert](https://docs.victoriametrics.com/vmalert.html) changes default value for command-line flag `-datasource.queryStep` from `0s` to `5m`. The change is supposed to improve reliability of the rules evaluation when evaluation interval is lower than scraping interval.
|
**Update note 2:** [vmalert](https://docs.victoriametrics.com/vmalert.html) changes default value for command-line flag `-datasource.queryStep` from `0s` to `5m`. The change is supposed to improve reliability of the rules evaluation when evaluation interval is lower than scraping interval.
|
||||||
|
|
|
@ -469,10 +469,11 @@ See also [resource usage limits docs](#resource-usage-limits).
|
||||||
|
|
||||||
By default cluster components of VictoriaMetrics are tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
|
By default cluster components of VictoriaMetrics are tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
|
||||||
|
|
||||||
- `-memory.allowedPercent` and `-search.allowedBytes` limit the amounts of memory, which may be used for various internal caches at all the cluster components of VictoriaMetrics - `vminsert`, `vmselect` and `vmstorage`. Note that VictoriaMetrics components may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
|
- `-memory.allowedPercent` and `-memory.allowedBytes` limit the amounts of memory, which may be used for various internal caches at all the cluster components of VictoriaMetrics - `vminsert`, `vmselect` and `vmstorage`. Note that VictoriaMetrics components may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
|
||||||
|
- `-search.maxMemoryPerQuery` limits the amounts of memory, which can be used for processing a single query at `vmselect` node. Queries, which need more memory, are rejected. Heavy queries, which select big number of time series, may exceed the per-query memory limit by a small percent. The total memory limit for concurrently executed queries can be estimated as `-search.maxMemoryPerQuery` multiplied by `-search.maxConcurrentRequests`.
|
||||||
- `-search.maxUniqueTimeseries` at `vmselect` component limits the number of unique time series a single query can find and process. `vmselect` passes the limit to `vmstorage` component, which keeps in memory some metainformation about the time series located by each query and spends some CPU time for processing the found time series. This means that the maximum memory usage and CPU usage a single query can use at `vmstorage` is proportional to `-search.maxUniqueTimeseries`.
|
- `-search.maxUniqueTimeseries` at `vmselect` component limits the number of unique time series a single query can find and process. `vmselect` passes the limit to `vmstorage` component, which keeps in memory some metainformation about the time series located by each query and spends some CPU time for processing the found time series. This means that the maximum memory usage and CPU usage a single query can use at `vmstorage` is proportional to `-search.maxUniqueTimeseries`.
|
||||||
- `-search.maxQueryDuration` at `vmselect` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled. This allows saving CPU and RAM at `vmselect` and `vmstorage` when executing unexpected heavy queries.
|
- `-search.maxQueryDuration` at `vmselect` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled. This allows saving CPU and RAM at `vmselect` and `vmstorage` when executing unexpected heavy queries.
|
||||||
- `-search.maxConcurrentRequests` at `vmselect` limits the number of concurrent requests a single `vmselect` node can process. Bigger number of concurrent requests usually means bigger memory usage at both `vmselect` and `vmstorage`. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. `vmselect` provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries.
|
- `-search.maxConcurrentRequests` at `vmselect` limits the number of concurrent requests a single `vmselect` node can process. Bigger number of concurrent requests usually means bigger memory usage at both `vmselect` and `vmstorage`. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. `vmselect` provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries. See also `-search.maxMemoryPerQuery` command-line flag.
|
||||||
- `-search.maxSamplesPerSeries` at `vmselect` limits the number of raw samples the query can process per each time series. `vmselect` sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage at `vmselect` in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
|
- `-search.maxSamplesPerSeries` at `vmselect` limits the number of raw samples the query can process per each time series. `vmselect` sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage at `vmselect` in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
|
||||||
- `-search.maxSamplesPerQuery` at `vmselect` limits the number of raw samples a single query can process. This allows limiting CPU usage at `vmselect` for heavy queries.
|
- `-search.maxSamplesPerQuery` at `vmselect` limits the number of raw samples a single query can process. This allows limiting CPU usage at `vmselect` for heavy queries.
|
||||||
- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||||
|
@ -945,7 +946,7 @@ Below is the output for `/path/to/vmselect -help`:
|
||||||
-search.logSlowQueryDuration duration
|
-search.logSlowQueryDuration duration
|
||||||
Log queries with execution time exceeding this value. Zero disables slow query logging (default 5s)
|
Log queries with execution time exceeding this value. Zero disables slow query logging (default 5s)
|
||||||
-search.maxConcurrentRequests int
|
-search.maxConcurrentRequests int
|
||||||
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores. See also -search.maxQueueDuration (default 8)
|
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores, while many concurrently executed requests may require high amounts of memory. See also -search.maxQueueDuration and -search.maxMemoryPerQuery (default 8)
|
||||||
-search.maxExportDuration duration
|
-search.maxExportDuration duration
|
||||||
The maximum duration for /api/v1/export call (default 720h0m0s)
|
The maximum duration for /api/v1/export call (default 720h0m0s)
|
||||||
-search.maxExportSeries int
|
-search.maxExportSeries int
|
||||||
|
@ -956,6 +957,9 @@ Below is the output for `/path/to/vmselect -help`:
|
||||||
The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage . This flag is available only in enterprise version of VictoriaMetrics (default 300000)
|
The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage . This flag is available only in enterprise version of VictoriaMetrics (default 300000)
|
||||||
-search.maxLookback duration
|
-search.maxLookback duration
|
||||||
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
|
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
|
||||||
|
-search.maxMemoryPerQuery size
|
||||||
|
The maximum amounts of memory a single query may consume. Queries requiring more memory are rejected. The total memory limit for concurrently executed queries can be estimated as -search.maxMemoryPerQuery multiplied by -search.maxConcurrentRequests
|
||||||
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||||
-search.maxPointsPerTimeseries int
|
-search.maxPointsPerTimeseries int
|
||||||
The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
|
The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
|
||||||
-search.maxPointsSubqueryPerTimeseries int
|
-search.maxPointsSubqueryPerTimeseries int
|
||||||
|
|
|
@ -1022,14 +1022,18 @@ per each `job` over the last 5 minutes.
|
||||||
|
|
||||||
#### histogram_quantile
|
#### histogram_quantile
|
||||||
|
|
||||||
`histogram_quantile(phi, buckets)` is a [transform function](#transform-functions), which calculates `phi`-quantile over the given
|
`histogram_quantile(phi, buckets)` is a [transform function](#transform-functions), which calculates `phi`-[percentile](https://en.wikipedia.org/wiki/Percentile)
|
||||||
[histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350).
|
over the given [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350).
|
||||||
`phi` must be in the range `[0...1]`. For example, `histogram_quantile(0.5, sum(rate(http_request_duration_seconds_bucket[5m]) by (le))`
|
`phi` must be in the range `[0...1]`. For example, `histogram_quantile(0.5, sum(rate(http_request_duration_seconds_bucket[5m]) by (le))`
|
||||||
would return median request duration for all the requests during the last 5 minutes.
|
would return median request duration for all the requests during the last 5 minutes.
|
||||||
|
|
||||||
The function accepts optional third arg - `boundsLabel`. In this case it returns `lower` and `upper` bounds for the estimated percentile with the given `boundsLabel` label.
|
The function accepts optional third arg - `boundsLabel`. In this case it returns `lower` and `upper` bounds for the estimated percentile with the given `boundsLabel` label.
|
||||||
See [this issue for details](https://github.com/prometheus/prometheus/issues/5706).
|
See [this issue for details](https://github.com/prometheus/prometheus/issues/5706).
|
||||||
|
|
||||||
|
When the [percentile](https://en.wikipedia.org/wiki/Percentile) is calculated over multiple histograms,
|
||||||
|
then all the input histograms **must** have buckets with identical boundaries, e.g. they must have the same set of `le` or `vmrange` labels.
|
||||||
|
Otherwise the returned result may be invalid. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3231) for details.
|
||||||
|
|
||||||
This function is supported by PromQL (except of the `boundLabel` arg). See also [histogram_quantiles](#histogram_quantiles), [histogram_share](#histogram_share)
|
This function is supported by PromQL (except of the `boundLabel` arg). See also [histogram_quantiles](#histogram_quantiles), [histogram_share](#histogram_share)
|
||||||
and [quantile](#quantile).
|
and [quantile](#quantile).
|
||||||
|
|
||||||
|
|
|
@ -773,7 +773,7 @@ to your needs or when testing bugfixes.
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `victoria-metrics` binary and puts it into the `bin` folder.
|
It builds `victoria-metrics` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -789,7 +789,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
### Development ARM build
|
### Development ARM build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.
|
It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -803,7 +803,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
`Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
|
`Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
|
It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -1264,10 +1264,11 @@ See also [resource usage limits docs](#resource-usage-limits).
|
||||||
|
|
||||||
By default VictoriaMetrics is tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
|
By default VictoriaMetrics is tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
|
||||||
|
|
||||||
- `-memory.allowedPercent` and `-search.allowedBytes` limit the amounts of memory, which may be used for various internal caches at VictoriaMetrics. Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
|
- `-memory.allowedPercent` and `-memory.allowedBytes` limit the amounts of memory, which may be used for various internal caches at VictoriaMetrics. Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
|
||||||
|
- `-search.maxMemoryPerQuery` limits the amounts of memory, which can be used for processing a single query. Queries, which need more memory, are rejected. Heavy queries, which select big number of time series, may exceed the per-query memory limit by a small percent. The total memory limit for concurrently executed queries can be estimated as `-search.maxMemoryPerQuery` multiplied by `-search.maxConcurrentRequests`.
|
||||||
- `-search.maxUniqueTimeseries` limits the number of unique time series a single query can find and process. VictoriaMetrics keeps in memory some metainformation about the time series located by each query and spends some CPU time for processing the found time series. This means that the maximum memory usage and CPU usage a single query can use is proportional to `-search.maxUniqueTimeseries`.
|
- `-search.maxUniqueTimeseries` limits the number of unique time series a single query can find and process. VictoriaMetrics keeps in memory some metainformation about the time series located by each query and spends some CPU time for processing the found time series. This means that the maximum memory usage and CPU usage a single query can use is proportional to `-search.maxUniqueTimeseries`.
|
||||||
- `-search.maxQueryDuration` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled. This allows saving CPU and RAM when executing unexpected heavy queries.
|
- `-search.maxQueryDuration` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled. This allows saving CPU and RAM when executing unexpected heavy queries.
|
||||||
- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries.
|
- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries. See also `-search.maxMemoryPerQuery` command-line flag.
|
||||||
- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
|
- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
|
||||||
- `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
|
- `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
|
||||||
- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||||
|
@ -2226,7 +2227,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-search.logSlowQueryDuration duration
|
-search.logSlowQueryDuration duration
|
||||||
Log queries with execution time exceeding this value. Zero disables slow query logging (default 5s)
|
Log queries with execution time exceeding this value. Zero disables slow query logging (default 5s)
|
||||||
-search.maxConcurrentRequests int
|
-search.maxConcurrentRequests int
|
||||||
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores. See also -search.maxQueueDuration (default 8)
|
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores, while many concurrently executed requests may require high amounts of memory. See also -search.maxQueueDuration and -search.maxMemoryPerQuery (default 8)
|
||||||
-search.maxExportDuration duration
|
-search.maxExportDuration duration
|
||||||
The maximum duration for /api/v1/export call (default 720h0m0s)
|
The maximum duration for /api/v1/export call (default 720h0m0s)
|
||||||
-search.maxExportSeries int
|
-search.maxExportSeries int
|
||||||
|
@ -2237,6 +2238,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage . This flag is available only in enterprise version of VictoriaMetrics (default 300000)
|
The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage . This flag is available only in enterprise version of VictoriaMetrics (default 300000)
|
||||||
-search.maxLookback duration
|
-search.maxLookback duration
|
||||||
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
|
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
|
||||||
|
-search.maxMemoryPerQuery size
|
||||||
|
The maximum amounts of memory a single query may consume. Queries requiring more memory are rejected. The total memory limit for concurrently executed queries can be estimated as -search.maxMemoryPerQuery multiplied by -search.maxConcurrentRequests
|
||||||
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||||
-search.maxPointsPerTimeseries int
|
-search.maxPointsPerTimeseries int
|
||||||
The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
|
The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
|
||||||
-search.maxPointsSubqueryPerTimeseries int
|
-search.maxPointsSubqueryPerTimeseries int
|
||||||
|
|
|
@ -776,7 +776,7 @@ to your needs or when testing bugfixes.
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `victoria-metrics` binary and puts it into the `bin` folder.
|
It builds `victoria-metrics` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -792,7 +792,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
### Development ARM build
|
### Development ARM build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.
|
It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -806,7 +806,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
`Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
|
`Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
|
It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -1267,10 +1267,11 @@ See also [resource usage limits docs](#resource-usage-limits).
|
||||||
|
|
||||||
By default VictoriaMetrics is tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
|
By default VictoriaMetrics is tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:
|
||||||
|
|
||||||
- `-memory.allowedPercent` and `-search.allowedBytes` limit the amounts of memory, which may be used for various internal caches at VictoriaMetrics. Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
|
- `-memory.allowedPercent` and `-memory.allowedBytes` limit the amounts of memory, which may be used for various internal caches at VictoriaMetrics. Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
|
||||||
|
- `-search.maxMemoryPerQuery` limits the amounts of memory, which can be used for processing a single query. Queries, which need more memory, are rejected. Heavy queries, which select big number of time series, may exceed the per-query memory limit by a small percent. The total memory limit for concurrently executed queries can be estimated as `-search.maxMemoryPerQuery` multiplied by `-search.maxConcurrentRequests`.
|
||||||
- `-search.maxUniqueTimeseries` limits the number of unique time series a single query can find and process. VictoriaMetrics keeps in memory some metainformation about the time series located by each query and spends some CPU time for processing the found time series. This means that the maximum memory usage and CPU usage a single query can use is proportional to `-search.maxUniqueTimeseries`.
|
- `-search.maxUniqueTimeseries` limits the number of unique time series a single query can find and process. VictoriaMetrics keeps in memory some metainformation about the time series located by each query and spends some CPU time for processing the found time series. This means that the maximum memory usage and CPU usage a single query can use is proportional to `-search.maxUniqueTimeseries`.
|
||||||
- `-search.maxQueryDuration` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled. This allows saving CPU and RAM when executing unexpected heavy queries.
|
- `-search.maxQueryDuration` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled. This allows saving CPU and RAM when executing unexpected heavy queries.
|
||||||
- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries.
|
- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries. See also `-search.maxMemoryPerQuery` command-line flag.
|
||||||
- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
|
- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
|
||||||
- `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
|
- `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
|
||||||
- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||||
|
@ -2229,7 +2230,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
-search.logSlowQueryDuration duration
|
-search.logSlowQueryDuration duration
|
||||||
Log queries with execution time exceeding this value. Zero disables slow query logging (default 5s)
|
Log queries with execution time exceeding this value. Zero disables slow query logging (default 5s)
|
||||||
-search.maxConcurrentRequests int
|
-search.maxConcurrentRequests int
|
||||||
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores. See also -search.maxQueueDuration (default 8)
|
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores, while many concurrently executed requests may require high amounts of memory. See also -search.maxQueueDuration and -search.maxMemoryPerQuery (default 8)
|
||||||
-search.maxExportDuration duration
|
-search.maxExportDuration duration
|
||||||
The maximum duration for /api/v1/export call (default 720h0m0s)
|
The maximum duration for /api/v1/export call (default 720h0m0s)
|
||||||
-search.maxExportSeries int
|
-search.maxExportSeries int
|
||||||
|
@ -2240,6 +2241,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||||
The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage . This flag is available only in enterprise version of VictoriaMetrics (default 300000)
|
The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage . This flag is available only in enterprise version of VictoriaMetrics (default 300000)
|
||||||
-search.maxLookback duration
|
-search.maxLookback duration
|
||||||
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
|
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
|
||||||
|
-search.maxMemoryPerQuery size
|
||||||
|
The maximum amounts of memory a single query may consume. Queries requiring more memory are rejected. The total memory limit for concurrently executed queries can be estimated as -search.maxMemoryPerQuery multiplied by -search.maxConcurrentRequests
|
||||||
|
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
|
||||||
-search.maxPointsPerTimeseries int
|
-search.maxPointsPerTimeseries int
|
||||||
The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
|
The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as VMUI or Grafana. There is no sense in setting this limit to values bigger than the horizontal resolution of the graph (default 30000)
|
||||||
-search.maxPointsSubqueryPerTimeseries int
|
-search.maxPointsSubqueryPerTimeseries int
|
||||||
|
|
|
@ -16,7 +16,7 @@ A multi-retention setup can be implemented by dividing a [victoriametrics cluste
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
Setup should handle 3 different retention groups 3months, 1year and 3 years.
|
Setup should handle 3 different retention groups 3months, 1year and 3 years.
|
||||||
Solution contains 3 groups of vmstorages + vminserts and one group of vmselects. Routing is done by [vmagent](https://docs.victoriametrics.com/vmagent.html) and [relabeling configuration](https://docs.victoriametrics.com/vmagent.html#relabeling)
|
Solution contains 3 groups of vmstorages + vminserts and one group of vmselects. Routing is done by [vmagent](https://docs.victoriametrics.com/vmagent.html) and [relabeling configuration](https://docs.victoriametrics.com/vmagent.html#relabeling). The [-retentionPeriod](https://docs.victoriametrics.com/#retention) sets how long to keep the metrics.
|
||||||
|
|
||||||
The diagram below shows a proposed solution
|
The diagram below shows a proposed solution
|
||||||
|
|
||||||
|
@ -25,11 +25,11 @@ The diagram below shows a proposed solution
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
**Implementation Details**
|
**Implementation Details**
|
||||||
1. Groups of vminserts A know about only vmstorages A and this is explicitly specified in [-storageNode configuration](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-setup).
|
1. Groups of vminserts A know about only vmstorages A and this is explicitly specified via `-storageNode` [configuration](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-setup).
|
||||||
2. Groups of vminserts B know about only vmstorages B and this is explicitly specified in `-storageNode` configuration.
|
2. Groups of vminserts B know about only vmstorages B and this is explicitly specified via `-storageNode` [configuration](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-setup).
|
||||||
3. Groups of vminserts C know about only vmstorages A and this is explicitly specified in `-storageNode` configuration.
|
3. Groups of vminserts C know about only vmstorages A and this is explicitly specified via `-storageNode` [configuration](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-setup).
|
||||||
4. Vmselect reads data from all vmstorage nodes.
|
4. Vmselect reads data from all vmstorage nodes via `-storageNode` [configuration](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-setup).
|
||||||
5. Vmagent routes incoming metrics to the given set of `vminsert` nodes using relabeling rules specified at `-remoteWrite.urlRelabelConfig`. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling).
|
5. Vmagent routes incoming metrics to the given set of `vminsert` nodes using relabeling rules specified at `-remoteWrite.urlRelabelConfig` [configuration](https://docs.victoriametrics.com/vmagent.html#relabeling).
|
||||||
|
|
||||||
**Multi-Tenant Setup**
|
**Multi-Tenant Setup**
|
||||||
|
|
||||||
|
|
|
@ -249,7 +249,7 @@ See also [useful tips for target relabeling](#useful-tips-for-target-relabeling)
|
||||||
Single-node VictoriaMetrics and [vmagent](https://docs.victoriametrics.com/vmagent.html) automatically add `instance` and `job` labels per each discovered target:
|
Single-node VictoriaMetrics and [vmagent](https://docs.victoriametrics.com/vmagent.html) automatically add `instance` and `job` labels per each discovered target:
|
||||||
|
|
||||||
* The `job` label is set to `job_name` value specified in the corresponding [scrape_config](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
|
* The `job` label is set to `job_name` value specified in the corresponding [scrape_config](https://docs.victoriametrics.com/sd_configs.html#scrape_configs).
|
||||||
* The `instance` label is set to the final `__address__` label value after target-level relabeling.
|
* The `instance` label is set to the `host:port` part of `__address__` label value after target-level relabeling.
|
||||||
The `__address__` label value is automatically set to the most suitable value depending
|
The `__address__` label value is automatically set to the most suitable value depending
|
||||||
on the used [service discovery type](https://docs.victoriametrics.com/sd_configs.html#supported-service-discovery-configs).
|
on the used [service discovery type](https://docs.victoriametrics.com/sd_configs.html#supported-service-discovery-configs).
|
||||||
The `__address__` label can be overriden during relabeling - see [these docs](#how-to-modify-scrape-urls-in-targets).
|
The `__address__` label can be overridden during relabeling - see [these docs](#how-to-modify-scrape-urls-in-targets).
|
||||||
|
@ -284,8 +284,10 @@ URLs for scrape targets are composed of the following parts:
|
||||||
just update the `__address__` label during relabeling to the needed value.
|
just update the `__address__` label during relabeling to the needed value.
|
||||||
The port part is optional. If it is missing, then it is automatically set either to `80` or `443` depending
|
The port part is optional. If it is missing, then it is automatically set either to `80` or `443` depending
|
||||||
on the used scheme (`http` or `https`).
|
on the used scheme (`http` or `https`).
|
||||||
The final `__address__` label is automatically converted into `instance` label per each target unless the `instance`
|
The `host:port` part from the final `__address__` label is automatically set to `instance` label unless the `instance`
|
||||||
label is explicitly set during relabeling.
|
label is explicitly set during relabeling.
|
||||||
|
The `__address__` label can contain the full scrape url, e.g. `http://host:port/metrics/path?query_args`.
|
||||||
|
In this case the `__scheme__` and `__metrics_path__` labels are ignored.
|
||||||
* URL path (e.g. `/metrics`). This information is available during target relabeling in a special label - `__metrics_path__`.
|
* URL path (e.g. `/metrics`). This information is available during target relabeling in a special label - `__metrics_path__`.
|
||||||
By default the `__metrics_path__` is set to `/metrics`. It can be overriden either by specifying the `metrics_path`
|
By default the `__metrics_path__` is set to `/metrics`. It can be overridden either by specifying the `metrics_path`
|
||||||
option at [scrape_config](https://docs.victoriametrics.com/sd_configs.html#scrape_configs)
|
option at [scrape_config](https://docs.victoriametrics.com/sd_configs.html#scrape_configs)
|
||||||
|
|
|
@ -987,6 +987,8 @@ scrape_configs:
|
||||||
#
|
#
|
||||||
# Alternatively the scheme and path can be changed via `relabel_configs` section at `scrape_config` level.
|
# Alternatively the scheme and path can be changed via `relabel_configs` section at `scrape_config` level.
|
||||||
# See https://docs.victoriametrics.com/vmagent.html#relabeling .
|
# See https://docs.victoriametrics.com/vmagent.html#relabeling .
|
||||||
|
#
|
||||||
|
# It is also possible specifying full target urls here, e.g. "http://host:port/metrics/path?query_args"
|
||||||
- targets:
|
- targets:
|
||||||
- "vmsingle1:8428"
|
- "vmsingle1:8428"
|
||||||
- "vmsingleN:8428"
|
- "vmsingleN:8428"
|
||||||
|
@ -1087,6 +1089,9 @@ scrape_configs:
|
||||||
# Example values:
|
# Example values:
|
||||||
# - "30s" - 30 seconds
|
# - "30s" - 30 seconds
|
||||||
# - "2m" - 2 minutes
|
# - "2m" - 2 minutes
|
||||||
|
# The scrape_interval can be set on a per-target basis by specifying `__scrape_interval__`
|
||||||
|
# label during target relabeling phase.
|
||||||
|
# See https://docs.victoriametrics.com/vmagent.html#relabeling
|
||||||
# scrape_interval: <duration>
|
# scrape_interval: <duration>
|
||||||
|
|
||||||
# scrape_timeout is an optional timeout when scraping the targets.
|
# scrape_timeout is an optional timeout when scraping the targets.
|
||||||
|
@ -1098,6 +1103,9 @@ scrape_configs:
|
||||||
# - "30s" - 30 seconds
|
# - "30s" - 30 seconds
|
||||||
# - "2m" - 2 minutes
|
# - "2m" - 2 minutes
|
||||||
# The `scrape_timeout` cannot exceed the `scrape_interval`.
|
# The `scrape_timeout` cannot exceed the `scrape_interval`.
|
||||||
|
# The scrape_timeout can be set on a per-target basis by specifying `__scrape_timeout__`
|
||||||
|
# label during target relabeling phase.
|
||||||
|
# See https://docs.victoriametrics.com/vmagent.html#relabeling
|
||||||
# scrape_timeout: <duration>
|
# scrape_timeout: <duration>
|
||||||
|
|
||||||
# metrics_path is the path to fetch metrics from targets.
|
# metrics_path is the path to fetch metrics from targets.
|
||||||
|
@ -1186,8 +1194,11 @@ scrape_configs:
|
||||||
# disable_keepalive: <boolean>
|
# disable_keepalive: <boolean>
|
||||||
|
|
||||||
# stream_parse allows enabling stream parsing mode when scraping targets.
|
# stream_parse allows enabling stream parsing mode when scraping targets.
|
||||||
# By default stram parsing mode is disabled for targets which return up to a few thosands samples.
|
# By default stream parsing mode is disabled for targets which return up to a few thousands samples.
|
||||||
# See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode .
|
# See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode .
|
||||||
|
# The stream_parse can be set on a per-target basis by specifying `__stream_parse__`
|
||||||
|
# label during target relabeling phase.
|
||||||
|
# See https://docs.victoriametrics.com/vmagent.html#relabeling
|
||||||
# stream_parse: <boolean>
|
# stream_parse: <boolean>
|
||||||
|
|
||||||
# scrape_align_interval allows aligning scrapes to the given interval.
|
# scrape_align_interval allows aligning scrapes to the given interval.
|
||||||
|
@ -1208,8 +1219,16 @@ scrape_configs:
|
||||||
# a single target can expose during all the scrapes.
|
# a single target can expose during all the scrapes.
|
||||||
# By default there is no limit on the number of exposed series.
|
# By default there is no limit on the number of exposed series.
|
||||||
# See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter .
|
# See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter .
|
||||||
|
# The series_limit can be set on a per-target basis by specifying `__series_limit__`
|
||||||
|
# label during target relabeling phase.
|
||||||
|
# See https://docs.victoriametrics.com/vmagent.html#relabeling
|
||||||
# series_limit: ...
|
# series_limit: ...
|
||||||
|
|
||||||
|
# no_stale_markers allows disabling staleness tracking.
|
||||||
|
# By default staleness tracking is enabled for all the discovered scrape targets.
|
||||||
|
# See https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers
|
||||||
|
# no_stale_markers: <boolean>
|
||||||
|
|
||||||
# Additional HTTP client options for target scraping can be specified here.
|
# Additional HTTP client options for target scraping can be specified here.
|
||||||
# See https://docs.victoriametrics.com/sd_configs.html#http-api-client-options
|
# See https://docs.victoriametrics.com/sd_configs.html#http-api-client-options
|
||||||
```
|
```
|
||||||
|
|
|
@ -386,7 +386,7 @@ Extra labels can be added to metrics collected by `vmagent` via the following me
|
||||||
```
|
```
|
||||||
|
|
||||||
`vmagent` sets `scrape_series_added` to zero when it runs with `-promscrape.noStaleMarkers` command-line option
|
`vmagent` sets `scrape_series_added` to zero when it runs with `-promscrape.noStaleMarkers` command-line option
|
||||||
(e.g. when [staleness markers](#prometheus-staleness-markers) are disabled).
|
or when it scrapes a target with `no_stale_markers: true` option, e.g. when [staleness markers](#prometheus-staleness-markers) are disabled.
|
||||||
|
|
||||||
* `scrape_series_limit` - the limit on the number of unique time series the given target can expose according to [these docs](#cardinality-limiter).
|
* `scrape_series_limit` - the limit on the number of unique time series the given target can expose according to [these docs](#cardinality-limiter).
|
||||||
This metric is exposed only if the series limit is set.
|
This metric is exposed only if the series limit is set.
|
||||||
|
@ -608,9 +608,13 @@ Additionally, the `action: graphite` relabeling rules usually work much faster t
|
||||||
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.
|
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.
|
||||||
|
|
||||||
Prometheus staleness markers' tracking needs additional memory, since it must store the previous response body per each scrape target
|
Prometheus staleness markers' tracking needs additional memory, since it must store the previous response body per each scrape target
|
||||||
in order to compare it to the current response body. The memory usage may be reduced by passing `-promscrape.noStaleMarkers`
|
in order to compare it to the current response body. The memory usage may be reduced by disabling staleness tracking in the following ways:
|
||||||
command-line flag to `vmagent`. This disables staleness tracking. This also disables tracking the number of new time series
|
|
||||||
per each scrape with the auto-generated `scrape_series_added` metric. See [these docs](#automatically-generated-metrics) for details.
|
* By passing `-promscrape.noStaleMarkers` command-line flag to `vmagent`. This disables staleness tracking across all the targets.
|
||||||
|
* By specifying `no_stale_markers: true` option in the [scrape_config](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) for the corresponding target.
|
||||||
|
|
||||||
|
When staleness tracking is disabled, then `vmagent` doesn't track the number of new time series per each scrape,
|
||||||
|
e.g. it sets `scrape_series_added` metric to zero. See [these docs](#automatically-generated-metrics) for details.
|
||||||
|
|
||||||
## Stream parsing mode
|
## Stream parsing mode
|
||||||
|
|
||||||
|
@ -1027,7 +1031,7 @@ It may be needed to build `vmagent` from source code when developing or testing
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmagent` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmagent` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds the `vmagent` binary and puts it into the `bin` folder.
|
It builds the `vmagent` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -1056,7 +1060,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
### Development ARM build
|
### Development ARM build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmagent-linux-arm` or `make vmagent-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
2. Run `make vmagent-linux-arm` or `make vmagent-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||||
It builds `vmagent-linux-arm` or `vmagent-linux-arm64` binary respectively and puts it into the `bin` folder.
|
It builds `vmagent-linux-arm` or `vmagent-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -669,9 +669,10 @@ Try the following recommendations in such cases:
|
||||||
are delivered to the datasource;
|
are delivered to the datasource;
|
||||||
* If you know in advance, that data in datasource is delayed - try changing vmalert's `-datasource.lookback`
|
* If you know in advance, that data in datasource is delayed - try changing vmalert's `-datasource.lookback`
|
||||||
command-line flag to add a time shift for evaluations;
|
command-line flag to add a time shift for evaluations;
|
||||||
* If time intervals between datapoints in datasource are irregular - try changing vmalert's `-datasource.queryStep`
|
* If time intervals between datapoints in datasource are irregular or `>=5min` - try changing vmalert's
|
||||||
command-line flag to specify how far search query can lookback for the recent datapoint. By default, this value
|
`-datasource.queryStep` command-line flag to specify how far search query can lookback for the recent datapoint.
|
||||||
is equal to group's evaluation interval.
|
The recommendation is to have the step at least two times bigger than `scrape_interval`, since
|
||||||
|
there are no guarantees that scrape will not fail.
|
||||||
|
|
||||||
Sometimes, it is not clear why some specific alert fired or didn't fire. It is very important to remember, that
|
Sometimes, it is not clear why some specific alert fired or didn't fire. It is very important to remember, that
|
||||||
alerts with `for: 0` fire immediately when their expression becomes true. And alerts with `for > 0` will fire only
|
alerts with `for: 0` fire immediately when their expression becomes true. And alerts with `for > 0` will fire only
|
||||||
|
@ -1279,7 +1280,7 @@ spec:
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmalert` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmalert` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmalert` binary and puts it into the `bin` folder.
|
It builds `vmalert` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -1295,7 +1296,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
### Development ARM build
|
### Development ARM build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmalert-linux-arm` or `make vmalert-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmalert-linux-arm` or `make vmalert-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmalert-linux-arm` or `vmalert-linux-arm64` binary respectively and puts it into the `bin` folder.
|
It builds `vmalert-linux-arm` or `vmalert-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -171,7 +171,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmauth` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmauth` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmauth` binary and puts it into the `bin` folder.
|
It builds `vmauth` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -6,14 +6,6 @@ sort: 6
|
||||||
|
|
||||||
`vmbackup` creates VictoriaMetrics data backups from [instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots).
|
`vmbackup` creates VictoriaMetrics data backups from [instant snapshots](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots).
|
||||||
|
|
||||||
Supported storage systems for backups:
|
|
||||||
|
|
||||||
* [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
|
|
||||||
* [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
|
|
||||||
* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<bucket>/<path/to/backup>`
|
|
||||||
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
|
|
||||||
* Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents from storing the backup into the directory pointed by `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.
|
|
||||||
|
|
||||||
`vmbackup` supports incremental and full backups. Incremental backups are created automatically if the destination path already contains data from the previous backup.
|
`vmbackup` supports incremental and full backups. Incremental backups are created automatically if the destination path already contains data from the previous backup.
|
||||||
Full backups can be sped up with `-origin` pointing to an already existing backup on the same remote storage. In this case `vmbackup` makes server-side copy for the shared
|
Full backups can be sped up with `-origin` pointing to an already existing backup on the same remote storage. In this case `vmbackup` makes server-side copy for the shared
|
||||||
data between the existing backup and new backup. It saves time and costs on data transfer.
|
data between the existing backup and new backup. It saves time and costs on data transfer.
|
||||||
|
@ -27,6 +19,16 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
|
||||||
See also [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html) tool built on top of `vmbackup`. This tool simplifies
|
See also [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html) tool built on top of `vmbackup`. This tool simplifies
|
||||||
creation of hourly, daily, weekly and monthly backups.
|
creation of hourly, daily, weekly and monthly backups.
|
||||||
|
|
||||||
|
## Supported storage types
|
||||||
|
|
||||||
|
`vmbackup` supports the following `-dst` storage types:
|
||||||
|
|
||||||
|
* [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
|
||||||
|
* [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
|
||||||
|
* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<bucket>/<path/to/backup>`
|
||||||
|
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
|
||||||
|
* Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents from storing the backup into the directory pointed by `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.
|
||||||
|
|
||||||
## Use cases
|
## Use cases
|
||||||
|
|
||||||
### Regular backups
|
### Regular backups
|
||||||
|
@ -34,7 +36,7 @@ creation of hourly, daily, weekly and monthly backups.
|
||||||
Regular backup can be performed with the following command:
|
Regular backup can be performed with the following command:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshot.createURL=http://localhost:8428/snapshot/create -dst=gs://<bucket>/<path/to/new/backup>
|
./vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshot.createURL=http://localhost:8428/snapshot/create -dst=gs://<bucket>/<path/to/new/backup>
|
||||||
```
|
```
|
||||||
|
|
||||||
* `</path/to/victoria-metrics-data>` - path to VictoriaMetrics data pointed by `-storageDataPath` command-line flag in single-node VictoriaMetrics or in cluster `vmstorage`.
|
* `</path/to/victoria-metrics-data>` - path to VictoriaMetrics data pointed by `-storageDataPath` command-line flag in single-node VictoriaMetrics or in cluster `vmstorage`.
|
||||||
|
@ -79,7 +81,7 @@ The command will upload only changed data to `gs://<bucket>/latest`.
|
||||||
* Run the following command once a day:
|
* Run the following command once a day:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshot.createURL=http://localhost:8428/snapshot/create -dst=gs://<bucket>/<YYYYMMDD> -origin=gs://<bucket>/latest
|
./vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshot.createURL=http://localhost:8428/snapshot/create -dst=gs://<bucket>/<YYYYMMDD> -origin=gs://<bucket>/latest
|
||||||
```
|
```
|
||||||
|
|
||||||
Where `<daily-snapshot>` is the snapshot for the last day `<YYYYMMDD>`.
|
Where `<daily-snapshot>` is the snapshot for the last day `<YYYYMMDD>`.
|
||||||
|
@ -156,6 +158,11 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
|
||||||
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email"
|
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
* Obtaining credentials from env variables.
|
||||||
|
- For AWS S3 compatible storages set env variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
|
||||||
|
Also you can set env variable `AWS_SHARED_CREDENTIALS_FILE` with path to credentials file.
|
||||||
|
- For GCE cloud storage set env variable `GOOGLE_APPLICATION_CREDENTIALS` with path to credentials file.
|
||||||
|
- For Azure storage either set env variables `AZURE_STORAGE_ACCOUNT_NAME` and `AZURE_STORAGE_ACCOUNT_KEY`, or `AZURE_STORAGE_ACCOUNT_CONNECTION_STRING`.
|
||||||
|
|
||||||
* Usage with s3 custom url endpoint. It is possible to use `vmbackup` with s3 compatible storages like minio, cloudian, etc.
|
* Usage with s3 custom url endpoint. It is possible to use `vmbackup` with s3 compatible storages like minio, cloudian, etc.
|
||||||
You have to add a custom url endpoint via flag:
|
You have to add a custom url endpoint via flag:
|
||||||
|
@ -283,7 +290,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmbackup` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmbackup` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmbackup` binary and puts it into the `bin` folder.
|
It builds `vmbackup` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -6,16 +6,22 @@ sort: 10
|
||||||
|
|
||||||
***vmbackupmanager is a part of [enterprise package](https://victoriametrics.com/products/enterprise/). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
***vmbackupmanager is a part of [enterprise package](https://victoriametrics.com/products/enterprise/). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
||||||
|
|
||||||
The VictoriaMetrics backup manager automates regular backup procedures. It supports the following backup intervals: **hourly**, **daily**, **weekly** and **monthly**. Multiple backup intervals may be configured simultaneously. I.e. the backup manager creates hourly backups every hour, while it creates daily backups every day, etc. Backup manager must have read access to the storage data, so best practice is to install it on the same machine (or as a sidecar) where the storage node is installed.
|
The VictoriaMetrics backup manager automates regular backup procedures. It supports the following backup intervals: **hourly**, **daily**, **weekly** and **monthly**.
|
||||||
The backup service makes a backup every hour and puts it to the latest folder and then copies data to the folders which represent the backup intervals (hourly, daily, weekly and monthly)
|
Multiple backup intervals may be configured simultaneously. I.e. the backup manager creates hourly backups every hour, while it creates daily backups every day, etc.
|
||||||
|
Backup manager must have read access to the storage data, so best practice is to install it on the same machine (or as a sidecar) where the storage node is installed.
|
||||||
|
The backup service makes a backup every hour and puts it to the latest folder and then copies data to the folders
|
||||||
|
which represent the backup intervals (hourly, daily, weekly and monthly)
|
||||||
|
|
||||||
The required flags for running the service are as follows:
|
The required flags for running the service are as follows:
|
||||||
|
|
||||||
* -eula - should be true and means that you have the legal right to run a backup manager. That can either be a signed contract or an email with confirmation to run the service in a trial period
|
* -eula - should be true and means that you have the legal right to run a backup manager. That can either be a signed contract or an email
|
||||||
* -storageDataPath - path to VictoriaMetrics or vmstorage data path to make backup from
|
with confirmation to run the service in a trial period.
|
||||||
|
* -storageDataPath - path to VictoriaMetrics or vmstorage data path to make backup from.
|
||||||
* -snapshot.createURL - VictoriaMetrics creates snapshot URL which will automatically be created during backup. Example: <http://victoriametrics:8428/snapshot/create>
|
* -snapshot.createURL - VictoriaMetrics creates snapshot URL which will automatically be created during backup. Example: <http://victoriametrics:8428/snapshot/create>
|
||||||
* -dst - backup destination at s3, gcs or local filesystem
|
* -dst - backup destination at [the supported storage types](https://docs.victoriametrics.com/vmbackup.html#supported-storage-types).
|
||||||
* -credsFilePath - path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set. See [https://cloud.google.com/iam/docs/creating-managing-service-account-keys](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) and [https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html](https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)
|
* -credsFilePath - path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set.
|
||||||
|
See [https://cloud.google.com/iam/docs/creating-managing-service-account-keys](https://cloud.google.com/iam/docs/creating-managing-service-account-keys)
|
||||||
|
and [https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html](https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html).
|
||||||
|
|
||||||
Backup schedule is controlled by the following flags:
|
Backup schedule is controlled by the following flags:
|
||||||
|
|
||||||
|
@ -40,7 +46,11 @@ To get the full list of supported flags please run the following command:
|
||||||
./vmbackupmanager --help
|
./vmbackupmanager --help
|
||||||
```
|
```
|
||||||
|
|
||||||
The service creates a **full** backup each run. This means that the system can be restored fully from any particular backup using vmrestore. Backup manager uploads only the data that has been changed or created since the most recent backup (incremental backup).
|
The service creates a **full** backup each run. This means that the system can be restored fully
|
||||||
|
from any particular backup using [vmrestore](https://docs.victoriametrics.com/vmrestore.html).
|
||||||
|
Backup manager uploads only the data that has been changed or created since the most recent backup (incremental backup).
|
||||||
|
This reduces the consumed network traffic and the time needed for performing the backup.
|
||||||
|
See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for details.
|
||||||
|
|
||||||
*Please take into account that the first backup upload could take a significant amount of time as it needs to upload all of the data.*
|
*Please take into account that the first backup upload could take a significant amount of time as it needs to upload all of the data.*
|
||||||
|
|
||||||
|
@ -51,7 +61,7 @@ There are two flags which could help with performance tuning:
|
||||||
|
|
||||||
## Example of Usage
|
## Example of Usage
|
||||||
|
|
||||||
GCS and cluster version. You need to have a credentials file in json format with following structure
|
GCS and cluster version. You need to have a credentials file in JSON format with the following structure:
|
||||||
|
|
||||||
credentials.json
|
credentials.json
|
||||||
|
|
||||||
|
|
|
@ -704,7 +704,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmctl` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmctl` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmctl` binary and puts it into the `bin` folder.
|
It builds `vmctl` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
@ -733,7 +733,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
||||||
|
|
||||||
#### Development ARM build
|
#### Development ARM build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmctl-linux-arm` or `make vmctl-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmctl-linux-arm` or `make vmctl-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmctl-linux-arm` or `vmctl-linux-arm64` binary respectively and puts it into the `bin` folder.
|
It builds `vmctl-linux-arm` or `vmctl-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
|
@ -16,7 +16,7 @@ VictoriaMetrics must be stopped during the restore process.
|
||||||
Run the following command to restore backup from the given `-src` into the given `-storageDataPath`:
|
Run the following command to restore backup from the given `-src` into the given `-storageDataPath`:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
vmrestore -src=<storageType>://<path/to/backup> -storageDataPath=<local/path/to/restore>
|
./vmrestore -src=<storageType>://<path/to/backup> -storageDataPath=<local/path/to/restore>
|
||||||
```
|
```
|
||||||
|
|
||||||
* `<storageType>://<path/to/backup>` is the path to backup made with [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
|
* `<storageType>://<path/to/backup>` is the path to backup made with [vmbackup](https://docs.victoriametrics.com/vmbackup.html).
|
||||||
|
@ -190,7 +190,7 @@ It is recommended using [binary releases](https://github.com/VictoriaMetrics/Vic
|
||||||
|
|
||||||
### Development build
|
### Development build
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.1.
|
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.19.2.
|
||||||
2. Run `make vmrestore` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
2. Run `make vmrestore` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||||
It builds `vmrestore` binary and puts it into the `bin` folder.
|
It builds `vmrestore` binary and puts it into the `bin` folder.
|
||||||
|
|
||||||
|
|
12
go.mod
12
go.mod
|
@ -4,7 +4,7 @@ go 1.19
|
||||||
|
|
||||||
require (
|
require (
|
||||||
cloud.google.com/go/storage v1.27.0
|
cloud.google.com/go/storage v1.27.0
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.0
|
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1
|
||||||
github.com/VictoriaMetrics/fastcache v1.12.0
|
github.com/VictoriaMetrics/fastcache v1.12.0
|
||||||
|
|
||||||
// Do not use the original github.com/valyala/fasthttp because of issues
|
// Do not use the original github.com/valyala/fasthttp because of issues
|
||||||
|
@ -30,15 +30,15 @@ require (
|
||||||
github.com/oklog/ulid v1.3.1
|
github.com/oklog/ulid v1.3.1
|
||||||
github.com/prometheus/common v0.37.0 // indirect
|
github.com/prometheus/common v0.37.0 // indirect
|
||||||
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
|
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
|
||||||
github.com/urfave/cli/v2 v2.17.1
|
github.com/urfave/cli/v2 v2.19.2
|
||||||
github.com/valyala/fastjson v1.6.3
|
github.com/valyala/fastjson v1.6.3
|
||||||
github.com/valyala/fastrand v1.1.0
|
github.com/valyala/fastrand v1.1.0
|
||||||
github.com/valyala/fasttemplate v1.2.1
|
github.com/valyala/fasttemplate v1.2.1
|
||||||
github.com/valyala/gozstd v1.17.0
|
github.com/valyala/gozstd v1.17.0
|
||||||
github.com/valyala/quicktemplate v1.7.0
|
github.com/valyala/quicktemplate v1.7.0
|
||||||
golang.org/x/net v0.0.0-20221004154528-8021a29435af
|
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b
|
||||||
golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1
|
golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1
|
||||||
golang.org/x/sys v0.0.0-20221006211917-84dc82d7e875
|
golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43
|
||||||
google.golang.org/api v0.98.0
|
google.golang.org/api v0.98.0
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
)
|
)
|
||||||
|
@ -90,10 +90,10 @@ require (
|
||||||
go.uber.org/atomic v1.10.0 // indirect
|
go.uber.org/atomic v1.10.0 // indirect
|
||||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
|
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
|
||||||
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
|
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
|
||||||
golang.org/x/text v0.3.7 // indirect
|
golang.org/x/text v0.3.8 // indirect
|
||||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||||
google.golang.org/appengine v1.6.7 // indirect
|
google.golang.org/appengine v1.6.7 // indirect
|
||||||
google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91 // indirect
|
google.golang.org/genproto v0.0.0-20221013201013-33fc6f83cba4 // indirect
|
||||||
google.golang.org/grpc v1.50.0 // indirect
|
google.golang.org/grpc v1.50.0 // indirect
|
||||||
google.golang.org/protobuf v1.28.1 // indirect
|
google.golang.org/protobuf v1.28.1 // indirect
|
||||||
)
|
)
|
||||||
|
|
23
go.sum
23
go.sum
|
@ -73,8 +73,8 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4/go.mod h1:uGG2W01BaETf0Ozp+Q
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
|
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 h1:XUNQ4mw+zJmaA2KXzP9JlQiecy1SI+Eog7xVkPiqIbg=
|
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 h1:XUNQ4mw+zJmaA2KXzP9JlQiecy1SI+Eog7xVkPiqIbg=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
|
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.0 h1:fe+kSd9btgTTeHeUlMTyEsjoe6L/zd+Q61iWEMPwHmc=
|
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.0/go.mod h1:T7nxmZ9i42Dqy7kwnn8AZYNjqxd4TloKXdIbhosHSqo=
|
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU=
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||||
|
@ -884,8 +884,8 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
|
||||||
github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||||
github.com/urfave/cli/v2 v2.17.1 h1:UzjDEw2dJQUE3iRaiNQ1VrVFbyAtKGH3VdkMoHA58V0=
|
github.com/urfave/cli/v2 v2.19.2 h1:eXu5089gqqiDQKSnFW+H/FhjrxRGztwSxlTsVK7IuqQ=
|
||||||
github.com/urfave/cli/v2 v2.17.1/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
|
github.com/urfave/cli/v2 v2.19.2/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
|
||||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||||
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
|
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
|
||||||
|
@ -1065,8 +1065,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
|
||||||
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||||
golang.org/x/net v0.0.0-20221004154528-8021a29435af h1:wv66FM3rLZGPdxpYL+ApnDe2HzHcTFta3z5nsc13wI4=
|
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b h1:tvrvnPFcdzp294diPnrdZZZ8XUt2Tyj7svb7X52iDuU=
|
||||||
golang.org/x/net v0.0.0-20221004154528-8021a29435af/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
@ -1200,8 +1200,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20221006211917-84dc82d7e875 h1:AzgQNqF+FKwyQ5LbVrVqOcuuFB67N47F9+htZYH0wFM=
|
golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43 h1:OK7RB6t2WQX54srQQYSXMW8dF5C6/8+oA/s5QBmmto4=
|
||||||
golang.org/x/sys v0.0.0-20221006211917-84dc82d7e875/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
@ -1212,8 +1212,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
|
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
|
||||||
|
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
@ -1441,8 +1442,8 @@ google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP
|
||||||
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||||
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||||
google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91 h1:Ezh2cpcnP5Rq60sLensUsFnxh7P6513NLvNtCm9iyJ4=
|
google.golang.org/genproto v0.0.0-20221013201013-33fc6f83cba4 h1:nZ28yoLJWNLTcERW43BN+JDsNQOdiZOFB9Dly/IUrjw=
|
||||||
google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
|
google.golang.org/genproto v0.0.0-20221013201013-33fc6f83cba4/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
|
||||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||||
|
|
|
@ -61,6 +61,7 @@ func (fs *FS) Init() error {
|
||||||
}
|
}
|
||||||
configOpts := []func(*config.LoadOptions) error{
|
configOpts := []func(*config.LoadOptions) error{
|
||||||
config.WithSharedConfigProfile(fs.ProfileName),
|
config.WithSharedConfigProfile(fs.ProfileName),
|
||||||
|
config.WithDefaultRegion("us-east-1"),
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(fs.CredsFilePath) > 0 {
|
if len(fs.CredsFilePath) > 0 {
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
package bytesutil
|
package bytesutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
)
|
)
|
||||||
|
@ -38,6 +39,11 @@ func (fsm *FastStringMatcher) Match(s string) bool {
|
||||||
// Slow path - run matchFunc for s and store the result in the cache.
|
// Slow path - run matchFunc for s and store the result in the cache.
|
||||||
b := fsm.matchFunc(s)
|
b := fsm.matchFunc(s)
|
||||||
bp := &b
|
bp := &b
|
||||||
|
// Make a copy of s in order to limit memory usage to the s length,
|
||||||
|
// since the s may point to bigger string.
|
||||||
|
// This also protects from the case when s contains unsafe string, which points to a temporary byte slice.
|
||||||
|
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3227
|
||||||
|
s = strings.Clone(s)
|
||||||
m.Store(s, bp)
|
m.Store(s, bp)
|
||||||
n := atomic.AddUint64(&fsm.mLen, 1)
|
n := atomic.AddUint64(&fsm.mLen, 1)
|
||||||
if n > 100e3 {
|
if n > 100e3 {
|
||||||
|
|
|
@ -40,6 +40,8 @@ func (fst *FastStringTransformer) Transform(s string) string {
|
||||||
sTransformed := fst.transformFunc(s)
|
sTransformed := fst.transformFunc(s)
|
||||||
// Make a copy of s in order to limit memory usage to the s length,
|
// Make a copy of s in order to limit memory usage to the s length,
|
||||||
// since the s may point to bigger string.
|
// since the s may point to bigger string.
|
||||||
|
// This also protects from the case when s contains unsafe string, which points to a temporary byte slice.
|
||||||
|
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3227
|
||||||
s = strings.Clone(s)
|
s = strings.Clone(s)
|
||||||
if sTransformed == s {
|
if sTransformed == s {
|
||||||
// point sTransformed to just allocated s, since it may point to s,
|
// point sTransformed to just allocated s, since it may point to s,
|
||||||
|
|
|
@ -213,6 +213,9 @@ func (ps *partSearch) NextItem() bool {
|
||||||
|
|
||||||
// The current block is over. Proceed to the next block.
|
// The current block is over. Proceed to the next block.
|
||||||
if err := ps.nextBlock(); err != nil {
|
if err := ps.nextBlock(); err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
err = fmt.Errorf("error in %q: %w", ps.p.path, err)
|
||||||
|
}
|
||||||
ps.err = err
|
ps.err = err
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
|
@ -50,9 +50,7 @@ func (prc *parsedRelabelConfig) String() string {
|
||||||
// Apply applies pcs to labels starting from the labelsOffset.
|
// Apply applies pcs to labels starting from the labelsOffset.
|
||||||
//
|
//
|
||||||
// If isFinalize is set, then FinalizeLabels is called on the labels[labelsOffset:].
|
// If isFinalize is set, then FinalizeLabels is called on the labels[labelsOffset:].
|
||||||
//
|
func (pcs *ParsedConfigs) Apply(labels []prompbmarshal.Label, labelsOffset int) []prompbmarshal.Label {
|
||||||
// The returned labels at labels[labelsOffset:] are sorted.
|
|
||||||
func (pcs *ParsedConfigs) Apply(labels []prompbmarshal.Label, labelsOffset int, isFinalize bool) []prompbmarshal.Label {
|
|
||||||
var inStr string
|
var inStr string
|
||||||
relabelDebug := false
|
relabelDebug := false
|
||||||
if pcs != nil {
|
if pcs != nil {
|
||||||
|
@ -73,10 +71,6 @@ func (pcs *ParsedConfigs) Apply(labels []prompbmarshal.Label, labelsOffset int,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
labels = removeEmptyLabels(labels, labelsOffset)
|
labels = removeEmptyLabels(labels, labelsOffset)
|
||||||
if isFinalize {
|
|
||||||
labels = FinalizeLabels(labels[:labelsOffset], labels[labelsOffset:])
|
|
||||||
}
|
|
||||||
SortLabels(labels[labelsOffset:])
|
|
||||||
if relabelDebug {
|
if relabelDebug {
|
||||||
if len(labels) == labelsOffset {
|
if len(labels) == labelsOffset {
|
||||||
logger.Infof("\nRelabel In: %s\nRelabel Out: DROPPED - all labels removed", inStr)
|
logger.Infof("\nRelabel In: %s\nRelabel Out: DROPPED - all labels removed", inStr)
|
||||||
|
@ -121,25 +115,36 @@ func removeEmptyLabels(labels []prompbmarshal.Label, labelsOffset int) []prompbm
|
||||||
//
|
//
|
||||||
// See https://www.robustperception.io/life-of-a-label fo details.
|
// See https://www.robustperception.io/life-of-a-label fo details.
|
||||||
func RemoveMetaLabels(dst, src []prompbmarshal.Label) []prompbmarshal.Label {
|
func RemoveMetaLabels(dst, src []prompbmarshal.Label) []prompbmarshal.Label {
|
||||||
for i := range src {
|
for _, label := range src {
|
||||||
label := &src[i]
|
|
||||||
if strings.HasPrefix(label.Name, "__meta_") {
|
if strings.HasPrefix(label.Name, "__meta_") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
dst = append(dst, *label)
|
dst = append(dst, label)
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveLabelsWithDoubleDashPrefix removes labels with "__" prefix from src, appends the remaining lables to dst and returns the result.
|
||||||
|
func RemoveLabelsWithDoubleDashPrefix(dst, src []prompbmarshal.Label) []prompbmarshal.Label {
|
||||||
|
for _, label := range src {
|
||||||
|
name := label.Name
|
||||||
|
// A hack: do not delete __vm_filepath label, since it is used by internal logic for FileSDConfig.
|
||||||
|
if strings.HasPrefix(name, "__") && name != "__vm_filepath" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dst = append(dst, label)
|
||||||
}
|
}
|
||||||
return dst
|
return dst
|
||||||
}
|
}
|
||||||
|
|
||||||
// FinalizeLabels removes labels with "__" in the beginning (except of "__name__").
|
// FinalizeLabels removes labels with "__" in the beginning (except of "__name__").
|
||||||
func FinalizeLabels(dst, src []prompbmarshal.Label) []prompbmarshal.Label {
|
func FinalizeLabels(dst, src []prompbmarshal.Label) []prompbmarshal.Label {
|
||||||
for i := range src {
|
for _, label := range src {
|
||||||
label := &src[i]
|
|
||||||
name := label.Name
|
name := label.Name
|
||||||
if strings.HasPrefix(name, "__") && name != "__name__" {
|
if strings.HasPrefix(name, "__") && name != "__name__" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
dst = append(dst, *label)
|
dst = append(dst, label)
|
||||||
}
|
}
|
||||||
return dst
|
return dst
|
||||||
}
|
}
|
||||||
|
|
|
@ -78,7 +78,11 @@ func TestApplyRelabelConfigs(t *testing.T) {
|
||||||
t.Fatalf("cannot parse %q: %s", config, err)
|
t.Fatalf("cannot parse %q: %s", config, err)
|
||||||
}
|
}
|
||||||
labels := MustParseMetricWithLabels(metric)
|
labels := MustParseMetricWithLabels(metric)
|
||||||
resultLabels := pcs.Apply(labels, 0, isFinalize)
|
resultLabels := pcs.Apply(labels, 0)
|
||||||
|
if isFinalize {
|
||||||
|
resultLabels = FinalizeLabels(resultLabels[:0], resultLabels)
|
||||||
|
}
|
||||||
|
SortLabels(resultLabels)
|
||||||
result := labelsToString(resultLabels)
|
result := labelsToString(resultLabels)
|
||||||
if result != resultExpected {
|
if result != resultExpected {
|
||||||
t.Fatalf("unexpected result; got\n%s\nwant\n%s", result, resultExpected)
|
t.Fatalf("unexpected result; got\n%s\nwant\n%s", result, resultExpected)
|
||||||
|
|
|
@ -396,7 +396,7 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, false)
|
labels = pcs.Apply(labels, 0)
|
||||||
if len(labels) != 0 {
|
if len(labels) != 0 {
|
||||||
panic(fmt.Errorf("BUG: expecting empty labels"))
|
panic(fmt.Errorf("BUG: expecting empty labels"))
|
||||||
}
|
}
|
||||||
|
@ -419,7 +419,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != len(labelsOrig) {
|
if len(labels) != len(labelsOrig) {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labelsOrig))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labelsOrig))
|
||||||
}
|
}
|
||||||
|
@ -454,7 +456,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != len(labelsOrig) {
|
if len(labels) != len(labelsOrig) {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
||||||
}
|
}
|
||||||
|
@ -488,7 +492,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 2 {
|
if len(labels) != 2 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 2, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 2, labels))
|
||||||
}
|
}
|
||||||
|
@ -524,7 +530,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != len(labelsOrig) {
|
if len(labels) != len(labelsOrig) {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
||||||
}
|
}
|
||||||
|
@ -560,7 +568,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != len(labelsOrig) {
|
if len(labels) != len(labelsOrig) {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
||||||
}
|
}
|
||||||
|
@ -595,7 +605,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != len(labelsOrig) {
|
if len(labels) != len(labelsOrig) {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
||||||
}
|
}
|
||||||
|
@ -630,7 +642,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 0 {
|
if len(labels) != 0 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels))
|
||||||
}
|
}
|
||||||
|
@ -653,7 +667,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 0 {
|
if len(labels) != 0 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels))
|
||||||
}
|
}
|
||||||
|
@ -676,7 +692,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 0 {
|
if len(labels) != 0 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels))
|
||||||
}
|
}
|
||||||
|
@ -699,7 +717,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != len(labelsOrig) {
|
if len(labels) != len(labelsOrig) {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
||||||
}
|
}
|
||||||
|
@ -734,7 +754,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != len(labelsOrig) {
|
if len(labels) != len(labelsOrig) {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
||||||
}
|
}
|
||||||
|
@ -768,7 +790,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != len(labelsOrig) {
|
if len(labels) != len(labelsOrig) {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
||||||
}
|
}
|
||||||
|
@ -802,7 +826,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 1 {
|
if len(labels) != 1 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
||||||
}
|
}
|
||||||
|
@ -830,7 +856,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 1 {
|
if len(labels) != 1 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
||||||
}
|
}
|
||||||
|
@ -858,7 +886,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 1 {
|
if len(labels) != 1 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
||||||
}
|
}
|
||||||
|
@ -886,7 +916,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 0 {
|
if len(labels) != 0 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels))
|
||||||
}
|
}
|
||||||
|
@ -908,7 +940,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 1 {
|
if len(labels) != 1 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
||||||
}
|
}
|
||||||
|
@ -936,7 +970,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 1 {
|
if len(labels) != 1 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
||||||
}
|
}
|
||||||
|
@ -964,7 +1000,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 1 {
|
if len(labels) != 1 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels))
|
||||||
}
|
}
|
||||||
|
@ -991,7 +1029,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 1 {
|
if len(labels) != 1 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 3, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 3, labels))
|
||||||
}
|
}
|
||||||
|
@ -1018,7 +1058,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 2 {
|
if len(labels) != 2 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 3, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 3, labels))
|
||||||
}
|
}
|
||||||
|
@ -1051,7 +1093,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != 2 {
|
if len(labels) != 2 {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 3, labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 3, labels))
|
||||||
}
|
}
|
||||||
|
@ -1087,7 +1131,9 @@ func BenchmarkApplyRelabelConfigs(b *testing.B) {
|
||||||
var labels []prompbmarshal.Label
|
var labels []prompbmarshal.Label
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
labels = append(labels[:0], labelsOrig...)
|
labels = append(labels[:0], labelsOrig...)
|
||||||
labels = pcs.Apply(labels, 0, true)
|
labels = pcs.Apply(labels, 0)
|
||||||
|
labels = FinalizeLabels(labels[:0], labels)
|
||||||
|
SortLabels(labels)
|
||||||
if len(labels) != len(labelsOrig) {
|
if len(labels) != len(labelsOrig) {
|
||||||
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels))
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,6 +9,9 @@ import (
|
||||||
|
|
||||||
// SortLabels sorts labels.
|
// SortLabels sorts labels.
|
||||||
func SortLabels(labels []prompbmarshal.Label) {
|
func SortLabels(labels []prompbmarshal.Label) {
|
||||||
|
if len(labels) < 2 {
|
||||||
|
return
|
||||||
|
}
|
||||||
ls := labelsSorterPool.Get().(*labelsSorter)
|
ls := labelsSorterPool.Get().(*labelsSorter)
|
||||||
*ls = labels
|
*ls = labels
|
||||||
if !sort.IsSorted(ls) {
|
if !sort.IsSorted(ls) {
|
||||||
|
@ -20,6 +23,9 @@ func SortLabels(labels []prompbmarshal.Label) {
|
||||||
|
|
||||||
// SortLabelsStable sorts labels using stable sort.
|
// SortLabelsStable sorts labels using stable sort.
|
||||||
func SortLabelsStable(labels []prompbmarshal.Label) {
|
func SortLabelsStable(labels []prompbmarshal.Label) {
|
||||||
|
if len(labels) < 2 {
|
||||||
|
return
|
||||||
|
}
|
||||||
ls := labelsSorterPool.Get().(*labelsSorter)
|
ls := labelsSorterPool.Get().(*labelsSorter)
|
||||||
*ls = labels
|
*ls = labels
|
||||||
if !sort.IsSorted(ls) {
|
if !sort.IsSorted(ls) {
|
||||||
|
|
|
@ -42,6 +42,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
noStaleMarkers = flag.Bool("promscrape.noStaleMarkers", false, "Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series")
|
||||||
strictParse = flag.Bool("promscrape.config.strictParse", true, "Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields")
|
strictParse = flag.Bool("promscrape.config.strictParse", true, "Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields")
|
||||||
dryRun = flag.Bool("promscrape.config.dryRun", false, "Checks -promscrape.config file for errors and unsupported fields and then exits. "+
|
dryRun = flag.Bool("promscrape.config.dryRun", false, "Checks -promscrape.config file for errors and unsupported fields and then exits. "+
|
||||||
"Returns non-zero exit code on parsing errors and emits these errors to stderr. "+
|
"Returns non-zero exit code on parsing errors and emits these errors to stderr. "+
|
||||||
|
@ -289,6 +290,7 @@ type ScrapeConfig struct {
|
||||||
ScrapeAlignInterval *promutils.Duration `yaml:"scrape_align_interval,omitempty"`
|
ScrapeAlignInterval *promutils.Duration `yaml:"scrape_align_interval,omitempty"`
|
||||||
ScrapeOffset *promutils.Duration `yaml:"scrape_offset,omitempty"`
|
ScrapeOffset *promutils.Duration `yaml:"scrape_offset,omitempty"`
|
||||||
SeriesLimit int `yaml:"series_limit,omitempty"`
|
SeriesLimit int `yaml:"series_limit,omitempty"`
|
||||||
|
NoStaleMarkers *bool `yaml:"no_stale_markers,omitempty"`
|
||||||
ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"`
|
ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"`
|
||||||
|
|
||||||
// This is set in loadConfig
|
// This is set in loadConfig
|
||||||
|
@ -950,6 +952,10 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
|
||||||
return nil, fmt.Errorf("cannot use stream parsing mode when `series_limit` is set for `job_name` %q", jobName)
|
return nil, fmt.Errorf("cannot use stream parsing mode when `series_limit` is set for `job_name` %q", jobName)
|
||||||
}
|
}
|
||||||
externalLabels := globalCfg.getExternalLabels()
|
externalLabels := globalCfg.getExternalLabels()
|
||||||
|
noStaleTracking := *noStaleMarkers
|
||||||
|
if sc.NoStaleMarkers != nil {
|
||||||
|
noStaleTracking = *sc.NoStaleMarkers
|
||||||
|
}
|
||||||
swc := &scrapeWorkConfig{
|
swc := &scrapeWorkConfig{
|
||||||
scrapeInterval: scrapeInterval,
|
scrapeInterval: scrapeInterval,
|
||||||
scrapeIntervalString: scrapeInterval.String(),
|
scrapeIntervalString: scrapeInterval.String(),
|
||||||
|
@ -975,6 +981,7 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
|
||||||
scrapeAlignInterval: sc.ScrapeAlignInterval.Duration(),
|
scrapeAlignInterval: sc.ScrapeAlignInterval.Duration(),
|
||||||
scrapeOffset: sc.ScrapeOffset.Duration(),
|
scrapeOffset: sc.ScrapeOffset.Duration(),
|
||||||
seriesLimit: sc.SeriesLimit,
|
seriesLimit: sc.SeriesLimit,
|
||||||
|
noStaleMarkers: noStaleTracking,
|
||||||
}
|
}
|
||||||
return swc, nil
|
return swc, nil
|
||||||
}
|
}
|
||||||
|
@ -1004,6 +1011,7 @@ type scrapeWorkConfig struct {
|
||||||
scrapeAlignInterval time.Duration
|
scrapeAlignInterval time.Duration
|
||||||
scrapeOffset time.Duration
|
scrapeOffset time.Duration
|
||||||
seriesLimit int
|
seriesLimit int
|
||||||
|
noStaleMarkers bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type targetLabelsGetter interface {
|
type targetLabelsGetter interface {
|
||||||
|
@ -1187,18 +1195,17 @@ var scrapeWorkKeyBufPool bytesutil.ByteBufferPool
|
||||||
|
|
||||||
func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabels map[string]string) (*ScrapeWork, error) {
|
func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabels map[string]string) (*ScrapeWork, error) {
|
||||||
lctx := getLabelsContext()
|
lctx := getLabelsContext()
|
||||||
lctx.labels = mergeLabels(lctx.labels[:0], swc, target, extraLabels, metaLabels)
|
defer putLabelsContext(lctx)
|
||||||
|
|
||||||
|
labels := mergeLabels(lctx.labels[:0], swc, target, extraLabels, metaLabels)
|
||||||
var originalLabels []prompbmarshal.Label
|
var originalLabels []prompbmarshal.Label
|
||||||
if !*dropOriginalLabels {
|
if !*dropOriginalLabels {
|
||||||
originalLabels = append([]prompbmarshal.Label{}, lctx.labels...)
|
originalLabels = append([]prompbmarshal.Label{}, labels...)
|
||||||
}
|
}
|
||||||
lctx.labels = swc.relabelConfigs.Apply(lctx.labels, 0, false)
|
labels = swc.relabelConfigs.Apply(labels, 0)
|
||||||
lctx.labels = promrelabel.RemoveMetaLabels(lctx.labels[:0], lctx.labels)
|
// Remove labels starting from "__meta_" prefix according to https://www.robustperception.io/life-of-a-label/
|
||||||
// Remove references to already deleted labels, so GC could clean strings for label name and label value past len(labels).
|
labels = promrelabel.RemoveMetaLabels(labels[:0], labels)
|
||||||
// This should reduce memory usage when relabeling creates big number of temporary labels with long names and/or values.
|
lctx.labels = labels
|
||||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
|
|
||||||
labels := append([]prompbmarshal.Label{}, lctx.labels...)
|
|
||||||
putLabelsContext(lctx)
|
|
||||||
|
|
||||||
// Verify whether the scrape work must be skipped because of `-promscrape.cluster.*` configs.
|
// Verify whether the scrape work must be skipped because of `-promscrape.cluster.*` configs.
|
||||||
// Perform the verification on labels after the relabeling in order to guarantee that targets with the same set of labels
|
// Perform the verification on labels after the relabeling in order to guarantee that targets with the same set of labels
|
||||||
|
@ -1224,58 +1231,62 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
// See https://www.robustperception.io/life-of-a-label
|
// See https://www.robustperception.io/life-of-a-label
|
||||||
schemeRelabeled := promrelabel.GetLabelValueByName(labels, "__scheme__")
|
scheme := promrelabel.GetLabelValueByName(labels, "__scheme__")
|
||||||
if len(schemeRelabeled) == 0 {
|
if len(scheme) == 0 {
|
||||||
schemeRelabeled = "http"
|
scheme = "http"
|
||||||
}
|
}
|
||||||
addressRelabeled := promrelabel.GetLabelValueByName(labels, "__address__")
|
metricsPath := promrelabel.GetLabelValueByName(labels, "__metrics_path__")
|
||||||
if len(addressRelabeled) == 0 {
|
if len(metricsPath) == 0 {
|
||||||
|
metricsPath = "/metrics"
|
||||||
|
}
|
||||||
|
address := promrelabel.GetLabelValueByName(labels, "__address__")
|
||||||
|
if len(address) == 0 {
|
||||||
// Drop target without scrape address.
|
// Drop target without scrape address.
|
||||||
droppedTargetsMap.Register(originalLabels)
|
droppedTargetsMap.Register(originalLabels)
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
if strings.Contains(addressRelabeled, "/") {
|
// Usability extension to Prometheus behavior: extract optional scheme and metricsPath from __address__.
|
||||||
// Drop target with '/'
|
// Prometheus silently drops targets with __address__ containing scheme or metricsPath
|
||||||
droppedTargetsMap.Register(originalLabels)
|
// according to https://www.robustperception.io/life-of-a-label/ .
|
||||||
return nil, nil
|
if strings.HasPrefix(address, "http://") {
|
||||||
|
scheme = "http"
|
||||||
|
address = address[len("http://"):]
|
||||||
|
} else if strings.HasPrefix(address, "https://") {
|
||||||
|
scheme = "https"
|
||||||
|
address = address[len("https://"):]
|
||||||
}
|
}
|
||||||
addressRelabeled = addMissingPort(addressRelabeled, schemeRelabeled == "https")
|
if n := strings.IndexByte(address, '/'); n >= 0 {
|
||||||
metricsPathRelabeled := promrelabel.GetLabelValueByName(labels, "__metrics_path__")
|
metricsPath = address[n:]
|
||||||
if metricsPathRelabeled == "" {
|
address = address[:n]
|
||||||
metricsPathRelabeled = "/metrics"
|
|
||||||
}
|
}
|
||||||
|
address = addMissingPort(address, scheme == "https")
|
||||||
|
|
||||||
var at *auth.Token
|
var at *auth.Token
|
||||||
tenantID := promrelabel.GetLabelValueByName(labels, "__tenant_id__")
|
tenantID := promrelabel.GetLabelValueByName(labels, "__tenant_id__")
|
||||||
if tenantID != "" {
|
if len(tenantID) > 0 {
|
||||||
newToken, err := auth.NewToken(tenantID)
|
newToken, err := auth.NewToken(tenantID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse __tenant_id__=%q for job=%s, err: %w", tenantID, swc.jobName, err)
|
return nil, fmt.Errorf("cannot parse __tenant_id__=%q for job=%q: %w", tenantID, swc.jobName, err)
|
||||||
}
|
}
|
||||||
at = newToken
|
at = newToken
|
||||||
}
|
}
|
||||||
|
|
||||||
if !strings.HasPrefix(metricsPathRelabeled, "/") {
|
if !strings.HasPrefix(metricsPath, "/") {
|
||||||
metricsPathRelabeled = "/" + metricsPathRelabeled
|
metricsPath = "/" + metricsPath
|
||||||
}
|
}
|
||||||
paramsRelabeled := getParamsFromLabels(labels, swc.params)
|
params := getParamsFromLabels(labels, swc.params)
|
||||||
optionalQuestion := "?"
|
optionalQuestion := ""
|
||||||
if len(paramsRelabeled) == 0 || strings.Contains(metricsPathRelabeled, "?") {
|
if len(params) > 0 {
|
||||||
optionalQuestion = ""
|
optionalQuestion = "?"
|
||||||
|
if strings.Contains(metricsPath, "?") {
|
||||||
|
optionalQuestion = "&"
|
||||||
}
|
}
|
||||||
paramsStr := url.Values(paramsRelabeled).Encode()
|
}
|
||||||
scrapeURL := fmt.Sprintf("%s://%s%s%s%s", schemeRelabeled, addressRelabeled, metricsPathRelabeled, optionalQuestion, paramsStr)
|
paramsStr := url.Values(params).Encode()
|
||||||
|
scrapeURL := fmt.Sprintf("%s://%s%s%s%s", scheme, address, metricsPath, optionalQuestion, paramsStr)
|
||||||
if _, err := url.Parse(scrapeURL); err != nil {
|
if _, err := url.Parse(scrapeURL); err != nil {
|
||||||
return nil, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q (%q), metrics_path=%q (%q) for `job_name` %q: %w",
|
return nil, fmt.Errorf("invalid url %q for scheme=%q, target=%q, address=%q, metrics_path=%q for job=%q: %w",
|
||||||
scrapeURL, swc.scheme, schemeRelabeled, target, addressRelabeled, swc.metricsPath, metricsPathRelabeled, swc.jobName, err)
|
scrapeURL, scheme, target, address, metricsPath, swc.jobName, err)
|
||||||
}
|
|
||||||
// Set missing "instance" label according to https://www.robustperception.io/life-of-a-label
|
|
||||||
if promrelabel.GetLabelByName(labels, "instance") == nil {
|
|
||||||
labels = append(labels, prompbmarshal.Label{
|
|
||||||
Name: "instance",
|
|
||||||
Value: addressRelabeled,
|
|
||||||
})
|
|
||||||
promrelabel.SortLabels(labels)
|
|
||||||
}
|
}
|
||||||
// Read __scrape_interval__ and __scrape_timeout__ from labels.
|
// Read __scrape_interval__ and __scrape_timeout__ from labels.
|
||||||
scrapeInterval := swc.scrapeInterval
|
scrapeInterval := swc.scrapeInterval
|
||||||
|
@ -1314,8 +1325,24 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
|
||||||
}
|
}
|
||||||
streamParse = b
|
streamParse = b
|
||||||
}
|
}
|
||||||
|
// Remove labels with "__" prefix according to https://www.robustperception.io/life-of-a-label/
|
||||||
|
labels = promrelabel.RemoveLabelsWithDoubleDashPrefix(labels[:0], labels)
|
||||||
|
// Remove references to deleted labels, so GC could clean strings for label name and label value past len(labels).
|
||||||
|
// This should reduce memory usage when relabeling creates big number of temporary labels with long names and/or values.
|
||||||
|
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
|
||||||
|
labelsCopy := make([]prompbmarshal.Label, len(labels)+1)
|
||||||
|
labels = append(labelsCopy[:0], labels...)
|
||||||
|
// Add missing "instance" label according to https://www.robustperception.io/life-of-a-label
|
||||||
|
if promrelabel.GetLabelByName(labels, "instance") == nil {
|
||||||
|
labels = append(labels, prompbmarshal.Label{
|
||||||
|
Name: "instance",
|
||||||
|
Value: address,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
promrelabel.SortLabels(labels)
|
||||||
// Reduce memory usage by interning all the strings in labels.
|
// Reduce memory usage by interning all the strings in labels.
|
||||||
internLabelStrings(labels)
|
internLabelStrings(labels)
|
||||||
|
|
||||||
sw := &ScrapeWork{
|
sw := &ScrapeWork{
|
||||||
ScrapeURL: scrapeURL,
|
ScrapeURL: scrapeURL,
|
||||||
ScrapeInterval: scrapeInterval,
|
ScrapeInterval: scrapeInterval,
|
||||||
|
@ -1337,6 +1364,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
|
||||||
ScrapeAlignInterval: swc.scrapeAlignInterval,
|
ScrapeAlignInterval: swc.scrapeAlignInterval,
|
||||||
ScrapeOffset: swc.scrapeOffset,
|
ScrapeOffset: swc.scrapeOffset,
|
||||||
SeriesLimit: seriesLimit,
|
SeriesLimit: seriesLimit,
|
||||||
|
NoStaleMarkers: swc.noStaleMarkers,
|
||||||
AuthToken: at,
|
AuthToken: at,
|
||||||
|
|
||||||
jobNameOriginal: swc.jobName,
|
jobNameOriginal: swc.jobName,
|
||||||
|
|
|
@ -214,6 +214,116 @@ func TestLoadConfig(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAddressWithFullURL(t *testing.T) {
|
||||||
|
data := `
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: abc
|
||||||
|
metrics_path: /foo/bar
|
||||||
|
scheme: https
|
||||||
|
params:
|
||||||
|
x: [y]
|
||||||
|
static_configs:
|
||||||
|
- targets:
|
||||||
|
# the following targets are scraped by the provided urls
|
||||||
|
- 'http://host1/metric/path1'
|
||||||
|
- 'https://host2/metric/path2'
|
||||||
|
- 'http://host3:1234/metric/path3?arg1=value1'
|
||||||
|
# the following target is scraped by <scheme>://host4:1234<metrics_path>
|
||||||
|
- host4:1234
|
||||||
|
`
|
||||||
|
var cfg Config
|
||||||
|
allData, err := cfg.parseData([]byte(data), "sss")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("cannot parase data: %s", err)
|
||||||
|
}
|
||||||
|
if string(allData) != data {
|
||||||
|
t.Fatalf("invalid data returned from parseData;\ngot\n%s\nwant\n%s", allData, data)
|
||||||
|
}
|
||||||
|
sws := cfg.getStaticScrapeWork()
|
||||||
|
resetNonEssentialFields(sws)
|
||||||
|
swsExpected := []*ScrapeWork{
|
||||||
|
{
|
||||||
|
ScrapeURL: "http://host1:80/metric/path1?x=y",
|
||||||
|
ScrapeInterval: defaultScrapeInterval,
|
||||||
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
|
HonorTimestamps: true,
|
||||||
|
Labels: []prompbmarshal.Label{
|
||||||
|
{
|
||||||
|
Name: "instance",
|
||||||
|
Value: "host1:80",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "job",
|
||||||
|
Value: "abc",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
AuthConfig: &promauth.Config{},
|
||||||
|
ProxyAuthConfig: &promauth.Config{},
|
||||||
|
jobNameOriginal: "abc",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ScrapeURL: "https://host2:443/metric/path2?x=y",
|
||||||
|
ScrapeInterval: defaultScrapeInterval,
|
||||||
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
|
HonorTimestamps: true,
|
||||||
|
Labels: []prompbmarshal.Label{
|
||||||
|
{
|
||||||
|
Name: "instance",
|
||||||
|
Value: "host2:443",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "job",
|
||||||
|
Value: "abc",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
AuthConfig: &promauth.Config{},
|
||||||
|
ProxyAuthConfig: &promauth.Config{},
|
||||||
|
jobNameOriginal: "abc",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ScrapeURL: "http://host3:1234/metric/path3?arg1=value1&x=y",
|
||||||
|
ScrapeInterval: defaultScrapeInterval,
|
||||||
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
|
HonorTimestamps: true,
|
||||||
|
Labels: []prompbmarshal.Label{
|
||||||
|
{
|
||||||
|
Name: "instance",
|
||||||
|
Value: "host3:1234",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "job",
|
||||||
|
Value: "abc",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
AuthConfig: &promauth.Config{},
|
||||||
|
ProxyAuthConfig: &promauth.Config{},
|
||||||
|
jobNameOriginal: "abc",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ScrapeURL: "https://host4:1234/foo/bar?x=y",
|
||||||
|
ScrapeInterval: defaultScrapeInterval,
|
||||||
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
|
HonorTimestamps: true,
|
||||||
|
Labels: []prompbmarshal.Label{
|
||||||
|
{
|
||||||
|
Name: "instance",
|
||||||
|
Value: "host4:1234",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "job",
|
||||||
|
Value: "abc",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
AuthConfig: &promauth.Config{},
|
||||||
|
ProxyAuthConfig: &promauth.Config{},
|
||||||
|
jobNameOriginal: "abc",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(sws, swsExpected) {
|
||||||
|
t.Fatalf("unexpected scrapeWork;\ngot\n%#v\nwant\n%#v", sws, swsExpected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestBlackboxExporter(t *testing.T) {
|
func TestBlackboxExporter(t *testing.T) {
|
||||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/684
|
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/684
|
||||||
data := `
|
data := `
|
||||||
|
@ -249,34 +359,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "black:9115",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/probe",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__param_module",
|
|
||||||
Value: "dns_udp_example",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__param_target",
|
|
||||||
Value: "8.8.8.8",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "8.8.8.8",
|
Value: "8.8.8.8",
|
||||||
|
@ -718,26 +800,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "host1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/abc/de",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "__vm_filepath",
|
Name: "__vm_filepath",
|
||||||
Value: "",
|
Value: "",
|
||||||
|
@ -765,26 +827,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "host2",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/abc/de",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "__vm_filepath",
|
Name: "__vm_filepath",
|
||||||
Value: "",
|
Value: "",
|
||||||
|
@ -812,26 +854,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "localhost:9090",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/abc/de",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "__vm_filepath",
|
Name: "__vm_filepath",
|
||||||
Value: "",
|
Value: "",
|
||||||
|
@ -881,26 +903,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar:1234",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/metrics",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "foo.bar:1234",
|
Value: "foo.bar:1234",
|
||||||
|
@ -931,26 +933,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar:1234",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/metrics",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "foo.bar:1234",
|
Value: "foo.bar:1234",
|
||||||
|
@ -1015,30 +997,6 @@ scrape_configs:
|
||||||
HonorTimestamps: false,
|
HonorTimestamps: false,
|
||||||
DenyRedirects: true,
|
DenyRedirects: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/foo/bar",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__param_p",
|
|
||||||
Value: "x&y",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "https",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "54s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "5s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "foo.bar:443",
|
Value: "foo.bar:443",
|
||||||
|
@ -1065,30 +1023,6 @@ scrape_configs:
|
||||||
HonorTimestamps: false,
|
HonorTimestamps: false,
|
||||||
DenyRedirects: true,
|
DenyRedirects: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "aaa",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/foo/bar",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__param_p",
|
|
||||||
Value: "x&y",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "https",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "54s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "5s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "aaa:443",
|
Value: "aaa:443",
|
||||||
|
@ -1113,26 +1047,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: 8 * time.Second,
|
ScrapeTimeout: 8 * time.Second,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "1.2.3.4",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/metrics",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "8s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "8s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "1.2.3.4:80",
|
Value: "1.2.3.4:80",
|
||||||
|
@ -1155,26 +1069,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: 8 * time.Second,
|
ScrapeTimeout: 8 * time.Second,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foobar",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/metrics",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "8s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "8s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "foobar:80",
|
Value: "foobar:80",
|
||||||
|
@ -1231,30 +1125,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar:1234",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/metrics",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__param_x",
|
|
||||||
Value: "keep_me",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "hash",
|
Name: "hash",
|
||||||
Value: "82",
|
Value: "82",
|
||||||
|
@ -1311,30 +1181,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar:1234",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/abc.de",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__param_a",
|
|
||||||
Value: "b",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "mailto",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "fake.addr",
|
Value: "fake.addr",
|
||||||
|
@ -1376,10 +1222,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar:1234",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "foo.bar:1234",
|
Value: "foo.bar:1234",
|
||||||
|
@ -1410,26 +1252,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar:1234",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/metrics",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "foo.bar:1234",
|
Value: "foo.bar:1234",
|
||||||
|
@ -1456,26 +1278,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar:1234",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/metrics",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "foo.bar:1234",
|
Value: "foo.bar:1234",
|
||||||
|
@ -1502,26 +1304,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar:1234",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/metrics",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "foo.bar:1234",
|
Value: "foo.bar:1234",
|
||||||
|
@ -1562,30 +1344,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "pp",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/metrics",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__param_a",
|
|
||||||
Value: "c",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "foo",
|
Name: "foo",
|
||||||
Value: "bar",
|
Value: "bar",
|
||||||
|
@ -1677,42 +1435,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "127.0.0.1:9116",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/snmp",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__param_module",
|
|
||||||
Value: "if_mib",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__param_target",
|
|
||||||
Value: "192.168.1.2",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__series_limit__",
|
|
||||||
Value: "1234",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__stream_parse__",
|
|
||||||
Value: "true",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "192.168.1.2",
|
Value: "192.168.1.2",
|
||||||
|
@ -1749,26 +1471,6 @@ scrape_configs:
|
||||||
ScrapeTimeout: defaultScrapeTimeout,
|
ScrapeTimeout: defaultScrapeTimeout,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar:1234",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "metricspath",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "1m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "10s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "foo.bar:1234",
|
Value: "foo.bar:1234",
|
||||||
|
@ -1791,6 +1493,7 @@ scrape_configs:
|
||||||
scrape_interval: 1w
|
scrape_interval: 1w
|
||||||
scrape_align_interval: 1d
|
scrape_align_interval: 1d
|
||||||
scrape_offset: 2d
|
scrape_offset: 2d
|
||||||
|
no_stale_markers: true
|
||||||
static_configs:
|
static_configs:
|
||||||
- targets: ["foo.bar:1234"]
|
- targets: ["foo.bar:1234"]
|
||||||
`, []*ScrapeWork{
|
`, []*ScrapeWork{
|
||||||
|
@ -1801,27 +1504,8 @@ scrape_configs:
|
||||||
ScrapeAlignInterval: time.Hour * 24,
|
ScrapeAlignInterval: time.Hour * 24,
|
||||||
ScrapeOffset: time.Hour * 24 * 2,
|
ScrapeOffset: time.Hour * 24 * 2,
|
||||||
HonorTimestamps: true,
|
HonorTimestamps: true,
|
||||||
|
NoStaleMarkers: true,
|
||||||
Labels: []prompbmarshal.Label{
|
Labels: []prompbmarshal.Label{
|
||||||
{
|
|
||||||
Name: "__address__",
|
|
||||||
Value: "foo.bar:1234",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__metrics_path__",
|
|
||||||
Value: "/metrics",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scheme__",
|
|
||||||
Value: "http",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_interval__",
|
|
||||||
Value: "168h0m0s",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "__scrape_timeout__",
|
|
||||||
Value: "24h0m0s",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Name: "instance",
|
Name: "instance",
|
||||||
Value: "foo.bar:1234",
|
Value: "foo.bar:1234",
|
||||||
|
|
|
@ -36,7 +36,6 @@ var (
|
||||||
"See also -promscrape.suppressScrapeErrorsDelay")
|
"See also -promscrape.suppressScrapeErrorsDelay")
|
||||||
suppressScrapeErrorsDelay = flag.Duration("promscrape.suppressScrapeErrorsDelay", 0, "The delay for suppressing repeated scrape errors logging per each scrape targets. "+
|
suppressScrapeErrorsDelay = flag.Duration("promscrape.suppressScrapeErrorsDelay", 0, "The delay for suppressing repeated scrape errors logging per each scrape targets. "+
|
||||||
"This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors")
|
"This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors")
|
||||||
noStaleMarkers = flag.Bool("promscrape.noStaleMarkers", false, "Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series")
|
|
||||||
seriesLimitPerTarget = flag.Int("promscrape.seriesLimitPerTarget", 0, "Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info")
|
seriesLimitPerTarget = flag.Int("promscrape.seriesLimitPerTarget", 0, "Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info")
|
||||||
minResponseSizeForStreamParse = flagutil.NewBytes("promscrape.minResponseSizeForStreamParse", 1e6, "The minimum target response size for automatic switching to stream parsing mode, which can reduce memory usage. See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode")
|
minResponseSizeForStreamParse = flagutil.NewBytes("promscrape.minResponseSizeForStreamParse", 1e6, "The minimum target response size for automatic switching to stream parsing mode, which can reduce memory usage. See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode")
|
||||||
)
|
)
|
||||||
|
@ -68,29 +67,29 @@ type ScrapeWork struct {
|
||||||
// OriginalLabels contains original labels before relabeling.
|
// OriginalLabels contains original labels before relabeling.
|
||||||
//
|
//
|
||||||
// These labels are needed for relabeling troubleshooting at /targets page.
|
// These labels are needed for relabeling troubleshooting at /targets page.
|
||||||
|
//
|
||||||
|
// OriginalLabels are sorted by name.
|
||||||
OriginalLabels []prompbmarshal.Label
|
OriginalLabels []prompbmarshal.Label
|
||||||
|
|
||||||
// Labels to add to the scraped metrics.
|
// Labels to add to the scraped metrics.
|
||||||
//
|
//
|
||||||
// The list contains at least the following labels according to https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
|
// The list contains at least the following labels according to https://www.robustperception.io/life-of-a-label/
|
||||||
//
|
//
|
||||||
// * job
|
// * job
|
||||||
// * __address__
|
// * instance
|
||||||
// * __scheme__
|
|
||||||
// * __metrics_path__
|
|
||||||
// * __scrape_interval__
|
|
||||||
// * __scrape_timeout__
|
|
||||||
// * __param_<name>
|
|
||||||
// * __meta_*
|
|
||||||
// * user-defined labels set via `relabel_configs` section in `scrape_config`
|
// * user-defined labels set via `relabel_configs` section in `scrape_config`
|
||||||
//
|
//
|
||||||
// See also https://prometheus.io/docs/concepts/jobs_instances/
|
// See also https://prometheus.io/docs/concepts/jobs_instances/
|
||||||
|
//
|
||||||
|
// Labels are sorted by name.
|
||||||
Labels []prompbmarshal.Label
|
Labels []prompbmarshal.Label
|
||||||
|
|
||||||
// ExternalLabels contains labels from global->external_labels section of -promscrape.config
|
// ExternalLabels contains labels from global->external_labels section of -promscrape.config
|
||||||
//
|
//
|
||||||
// These labels are added to scraped metrics after the relabeling.
|
// These labels are added to scraped metrics after the relabeling.
|
||||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3137
|
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3137
|
||||||
|
//
|
||||||
|
// ExternalLabels are sorted by name.
|
||||||
ExternalLabels []prompbmarshal.Label
|
ExternalLabels []prompbmarshal.Label
|
||||||
|
|
||||||
// ProxyURL HTTP proxy url
|
// ProxyURL HTTP proxy url
|
||||||
|
@ -126,6 +125,10 @@ type ScrapeWork struct {
|
||||||
// Optional limit on the number of unique series the scrape target can expose.
|
// Optional limit on the number of unique series the scrape target can expose.
|
||||||
SeriesLimit int
|
SeriesLimit int
|
||||||
|
|
||||||
|
// Whether to process stale markers for the given target.
|
||||||
|
// See https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers
|
||||||
|
NoStaleMarkers bool
|
||||||
|
|
||||||
//The Tenant Info
|
//The Tenant Info
|
||||||
AuthToken *auth.Token
|
AuthToken *auth.Token
|
||||||
|
|
||||||
|
@ -148,12 +151,12 @@ func (sw *ScrapeWork) key() string {
|
||||||
key := fmt.Sprintf("JobNameOriginal=%s, ScrapeURL=%s, ScrapeInterval=%s, ScrapeTimeout=%s, HonorLabels=%v, HonorTimestamps=%v, DenyRedirects=%v, Labels=%s, "+
|
key := fmt.Sprintf("JobNameOriginal=%s, ScrapeURL=%s, ScrapeInterval=%s, ScrapeTimeout=%s, HonorLabels=%v, HonorTimestamps=%v, DenyRedirects=%v, Labels=%s, "+
|
||||||
"ExternalLabels=%s, "+
|
"ExternalLabels=%s, "+
|
||||||
"ProxyURL=%s, ProxyAuthConfig=%s, AuthConfig=%s, MetricRelabelConfigs=%s, SampleLimit=%d, DisableCompression=%v, DisableKeepAlive=%v, StreamParse=%v, "+
|
"ProxyURL=%s, ProxyAuthConfig=%s, AuthConfig=%s, MetricRelabelConfigs=%s, SampleLimit=%d, DisableCompression=%v, DisableKeepAlive=%v, StreamParse=%v, "+
|
||||||
"ScrapeAlignInterval=%s, ScrapeOffset=%s, SeriesLimit=%d",
|
"ScrapeAlignInterval=%s, ScrapeOffset=%s, SeriesLimit=%d, NoStaleMarkers=%v",
|
||||||
sw.jobNameOriginal, sw.ScrapeURL, sw.ScrapeInterval, sw.ScrapeTimeout, sw.HonorLabels, sw.HonorTimestamps, sw.DenyRedirects, sw.LabelsString(),
|
sw.jobNameOriginal, sw.ScrapeURL, sw.ScrapeInterval, sw.ScrapeTimeout, sw.HonorLabels, sw.HonorTimestamps, sw.DenyRedirects, sw.LabelsString(),
|
||||||
promLabelsString(sw.ExternalLabels),
|
promLabelsString(sw.ExternalLabels),
|
||||||
sw.ProxyURL.String(), sw.ProxyAuthConfig.String(),
|
sw.ProxyURL.String(), sw.ProxyAuthConfig.String(),
|
||||||
sw.AuthConfig.String(), sw.MetricRelabelConfigs.String(), sw.SampleLimit, sw.DisableCompression, sw.DisableKeepAlive, sw.StreamParse,
|
sw.AuthConfig.String(), sw.MetricRelabelConfigs.String(), sw.SampleLimit, sw.DisableCompression, sw.DisableKeepAlive, sw.StreamParse,
|
||||||
sw.ScrapeAlignInterval, sw.ScrapeOffset, sw.SeriesLimit)
|
sw.ScrapeAlignInterval, sw.ScrapeOffset, sw.SeriesLimit, sw.NoStaleMarkers)
|
||||||
return key
|
return key
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -164,8 +167,7 @@ func (sw *ScrapeWork) Job() string {
|
||||||
|
|
||||||
// LabelsString returns labels in Prometheus format for the given sw.
|
// LabelsString returns labels in Prometheus format for the given sw.
|
||||||
func (sw *ScrapeWork) LabelsString() string {
|
func (sw *ScrapeWork) LabelsString() string {
|
||||||
labelsFinalized := promrelabel.FinalizeLabels(nil, sw.Labels)
|
return promLabelsString(sw.Labels)
|
||||||
return promLabelsString(labelsFinalized)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func promLabelsString(labels []prompbmarshal.Label) string {
|
func promLabelsString(labels []prompbmarshal.Label) string {
|
||||||
|
@ -443,7 +445,7 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
|
||||||
wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
|
wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
|
||||||
lastScrape := sw.loadLastScrape()
|
lastScrape := sw.loadLastScrape()
|
||||||
bodyString := bytesutil.ToUnsafeString(body.B)
|
bodyString := bytesutil.ToUnsafeString(body.B)
|
||||||
areIdenticalSeries := *noStaleMarkers || parser.AreIdenticalSeriesFast(lastScrape, bodyString)
|
areIdenticalSeries := sw.Config.NoStaleMarkers || parser.AreIdenticalSeriesFast(lastScrape, bodyString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
up = 0
|
up = 0
|
||||||
scrapesFailed.Inc()
|
scrapesFailed.Inc()
|
||||||
|
@ -595,7 +597,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
|
||||||
}
|
}
|
||||||
lastScrape := sw.loadLastScrape()
|
lastScrape := sw.loadLastScrape()
|
||||||
bodyString := bytesutil.ToUnsafeString(sbr.body)
|
bodyString := bytesutil.ToUnsafeString(sbr.body)
|
||||||
areIdenticalSeries := *noStaleMarkers || parser.AreIdenticalSeriesFast(lastScrape, bodyString)
|
areIdenticalSeries := sw.Config.NoStaleMarkers || parser.AreIdenticalSeriesFast(lastScrape, bodyString)
|
||||||
|
|
||||||
scrapedSamples.Update(float64(samplesScraped))
|
scrapedSamples.Update(float64(samplesScraped))
|
||||||
endTimestamp := time.Now().UnixNano() / 1e6
|
endTimestamp := time.Now().UnixNano() / 1e6
|
||||||
|
@ -743,7 +745,7 @@ func (sw *scrapeWork) applySeriesLimit(wc *writeRequestCtx) int {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp int64, addAutoSeries bool) {
|
func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp int64, addAutoSeries bool) {
|
||||||
if *noStaleMarkers {
|
if sw.Config.NoStaleMarkers {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
bodyString := lastScrape
|
bodyString := lastScrape
|
||||||
|
@ -834,11 +836,9 @@ func (sw *scrapeWork) addRowToTimeseries(wc *writeRequestCtx, r *parser.Row, tim
|
||||||
labelsLen := len(wc.labels)
|
labelsLen := len(wc.labels)
|
||||||
wc.labels = appendLabels(wc.labels, r.Metric, r.Tags, sw.Config.Labels, sw.Config.HonorLabels)
|
wc.labels = appendLabels(wc.labels, r.Metric, r.Tags, sw.Config.Labels, sw.Config.HonorLabels)
|
||||||
if needRelabel {
|
if needRelabel {
|
||||||
wc.labels = sw.Config.MetricRelabelConfigs.Apply(wc.labels, labelsLen, true)
|
wc.labels = sw.Config.MetricRelabelConfigs.Apply(wc.labels, labelsLen)
|
||||||
} else {
|
|
||||||
wc.labels = promrelabel.FinalizeLabels(wc.labels[:labelsLen], wc.labels[labelsLen:])
|
|
||||||
promrelabel.SortLabels(wc.labels[labelsLen:])
|
|
||||||
}
|
}
|
||||||
|
wc.labels = promrelabel.FinalizeLabels(wc.labels[:labelsLen], wc.labels[labelsLen:])
|
||||||
if len(wc.labels) == labelsLen {
|
if len(wc.labels) == labelsLen {
|
||||||
// Skip row without labels.
|
// Skip row without labels.
|
||||||
return
|
return
|
||||||
|
|
|
@ -196,8 +196,7 @@ func (tsm *targetStatusMap) WriteActiveTargetsJSON(w io.Writer) {
|
||||||
fmt.Fprintf(w, `{"discoveredLabels":`)
|
fmt.Fprintf(w, `{"discoveredLabels":`)
|
||||||
writeLabelsJSON(w, ts.sw.Config.OriginalLabels)
|
writeLabelsJSON(w, ts.sw.Config.OriginalLabels)
|
||||||
fmt.Fprintf(w, `,"labels":`)
|
fmt.Fprintf(w, `,"labels":`)
|
||||||
labelsFinalized := promrelabel.FinalizeLabels(nil, ts.sw.Config.Labels)
|
writeLabelsJSON(w, ts.sw.Config.Labels)
|
||||||
writeLabelsJSON(w, labelsFinalized)
|
|
||||||
fmt.Fprintf(w, `,"scrapePool":%q`, ts.sw.Config.Job())
|
fmt.Fprintf(w, `,"scrapePool":%q`, ts.sw.Config.Job())
|
||||||
fmt.Fprintf(w, `,"scrapeUrl":%q`, ts.sw.Config.ScrapeURL)
|
fmt.Fprintf(w, `,"scrapeUrl":%q`, ts.sw.Config.ScrapeURL)
|
||||||
errMsg := ""
|
errMsg := ""
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
GO_VERSION ?=1.19.0
|
GO_VERSION ?=1.19.2
|
||||||
SNAP_BUILDER_IMAGE := local/snap-builder:2.0.0-$(shell echo $(GO_VERSION) | tr :/ __)
|
SNAP_BUILDER_IMAGE := local/snap-builder:2.0.0-$(shell echo $(GO_VERSION) | tr :/ __)
|
||||||
|
|
||||||
|
|
||||||
|
|
12
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
generated
vendored
12
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
generated
vendored
|
@ -1,5 +1,17 @@
|
||||||
# Release History
|
# Release History
|
||||||
|
|
||||||
|
## 0.5.1 (2022-10-11)
|
||||||
|
|
||||||
|
### Bugs Fixed
|
||||||
|
|
||||||
|
* `GetSASURL()`: for container and blob clients, don't add a forward slash before the query string
|
||||||
|
* Fixed issue [#19249](https://github.com/Azure/azure-sdk-for-go/issues/19249) by increasing service version to '2020-02-10'.
|
||||||
|
|
||||||
|
### Other Changes
|
||||||
|
|
||||||
|
* Improved docs for client constructors.
|
||||||
|
* Updating azcore version to 1.1.4
|
||||||
|
|
||||||
## 0.5.0 (2022-09-29)
|
## 0.5.0 (2022-09-29)
|
||||||
|
|
||||||
### Breaking Changes
|
### Breaking Changes
|
||||||
|
|
39
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
generated
vendored
39
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
generated
vendored
|
@ -28,36 +28,49 @@ type ClientOptions struct {
|
||||||
// Client represents a client to an Azure Storage append blob;
|
// Client represents a client to an Azure Storage append blob;
|
||||||
type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient]
|
type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient]
|
||||||
|
|
||||||
// NewClient creates an AppendBlobClient with the specified URL, Azure AD credential, and options.
|
// NewClient creates an instance of Client with the specified values.
|
||||||
func NewClient(blobURL string, cred azcore.TokenCredential, o *ClientOptions) (*Client, error) {
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
|
||||||
|
// - cred - an Azure AD credential, typically obtained via the azidentity module
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
|
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
||||||
conOptions := shared.GetClientOptions(o)
|
conOptions := shared.GetClientOptions(options)
|
||||||
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
|
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
|
||||||
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
||||||
|
|
||||||
return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil
|
return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithNoCredential creates an AppendBlobClient with the specified URL and options.
|
// NewClientWithNoCredential creates an instance of Client with the specified values.
|
||||||
func NewClientWithNoCredential(blobURL string, o *ClientOptions) (*Client, error) {
|
// This is used to anonymously access a blob or with a shared access signature (SAS) token.
|
||||||
conOptions := shared.GetClientOptions(o)
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt?<sas token>
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
|
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
|
||||||
|
conOptions := shared.GetClientOptions(options)
|
||||||
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
||||||
|
|
||||||
return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil
|
return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithSharedKeyCredential creates an AppendBlobClient with the specified URL, shared key, and options.
|
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
|
||||||
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, o *ClientOptions) (*Client, error) {
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
|
||||||
|
// - cred - a SharedKeyCredential created with the matching blob's storage account and access key
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
|
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
||||||
conOptions := shared.GetClientOptions(o)
|
conOptions := shared.GetClientOptions(options)
|
||||||
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
|
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
|
||||||
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
||||||
|
|
||||||
return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil
|
return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientFromConnectionString creates Client from a connection String
|
// NewClientFromConnectionString creates an instance of Client with the specified values.
|
||||||
func NewClientFromConnectionString(connectionString, containerName, blobName string, o *ClientOptions) (*Client, error) {
|
// - connectionString - a connection string for the desired storage account
|
||||||
|
// - containerName - the name of the container within the storage account
|
||||||
|
// - blobName - the name of the blob within the container
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
|
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
|
||||||
parsed, err := shared.ParseConnectionString(connectionString)
|
parsed, err := shared.ParseConnectionString(connectionString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -69,10 +82,10 @@ func NewClientFromConnectionString(connectionString, containerName, blobName str
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, o)
|
return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
return NewClientWithNoCredential(parsed.ServiceURL, o)
|
return NewClientWithNoCredential(parsed.ServiceURL, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlobClient returns the embedded blob client for this AppendBlob client.
|
// BlobClient returns the embedded blob client for this AppendBlob client.
|
||||||
|
|
28
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go
generated
vendored
28
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go
generated
vendored
|
@ -11,7 +11,6 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -33,7 +32,10 @@ type ClientOptions struct {
|
||||||
// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
|
// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
|
||||||
type Client base.Client[generated.BlobClient]
|
type Client base.Client[generated.BlobClient]
|
||||||
|
|
||||||
// NewClient creates a Client object using the specified URL, Azure AD credential, and options.
|
// NewClient creates an instance of Client with the specified values.
|
||||||
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
|
||||||
|
// - cred - an Azure AD credential, typically obtained via the azidentity module
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
|
@ -43,7 +45,10 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
|
||||||
return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil
|
return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithNoCredential creates a Client object using the specified URL and options.
|
// NewClientWithNoCredential creates an instance of Client with the specified values.
|
||||||
|
// This is used to anonymously access a blob or with a shared access signature (SAS) token.
|
||||||
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt?<sas token>
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
|
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
||||||
|
@ -51,7 +56,10 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client,
|
||||||
return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil
|
return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithSharedKeyCredential creates a Client object using the specified URL, shared key, and options.
|
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
|
||||||
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
|
||||||
|
// - cred - a SharedKeyCredential created with the matching blob's storage account and access key
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
|
@ -61,7 +69,11 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential,
|
||||||
return (*Client)(base.NewBlobClient(blobURL, pl, cred)), nil
|
return (*Client)(base.NewBlobClient(blobURL, pl, cred)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientFromConnectionString creates Client from a connection String
|
// NewClientFromConnectionString creates an instance of Client with the specified values.
|
||||||
|
// - connectionString - a connection string for the desired storage account
|
||||||
|
// - containerName - the name of the container within the storage account
|
||||||
|
// - blobName - the name of the blob within the container
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
|
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
|
||||||
parsed, err := shared.ParseConnectionString(connectionString)
|
parsed, err := shared.ParseConnectionString(connectionString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -261,11 +273,7 @@ func (b *Client) GetSASURL(permissions sas.BlobPermissions, start time.Time, exp
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
endpoint := b.URL()
|
endpoint := b.URL() + "?" + qps.Encode()
|
||||||
if !strings.HasSuffix(endpoint, "/") {
|
|
||||||
endpoint += "/"
|
|
||||||
}
|
|
||||||
endpoint += "?" + qps.Encode()
|
|
||||||
|
|
||||||
return endpoint, nil
|
return endpoint, nil
|
||||||
}
|
}
|
||||||
|
|
21
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go
generated
vendored
21
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go
generated
vendored
|
@ -35,7 +35,10 @@ type ClientOptions struct {
|
||||||
// Client defines a set of operations applicable to block blobs.
|
// Client defines a set of operations applicable to block blobs.
|
||||||
type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient]
|
type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient]
|
||||||
|
|
||||||
// NewClient creates a Client object using the specified URL, Azure AD credential, and options.
|
// NewClient creates an instance of Client with the specified values.
|
||||||
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
|
||||||
|
// - cred - an Azure AD credential, typically obtained via the azidentity module
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
|
@ -45,7 +48,10 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
|
||||||
return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil
|
return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithNoCredential creates a Client object using the specified URL and options.
|
// NewClientWithNoCredential creates an instance of Client with the specified values.
|
||||||
|
// This is used to anonymously access a blob or with a shared access signature (SAS) token.
|
||||||
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt?<sas token>
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
|
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
||||||
|
@ -53,7 +59,10 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client,
|
||||||
return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil
|
return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithSharedKeyCredential creates a Client object using the specified URL, shared key, and options.
|
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
|
||||||
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
|
||||||
|
// - cred - a SharedKeyCredential created with the matching blob's storage account and access key
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
|
@ -63,7 +72,11 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden
|
||||||
return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil
|
return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientFromConnectionString creates Client from a connection String
|
// NewClientFromConnectionString creates an instance of Client with the specified values.
|
||||||
|
// - connectionString - a connection string for the desired storage account
|
||||||
|
// - containerName - the name of the container within the storage account
|
||||||
|
// - blobName - the name of the blob within the container
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
|
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
|
||||||
parsed, err := shared.ParseConnectionString(connectionString)
|
parsed, err := shared.ParseConnectionString(connectionString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
19
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go
generated
vendored
19
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go
generated
vendored
|
@ -27,7 +27,10 @@ type Client struct {
|
||||||
svc *service.Client
|
svc *service.Client
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClient creates a BlobClient object using the specified URL, Azure AD credential, and options.
|
// NewClient creates an instance of Client with the specified values.
|
||||||
|
// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/
|
||||||
|
// - cred - an Azure AD credential, typically obtained via the azidentity module
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
||||||
var clientOptions *service.ClientOptions
|
var clientOptions *service.ClientOptions
|
||||||
if options != nil {
|
if options != nil {
|
||||||
|
@ -43,7 +46,10 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithNoCredential creates a BlobClient object using the specified URL and options.
|
// NewClientWithNoCredential creates an instance of Client with the specified values.
|
||||||
|
// This is used to anonymously access a storage account or with a shared access signature (SAS) token.
|
||||||
|
// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/?<sas token>
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
|
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
|
||||||
var clientOptions *service.ClientOptions
|
var clientOptions *service.ClientOptions
|
||||||
if options != nil {
|
if options != nil {
|
||||||
|
@ -59,7 +65,10 @@ func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Clie
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithSharedKeyCredential creates a BlobClient object using the specified URL, shared key, and options.
|
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
|
||||||
|
// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/
|
||||||
|
// - cred - a SharedKeyCredential created with the matching storage account and access key
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
||||||
svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, (*service.ClientOptions)(options))
|
svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, (*service.ClientOptions)(options))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -71,7 +80,9 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientFromConnectionString creates BlobClient from a connection String
|
// NewClientFromConnectionString creates an instance of Client with the specified values.
|
||||||
|
// - connectionString - a connection string for the desired storage account
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
|
func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
|
||||||
if options == nil {
|
if options == nil {
|
||||||
options = &ClientOptions{}
|
options = &ClientOptions{}
|
||||||
|
|
27
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go
generated
vendored
27
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go
generated
vendored
|
@ -10,7 +10,6 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||||
|
@ -35,7 +34,10 @@ type ClientOptions struct {
|
||||||
// Client represents a URL to the Azure Storage container allowing you to manipulate its blobs.
|
// Client represents a URL to the Azure Storage container allowing you to manipulate its blobs.
|
||||||
type Client base.Client[generated.ContainerClient]
|
type Client base.Client[generated.ContainerClient]
|
||||||
|
|
||||||
// NewClient creates a Client object using the specified URL, Azure AD credential, and options.
|
// NewClient creates an instance of Client with the specified values.
|
||||||
|
// - containerURL - the URL of the container e.g. https://<account>.blob.core.windows.net/container
|
||||||
|
// - cred - an Azure AD credential, typically obtained via the azidentity module
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
|
@ -45,7 +47,10 @@ func NewClient(containerURL string, cred azcore.TokenCredential, options *Client
|
||||||
return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil
|
return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithNoCredential creates a Client object using the specified URL and options.
|
// NewClientWithNoCredential creates an instance of Client with the specified values.
|
||||||
|
// This is used to anonymously access a container or with a shared access signature (SAS) token.
|
||||||
|
// - containerURL - the URL of the container e.g. https://<account>.blob.core.windows.net/container?<sas token>
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) {
|
func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) {
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
||||||
|
@ -53,7 +58,10 @@ func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Cl
|
||||||
return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil
|
return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithSharedKeyCredential creates a Client object using the specified URL, shared key, and options.
|
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
|
||||||
|
// - containerURL - the URL of the container e.g. https://<account>.blob.core.windows.net/container
|
||||||
|
// - cred - a SharedKeyCredential created with the matching container's storage account and access key
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
|
@ -63,7 +71,10 @@ func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCreden
|
||||||
return (*Client)(base.NewContainerClient(containerURL, pl, cred)), nil
|
return (*Client)(base.NewContainerClient(containerURL, pl, cred)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientFromConnectionString creates a Client object using connection string of an account
|
// NewClientFromConnectionString creates an instance of Client with the specified values.
|
||||||
|
// - connectionString - a connection string for the desired storage account
|
||||||
|
// - containerName - the name of the container within the storage account
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*Client, error) {
|
func NewClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*Client, error) {
|
||||||
parsed, err := shared.ParseConnectionString(connectionString)
|
parsed, err := shared.ParseConnectionString(connectionString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -317,11 +328,7 @@ func (c *Client) GetSASURL(permissions sas.ContainerPermissions, start time.Time
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
endpoint := c.URL()
|
endpoint := c.URL() + "?" + qps.Encode()
|
||||||
if !strings.HasSuffix(endpoint, "/") {
|
|
||||||
endpoint += "/"
|
|
||||||
}
|
|
||||||
endpoint += "?" + qps.Encode()
|
|
||||||
|
|
||||||
return endpoint, nil
|
return endpoint, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,12 +30,12 @@ type UserDelegationCredential struct {
|
||||||
userDelegationKey UserDelegationKey
|
userDelegationKey UserDelegationKey
|
||||||
}
|
}
|
||||||
|
|
||||||
// AccountName returns the Storage account's Name
|
// getAccountName returns the Storage account's Name
|
||||||
func (f *UserDelegationCredential) getAccountName() string {
|
func (f *UserDelegationCredential) getAccountName() string {
|
||||||
return f.accountName
|
return f.accountName
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUDKParams is a helper method for accessing the user delegation key parameters outside of this package.
|
// GetAccountName is a helper method for accessing the user delegation key parameters outside this package.
|
||||||
func GetAccountName(udc *UserDelegationCredential) string {
|
func GetAccountName(udc *UserDelegationCredential) string {
|
||||||
return udc.getAccountName()
|
return udc.getAccountName()
|
||||||
}
|
}
|
||||||
|
@ -48,17 +48,17 @@ func (f *UserDelegationCredential) computeHMACSHA256(message string) (string, er
|
||||||
return base64.StdEncoding.EncodeToString(h.Sum(nil)), err
|
return base64.StdEncoding.EncodeToString(h.Sum(nil)), err
|
||||||
}
|
}
|
||||||
|
|
||||||
// ComputeUDCHMACSHA256 is a helper method for computing the signed string outside of this package.
|
// ComputeUDCHMACSHA256 is a helper method for computing the signed string outside this package.
|
||||||
func ComputeUDCHMACSHA256(udc *UserDelegationCredential, message string) (string, error) {
|
func ComputeUDCHMACSHA256(udc *UserDelegationCredential, message string) (string, error) {
|
||||||
return udc.computeHMACSHA256(message)
|
return udc.computeHMACSHA256(message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUDKParams returns UserDelegationKey
|
// getUDKParams returns UserDelegationKey
|
||||||
func (f *UserDelegationCredential) getUDKParams() *UserDelegationKey {
|
func (f *UserDelegationCredential) getUDKParams() *UserDelegationKey {
|
||||||
return &f.userDelegationKey
|
return &f.userDelegationKey
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUDKParams is a helper method for accessing the user delegation key parameters outside of this package.
|
// GetUDKParams is a helper method for accessing the user delegation key parameters outside this package.
|
||||||
func GetUDKParams(udc *UserDelegationCredential) *UserDelegationKey {
|
func GetUDKParams(udc *UserDelegationCredential) *UserDelegationKey {
|
||||||
return udc.getUDKParams()
|
return udc.getUDKParams()
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,5 +8,5 @@ package exported
|
||||||
|
|
||||||
const (
|
const (
|
||||||
ModuleName = "azblob"
|
ModuleName = "azblob"
|
||||||
ModuleVersion = "v0.5.0"
|
ModuleVersion = "v0.5.1"
|
||||||
)
|
)
|
||||||
|
|
24
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go
generated
vendored
24
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go
generated
vendored
|
@ -31,8 +31,10 @@ type ClientOptions struct {
|
||||||
// Client represents a client to an Azure Storage page blob;
|
// Client represents a client to an Azure Storage page blob;
|
||||||
type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient]
|
type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient]
|
||||||
|
|
||||||
// NewClient creates a ServiceClient object using the specified URL, Azure AD credential, and options.
|
// NewClient creates an instance of Client with the specified values.
|
||||||
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
|
||||||
|
// - cred - an Azure AD credential, typically obtained via the azidentity module
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
|
@ -42,8 +44,10 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
|
||||||
return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil
|
return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithNoCredential creates a ServiceClient object using the specified URL and options.
|
// NewClientWithNoCredential creates an instance of Client with the specified values.
|
||||||
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net?<SAS token>
|
// This is used to anonymously access a blob or with a shared access signature (SAS) token.
|
||||||
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt?<sas token>
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
|
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
||||||
|
@ -51,8 +55,10 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client,
|
||||||
return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil
|
return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithSharedKeyCredential creates a ServiceClient object using the specified URL, shared key, and options.
|
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
|
||||||
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
|
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
|
||||||
|
// - cred - a SharedKeyCredential created with the matching blob's storage account and access key
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
|
@ -62,7 +68,11 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden
|
||||||
return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil
|
return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientFromConnectionString creates Client from a connection String
|
// NewClientFromConnectionString creates an instance of Client with the specified values.
|
||||||
|
// - connectionString - a connection string for the desired storage account
|
||||||
|
// - containerName - the name of the container within the storage account
|
||||||
|
// - blobName - the name of the blob within the container
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
|
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
|
||||||
parsed, err := shared.ParseConnectionString(connectionString)
|
parsed, err := shared.ParseConnectionString(connectionString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
2
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go
generated
vendored
2
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go
generated
vendored
|
@ -23,7 +23,7 @@ const (
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Version is the default version encoded in the SAS token.
|
// Version is the default version encoded in the SAS token.
|
||||||
Version = "2019-12-12"
|
Version = "2020-02-10"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TimeFormats ISO 8601 format.
|
// TimeFormats ISO 8601 format.
|
||||||
|
|
26
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go
generated
vendored
26
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go
generated
vendored
|
@ -9,7 +9,6 @@ package service
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
@ -17,6 +16,7 @@ import (
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
|
||||||
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
|
||||||
|
@ -33,8 +33,10 @@ type ClientOptions struct {
|
||||||
// Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers.
|
// Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers.
|
||||||
type Client base.Client[generated.ServiceClient]
|
type Client base.Client[generated.ServiceClient]
|
||||||
|
|
||||||
// NewClient creates a Client object using the specified URL, Azure AD credential, and options.
|
// NewClient creates an instance of Client with the specified values.
|
||||||
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
|
// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/
|
||||||
|
// - cred - an Azure AD credential, typically obtained via the azidentity module
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
|
@ -44,8 +46,10 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp
|
||||||
return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil
|
return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithNoCredential creates a Client object using the specified URL and options.
|
// NewClientWithNoCredential creates an instance of Client with the specified values.
|
||||||
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net?<SAS token>
|
// This is used to anonymously access a storage account or with a shared access signature (SAS) token.
|
||||||
|
// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/?<sas token>
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
|
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
|
||||||
|
@ -53,8 +57,10 @@ func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Clie
|
||||||
return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil
|
return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithSharedKeyCredential creates a Client object using the specified URL, shared key, and options.
|
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
|
||||||
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
|
// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/
|
||||||
|
// - cred - a SharedKeyCredential created with the matching storage account and access key
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
|
||||||
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
authPolicy := exported.NewSharedKeyCredPolicy(cred)
|
||||||
conOptions := shared.GetClientOptions(options)
|
conOptions := shared.GetClientOptions(options)
|
||||||
|
@ -64,8 +70,9 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti
|
||||||
return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil
|
return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientFromConnectionString creates a service client from the given connection string.
|
// NewClientFromConnectionString creates an instance of Client with the specified values.
|
||||||
// nolint
|
// - connectionString - a connection string for the desired storage account
|
||||||
|
// - options - client options; pass nil to accept the default values
|
||||||
func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
|
func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
|
||||||
parsed, err := shared.ParseConnectionString(connectionString)
|
parsed, err := shared.ParseConnectionString(connectionString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -260,6 +267,7 @@ func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.A
|
||||||
|
|
||||||
endpoint := s.URL()
|
endpoint := s.URL()
|
||||||
if !strings.HasSuffix(endpoint, "/") {
|
if !strings.HasSuffix(endpoint, "/") {
|
||||||
|
// add a trailing slash to be consistent with the portal
|
||||||
endpoint += "/"
|
endpoint += "/"
|
||||||
}
|
}
|
||||||
endpoint += "?" + qps.Encode()
|
endpoint += "?" + qps.Encode()
|
||||||
|
|
2
vendor/github.com/urfave/cli/v2/Makefile
generated
vendored
2
vendor/github.com/urfave/cli/v2/Makefile
generated
vendored
|
@ -7,7 +7,7 @@
|
||||||
GO_RUN_BUILD := go run internal/build/build.go
|
GO_RUN_BUILD := go run internal/build/build.go
|
||||||
|
|
||||||
.PHONY: all
|
.PHONY: all
|
||||||
all: generate vet tag-test test check-binary-size tag-check-binary-size gfmrun yamlfmt v2diff
|
all: generate vet test check-binary-size gfmrun yamlfmt v2diff
|
||||||
|
|
||||||
# NOTE: this is a special catch-all rule to run any of the commands
|
# NOTE: this is a special catch-all rule to run any of the commands
|
||||||
# defined in internal/build/build.go with optional arguments passed
|
# defined in internal/build/build.go with optional arguments passed
|
||||||
|
|
2
vendor/github.com/urfave/cli/v2/app.go
generated
vendored
2
vendor/github.com/urfave/cli/v2/app.go
generated
vendored
|
@ -229,9 +229,11 @@ func (a *App) Setup() {
|
||||||
a.flagCategories = newFlagCategories()
|
a.flagCategories = newFlagCategories()
|
||||||
for _, fl := range a.Flags {
|
for _, fl := range a.Flags {
|
||||||
if cf, ok := fl.(CategorizableFlag); ok {
|
if cf, ok := fl.(CategorizableFlag); ok {
|
||||||
|
if cf.GetCategory() != "" {
|
||||||
a.flagCategories.AddFlag(cf.GetCategory(), cf)
|
a.flagCategories.AddFlag(cf.GetCategory(), cf)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if a.Metadata == nil {
|
if a.Metadata == nil {
|
||||||
a.Metadata = make(map[string]interface{})
|
a.Metadata = make(map[string]interface{})
|
||||||
|
|
2
vendor/github.com/urfave/cli/v2/category.go
generated
vendored
2
vendor/github.com/urfave/cli/v2/category.go
generated
vendored
|
@ -102,9 +102,11 @@ func newFlagCategoriesFromFlags(fs []Flag) FlagCategories {
|
||||||
fc := newFlagCategories()
|
fc := newFlagCategories()
|
||||||
for _, fl := range fs {
|
for _, fl := range fs {
|
||||||
if cf, ok := fl.(CategorizableFlag); ok {
|
if cf, ok := fl.(CategorizableFlag); ok {
|
||||||
|
if cf.GetCategory() != "" {
|
||||||
fc.AddFlag(cf.GetCategory(), cf)
|
fc.AddFlag(cf.GetCategory(), cf)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return fc
|
return fc
|
||||||
}
|
}
|
||||||
|
|
18
vendor/github.com/urfave/cli/v2/command.go
generated
vendored
18
vendor/github.com/urfave/cli/v2/command.go
generated
vendored
|
@ -295,15 +295,21 @@ func (c *Command) startApp(ctx *Context) error {
|
||||||
return app.RunAsSubcommand(ctx)
|
return app.RunAsSubcommand(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// VisibleCommands returns a slice of the Commands with Hidden=false
|
||||||
|
func (c *Command) VisibleCommands() []*Command {
|
||||||
|
var ret []*Command
|
||||||
|
for _, command := range c.Subcommands {
|
||||||
|
if !command.Hidden {
|
||||||
|
ret = append(ret, command)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
// VisibleFlagCategories returns a slice containing all the visible flag categories with the flags they contain
|
// VisibleFlagCategories returns a slice containing all the visible flag categories with the flags they contain
|
||||||
func (c *Command) VisibleFlagCategories() []VisibleFlagCategory {
|
func (c *Command) VisibleFlagCategories() []VisibleFlagCategory {
|
||||||
if c.flagCategories == nil {
|
if c.flagCategories == nil {
|
||||||
c.flagCategories = newFlagCategories()
|
c.flagCategories = newFlagCategoriesFromFlags(c.Flags)
|
||||||
for _, fl := range c.Flags {
|
|
||||||
if cf, ok := fl.(CategorizableFlag); ok {
|
|
||||||
c.flagCategories.AddFlag(cf.GetCategory(), cf)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return c.flagCategories.VisibleCategories()
|
return c.flagCategories.VisibleCategories()
|
||||||
}
|
}
|
||||||
|
|
29
vendor/github.com/urfave/cli/v2/flag.go
generated
vendored
29
vendor/github.com/urfave/cli/v2/flag.go
generated
vendored
|
@ -129,6 +129,14 @@ type DocGenerationFlag interface {
|
||||||
GetEnvVars() []string
|
GetEnvVars() []string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DocGenerationSliceFlag extends DocGenerationFlag for slice-based flags.
|
||||||
|
type DocGenerationSliceFlag interface {
|
||||||
|
DocGenerationFlag
|
||||||
|
|
||||||
|
// IsSliceFlag returns true for flags that can be given multiple times.
|
||||||
|
IsSliceFlag() bool
|
||||||
|
}
|
||||||
|
|
||||||
// VisibleFlag is an interface that allows to check if a flag is visible
|
// VisibleFlag is an interface that allows to check if a flag is visible
|
||||||
type VisibleFlag interface {
|
type VisibleFlag interface {
|
||||||
Flag
|
Flag
|
||||||
|
@ -325,24 +333,13 @@ func stringifyFlag(f Flag) string {
|
||||||
|
|
||||||
usageWithDefault := strings.TrimSpace(usage + defaultValueString)
|
usageWithDefault := strings.TrimSpace(usage + defaultValueString)
|
||||||
|
|
||||||
return withEnvHint(df.GetEnvVars(),
|
pn := prefixedNames(df.Names(), placeholder)
|
||||||
fmt.Sprintf("%s\t%s", prefixedNames(df.Names(), placeholder), usageWithDefault))
|
sliceFlag, ok := f.(DocGenerationSliceFlag)
|
||||||
}
|
if ok && sliceFlag.IsSliceFlag() {
|
||||||
|
pn = pn + " [ " + pn + " ]"
|
||||||
func stringifySliceFlag(usage string, names, defaultVals []string) string {
|
|
||||||
placeholder, usage := unquoteUsage(usage)
|
|
||||||
if placeholder == "" {
|
|
||||||
placeholder = defaultPlaceholder
|
|
||||||
}
|
}
|
||||||
|
|
||||||
defaultVal := ""
|
return withEnvHint(df.GetEnvVars(), fmt.Sprintf("%s\t%s", pn, usageWithDefault))
|
||||||
if len(defaultVals) > 0 {
|
|
||||||
defaultVal = fmt.Sprintf(formatDefault("%s"), strings.Join(defaultVals, ", "))
|
|
||||||
}
|
|
||||||
|
|
||||||
usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal))
|
|
||||||
pn := prefixedNames(names, placeholder)
|
|
||||||
return fmt.Sprintf("%s [ %s ]\t%s", pn, pn, usageWithDefault)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasFlag(flags []Flag, fl Flag) bool {
|
func hasFlag(flags []Flag, fl Flag) bool {
|
||||||
|
|
28
vendor/github.com/urfave/cli/v2/flag_float64_slice.go
generated
vendored
28
vendor/github.com/urfave/cli/v2/flag_float64_slice.go
generated
vendored
|
@ -83,7 +83,7 @@ func (f *Float64Slice) Get() interface{} {
|
||||||
// String returns a readable representation of this value
|
// String returns a readable representation of this value
|
||||||
// (for usage defaults)
|
// (for usage defaults)
|
||||||
func (f *Float64SliceFlag) String() string {
|
func (f *Float64SliceFlag) String() string {
|
||||||
return withEnvHint(f.GetEnvVars(), f.stringify())
|
return FlagStringer(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TakesValue returns true if the flag takes a value, otherwise false
|
// TakesValue returns true if the flag takes a value, otherwise false
|
||||||
|
@ -104,10 +104,13 @@ func (f *Float64SliceFlag) GetCategory() string {
|
||||||
// GetValue returns the flags value as string representation and an empty
|
// GetValue returns the flags value as string representation and an empty
|
||||||
// string if the flag takes no value at all.
|
// string if the flag takes no value at all.
|
||||||
func (f *Float64SliceFlag) GetValue() string {
|
func (f *Float64SliceFlag) GetValue() string {
|
||||||
if f.Value != nil {
|
var defaultVals []string
|
||||||
return f.Value.String()
|
if f.Value != nil && len(f.Value.Value()) > 0 {
|
||||||
|
for _, i := range f.Value.Value() {
|
||||||
|
defaultVals = append(defaultVals, strings.TrimRight(strings.TrimRight(fmt.Sprintf("%f", i), "0"), "."))
|
||||||
}
|
}
|
||||||
return ""
|
}
|
||||||
|
return strings.Join(defaultVals, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDefaultText returns the default text for this flag
|
// GetDefaultText returns the default text for this flag
|
||||||
|
@ -123,6 +126,11 @@ func (f *Float64SliceFlag) GetEnvVars() []string {
|
||||||
return f.EnvVars
|
return f.EnvVars
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
func (f *Float64SliceFlag) IsSliceFlag() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// Apply populates the flag given the flag set and environment
|
// Apply populates the flag given the flag set and environment
|
||||||
func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error {
|
func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error {
|
||||||
// apply any default
|
// apply any default
|
||||||
|
@ -169,18 +177,6 @@ func (f *Float64SliceFlag) Get(ctx *Context) []float64 {
|
||||||
return ctx.Float64Slice(f.Name)
|
return ctx.Float64Slice(f.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Float64SliceFlag) stringify() string {
|
|
||||||
var defaultVals []string
|
|
||||||
|
|
||||||
if f.Value != nil && len(f.Value.Value()) > 0 {
|
|
||||||
for _, i := range f.Value.Value() {
|
|
||||||
defaultVals = append(defaultVals, strings.TrimRight(strings.TrimRight(fmt.Sprintf("%f", i), "0"), "."))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunAction executes flag action if set
|
// RunAction executes flag action if set
|
||||||
func (f *Float64SliceFlag) RunAction(c *Context) error {
|
func (f *Float64SliceFlag) RunAction(c *Context) error {
|
||||||
if f.Action != nil {
|
if f.Action != nil {
|
||||||
|
|
4
vendor/github.com/urfave/cli/v2/flag_generic.go
generated
vendored
4
vendor/github.com/urfave/cli/v2/flag_generic.go
generated
vendored
|
@ -62,6 +62,10 @@ func (f *GenericFlag) Apply(set *flag.FlagSet) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, name := range f.Names() {
|
for _, name := range f.Names() {
|
||||||
|
if f.Destination != nil {
|
||||||
|
set.Var(f.Destination, name, f.Usage)
|
||||||
|
continue
|
||||||
|
}
|
||||||
set.Var(f.Value, name, f.Usage)
|
set.Var(f.Value, name, f.Usage)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
27
vendor/github.com/urfave/cli/v2/flag_int64_slice.go
generated
vendored
27
vendor/github.com/urfave/cli/v2/flag_int64_slice.go
generated
vendored
|
@ -84,7 +84,7 @@ func (i *Int64Slice) Get() interface{} {
|
||||||
// String returns a readable representation of this value
|
// String returns a readable representation of this value
|
||||||
// (for usage defaults)
|
// (for usage defaults)
|
||||||
func (f *Int64SliceFlag) String() string {
|
func (f *Int64SliceFlag) String() string {
|
||||||
return withEnvHint(f.GetEnvVars(), f.stringify())
|
return FlagStringer(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TakesValue returns true of the flag takes a value, otherwise false
|
// TakesValue returns true of the flag takes a value, otherwise false
|
||||||
|
@ -105,10 +105,13 @@ func (f *Int64SliceFlag) GetCategory() string {
|
||||||
// GetValue returns the flags value as string representation and an empty
|
// GetValue returns the flags value as string representation and an empty
|
||||||
// string if the flag takes no value at all.
|
// string if the flag takes no value at all.
|
||||||
func (f *Int64SliceFlag) GetValue() string {
|
func (f *Int64SliceFlag) GetValue() string {
|
||||||
if f.Value != nil {
|
var defaultVals []string
|
||||||
return f.Value.String()
|
if f.Value != nil && len(f.Value.Value()) > 0 {
|
||||||
|
for _, i := range f.Value.Value() {
|
||||||
|
defaultVals = append(defaultVals, strconv.FormatInt(i, 10))
|
||||||
}
|
}
|
||||||
return ""
|
}
|
||||||
|
return strings.Join(defaultVals, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDefaultText returns the default text for this flag
|
// GetDefaultText returns the default text for this flag
|
||||||
|
@ -124,6 +127,11 @@ func (f *Int64SliceFlag) GetEnvVars() []string {
|
||||||
return f.EnvVars
|
return f.EnvVars
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
func (f *Int64SliceFlag) IsSliceFlag() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// Apply populates the flag given the flag set and environment
|
// Apply populates the flag given the flag set and environment
|
||||||
func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error {
|
func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error {
|
||||||
// apply any default
|
// apply any default
|
||||||
|
@ -168,17 +176,6 @@ func (f *Int64SliceFlag) Get(ctx *Context) []int64 {
|
||||||
return ctx.Int64Slice(f.Name)
|
return ctx.Int64Slice(f.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Int64SliceFlag) stringify() string {
|
|
||||||
var defaultVals []string
|
|
||||||
if f.Value != nil && len(f.Value.Value()) > 0 {
|
|
||||||
for _, i := range f.Value.Value() {
|
|
||||||
defaultVals = append(defaultVals, strconv.FormatInt(i, 10))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunAction executes flag action if set
|
// RunAction executes flag action if set
|
||||||
func (f *Int64SliceFlag) RunAction(c *Context) error {
|
func (f *Int64SliceFlag) RunAction(c *Context) error {
|
||||||
if f.Action != nil {
|
if f.Action != nil {
|
||||||
|
|
27
vendor/github.com/urfave/cli/v2/flag_int_slice.go
generated
vendored
27
vendor/github.com/urfave/cli/v2/flag_int_slice.go
generated
vendored
|
@ -95,7 +95,7 @@ func (i *IntSlice) Get() interface{} {
|
||||||
// String returns a readable representation of this value
|
// String returns a readable representation of this value
|
||||||
// (for usage defaults)
|
// (for usage defaults)
|
||||||
func (f *IntSliceFlag) String() string {
|
func (f *IntSliceFlag) String() string {
|
||||||
return withEnvHint(f.GetEnvVars(), f.stringify())
|
return FlagStringer(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TakesValue returns true of the flag takes a value, otherwise false
|
// TakesValue returns true of the flag takes a value, otherwise false
|
||||||
|
@ -116,10 +116,13 @@ func (f *IntSliceFlag) GetCategory() string {
|
||||||
// GetValue returns the flags value as string representation and an empty
|
// GetValue returns the flags value as string representation and an empty
|
||||||
// string if the flag takes no value at all.
|
// string if the flag takes no value at all.
|
||||||
func (f *IntSliceFlag) GetValue() string {
|
func (f *IntSliceFlag) GetValue() string {
|
||||||
if f.Value != nil {
|
var defaultVals []string
|
||||||
return f.Value.String()
|
if f.Value != nil && len(f.Value.Value()) > 0 {
|
||||||
|
for _, i := range f.Value.Value() {
|
||||||
|
defaultVals = append(defaultVals, strconv.Itoa(i))
|
||||||
}
|
}
|
||||||
return ""
|
}
|
||||||
|
return strings.Join(defaultVals, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDefaultText returns the default text for this flag
|
// GetDefaultText returns the default text for this flag
|
||||||
|
@ -135,6 +138,11 @@ func (f *IntSliceFlag) GetEnvVars() []string {
|
||||||
return f.EnvVars
|
return f.EnvVars
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
func (f *IntSliceFlag) IsSliceFlag() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// Apply populates the flag given the flag set and environment
|
// Apply populates the flag given the flag set and environment
|
||||||
func (f *IntSliceFlag) Apply(set *flag.FlagSet) error {
|
func (f *IntSliceFlag) Apply(set *flag.FlagSet) error {
|
||||||
// apply any default
|
// apply any default
|
||||||
|
@ -188,17 +196,6 @@ func (f *IntSliceFlag) RunAction(c *Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *IntSliceFlag) stringify() string {
|
|
||||||
var defaultVals []string
|
|
||||||
if f.Value != nil && len(f.Value.Value()) > 0 {
|
|
||||||
for _, i := range f.Value.Value() {
|
|
||||||
defaultVals = append(defaultVals, strconv.Itoa(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IntSlice looks up the value of a local IntSliceFlag, returns
|
// IntSlice looks up the value of a local IntSliceFlag, returns
|
||||||
// nil if not found
|
// nil if not found
|
||||||
func (cCtx *Context) IntSlice(name string) []int {
|
func (cCtx *Context) IntSlice(name string) []int {
|
||||||
|
|
31
vendor/github.com/urfave/cli/v2/flag_string_slice.go
generated
vendored
31
vendor/github.com/urfave/cli/v2/flag_string_slice.go
generated
vendored
|
@ -74,7 +74,7 @@ func (s *StringSlice) Get() interface{} {
|
||||||
// String returns a readable representation of this value
|
// String returns a readable representation of this value
|
||||||
// (for usage defaults)
|
// (for usage defaults)
|
||||||
func (f *StringSliceFlag) String() string {
|
func (f *StringSliceFlag) String() string {
|
||||||
return withEnvHint(f.GetEnvVars(), f.stringify())
|
return FlagStringer(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TakesValue returns true of the flag takes a value, otherwise false
|
// TakesValue returns true of the flag takes a value, otherwise false
|
||||||
|
@ -95,10 +95,15 @@ func (f *StringSliceFlag) GetCategory() string {
|
||||||
// GetValue returns the flags value as string representation and an empty
|
// GetValue returns the flags value as string representation and an empty
|
||||||
// string if the flag takes no value at all.
|
// string if the flag takes no value at all.
|
||||||
func (f *StringSliceFlag) GetValue() string {
|
func (f *StringSliceFlag) GetValue() string {
|
||||||
if f.Value != nil {
|
var defaultVals []string
|
||||||
return f.Value.String()
|
if f.Value != nil && len(f.Value.Value()) > 0 {
|
||||||
|
for _, s := range f.Value.Value() {
|
||||||
|
if len(s) > 0 {
|
||||||
|
defaultVals = append(defaultVals, strconv.Quote(s))
|
||||||
}
|
}
|
||||||
return ""
|
}
|
||||||
|
}
|
||||||
|
return strings.Join(defaultVals, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDefaultText returns the default text for this flag
|
// GetDefaultText returns the default text for this flag
|
||||||
|
@ -114,6 +119,11 @@ func (f *StringSliceFlag) GetEnvVars() []string {
|
||||||
return f.EnvVars
|
return f.EnvVars
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
func (f *StringSliceFlag) IsSliceFlag() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// Apply populates the flag given the flag set and environment
|
// Apply populates the flag given the flag set and environment
|
||||||
func (f *StringSliceFlag) Apply(set *flag.FlagSet) error {
|
func (f *StringSliceFlag) Apply(set *flag.FlagSet) error {
|
||||||
// apply any default
|
// apply any default
|
||||||
|
@ -158,19 +168,6 @@ func (f *StringSliceFlag) Get(ctx *Context) []string {
|
||||||
return ctx.StringSlice(f.Name)
|
return ctx.StringSlice(f.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *StringSliceFlag) stringify() string {
|
|
||||||
var defaultVals []string
|
|
||||||
if f.Value != nil && len(f.Value.Value()) > 0 {
|
|
||||||
for _, s := range f.Value.Value() {
|
|
||||||
if len(s) > 0 {
|
|
||||||
defaultVals = append(defaultVals, strconv.Quote(s))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunAction executes flag action if set
|
// RunAction executes flag action if set
|
||||||
func (f *StringSliceFlag) RunAction(c *Context) error {
|
func (f *StringSliceFlag) RunAction(c *Context) error {
|
||||||
if f.Action != nil {
|
if f.Action != nil {
|
||||||
|
|
27
vendor/github.com/urfave/cli/v2/flag_uint64_slice.go
generated
vendored
27
vendor/github.com/urfave/cli/v2/flag_uint64_slice.go
generated
vendored
|
@ -88,7 +88,7 @@ func (i *Uint64Slice) Get() interface{} {
|
||||||
// String returns a readable representation of this value
|
// String returns a readable representation of this value
|
||||||
// (for usage defaults)
|
// (for usage defaults)
|
||||||
func (f *Uint64SliceFlag) String() string {
|
func (f *Uint64SliceFlag) String() string {
|
||||||
return withEnvHint(f.GetEnvVars(), f.stringify())
|
return FlagStringer(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TakesValue returns true of the flag takes a value, otherwise false
|
// TakesValue returns true of the flag takes a value, otherwise false
|
||||||
|
@ -109,10 +109,13 @@ func (f *Uint64SliceFlag) GetCategory() string {
|
||||||
// GetValue returns the flags value as string representation and an empty
|
// GetValue returns the flags value as string representation and an empty
|
||||||
// string if the flag takes no value at all.
|
// string if the flag takes no value at all.
|
||||||
func (f *Uint64SliceFlag) GetValue() string {
|
func (f *Uint64SliceFlag) GetValue() string {
|
||||||
if f.Value != nil {
|
var defaultVals []string
|
||||||
return f.Value.String()
|
if f.Value != nil && len(f.Value.Value()) > 0 {
|
||||||
|
for _, i := range f.Value.Value() {
|
||||||
|
defaultVals = append(defaultVals, strconv.FormatUint(i, 10))
|
||||||
}
|
}
|
||||||
return ""
|
}
|
||||||
|
return strings.Join(defaultVals, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDefaultText returns the default text for this flag
|
// GetDefaultText returns the default text for this flag
|
||||||
|
@ -128,6 +131,11 @@ func (f *Uint64SliceFlag) GetEnvVars() []string {
|
||||||
return f.EnvVars
|
return f.EnvVars
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
func (f *Uint64SliceFlag) IsSliceFlag() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// Apply populates the flag given the flag set and environment
|
// Apply populates the flag given the flag set and environment
|
||||||
func (f *Uint64SliceFlag) Apply(set *flag.FlagSet) error {
|
func (f *Uint64SliceFlag) Apply(set *flag.FlagSet) error {
|
||||||
// apply any default
|
// apply any default
|
||||||
|
@ -172,17 +180,6 @@ func (f *Uint64SliceFlag) Get(ctx *Context) []uint64 {
|
||||||
return ctx.Uint64Slice(f.Name)
|
return ctx.Uint64Slice(f.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Uint64SliceFlag) stringify() string {
|
|
||||||
var defaultVals []string
|
|
||||||
if f.Value != nil && len(f.Value.Value()) > 0 {
|
|
||||||
for _, i := range f.Value.Value() {
|
|
||||||
defaultVals = append(defaultVals, strconv.FormatUint(i, 10))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint64Slice looks up the value of a local Uint64SliceFlag, returns
|
// Uint64Slice looks up the value of a local Uint64SliceFlag, returns
|
||||||
// nil if not found
|
// nil if not found
|
||||||
func (cCtx *Context) Uint64Slice(name string) []uint64 {
|
func (cCtx *Context) Uint64Slice(name string) []uint64 {
|
||||||
|
|
27
vendor/github.com/urfave/cli/v2/flag_uint_slice.go
generated
vendored
27
vendor/github.com/urfave/cli/v2/flag_uint_slice.go
generated
vendored
|
@ -99,7 +99,7 @@ func (i *UintSlice) Get() interface{} {
|
||||||
// String returns a readable representation of this value
|
// String returns a readable representation of this value
|
||||||
// (for usage defaults)
|
// (for usage defaults)
|
||||||
func (f *UintSliceFlag) String() string {
|
func (f *UintSliceFlag) String() string {
|
||||||
return withEnvHint(f.GetEnvVars(), f.stringify())
|
return FlagStringer(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TakesValue returns true of the flag takes a value, otherwise false
|
// TakesValue returns true of the flag takes a value, otherwise false
|
||||||
|
@ -120,10 +120,13 @@ func (f *UintSliceFlag) GetCategory() string {
|
||||||
// GetValue returns the flags value as string representation and an empty
|
// GetValue returns the flags value as string representation and an empty
|
||||||
// string if the flag takes no value at all.
|
// string if the flag takes no value at all.
|
||||||
func (f *UintSliceFlag) GetValue() string {
|
func (f *UintSliceFlag) GetValue() string {
|
||||||
if f.Value != nil {
|
var defaultVals []string
|
||||||
return f.Value.String()
|
if f.Value != nil && len(f.Value.Value()) > 0 {
|
||||||
|
for _, i := range f.Value.Value() {
|
||||||
|
defaultVals = append(defaultVals, strconv.FormatUint(uint64(i), 10))
|
||||||
}
|
}
|
||||||
return ""
|
}
|
||||||
|
return strings.Join(defaultVals, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDefaultText returns the default text for this flag
|
// GetDefaultText returns the default text for this flag
|
||||||
|
@ -139,6 +142,11 @@ func (f *UintSliceFlag) GetEnvVars() []string {
|
||||||
return f.EnvVars
|
return f.EnvVars
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
func (f *UintSliceFlag) IsSliceFlag() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// Apply populates the flag given the flag set and environment
|
// Apply populates the flag given the flag set and environment
|
||||||
func (f *UintSliceFlag) Apply(set *flag.FlagSet) error {
|
func (f *UintSliceFlag) Apply(set *flag.FlagSet) error {
|
||||||
// apply any default
|
// apply any default
|
||||||
|
@ -183,17 +191,6 @@ func (f *UintSliceFlag) Get(ctx *Context) []uint {
|
||||||
return ctx.UintSlice(f.Name)
|
return ctx.UintSlice(f.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *UintSliceFlag) stringify() string {
|
|
||||||
var defaultVals []string
|
|
||||||
if f.Value != nil && len(f.Value.Value()) > 0 {
|
|
||||||
for _, i := range f.Value.Value() {
|
|
||||||
defaultVals = append(defaultVals, strconv.FormatUint(uint64(i), 10))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stringifySliceFlag(f.Usage, f.Names(), defaultVals)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UintSlice looks up the value of a local UintSliceFlag, returns
|
// UintSlice looks up the value of a local UintSliceFlag, returns
|
||||||
// nil if not found
|
// nil if not found
|
||||||
func (cCtx *Context) UintSlice(name string) []uint {
|
func (cCtx *Context) UintSlice(name string) []uint {
|
||||||
|
|
117
vendor/github.com/urfave/cli/v2/godoc-current.txt
generated
vendored
117
vendor/github.com/urfave/cli/v2/godoc-current.txt
generated
vendored
|
@ -32,7 +32,7 @@ var (
|
||||||
SuggestDidYouMeanTemplate string = suggestDidYouMeanTemplate
|
SuggestDidYouMeanTemplate string = suggestDidYouMeanTemplate
|
||||||
)
|
)
|
||||||
var AppHelpTemplate = `NAME:
|
var AppHelpTemplate = `NAME:
|
||||||
{{$v := offset .Name 6}}{{wrap .Name 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}
|
{{template "helpNameTemplate" .}}
|
||||||
|
|
||||||
USAGE:
|
USAGE:
|
||||||
{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
|
{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
|
||||||
|
@ -41,51 +41,39 @@ VERSION:
|
||||||
{{.Version}}{{end}}{{end}}{{if .Description}}
|
{{.Version}}{{end}}{{end}}{{if .Description}}
|
||||||
|
|
||||||
DESCRIPTION:
|
DESCRIPTION:
|
||||||
{{wrap .Description 3}}{{end}}{{if len .Authors}}
|
{{template "descriptionTemplate" .}}{{end}}
|
||||||
|
{{- if len .Authors}}
|
||||||
|
|
||||||
AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
|
AUTHOR{{template "authorsTemplate" .}}{{end}}{{if .VisibleCommands}}
|
||||||
{{range $index, $author := .Authors}}{{if $index}}
|
|
||||||
{{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}}
|
|
||||||
|
|
||||||
COMMANDS:{{range .VisibleCategories}}{{if .Name}}
|
COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}}
|
||||||
{{.Name}}:{{range .VisibleCommands}}
|
|
||||||
{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{ $cv := offsetCommands .VisibleCommands 5}}{{range .VisibleCommands}}
|
|
||||||
{{$s := join .Names ", "}}{{$s}}{{ $sp := subtract $cv (offset $s 3) }}{{ indent $sp ""}}{{wrap .Usage $cv}}{{end}}{{end}}{{end}}{{end}}{{if .VisibleFlagCategories}}
|
|
||||||
|
|
||||||
GLOBAL OPTIONS:{{range .VisibleFlagCategories}}
|
GLOBAL OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}}
|
||||||
{{if .Name}}{{.Name}}
|
|
||||||
{{end}}{{range .Flags}}{{.}}
|
|
||||||
{{end}}{{end}}{{else}}{{if .VisibleFlags}}
|
|
||||||
|
|
||||||
GLOBAL OPTIONS:
|
GLOBAL OPTIONS:{{template "visibleFlagTemplate" .}}{{end}}{{if .Copyright}}
|
||||||
{{range $index, $option := .VisibleFlags}}{{if $index}}
|
|
||||||
{{end}}{{wrap $option.String 6}}{{end}}{{end}}{{end}}{{if .Copyright}}
|
|
||||||
|
|
||||||
COPYRIGHT:
|
COPYRIGHT:
|
||||||
{{wrap .Copyright 3}}{{end}}
|
{{template "copyrightTemplate" .}}{{end}}
|
||||||
`
|
`
|
||||||
AppHelpTemplate is the text template for the Default help topic. cli.go
|
AppHelpTemplate is the text template for the Default help topic. cli.go
|
||||||
uses text/template to render templates. You can render custom help text by
|
uses text/template to render templates. You can render custom help text by
|
||||||
setting this variable.
|
setting this variable.
|
||||||
|
|
||||||
var CommandHelpTemplate = `NAME:
|
var CommandHelpTemplate = `NAME:
|
||||||
{{$v := offset .HelpName 6}}{{wrap .HelpName 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}
|
{{template "helpNameTemplate" .}}
|
||||||
|
|
||||||
USAGE:
|
USAGE:
|
||||||
{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
|
{{template "usageTemplate" .}}{{if .Category}}
|
||||||
|
|
||||||
CATEGORY:
|
CATEGORY:
|
||||||
{{.Category}}{{end}}{{if .Description}}
|
{{.Category}}{{end}}{{if .Description}}
|
||||||
|
|
||||||
DESCRIPTION:
|
DESCRIPTION:
|
||||||
{{wrap .Description 3}}{{end}}{{if .VisibleFlagCategories}}
|
{{template "descriptionTemplate" .}}{{end}}{{if .VisibleFlagCategories}}
|
||||||
|
|
||||||
OPTIONS:{{range .VisibleFlagCategories}}
|
OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}}
|
||||||
{{if .Name}}{{.Name}}
|
|
||||||
{{end}}{{range .Flags}}{{.}}{{end}}{{end}}{{else}}{{if .VisibleFlags}}
|
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:{{template "visibleFlagTemplate" .}}{{end}}
|
||||||
{{range .VisibleFlags}}{{.}}{{end}}{{end}}{{end}}
|
|
||||||
`
|
`
|
||||||
CommandHelpTemplate is the text template for the command help topic. cli.go
|
CommandHelpTemplate is the text template for the command help topic. cli.go
|
||||||
uses text/template to render templates. You can render custom help text by
|
uses text/template to render templates. You can render custom help text by
|
||||||
|
@ -145,21 +133,19 @@ var OsExiter = os.Exit
|
||||||
os.Exit.
|
os.Exit.
|
||||||
|
|
||||||
var SubcommandHelpTemplate = `NAME:
|
var SubcommandHelpTemplate = `NAME:
|
||||||
{{.HelpName}} - {{.Usage}}
|
{{template "helpNameTemplate" .}}
|
||||||
|
|
||||||
USAGE:
|
USAGE:
|
||||||
{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}}
|
{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}}
|
||||||
|
|
||||||
DESCRIPTION:
|
DESCRIPTION:
|
||||||
{{wrap .Description 3}}{{end}}
|
{{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}}
|
||||||
|
|
||||||
COMMANDS:{{range .VisibleCategories}}{{if .Name}}
|
COMMANDS:{{template "visibleCommandTemplate" .}}{{end}}{{if .VisibleFlagCategories}}
|
||||||
{{.Name}}:{{range .VisibleCommands}}
|
|
||||||
{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{ $cv := offsetCommands .VisibleCommands 5}}{{range .VisibleCommands}}
|
|
||||||
{{$s := join .Names ", "}}{{$s}}{{ $sp := subtract $cv (offset $s 3) }}{{ indent $sp ""}}{{wrap .Usage $cv}}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
|
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}}
|
||||||
{{range .VisibleFlags}}{{.}}{{end}}{{end}}
|
|
||||||
|
OPTIONS:{{template "visibleFlagTemplate" .}}{{end}}
|
||||||
`
|
`
|
||||||
SubcommandHelpTemplate is the text template for the subcommand help topic.
|
SubcommandHelpTemplate is the text template for the subcommand help topic.
|
||||||
cli.go uses text/template to render templates. You can render custom help
|
cli.go uses text/template to render templates. You can render custom help
|
||||||
|
@ -458,6 +444,8 @@ type BoolFlag struct {
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
Count *int
|
Count *int
|
||||||
|
|
||||||
|
Action func(*Context, bool) error
|
||||||
}
|
}
|
||||||
BoolFlag is a flag with type bool
|
BoolFlag is a flag with type bool
|
||||||
|
|
||||||
|
@ -565,7 +553,6 @@ type Command struct {
|
||||||
// cli.go uses text/template to render templates. You can
|
// cli.go uses text/template to render templates. You can
|
||||||
// render custom help text by setting this variable.
|
// render custom help text by setting this variable.
|
||||||
CustomHelpTemplate string
|
CustomHelpTemplate string
|
||||||
|
|
||||||
// Has unexported fields.
|
// Has unexported fields.
|
||||||
}
|
}
|
||||||
Command is a subcommand for a cli.App.
|
Command is a subcommand for a cli.App.
|
||||||
|
@ -584,10 +571,6 @@ func (c *Command) Run(ctx *Context) (err error)
|
||||||
Run invokes the command given the context, parses ctx.Args() to generate
|
Run invokes the command given the context, parses ctx.Args() to generate
|
||||||
command-specific flags
|
command-specific flags
|
||||||
|
|
||||||
func (c *Command) VisibleCategories() []CommandCategory
|
|
||||||
VisibleCategories returns a slice of categories and commands that are
|
|
||||||
Hidden=false
|
|
||||||
|
|
||||||
func (c *Command) VisibleCommands() []*Command
|
func (c *Command) VisibleCommands() []*Command
|
||||||
VisibleCommands returns a slice of the Commands with Hidden=false
|
VisibleCommands returns a slice of the Commands with Hidden=false
|
||||||
|
|
||||||
|
@ -759,6 +742,14 @@ type DocGenerationFlag interface {
|
||||||
DocGenerationFlag is an interface that allows documentation generation for
|
DocGenerationFlag is an interface that allows documentation generation for
|
||||||
the flag
|
the flag
|
||||||
|
|
||||||
|
type DocGenerationSliceFlag interface {
|
||||||
|
DocGenerationFlag
|
||||||
|
|
||||||
|
// IsSliceFlag returns true for flags that can be given multiple times.
|
||||||
|
IsSliceFlag() bool
|
||||||
|
}
|
||||||
|
DocGenerationSliceFlag extends DocGenerationFlag for slice-based flags.
|
||||||
|
|
||||||
type DurationFlag struct {
|
type DurationFlag struct {
|
||||||
Name string
|
Name string
|
||||||
|
|
||||||
|
@ -776,6 +767,8 @@ type DurationFlag struct {
|
||||||
|
|
||||||
Aliases []string
|
Aliases []string
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
|
Action func(*Context, time.Duration) error
|
||||||
}
|
}
|
||||||
DurationFlag is a flag with type time.Duration
|
DurationFlag is a flag with type time.Duration
|
||||||
|
|
||||||
|
@ -952,6 +945,8 @@ type Float64Flag struct {
|
||||||
|
|
||||||
Aliases []string
|
Aliases []string
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
|
Action func(*Context, float64) error
|
||||||
}
|
}
|
||||||
Float64Flag is a flag with type float64
|
Float64Flag is a flag with type float64
|
||||||
|
|
||||||
|
@ -1038,6 +1033,8 @@ type Float64SliceFlag struct {
|
||||||
|
|
||||||
Aliases []string
|
Aliases []string
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
|
Action func(*Context, []float64) error
|
||||||
}
|
}
|
||||||
Float64SliceFlag is a flag with type *Float64Slice
|
Float64SliceFlag is a flag with type *Float64Slice
|
||||||
|
|
||||||
|
@ -1071,6 +1068,9 @@ func (f *Float64SliceFlag) IsRequired() bool
|
||||||
func (f *Float64SliceFlag) IsSet() bool
|
func (f *Float64SliceFlag) IsSet() bool
|
||||||
IsSet returns whether or not the flag has been set through env or file
|
IsSet returns whether or not the flag has been set through env or file
|
||||||
|
|
||||||
|
func (f *Float64SliceFlag) IsSliceFlag() bool
|
||||||
|
IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
|
||||||
func (f *Float64SliceFlag) IsVisible() bool
|
func (f *Float64SliceFlag) IsVisible() bool
|
||||||
IsVisible returns true if the flag is not hidden, otherwise false
|
IsVisible returns true if the flag is not hidden, otherwise false
|
||||||
|
|
||||||
|
@ -1115,6 +1115,8 @@ type GenericFlag struct {
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
TakesFile bool
|
TakesFile bool
|
||||||
|
|
||||||
|
Action func(*Context, interface{}) error
|
||||||
}
|
}
|
||||||
GenericFlag is a flag with type Generic
|
GenericFlag is a flag with type Generic
|
||||||
|
|
||||||
|
@ -1181,6 +1183,8 @@ type Int64Flag struct {
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
Base int
|
Base int
|
||||||
|
|
||||||
|
Action func(*Context, int64) error
|
||||||
}
|
}
|
||||||
Int64Flag is a flag with type int64
|
Int64Flag is a flag with type int64
|
||||||
|
|
||||||
|
@ -1267,6 +1271,8 @@ type Int64SliceFlag struct {
|
||||||
|
|
||||||
Aliases []string
|
Aliases []string
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
|
Action func(*Context, []int64) error
|
||||||
}
|
}
|
||||||
Int64SliceFlag is a flag with type *Int64Slice
|
Int64SliceFlag is a flag with type *Int64Slice
|
||||||
|
|
||||||
|
@ -1300,6 +1306,9 @@ func (f *Int64SliceFlag) IsRequired() bool
|
||||||
func (f *Int64SliceFlag) IsSet() bool
|
func (f *Int64SliceFlag) IsSet() bool
|
||||||
IsSet returns whether or not the flag has been set through env or file
|
IsSet returns whether or not the flag has been set through env or file
|
||||||
|
|
||||||
|
func (f *Int64SliceFlag) IsSliceFlag() bool
|
||||||
|
IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
|
||||||
func (f *Int64SliceFlag) IsVisible() bool
|
func (f *Int64SliceFlag) IsVisible() bool
|
||||||
IsVisible returns true if the flag is not hidden, otherwise false
|
IsVisible returns true if the flag is not hidden, otherwise false
|
||||||
|
|
||||||
|
@ -1338,6 +1347,8 @@ type IntFlag struct {
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
Base int
|
Base int
|
||||||
|
|
||||||
|
Action func(*Context, int) error
|
||||||
}
|
}
|
||||||
IntFlag is a flag with type int
|
IntFlag is a flag with type int
|
||||||
|
|
||||||
|
@ -1428,6 +1439,8 @@ type IntSliceFlag struct {
|
||||||
|
|
||||||
Aliases []string
|
Aliases []string
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
|
Action func(*Context, []int) error
|
||||||
}
|
}
|
||||||
IntSliceFlag is a flag with type *IntSlice
|
IntSliceFlag is a flag with type *IntSlice
|
||||||
|
|
||||||
|
@ -1461,6 +1474,9 @@ func (f *IntSliceFlag) IsRequired() bool
|
||||||
func (f *IntSliceFlag) IsSet() bool
|
func (f *IntSliceFlag) IsSet() bool
|
||||||
IsSet returns whether or not the flag has been set through env or file
|
IsSet returns whether or not the flag has been set through env or file
|
||||||
|
|
||||||
|
func (f *IntSliceFlag) IsSliceFlag() bool
|
||||||
|
IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
|
||||||
func (f *IntSliceFlag) IsVisible() bool
|
func (f *IntSliceFlag) IsVisible() bool
|
||||||
IsVisible returns true if the flag is not hidden, otherwise false
|
IsVisible returns true if the flag is not hidden, otherwise false
|
||||||
|
|
||||||
|
@ -1533,6 +1549,8 @@ type PathFlag struct {
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
TakesFile bool
|
TakesFile bool
|
||||||
|
|
||||||
|
Action func(*Context, Path) error
|
||||||
}
|
}
|
||||||
PathFlag is a flag with type Path
|
PathFlag is a flag with type Path
|
||||||
|
|
||||||
|
@ -1673,6 +1691,8 @@ type StringFlag struct {
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
TakesFile bool
|
TakesFile bool
|
||||||
|
|
||||||
|
Action func(*Context, string) error
|
||||||
}
|
}
|
||||||
StringFlag is a flag with type string
|
StringFlag is a flag with type string
|
||||||
|
|
||||||
|
@ -1761,6 +1781,8 @@ type StringSliceFlag struct {
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
TakesFile bool
|
TakesFile bool
|
||||||
|
|
||||||
|
Action func(*Context, []string) error
|
||||||
}
|
}
|
||||||
StringSliceFlag is a flag with type *StringSlice
|
StringSliceFlag is a flag with type *StringSlice
|
||||||
|
|
||||||
|
@ -1794,6 +1816,9 @@ func (f *StringSliceFlag) IsRequired() bool
|
||||||
func (f *StringSliceFlag) IsSet() bool
|
func (f *StringSliceFlag) IsSet() bool
|
||||||
IsSet returns whether or not the flag has been set through env or file
|
IsSet returns whether or not the flag has been set through env or file
|
||||||
|
|
||||||
|
func (f *StringSliceFlag) IsSliceFlag() bool
|
||||||
|
IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
|
||||||
func (f *StringSliceFlag) IsVisible() bool
|
func (f *StringSliceFlag) IsVisible() bool
|
||||||
IsVisible returns true if the flag is not hidden, otherwise false
|
IsVisible returns true if the flag is not hidden, otherwise false
|
||||||
|
|
||||||
|
@ -1867,6 +1892,8 @@ type TimestampFlag struct {
|
||||||
Layout string
|
Layout string
|
||||||
|
|
||||||
Timezone *time.Location
|
Timezone *time.Location
|
||||||
|
|
||||||
|
Action func(*Context, *time.Time) error
|
||||||
}
|
}
|
||||||
TimestampFlag is a flag with type *Timestamp
|
TimestampFlag is a flag with type *Timestamp
|
||||||
|
|
||||||
|
@ -1932,6 +1959,8 @@ type Uint64Flag struct {
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
Base int
|
Base int
|
||||||
|
|
||||||
|
Action func(*Context, uint64) error
|
||||||
}
|
}
|
||||||
Uint64Flag is a flag with type uint64
|
Uint64Flag is a flag with type uint64
|
||||||
|
|
||||||
|
@ -2018,6 +2047,8 @@ type Uint64SliceFlag struct {
|
||||||
|
|
||||||
Aliases []string
|
Aliases []string
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
|
Action func(*Context, []uint64) error
|
||||||
}
|
}
|
||||||
Uint64SliceFlag is a flag with type *Uint64Slice
|
Uint64SliceFlag is a flag with type *Uint64Slice
|
||||||
|
|
||||||
|
@ -2049,6 +2080,9 @@ func (f *Uint64SliceFlag) IsRequired() bool
|
||||||
func (f *Uint64SliceFlag) IsSet() bool
|
func (f *Uint64SliceFlag) IsSet() bool
|
||||||
IsSet returns whether or not the flag has been set through env or file
|
IsSet returns whether or not the flag has been set through env or file
|
||||||
|
|
||||||
|
func (f *Uint64SliceFlag) IsSliceFlag() bool
|
||||||
|
IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
|
||||||
func (f *Uint64SliceFlag) IsVisible() bool
|
func (f *Uint64SliceFlag) IsVisible() bool
|
||||||
IsVisible returns true if the flag is not hidden, otherwise false
|
IsVisible returns true if the flag is not hidden, otherwise false
|
||||||
|
|
||||||
|
@ -2080,6 +2114,8 @@ type UintFlag struct {
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
Base int
|
Base int
|
||||||
|
|
||||||
|
Action func(*Context, uint) error
|
||||||
}
|
}
|
||||||
UintFlag is a flag with type uint
|
UintFlag is a flag with type uint
|
||||||
|
|
||||||
|
@ -2170,6 +2206,8 @@ type UintSliceFlag struct {
|
||||||
|
|
||||||
Aliases []string
|
Aliases []string
|
||||||
EnvVars []string
|
EnvVars []string
|
||||||
|
|
||||||
|
Action func(*Context, []uint) error
|
||||||
}
|
}
|
||||||
UintSliceFlag is a flag with type *UintSlice
|
UintSliceFlag is a flag with type *UintSlice
|
||||||
|
|
||||||
|
@ -2201,6 +2239,9 @@ func (f *UintSliceFlag) IsRequired() bool
|
||||||
func (f *UintSliceFlag) IsSet() bool
|
func (f *UintSliceFlag) IsSet() bool
|
||||||
IsSet returns whether or not the flag has been set through env or file
|
IsSet returns whether or not the flag has been set through env or file
|
||||||
|
|
||||||
|
func (f *UintSliceFlag) IsSliceFlag() bool
|
||||||
|
IsSliceFlag implements DocGenerationSliceFlag.
|
||||||
|
|
||||||
func (f *UintSliceFlag) IsVisible() bool
|
func (f *UintSliceFlag) IsVisible() bool
|
||||||
IsVisible returns true if the flag is not hidden, otherwise false
|
IsVisible returns true if the flag is not hidden, otherwise false
|
||||||
|
|
||||||
|
|
30
vendor/github.com/urfave/cli/v2/help.go
generated
vendored
30
vendor/github.com/urfave/cli/v2/help.go
generated
vendored
|
@ -358,6 +358,17 @@ func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs
|
||||||
|
|
||||||
w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0)
|
w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0)
|
||||||
t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
|
t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
|
||||||
|
t.New("helpNameTemplate").Parse(helpNameTemplate)
|
||||||
|
t.New("usageTemplate").Parse(usageTemplate)
|
||||||
|
t.New("descriptionTemplate").Parse(descriptionTemplate)
|
||||||
|
t.New("visibleCommandTemplate").Parse(visibleCommandTemplate)
|
||||||
|
t.New("copyrightTemplate").Parse(copyrightTemplate)
|
||||||
|
t.New("versionTemplate").Parse(versionTemplate)
|
||||||
|
t.New("visibleFlagCategoryTemplate").Parse(visibleFlagCategoryTemplate)
|
||||||
|
t.New("visibleFlagTemplate").Parse(visibleFlagTemplate)
|
||||||
|
t.New("visibleGlobalFlagCategoryTemplate").Parse(strings.Replace(visibleFlagCategoryTemplate, "OPTIONS", "GLOBAL OPTIONS", -1))
|
||||||
|
t.New("authorsTemplate").Parse(authorsTemplate)
|
||||||
|
t.New("visibleCommandCategoryTemplate").Parse(visibleCommandCategoryTemplate)
|
||||||
|
|
||||||
err := t.Execute(w, data)
|
err := t.Execute(w, data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -468,25 +479,28 @@ func nindent(spaces int, v string) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func wrap(input string, offset int, wrapAt int) string {
|
func wrap(input string, offset int, wrapAt int) string {
|
||||||
var sb strings.Builder
|
var ss []string
|
||||||
|
|
||||||
lines := strings.Split(input, "\n")
|
lines := strings.Split(input, "\n")
|
||||||
|
|
||||||
padding := strings.Repeat(" ", offset)
|
padding := strings.Repeat(" ", offset)
|
||||||
|
|
||||||
for i, line := range lines {
|
for i, line := range lines {
|
||||||
if i != 0 {
|
if line == "" {
|
||||||
sb.WriteString(padding)
|
ss = append(ss, line)
|
||||||
|
} else {
|
||||||
|
wrapped := wrapLine(line, offset, wrapAt, padding)
|
||||||
|
if i == 0 {
|
||||||
|
ss = append(ss, wrapped)
|
||||||
|
} else {
|
||||||
|
ss = append(ss, padding+wrapped)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
sb.WriteString(wrapLine(line, offset, wrapAt, padding))
|
|
||||||
|
|
||||||
if i != len(lines)-1 {
|
|
||||||
sb.WriteString("\n")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return sb.String()
|
return strings.Join(ss, "\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
func wrapLine(input string, offset int, wrapAt int, padding string) string {
|
func wrapLine(input string, offset int, wrapAt int, padding string) string {
|
||||||
|
|
12
vendor/github.com/urfave/cli/v2/parse.go
generated
vendored
12
vendor/github.com/urfave/cli/v2/parse.go
generated
vendored
|
@ -46,7 +46,10 @@ func parseIter(set *flag.FlagSet, ip iterativeParser, args []string, shellComple
|
||||||
}
|
}
|
||||||
|
|
||||||
// swap current argument with the split version
|
// swap current argument with the split version
|
||||||
args = append(args[:i], append(shortOpts, args[i+1:]...)...)
|
// do not include args that parsed correctly so far as it would
|
||||||
|
// trigger Value.Set() on those args and would result in
|
||||||
|
// duplicates for slice type flags
|
||||||
|
args = append(shortOpts, args[i+1:]...)
|
||||||
argsWereSplit = true
|
argsWereSplit = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
@ -56,13 +59,6 @@ func parseIter(set *flag.FlagSet, ip iterativeParser, args []string, shellComple
|
||||||
if !argsWereSplit {
|
if !argsWereSplit {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Since custom parsing failed, replace the flag set before retrying
|
|
||||||
newSet, err := ip.newFlagSet()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*set = *newSet
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
80
vendor/github.com/urfave/cli/v2/template.go
generated
vendored
80
vendor/github.com/urfave/cli/v2/template.go
generated
vendored
|
@ -1,10 +1,38 @@
|
||||||
package cli
|
package cli
|
||||||
|
|
||||||
|
var helpNameTemplate = `{{$v := offset .HelpName 6}}{{wrap .HelpName 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}`
|
||||||
|
var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}`
|
||||||
|
var descriptionTemplate = `{{wrap .Description 3}}`
|
||||||
|
var authorsTemplate = `{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
|
||||||
|
{{range $index, $author := .Authors}}{{if $index}}
|
||||||
|
{{end}}{{$author}}{{end}}`
|
||||||
|
var visibleCommandTemplate = `{{ $cv := offsetCommands .VisibleCommands 5}}{{range .VisibleCommands}}
|
||||||
|
{{$s := join .Names ", "}}{{$s}}{{ $sp := subtract $cv (offset $s 3) }}{{ indent $sp ""}}{{wrap .Usage $cv}}{{end}}`
|
||||||
|
var visibleCommandCategoryTemplate = `{{range .VisibleCategories}}{{if .Name}}
|
||||||
|
{{.Name}}:{{range .VisibleCommands}}
|
||||||
|
{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{template "visibleCommandTemplate" .}}{{end}}{{end}}`
|
||||||
|
var visibleFlagCategoryTemplate = `{{range .VisibleFlagCategories}}
|
||||||
|
{{if .Name}}{{.Name}}
|
||||||
|
|
||||||
|
{{end}}{{$flglen := len .Flags}}{{range $i, $e := .Flags}}{{if eq (subtract $flglen $i) 1}}{{$e}}
|
||||||
|
{{else}}{{$e}}
|
||||||
|
{{end}}{{end}}{{end}}`
|
||||||
|
|
||||||
|
var visibleFlagTemplate = `{{range $i, $e := .VisibleFlags}}
|
||||||
|
{{wrap $e.String 6}}{{end}}`
|
||||||
|
|
||||||
|
var versionTemplate = `{{if .Version}}{{if not .HideVersion}}
|
||||||
|
|
||||||
|
VERSION:
|
||||||
|
{{.Version}}{{end}}{{end}}`
|
||||||
|
|
||||||
|
var copyrightTemplate = `{{wrap .Copyright 3}}`
|
||||||
|
|
||||||
// AppHelpTemplate is the text template for the Default help topic.
|
// AppHelpTemplate is the text template for the Default help topic.
|
||||||
// cli.go uses text/template to render templates. You can
|
// cli.go uses text/template to render templates. You can
|
||||||
// render custom help text by setting this variable.
|
// render custom help text by setting this variable.
|
||||||
var AppHelpTemplate = `NAME:
|
var AppHelpTemplate = `NAME:
|
||||||
{{$v := offset .Name 6}}{{wrap .Name 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}
|
{{template "helpNameTemplate" .}}
|
||||||
|
|
||||||
USAGE:
|
USAGE:
|
||||||
{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
|
{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
|
||||||
|
@ -13,72 +41,58 @@ VERSION:
|
||||||
{{.Version}}{{end}}{{end}}{{if .Description}}
|
{{.Version}}{{end}}{{end}}{{if .Description}}
|
||||||
|
|
||||||
DESCRIPTION:
|
DESCRIPTION:
|
||||||
{{wrap .Description 3}}{{end}}{{if len .Authors}}
|
{{template "descriptionTemplate" .}}{{end}}
|
||||||
|
{{- if len .Authors}}
|
||||||
|
|
||||||
AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
|
AUTHOR{{template "authorsTemplate" .}}{{end}}{{if .VisibleCommands}}
|
||||||
{{range $index, $author := .Authors}}{{if $index}}
|
|
||||||
{{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}}
|
|
||||||
|
|
||||||
COMMANDS:{{range .VisibleCategories}}{{if .Name}}
|
COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}}
|
||||||
{{.Name}}:{{range .VisibleCommands}}
|
|
||||||
{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{ $cv := offsetCommands .VisibleCommands 5}}{{range .VisibleCommands}}
|
|
||||||
{{$s := join .Names ", "}}{{$s}}{{ $sp := subtract $cv (offset $s 3) }}{{ indent $sp ""}}{{wrap .Usage $cv}}{{end}}{{end}}{{end}}{{end}}{{if .VisibleFlagCategories}}
|
|
||||||
|
|
||||||
GLOBAL OPTIONS:{{range .VisibleFlagCategories}}
|
GLOBAL OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}}
|
||||||
{{if .Name}}{{.Name}}
|
|
||||||
{{end}}{{range .Flags}}{{.}}
|
|
||||||
{{end}}{{end}}{{else}}{{if .VisibleFlags}}
|
|
||||||
|
|
||||||
GLOBAL OPTIONS:
|
GLOBAL OPTIONS:{{template "visibleFlagTemplate" .}}{{end}}{{if .Copyright}}
|
||||||
{{range $index, $option := .VisibleFlags}}{{if $index}}
|
|
||||||
{{end}}{{wrap $option.String 6}}{{end}}{{end}}{{end}}{{if .Copyright}}
|
|
||||||
|
|
||||||
COPYRIGHT:
|
COPYRIGHT:
|
||||||
{{wrap .Copyright 3}}{{end}}
|
{{template "copyrightTemplate" .}}{{end}}
|
||||||
`
|
`
|
||||||
|
|
||||||
// CommandHelpTemplate is the text template for the command help topic.
|
// CommandHelpTemplate is the text template for the command help topic.
|
||||||
// cli.go uses text/template to render templates. You can
|
// cli.go uses text/template to render templates. You can
|
||||||
// render custom help text by setting this variable.
|
// render custom help text by setting this variable.
|
||||||
var CommandHelpTemplate = `NAME:
|
var CommandHelpTemplate = `NAME:
|
||||||
{{$v := offset .HelpName 6}}{{wrap .HelpName 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}
|
{{template "helpNameTemplate" .}}
|
||||||
|
|
||||||
USAGE:
|
USAGE:
|
||||||
{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
|
{{template "usageTemplate" .}}{{if .Category}}
|
||||||
|
|
||||||
CATEGORY:
|
CATEGORY:
|
||||||
{{.Category}}{{end}}{{if .Description}}
|
{{.Category}}{{end}}{{if .Description}}
|
||||||
|
|
||||||
DESCRIPTION:
|
DESCRIPTION:
|
||||||
{{wrap .Description 3}}{{end}}{{if .VisibleFlagCategories}}
|
{{template "descriptionTemplate" .}}{{end}}{{if .VisibleFlagCategories}}
|
||||||
|
|
||||||
OPTIONS:{{range .VisibleFlagCategories}}
|
OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}}
|
||||||
{{if .Name}}{{.Name}}
|
|
||||||
{{end}}{{range .Flags}}{{.}}{{end}}{{end}}{{else}}{{if .VisibleFlags}}
|
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:{{template "visibleFlagTemplate" .}}{{end}}
|
||||||
{{range .VisibleFlags}}{{.}}{{end}}{{end}}{{end}}
|
|
||||||
`
|
`
|
||||||
|
|
||||||
// SubcommandHelpTemplate is the text template for the subcommand help topic.
|
// SubcommandHelpTemplate is the text template for the subcommand help topic.
|
||||||
// cli.go uses text/template to render templates. You can
|
// cli.go uses text/template to render templates. You can
|
||||||
// render custom help text by setting this variable.
|
// render custom help text by setting this variable.
|
||||||
var SubcommandHelpTemplate = `NAME:
|
var SubcommandHelpTemplate = `NAME:
|
||||||
{{.HelpName}} - {{.Usage}}
|
{{template "helpNameTemplate" .}}
|
||||||
|
|
||||||
USAGE:
|
USAGE:
|
||||||
{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}}
|
{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}}
|
||||||
|
|
||||||
DESCRIPTION:
|
DESCRIPTION:
|
||||||
{{wrap .Description 3}}{{end}}
|
{{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}}
|
||||||
|
|
||||||
COMMANDS:{{range .VisibleCategories}}{{if .Name}}
|
COMMANDS:{{template "visibleCommandTemplate" .}}{{end}}{{if .VisibleFlagCategories}}
|
||||||
{{.Name}}:{{range .VisibleCommands}}
|
|
||||||
{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{ $cv := offsetCommands .VisibleCommands 5}}{{range .VisibleCommands}}
|
|
||||||
{{$s := join .Names ", "}}{{$s}}{{ $sp := subtract $cv (offset $s 3) }}{{ indent $sp ""}}{{wrap .Usage $cv}}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
|
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}}
|
||||||
{{range .VisibleFlags}}{{.}}{{end}}{{end}}
|
|
||||||
|
OPTIONS:{{template "visibleFlagTemplate" .}}{{end}}
|
||||||
`
|
`
|
||||||
|
|
||||||
var MarkdownDocTemplate = `{{if gt .SectionNum 0}}% {{ .App.Name }} {{ .SectionNum }}
|
var MarkdownDocTemplate = `{{if gt .SectionNum 0}}% {{ .App.Name }} {{ .SectionNum }}
|
||||||
|
|
31
vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
generated
vendored
Normal file
31
vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
// Copyright 2022 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:build (darwin || freebsd || netbsd || openbsd) && gc
|
||||||
|
// +build darwin freebsd netbsd openbsd
|
||||||
|
// +build gc
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
//
|
||||||
|
// System call support for ppc64, BSD
|
||||||
|
//
|
||||||
|
|
||||||
|
// Just jump to package syscall's implementation for all these functions.
|
||||||
|
// The runtime may know about them.
|
||||||
|
|
||||||
|
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||||
|
JMP syscall·Syscall(SB)
|
||||||
|
|
||||||
|
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||||
|
JMP syscall·Syscall6(SB)
|
||||||
|
|
||||||
|
TEXT ·Syscall9(SB),NOSPLIT,$0-104
|
||||||
|
JMP syscall·Syscall9(SB)
|
||||||
|
|
||||||
|
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||||
|
JMP syscall·RawSyscall(SB)
|
||||||
|
|
||||||
|
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||||
|
JMP syscall·RawSyscall6(SB)
|
4
vendor/golang.org/x/sys/unix/dirent.go
generated
vendored
4
vendor/golang.org/x/sys/unix/dirent.go
generated
vendored
|
@ -2,8 +2,8 @@
|
||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
|
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
|
||||||
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
|
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
|
||||||
|
|
||||||
package unix
|
package unix
|
||||||
|
|
||||||
|
|
18
vendor/golang.org/x/sys/unix/mkall.sh
generated
vendored
18
vendor/golang.org/x/sys/unix/mkall.sh
generated
vendored
|
@ -182,6 +182,24 @@ openbsd_mips64)
|
||||||
# API consistent across platforms.
|
# API consistent across platforms.
|
||||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
|
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
|
||||||
;;
|
;;
|
||||||
|
openbsd_ppc64)
|
||||||
|
mkasm="go run mkasm.go"
|
||||||
|
mkerrors="$mkerrors -m64"
|
||||||
|
mksyscall="go run mksyscall.go -openbsd -libc"
|
||||||
|
mksysctl="go run mksysctl_openbsd.go"
|
||||||
|
# Let the type of C char be signed for making the bare syscall
|
||||||
|
# API consistent across platforms.
|
||||||
|
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
|
||||||
|
;;
|
||||||
|
openbsd_riscv64)
|
||||||
|
mkasm="go run mkasm.go"
|
||||||
|
mkerrors="$mkerrors -m64"
|
||||||
|
mksyscall="go run mksyscall.go -openbsd -libc"
|
||||||
|
mksysctl="go run mksysctl_openbsd.go"
|
||||||
|
# Let the type of C char be signed for making the bare syscall
|
||||||
|
# API consistent across platforms.
|
||||||
|
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
|
||||||
|
;;
|
||||||
solaris_amd64)
|
solaris_amd64)
|
||||||
mksyscall="go run mksyscall_solaris.go"
|
mksyscall="go run mksyscall_solaris.go"
|
||||||
mkerrors="$mkerrors -m64"
|
mkerrors="$mkerrors -m64"
|
||||||
|
|
106
vendor/golang.org/x/sys/unix/syscall_illumos.go
generated
vendored
106
vendor/golang.org/x/sys/unix/syscall_illumos.go
generated
vendored
|
@ -10,8 +10,6 @@
|
||||||
package unix
|
package unix
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"runtime"
|
|
||||||
"unsafe"
|
"unsafe"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -79,107 +77,3 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error)
|
|
||||||
|
|
||||||
func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) {
|
|
||||||
var clp, datap *strbuf
|
|
||||||
if len(cl) > 0 {
|
|
||||||
clp = &strbuf{
|
|
||||||
Len: int32(len(cl)),
|
|
||||||
Buf: (*int8)(unsafe.Pointer(&cl[0])),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(data) > 0 {
|
|
||||||
datap = &strbuf{
|
|
||||||
Len: int32(len(data)),
|
|
||||||
Buf: (*int8)(unsafe.Pointer(&data[0])),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return putmsg(fd, clp, datap, flags)
|
|
||||||
}
|
|
||||||
|
|
||||||
//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error)
|
|
||||||
|
|
||||||
func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) {
|
|
||||||
var clp, datap *strbuf
|
|
||||||
if len(cl) > 0 {
|
|
||||||
clp = &strbuf{
|
|
||||||
Maxlen: int32(len(cl)),
|
|
||||||
Buf: (*int8)(unsafe.Pointer(&cl[0])),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(data) > 0 {
|
|
||||||
datap = &strbuf{
|
|
||||||
Maxlen: int32(len(data)),
|
|
||||||
Buf: (*int8)(unsafe.Pointer(&data[0])),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = getmsg(fd, clp, datap, &flags); err != nil {
|
|
||||||
return nil, nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cl) > 0 {
|
|
||||||
retCl = cl[:clp.Len]
|
|
||||||
}
|
|
||||||
if len(data) > 0 {
|
|
||||||
retData = data[:datap.Len]
|
|
||||||
}
|
|
||||||
return retCl, retData, flags, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) {
|
|
||||||
return ioctlRet(fd, req, uintptr(arg))
|
|
||||||
}
|
|
||||||
|
|
||||||
func IoctlSetString(fd int, req uint, val string) error {
|
|
||||||
bs := make([]byte, len(val)+1)
|
|
||||||
copy(bs[:len(bs)-1], val)
|
|
||||||
err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0])))
|
|
||||||
runtime.KeepAlive(&bs[0])
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lifreq Helpers
|
|
||||||
|
|
||||||
func (l *Lifreq) SetName(name string) error {
|
|
||||||
if len(name) >= len(l.Name) {
|
|
||||||
return fmt.Errorf("name cannot be more than %d characters", len(l.Name)-1)
|
|
||||||
}
|
|
||||||
for i := range name {
|
|
||||||
l.Name[i] = int8(name[i])
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Lifreq) SetLifruInt(d int) {
|
|
||||||
*(*int)(unsafe.Pointer(&l.Lifru[0])) = d
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Lifreq) GetLifruInt() int {
|
|
||||||
return *(*int)(unsafe.Pointer(&l.Lifru[0]))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Lifreq) SetLifruUint(d uint) {
|
|
||||||
*(*uint)(unsafe.Pointer(&l.Lifru[0])) = d
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Lifreq) GetLifruUint() uint {
|
|
||||||
return *(*uint)(unsafe.Pointer(&l.Lifru[0]))
|
|
||||||
}
|
|
||||||
|
|
||||||
func IoctlLifreq(fd int, req uint, l *Lifreq) error {
|
|
||||||
return ioctl(fd, req, uintptr(unsafe.Pointer(l)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Strioctl Helpers
|
|
||||||
|
|
||||||
func (s *Strioctl) SetInt(i int) {
|
|
||||||
s.Len = int32(unsafe.Sizeof(i))
|
|
||||||
s.Dp = (*int8)(unsafe.Pointer(&i))
|
|
||||||
}
|
|
||||||
|
|
||||||
func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) {
|
|
||||||
return ioctlRet(fd, req, uintptr(unsafe.Pointer(s)))
|
|
||||||
}
|
|
||||||
|
|
4
vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
generated
vendored
4
vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
generated
vendored
|
@ -2,8 +2,8 @@
|
||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
//go:build (openbsd && 386) || (openbsd && amd64) || (openbsd && arm) || (openbsd && arm64)
|
//go:build openbsd && !mips64
|
||||||
// +build openbsd,386 openbsd,amd64 openbsd,arm openbsd,arm64
|
// +build openbsd,!mips64
|
||||||
|
|
||||||
package unix
|
package unix
|
||||||
|
|
||||||
|
|
42
vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go
generated
vendored
Normal file
42
vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:build ppc64 && openbsd
|
||||||
|
// +build ppc64,openbsd
|
||||||
|
|
||||||
|
package unix
|
||||||
|
|
||||||
|
func setTimespec(sec, nsec int64) Timespec {
|
||||||
|
return Timespec{Sec: sec, Nsec: nsec}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setTimeval(sec, usec int64) Timeval {
|
||||||
|
return Timeval{Sec: sec, Usec: usec}
|
||||||
|
}
|
||||||
|
|
||||||
|
func SetKevent(k *Kevent_t, fd, mode, flags int) {
|
||||||
|
k.Ident = uint64(fd)
|
||||||
|
k.Filter = int16(mode)
|
||||||
|
k.Flags = uint16(flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iov *Iovec) SetLen(length int) {
|
||||||
|
iov.Len = uint64(length)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (msghdr *Msghdr) SetControllen(length int) {
|
||||||
|
msghdr.Controllen = uint32(length)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (msghdr *Msghdr) SetIovlen(length int) {
|
||||||
|
msghdr.Iovlen = uint32(length)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmsg *Cmsghdr) SetLen(length int) {
|
||||||
|
cmsg.Len = uint32(length)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
|
||||||
|
// of openbsd/ppc64 the syscall is called sysctl instead of __sysctl.
|
||||||
|
const SYS___SYSCTL = SYS_SYSCTL
|
42
vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go
generated
vendored
Normal file
42
vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:build riscv64 && openbsd
|
||||||
|
// +build riscv64,openbsd
|
||||||
|
|
||||||
|
package unix
|
||||||
|
|
||||||
|
func setTimespec(sec, nsec int64) Timespec {
|
||||||
|
return Timespec{Sec: sec, Nsec: nsec}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setTimeval(sec, usec int64) Timeval {
|
||||||
|
return Timeval{Sec: sec, Usec: usec}
|
||||||
|
}
|
||||||
|
|
||||||
|
func SetKevent(k *Kevent_t, fd, mode, flags int) {
|
||||||
|
k.Ident = uint64(fd)
|
||||||
|
k.Filter = int16(mode)
|
||||||
|
k.Flags = uint16(flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iov *Iovec) SetLen(length int) {
|
||||||
|
iov.Len = uint64(length)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (msghdr *Msghdr) SetControllen(length int) {
|
||||||
|
msghdr.Controllen = uint32(length)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (msghdr *Msghdr) SetIovlen(length int) {
|
||||||
|
msghdr.Iovlen = uint32(length)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmsg *Cmsghdr) SetLen(length int) {
|
||||||
|
cmsg.Len = uint32(length)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
|
||||||
|
// of openbsd/riscv64 the syscall is called sysctl instead of __sysctl.
|
||||||
|
const SYS___SYSCTL = SYS_SYSCTL
|
104
vendor/golang.org/x/sys/unix/syscall_solaris.go
generated
vendored
104
vendor/golang.org/x/sys/unix/syscall_solaris.go
generated
vendored
|
@ -1026,3 +1026,107 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error)
|
||||||
}
|
}
|
||||||
return valid, err
|
return valid, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error)
|
||||||
|
|
||||||
|
func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) {
|
||||||
|
var clp, datap *strbuf
|
||||||
|
if len(cl) > 0 {
|
||||||
|
clp = &strbuf{
|
||||||
|
Len: int32(len(cl)),
|
||||||
|
Buf: (*int8)(unsafe.Pointer(&cl[0])),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(data) > 0 {
|
||||||
|
datap = &strbuf{
|
||||||
|
Len: int32(len(data)),
|
||||||
|
Buf: (*int8)(unsafe.Pointer(&data[0])),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return putmsg(fd, clp, datap, flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error)
|
||||||
|
|
||||||
|
func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) {
|
||||||
|
var clp, datap *strbuf
|
||||||
|
if len(cl) > 0 {
|
||||||
|
clp = &strbuf{
|
||||||
|
Maxlen: int32(len(cl)),
|
||||||
|
Buf: (*int8)(unsafe.Pointer(&cl[0])),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(data) > 0 {
|
||||||
|
datap = &strbuf{
|
||||||
|
Maxlen: int32(len(data)),
|
||||||
|
Buf: (*int8)(unsafe.Pointer(&data[0])),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = getmsg(fd, clp, datap, &flags); err != nil {
|
||||||
|
return nil, nil, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(cl) > 0 {
|
||||||
|
retCl = cl[:clp.Len]
|
||||||
|
}
|
||||||
|
if len(data) > 0 {
|
||||||
|
retData = data[:datap.Len]
|
||||||
|
}
|
||||||
|
return retCl, retData, flags, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) {
|
||||||
|
return ioctlRet(fd, req, uintptr(arg))
|
||||||
|
}
|
||||||
|
|
||||||
|
func IoctlSetString(fd int, req uint, val string) error {
|
||||||
|
bs := make([]byte, len(val)+1)
|
||||||
|
copy(bs[:len(bs)-1], val)
|
||||||
|
err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0])))
|
||||||
|
runtime.KeepAlive(&bs[0])
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lifreq Helpers
|
||||||
|
|
||||||
|
func (l *Lifreq) SetName(name string) error {
|
||||||
|
if len(name) >= len(l.Name) {
|
||||||
|
return fmt.Errorf("name cannot be more than %d characters", len(l.Name)-1)
|
||||||
|
}
|
||||||
|
for i := range name {
|
||||||
|
l.Name[i] = int8(name[i])
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Lifreq) SetLifruInt(d int) {
|
||||||
|
*(*int)(unsafe.Pointer(&l.Lifru[0])) = d
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Lifreq) GetLifruInt() int {
|
||||||
|
return *(*int)(unsafe.Pointer(&l.Lifru[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Lifreq) SetLifruUint(d uint) {
|
||||||
|
*(*uint)(unsafe.Pointer(&l.Lifru[0])) = d
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Lifreq) GetLifruUint() uint {
|
||||||
|
return *(*uint)(unsafe.Pointer(&l.Lifru[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
func IoctlLifreq(fd int, req uint, l *Lifreq) error {
|
||||||
|
return ioctl(fd, req, uintptr(unsafe.Pointer(l)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strioctl Helpers
|
||||||
|
|
||||||
|
func (s *Strioctl) SetInt(i int) {
|
||||||
|
s.Len = int32(unsafe.Sizeof(i))
|
||||||
|
s.Dp = (*int8)(unsafe.Pointer(&i))
|
||||||
|
}
|
||||||
|
|
||||||
|
func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) {
|
||||||
|
return ioctlRet(fd, req, uintptr(unsafe.Pointer(s)))
|
||||||
|
}
|
||||||
|
|
6
vendor/golang.org/x/sys/unix/syscall_unix_gc.go
generated
vendored
6
vendor/golang.org/x/sys/unix/syscall_unix_gc.go
generated
vendored
|
@ -2,11 +2,9 @@
|
||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && gc && !ppc64le && !ppc64
|
//go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc
|
||||||
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris
|
||||||
// +build gc
|
// +build gc
|
||||||
// +build !ppc64le
|
|
||||||
// +build !ppc64
|
|
||||||
|
|
||||||
package unix
|
package unix
|
||||||
|
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue