Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2024-12-01 14:47:38 +00:00
Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
Commit: 8b8e547dc8
25 changed files with 394 additions and 114 deletions
README.md (13 lines changed)
@@ -1134,6 +1134,8 @@ to a file containing a list of [relabel_config](https://prometheus.io/docs/prome
 The `-relabelConfig` also can point to http or https url. For example, `-relabelConfig=https://config-server/relabel_config.yml`.
 See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
 
+The `-relabelConfig` files can contain special placeholders in the form `%{ENV_VAR}`, which are replaced by the corresponding environment variable values.
+
 Example contents for `-relabelConfig` file:
 
 ```yml
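For illustration, a minimal relabeling rule that uses such a placeholder might look like this (the `datacenter` label and `AZ` environment variable are hypothetical, not taken from the diff):

```yml
# Sets datacenter=<value of $AZ> on every metric; %{AZ} is expanded from the
# environment before the config is parsed. With no source_labels, the default
# regex matches everything, so the replacement is applied unconditionally.
- target_label: datacenter
  replacement: "%{AZ}"
```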
@@ -1147,8 +1149,7 @@ Example contents for `-relabelConfig` file:
   regex: true
 ```
 
-VictoriaMetrics components provide additional relabeling features such as Graphite-style relabeling.
-See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details.
+VictoriaMetrics provides additional relabeling features such as Graphite-style relabeling. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details.
 
 
 ## Federation
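As a sketch of what Graphite-style relabeling looks like (syntax per the vmagent relabeling docs linked above; the metric pattern and label names here are illustrative):

```yml
# Matches Graphite-style names such as foo.bar.counter and turns the glob
# captures into labels, referenced as $1, $2, ...
- action: graphite
  match: "*.*.counter"
  labels:
    job: "$1"
    instance: "$2"
```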
@@ -1599,8 +1600,8 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
 If the gaps are related to irregular intervals between samples, then try adjusting `-search.minStalenessInterval` command-line flag
 to value close to the maximum interval between samples.
 
-* If you are switching from InfluxDB or TimescaleDB, then take a look at `-search.maxStalenessInterval` command-line flag.
-  It may be needed in order to suppress default gap filling algorithm used by VictoriaMetrics - by default it assumes
+* If you are switching from InfluxDB or TimescaleDB, then it may be needed to set `-search.setLookbackToStep` command-line flag.
+  This suppresses default gap filling algorithm used by VictoriaMetrics - by default it assumes
   each time series is continuous instead of discrete, so it fills gaps between real samples with regular intervals.
 
 * Metrics and labels leading to [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) or [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) can be determined via [cardinality explorer](#cardinality-explorer) and via [/api/v1/status/tsdb](#tsdb-stats) endpoint.
@@ -2108,7 +2109,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -search.maxSeries int
      The maximum number of time series, which can be returned from /api/v1/series. This option allows limiting memory usage (default 100000)
   -search.maxStalenessInterval duration
-     The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.maxLookback' flag, which has the same meaning due to historical reasons
+     The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.setLookbackToStep' flag
   -search.maxStatusRequestDuration duration
      The maximum duration for /api/v1/status/* requests (default 5m0s)
   -search.maxStepForPointsAdjustment duration
@@ -2133,6 +2134,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
      The minimum duration for queries to track in query stats at /api/v1/status/top_queries. Queries with lower duration are ignored in query stats (default 1ms)
   -search.resetCacheAuthKey string
      Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
+  -search.setLookbackToStep
+     Whether to fix lookback interval to 'step' query arg value. If set to true, the query model becomes closer to InfluxDB data model. If set to true, then -search.maxLookback and -search.maxStalenessInterval are ignored
   -search.treatDotsAsIsInRegexps
      Whether to treat dots as is in regexp label filters used in queries. For example, foo{bar=~"a.b.c"} will be automatically converted to foo{bar=~"a\\.b\\.c"}, i.e. all the dots in regexp filters will be automatically escaped in order to match only dot char instead of matching any char. Dots in ".+", ".*" and ".{n}" regexps aren't escaped. This option is DEPRECATED in favor of {__graphite__="a.*.c"} syntax for selecting metrics matching the given Graphite metrics filter
   -selfScrapeInstance string
@@ -183,6 +183,16 @@ Please file feature requests to [our issue tracker](https://github.com/VictoriaM
 
 `vmagent` also supports the following additional options in `scrape_configs` section:
 
+* `headers` - a list of HTTP headers to send to scrape target with each scrape request. This can be used when the scrape target needs custom authorization and authentication. For example:
+
+```yaml
+scrape_configs:
+- job_name: custom_headers
+  headers:
+  - "TenantID: abc"
+  - "My-Auth: TopSecret"
+```
+
 * `disable_compression: true` - to disable response compression on a per-job basis. By default `vmagent` requests compressed responses from scrape targets
   to save network bandwidth.
 * `disable_keepalive: true` - to disable [HTTP keep-alive connections](https://en.wikipedia.org/wiki/HTTP_persistent_connection) on a per-job basis.
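The hunk above documents the new `headers` option with a config example. As a minimal sketch of how such `"Name: value"` strings can be turned into request headers (the split-on-first-colon rule is an assumption, not the verified vmagent code):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// applyHeaders parses "Name: value" strings, as used by the new `headers`
// option, and sets them on the request.
func applyHeaders(req *http.Request, headers []string) error {
	for _, h := range headers {
		name, value, found := strings.Cut(h, ":")
		if !found {
			return fmt.Errorf("invalid header %q; want 'Name: value'", h)
		}
		req.Header.Set(strings.TrimSpace(name), strings.TrimSpace(value))
	}
	return nil
}

func main() {
	req, _ := http.NewRequest("GET", "http://host123:8080/metrics", nil)
	if err := applyHeaders(req, []string{"TenantID: abc", "My-Auth: TopSecret"}); err != nil {
		panic(err)
	}
	fmt.Println(req.Header)
}
```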
@@ -297,6 +307,8 @@ The relabeling can be defined in the following places:
 * At the `-remoteWrite.relabelConfig` file. This relabeling is applied to all the collected metrics before sending them to remote storage. This relabeling can be debugged by passing `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
 * At the `-remoteWrite.urlRelabelConfig` files. This relabeling is applied to metrics before sending them to the corresponding `-remoteWrite.url`. This relabeling can be debugged by passing `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.
 
+All the files with relabeling configs can contain special placeholders in the form `%{ENV_VAR}`, which are replaced by the corresponding environment variable values.
+
 You can read more about relabeling in the following articles:
 
 * [How to use Relabeling in Prometheus and VictoriaMetrics](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2)
@@ -424,9 +436,11 @@ scrape_configs:
 Proxy can be configured with the following optional settings:
 
 * `proxy_authorization` for generic token authorization. See [Prometheus docs for details on authorization section](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config)
-* `proxy_bearer_token` and `proxy_bearer_token_file` for Bearer token authorization
 * `proxy_basic_auth` for Basic authorization. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
+* `proxy_bearer_token` and `proxy_bearer_token_file` for Bearer token authorization
+* `proxy_oauth2` for OAuth2 config. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#oauth2).
 * `proxy_tls_config` for TLS config. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config).
+* `proxy_headers` for passing additional HTTP headers in requests to proxy.
 
 For example:
 
@@ -443,6 +457,8 @@ scrape_configs:
     key_file: /path/to/key
     ca_file: /path/to/ca
     server_name: real-server-name
+  proxy_headers:
+  - "Proxy-Auth: top-secret"
 ```
 
 ## Cardinality limiter
@@ -219,7 +219,7 @@ func getAuthConfig(argIdx int) (*promauth.Config, error) {
 		InsecureSkipVerify: tlsInsecureSkipVerify.GetOptionalArg(argIdx),
 	}
 
-	authCfg, err := promauth.NewConfig(".", nil, basicAuthCfg, token, tokenFile, oauth2Cfg, tlsCfg)
+	authCfg, err := promauth.NewConfig(".", nil, basicAuthCfg, token, tokenFile, oauth2Cfg, tlsCfg, nil)
 	if err != nil {
 		return nil, fmt.Errorf("cannot populate OAuth2 config for remoteWrite idx: %d, err: %w", argIdx, err)
 	}
@@ -306,9 +306,7 @@ again:
 	h.Set("Content-Type", "application/x-protobuf")
 	h.Set("Content-Encoding", "snappy")
 	h.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
-	if ah := c.authCfg.GetAuthHeader(); ah != "" {
-		req.Header.Set("Authorization", ah)
-	}
+	c.authCfg.SetHeaders(req, true)
 	if c.awsCfg != nil {
 		if err := c.awsCfg.SignRequest(req, sigv4Hash); err != nil {
 			// there is no need in retry, request will be rejected by client.Do and retried by code below
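The same replacement repeats in the next few hunks: hand-rolled copying of the Authorization header gives way to a single `SetHeaders` call. A self-contained sketch of what such a helper plausibly does (the type and fields below are stand-ins, not the actual promauth API):

```go
package main

import (
	"fmt"
	"net/http"
)

// authConfig is a stand-in for promauth.Config with just enough state to
// show the refactoring: one auth header plus user-configured extra headers.
type authConfig struct {
	authHeader string            // e.g. "Bearer top-secret"
	headers    map[string]string // extra headers from the new `headers` option
}

// SetHeaders applies the Authorization header (when setAuthHeader is true)
// and all configured extra headers in one place, replacing the per-client
// GetAuthHeader/req.Header.Set boilerplate removed in the hunks above.
func (c *authConfig) SetHeaders(req *http.Request, setAuthHeader bool) {
	if setAuthHeader && c.authHeader != "" {
		req.Header.Set("Authorization", c.authHeader)
	}
	for name, value := range c.headers {
		req.Header.Set(name, value)
	}
}

func main() {
	req, _ := http.NewRequest("POST", "http://localhost:8428/api/v1/write", nil)
	cfg := &authConfig{
		authHeader: "Bearer top-secret",
		headers:    map[string]string{"TenantID": "abc"},
	}
	cfg.SetHeaders(req, true)
	fmt.Println(req.Header) // map[Authorization:[Bearer top-secret] Tenantid:[abc]]
}
```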
@@ -146,9 +146,7 @@ func (s *VMStorage) newRequestPOST() (*http.Request, error) {
 	}
 	req.Header.Set("Content-Type", "application/json")
 	if s.authCfg != nil {
-		if auth := s.authCfg.GetAuthHeader(); auth != "" {
-			req.Header.Set("Authorization", auth)
-		}
+		s.authCfg.SetHeaders(req, true)
 	}
 	return req, nil
 }
@@ -83,7 +83,7 @@ func TestVMInstantQuery(t *testing.T) {
 	srv := httptest.NewServer(mux)
 	defer srv.Close()
 
-	authCfg, err := promauth.NewConfig(".", nil, baCfg, "", "", nil, nil)
+	authCfg, err := baCfg.NewConfig(".")
 	if err != nil {
 		t.Fatalf("unexpected: %s", err)
 	}
@@ -206,7 +206,7 @@ func TestVMRangeQuery(t *testing.T) {
 	srv := httptest.NewServer(mux)
 	defer srv.Close()
 
-	authCfg, err := promauth.NewConfig(".", nil, baCfg, "", "", nil, nil)
+	authCfg, err := baCfg.NewConfig(".")
 	if err != nil {
 		t.Fatalf("unexpected: %s", err)
 	}
@@ -247,7 +247,7 @@ func TestVMRangeQuery(t *testing.T) {
 }
 
 func TestRequestParams(t *testing.T) {
-	authCfg, err := promauth.NewConfig(".", nil, baCfg, "", "", nil, nil)
+	authCfg, err := baCfg.NewConfig(".")
 	if err != nil {
 		t.Fatalf("unexpected: %s", err)
 	}
@@ -79,9 +79,7 @@ func (am *AlertManager) send(ctx context.Context, alerts []Alert) error {
 	req = req.WithContext(ctx)
 
 	if am.authCfg != nil {
-		if auth := am.authCfg.GetAuthHeader(); auth != "" {
-			req.Header.Set("Authorization", auth)
-		}
+		am.authCfg.SetHeaders(req, true)
 	}
 	resp, err := am.client.Do(req)
 	if err != nil {
@@ -245,9 +245,7 @@ func (c *Client) send(ctx context.Context, data []byte) error {
 	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
 
 	if c.authCfg != nil {
-		if auth := c.authCfg.GetAuthHeader(); auth != "" {
-			req.Header.Set("Authorization", auth)
-		}
+		c.authCfg.SetHeaders(req, true)
 	}
 	if !*disablePathAppend {
 		req.URL.Path = path.Join(req.URL.Path, "/api/v1/write")
@@ -38,7 +38,9 @@ var (
 	maxStalenessInterval = flag.Duration("search.maxStalenessInterval", 0, "The maximum interval for staleness calculations. "+
 		"By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning "+
 		"Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. "+
-		"See also '-search.maxLookback' flag, which has the same meaning due to historical reasons")
+		"See also '-search.setLookbackToStep' flag")
+	setLookbackToStep = flag.Bool("search.setLookbackToStep", false, "Whether to fix lookback interval to 'step' query arg value. "+
+		"If set to true, the query model becomes closer to InfluxDB data model. If set to true, then -search.maxLookback and -search.maxStalenessInterval are ignored")
 	maxStepForPointsAdjustment = flag.Duration("search.maxStepForPointsAdjustment", time.Minute, "The maximum step when /api/v1/query_range handler adjusts "+
 		"points with timestamps closer than -search.latencyOffset to the current time. The adjustment is needed because such points may contain incomplete data")
 
@@ -981,7 +983,19 @@ func getMaxLookback(r *http.Request) (int64, error) {
 	if d == 0 {
 		d = maxStalenessInterval.Milliseconds()
 	}
-	return searchutils.GetDuration(r, "max_lookback", d)
+	maxLookback, err := searchutils.GetDuration(r, "max_lookback", d)
+	if err != nil {
+		return 0, err
+	}
+	d = maxLookback
+	if *setLookbackToStep {
+		step, err := searchutils.GetDuration(r, "step", d)
+		if err != nil {
+			return 0, err
+		}
+		d = step
+	}
+	return d, nil
 }
 
 func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error) {
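Read as a pure function, the lookback selection above reduces to the following sketch (names are illustrative; the query-arg parsing and error handling of the real code are elided):

```go
package main

import "fmt"

// chooseLookback mirrors the decision order in getMaxLookback: start from
// -search.maxLookback, fall back to -search.maxStalenessInterval, let the
// max_lookback query arg override both, and, when -search.setLookbackToStep
// is enabled, pin the result to the step query arg.
func chooseLookback(maxLookbackMs, maxStalenessMs, maxLookbackArgMs, stepMs int64, setLookbackToStep bool) int64 {
	d := maxLookbackMs
	if d == 0 {
		d = maxStalenessMs
	}
	if maxLookbackArgMs > 0 {
		d = maxLookbackArgMs
	}
	if setLookbackToStep {
		d = stepMs
	}
	return d
}

func main() {
	// With the flag enabled, a query_range request with step=10s fills gaps
	// for at most 10s, which approximates the InfluxDB data model.
	fmt.Println(chooseLookback(0, 300_000, 0, 10_000, true)) // 10000
}
```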
@@ -27,11 +27,21 @@ VictoriaMetrics is production-ready for the following operating systems:
 * Linux (Alpine, Ubuntu, Debian, RedHat, etc.)
 * FreeBSD
 * OpenBSD
+* Solaris/SmartOS
 
 Some VictoriaMetrics components ([vmagent](https://docs.victoriametrics.com/vmagent.html), [vmalert](https://docs.victoriametrics.com/vmalert.html) and [vmauth](https://docs.victoriametrics.com/vmauth.html)) can run on Windows.
 
 VictoriaMetrics can run also on MacOS for testing and development purposes.
 
+## Supported Architectures
+
+* **Linux**: i386, amd64, arm, arm64, ppc64le
+* **FreeBSD**: i386, amd64, arm
+* **OpenBSD**: i386, amd64, arm
+* **Solaris/SmartOS**: i386, amd64
+* **MacOS**: amd64, arm64 (for testing and development purposes)
+* **Windows**: amd64 (supported by [vmagent](https://docs.victoriametrics.com/vmagent.html), [vmalert](https://docs.victoriametrics.com/vmalert.html) and [vmauth](https://docs.victoriametrics.com/vmauth.html)).
+
 ## Upgrade procedure
 
 It is safe to upgrade VictoriaMetrics to new versions unless the [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features.
@@ -15,6 +15,20 @@ The following tip changes can be tested by building VictoriaMetrics components f
 
 ## tip
 
+* FEATURE: add `-search.setLookbackToStep` command-line flag, which enables InfluxDB-like gap filling during querying. See [these docs](https://docs.victoriametrics.com/guides/migrate-from-influx.html) for details.
+* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add ability to specify additional HTTP headers to send to scrape targets via `headers` section in `scrape_configs`. This can be used when the scrape target requires custom authorization and authentication like in [this stackoverflow question](https://stackoverflow.com/questions/66032498/prometheus-scrape-metric-with-custom-header). For example, the following config instructs sending `My-Auth: top-secret` and `TenantID: FooBar` headers with each request to `http://host123:8080/metrics`:
+
+```yaml
+scrape_configs:
+- job_name: foo
+  headers:
+  - "My-Auth: top-secret"
+  - "TenantID: FooBar"
+  static_configs:
+  - targets: ["host123:8080"]
+```
+
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): make sure that [stale markers](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) are generated with the actual timestamp when unsuccessful scrape occurs. This should prevent from possible time series overlap on scrape target restart in dynamic environments such as Kubernetes.
+
 ## [v1.78.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.78.0)
 
@@ -273,7 +273,7 @@ Cluster performance and capacity can be scaled up in two ways:
 
 General recommendations for cluster scalability:
 
-- Adding more CPU and RAM to existing `vmselect` nodes improves the performance for heavy queries, which process big number of time series with big number of raw samples.
+- Adding more CPU and RAM to existing `vmselect` nodes improves the performance for heavy queries, which process big number of time series with big number of raw samples. See [this article on how to detect and optimize heavy queries](https://valyala.medium.com/how-to-optimize-promql-and-metricsql-queries-85a1b75bf986).
 - Adding more `vmstorage` nodes increases the number of [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series) the cluster can handle. This also increases query performance over time series with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). The cluster stability is also improved with the number of `vmstorage` nodes, since active `vmstorage` nodes need to handle lower additional workload when some of `vmstorage` nodes become unavailable.
 - Adding more CPU and RAM to existing `vmstorage` nodes increases the number of [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series) the cluster can handle. It is preferred to add more `vmstorage` nodes over adding more CPU and RAM to existing `vmstorage` nodes, since higher number of `vmstorage` nodes increases cluster stability and improves query performance over time series with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
 - Adding more `vminsert` nodes increases the maximum possible data ingestion speed, since the ingested data may be split among bigger number of `vminsert` nodes.
@@ -294,8 +294,6 @@ with new configs.
 Cluster should remain in working state if at least a single node of each type remains available during
 the update process. See [cluster availability](#cluster-availability) section for details.
 
-See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
-
 ## Cluster availability
 
 - HTTP load balancer must stop routing requests to unavailable `vminsert` and `vmselect` nodes.
@@ -348,7 +346,7 @@ By default cluster components of VictoriaMetrics are tuned for an optimal resour
 - `-search.maxTagKeys` at `vmselect` limits the number of items, which may be returned from [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names). This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory at `vmstorage` and `vmselect` when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagKeys` to quite low value in order to limit CPU and memory usage.
 - `-search.maxTagValues` at `vmselect` limits the number of items, which may be returned from [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values). This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory at `vmstorage` and `vmselect` when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage.
 
-See also [capacity planning docs](#capacity-planning).
+See also [capacity planning docs](#capacity-planning) and [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
 
 ## High availability
 
@@ -398,7 +396,7 @@ When the replication is enabled, `-dedup.minScrapeInterval=1ms` command-line fla
 
 Note that [replication doesn't save from disaster](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883), so it is recommended performing regular backups. See [these docs](#backups) for details.
 
-Note that the replication increases resource usage - CPU, RAM, disk space, network bandwidth - by up to `-replicationFactor=N` times, because `vminsert` stores `N` copies of incoming data to distinct `vmstorage` nodes and `vmselect` needs to de-duplicate the replicated data obtained from `vmstorage` nodes during querying. So it is more cost-effective to offload the replication to underlying replicated durable storage pointed by `-storageDataPath` such as [Google Compute Engine persistent disk](https://cloud.google.com/compute/docs/disks/#pdspecs), which is protected from data loss and data corruption. It also provide consistently high performance and [may be resized](https://cloud.google.com/compute/docs/disks/add-persistent-disk) without downtime. HDD-based persistent disks should be enough for the majority of use cases. It is recommended using durable replicated persistent volumes in Kubernetes.
+Note that the replication increases resource usage - CPU, RAM, disk space, network bandwidth - by up to `-replicationFactor=N` times, because `vminsert` stores `N` copies of incoming data to distinct `vmstorage` nodes and `vmselect` needs to de-duplicate the replicated data obtained from `vmstorage` nodes during querying. So it is more cost-effective to offload the replication to underlying replicated durable storage pointed by `-storageDataPath` such as [Google Compute Engine persistent disk](https://cloud.google.com/compute/docs/disks/#pdspecs), which is protected from data loss and data corruption. It also provides consistently high performance and [may be resized](https://cloud.google.com/compute/docs/disks/add-persistent-disk) without downtime. HDD-based persistent disks should be enough for the majority of use cases. It is recommended using durable replicated persistent volumes in Kubernetes.
 
 ## Deduplication
 
@@ -776,7 +774,7 @@ Below is the output for `/path/to/vmselect -help`:
   -search.maxSeries int
      The maximum number of time series, which can be returned from /api/v1/series. This option allows limiting memory usage (default 100000)
   -search.maxStalenessInterval duration
-     The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.maxLookback' flag, which has the same meaning due to historical reasons
+     The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.setLookbackToStep' flag
   -search.maxStatusRequestDuration duration
      The maximum duration for /api/v1/status/* requests (default 5m0s)
   -search.maxStepForPointsAdjustment duration
@@ -795,6 +793,8 @@ Below is the output for `/path/to/vmselect -help`:
      The minimum duration for queries to track in query stats at /api/v1/status/top_queries. Queries with lower duration are ignored in query stats (default 1ms)
   -search.resetCacheAuthKey string
      Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
+  -search.setLookbackToStep
+     Whether to fix lookback interval to 'step' query arg value. If set to true, the query model becomes closer to InfluxDB data model. If set to true, then -search.maxLookback and -search.maxStalenessInterval are ignored
   -search.treatDotsAsIsInRegexps
      Whether to treat dots as is in regexp label filters used in queries. For example, foo{bar=~"a.b.c"} will be automatically converted to foo{bar=~"a\\.b\\.c"}, i.e. all the dots in regexp filters will be automatically escaped in order to match only dot char instead of matching any char. Dots in ".+", ".*" and ".{n}" regexps aren't escaped. This option is DEPRECATED in favor of {__graphite__="a.*.c"} syntax for selecting metrics matching the given Graphite metrics filter
   -selectNode array
@@ -1134,6 +1134,8 @@ to a file containing a list of [relabel_config](https://prometheus.io/docs/prome
 The `-relabelConfig` also can point to http or https url. For example, `-relabelConfig=https://config-server/relabel_config.yml`.
 See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
 
+The `-relabelConfig` files can contain special placeholders in the form `%{ENV_VAR}`, which are replaced by the corresponding environment variable values.
+
 Example contents for `-relabelConfig` file:
 
 ```yml

@@ -1147,8 +1149,7 @@ Example contents for `-relabelConfig` file:
   regex: true
 ```
 
-VictoriaMetrics components provide additional relabeling features such as Graphite-style relabeling.
-See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details.
+VictoriaMetrics provides additional relabeling features such as Graphite-style relabeling. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details.
 
 
 ## Federation

@@ -1599,8 +1600,8 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
 If the gaps are related to irregular intervals between samples, then try adjusting `-search.minStalenessInterval` command-line flag
 to value close to the maximum interval between samples.
 
-* If you are switching from InfluxDB or TimescaleDB, then take a look at `-search.maxStalenessInterval` command-line flag.
-  It may be needed in order to suppress default gap filling algorithm used by VictoriaMetrics - by default it assumes
+* If you are switching from InfluxDB or TimescaleDB, then it may be needed to set `-search.setLookbackToStep` command-line flag.
+  This suppresses default gap filling algorithm used by VictoriaMetrics - by default it assumes
   each time series is continuous instead of discrete, so it fills gaps between real samples with regular intervals.
 
 * Metrics and labels leading to [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) or [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) can be determined via [cardinality explorer](#cardinality-explorer) and via [/api/v1/status/tsdb](#tsdb-stats) endpoint.

@@ -2108,7 +2109,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -search.maxSeries int
      The maximum number of time series, which can be returned from /api/v1/series. This option allows limiting memory usage (default 100000)
   -search.maxStalenessInterval duration
-     The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.maxLookback' flag, which has the same meaning due to historical reasons
+     The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.setLookbackToStep' flag
   -search.maxStatusRequestDuration duration
      The maximum duration for /api/v1/status/* requests (default 5m0s)
   -search.maxStepForPointsAdjustment duration

@@ -2133,6 +2134,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
      The minimum duration for queries to track in query stats at /api/v1/status/top_queries. Queries with lower duration are ignored in query stats (default 1ms)
   -search.resetCacheAuthKey string
      Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
+  -search.setLookbackToStep
+     Whether to fix lookback interval to 'step' query arg value. If set to true, the query model becomes closer to InfluxDB data model. If set to true, then -search.maxLookback and -search.maxStalenessInterval are ignored
   -search.treatDotsAsIsInRegexps
      Whether to treat dots as is in regexp label filters used in queries. For example, foo{bar=~"a.b.c"} will be automatically converted to foo{bar=~"a\\.b\\.c"}, i.e. all the dots in regexp filters will be automatically escaped in order to match only dot char instead of matching any char. Dots in ".+", ".*" and ".{n}" regexps aren't escaped. This option is DEPRECATED in favor of {__graphite__="a.*.c"} syntax for selecting metrics matching the given Graphite metrics filter
   -selfScrapeInstance string
@@ -1138,6 +1138,8 @@ to a file containing a list of [relabel_config](https://prometheus.io/docs/prome
 The `-relabelConfig` also can point to http or https url. For example, `-relabelConfig=https://config-server/relabel_config.yml`.
 See [this article with relabeling tips and tricks](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2).
 
+The `-relabelConfig` files can contain special placeholders in the form `%{ENV_VAR}`, which are replaced by the corresponding environment variable values.
+
 Example contents for `-relabelConfig` file:
 
 ```yml

@@ -1151,8 +1153,7 @@ Example contents for `-relabelConfig` file:
   regex: true
 ```
 
-VictoriaMetrics components provide additional relabeling features such as Graphite-style relabeling.
-See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details.
+VictoriaMetrics provides additional relabeling features such as Graphite-style relabeling. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for more details.
 
 
 ## Federation

@@ -1603,8 +1604,8 @@ See also more advanced [cardinality limiter in vmagent](https://docs.victoriamet
 If the gaps are related to irregular intervals between samples, then try adjusting `-search.minStalenessInterval` command-line flag
 to value close to the maximum interval between samples.
 
-* If you are switching from InfluxDB or TimescaleDB, then take a look at `-search.maxStalenessInterval` command-line flag.
-  It may be needed in order to suppress default gap filling algorithm used by VictoriaMetrics - by default it assumes
+* If you are switching from InfluxDB or TimescaleDB, then it may be needed to set `-search.setLookbackToStep` command-line flag.
+  This suppresses default gap filling algorithm used by VictoriaMetrics - by default it assumes
   each time series is continuous instead of discrete, so it fills gaps between real samples with regular intervals.
 
 * Metrics and labels leading to [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) or [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) can be determined via [cardinality explorer](#cardinality-explorer) and via [/api/v1/status/tsdb](#tsdb-stats) endpoint.

@@ -2112,7 +2113,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -search.maxSeries int
      The maximum number of time series, which can be returned from /api/v1/series. This option allows limiting memory usage (default 100000)
   -search.maxStalenessInterval duration
-     The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.maxLookback' flag, which has the same meaning due to historical reasons
+     The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.setLookbackToStep' flag
   -search.maxStatusRequestDuration duration
      The maximum duration for /api/v1/status/* requests (default 5m0s)
   -search.maxStepForPointsAdjustment duration

@@ -2137,6 +2138,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
      The minimum duration for queries to track in query stats at /api/v1/status/top_queries. Queries with lower duration are ignored in query stats (default 1ms)
   -search.resetCacheAuthKey string
      Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
+  -search.setLookbackToStep
+     Whether to fix lookback interval to 'step' query arg value. If set to true, the query model becomes closer to InfluxDB data model. If set to true, then -search.maxLookback and -search.maxStalenessInterval are ignored
   -search.treatDotsAsIsInRegexps
      Whether to treat dots as is in regexp label filters used in queries. For example, foo{bar=~"a.b.c"} will be automatically converted to foo{bar=~"a\\.b\\.c"}, i.e. all the dots in regexp filters will be automatically escaped in order to match only dot char instead of matching any char. Dots in ".+", ".*" and ".{n}" regexps aren't escaped. This option is DEPRECATED in favor of {__graphite__="a.*.c"} syntax for selecting metrics matching the given Graphite metrics filter
   -selfScrapeInstance string
@@ -202,8 +202,10 @@ detail [here](https://docs.victoriametrics.com/keyConcepts.html#range-query). In
 behavior by adding `fill(previous)` to the query.
 
 VictoriaMetrics fills the gaps on the graph assuming time series are always continuous and not discrete.
-To limit the interval on which VictoriaMetrics will try to fill the gaps try setting `-search.maxStalenessInterval`
-command-line flag to the value equal to actual resolution between data points (for example, to `10s`).
+To limit the interval on which VictoriaMetrics will try to fill the gaps, set `-search.setLookbackToStep`
+command-line flag. This limits the gap filling to a single `step` interval passed to
+[/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries).
+This behavior is close to InfluxDB data model.
 
 
 ### Advanced usage
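To see the effect described above, run a range query against a server started with the flag; a minimal sketch (the endpoint, metric name and time range below are placeholders):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// With -search.setLookbackToStep enabled on the server, gaps in the
	// result are filled for at most one step (10s here) past the last raw
	// sample, similar to InfluxDB; without it, the staleness interval is
	// derived from the median interval between samples.
	q := url.Values{}
	q.Set("query", "node_cpu_seconds_total")
	q.Set("start", "2022-07-01T00:00:00Z")
	q.Set("end", "2022-07-01T01:00:00Z")
	q.Set("step", "10s")
	resp, err := http.Get("http://localhost:8428/api/v1/query_range?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```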
@ -187,6 +187,16 @@ Please file feature requests to [our issue tracker](https://github.com/VictoriaM
|
||||||
|
|
||||||
`vmagent` also support the following additional options in `scrape_configs` section:
|
`vmagent` also support the following additional options in `scrape_configs` section:
|
||||||
|
|
||||||
|
* `headers` - a list of HTTP headers to send to scrape target with each scrape request. This can be used when the scrape target needs custom authorization and authentication. For example:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: custom_headers
|
||||||
|
headers:
|
||||||
|
- "TenantID: abc"
|
||||||
|
- "My-Auth: TopSecret"
|
||||||
|
```
|
||||||
|
|
||||||
* `disable_compression: true` - to disable response compression on a per-job basis. By default `vmagent` requests compressed responses from scrape targets
|
* `disable_compression: true` - to disable response compression on a per-job basis. By default `vmagent` requests compressed responses from scrape targets
|
||||||
to save network bandwidth.
|
to save network bandwidth.
|
||||||
* `disable_keepalive: true` - to disable [HTTP keep-alive connections](https://en.wikipedia.org/wiki/HTTP_persistent_connection) on a per-job basis.
|
* `disable_keepalive: true` - to disable [HTTP keep-alive connections](https://en.wikipedia.org/wiki/HTTP_persistent_connection) on a per-job basis.
|
||||||
|
@@ -301,6 +311,8 @@ The relabeling can be defined in the following places:

* At the `-remoteWrite.relabelConfig` file. This relabeling is applied to all the collected metrics before sending them to remote storage. This relabeling can be debugged by passing the `-remoteWrite.relabelDebug` command-line option to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to remote storage.
* At the `-remoteWrite.urlRelabelConfig` files. This relabeling is applied to metrics before sending them to the corresponding `-remoteWrite.url`. This relabeling can be debugged by passing the `-remoteWrite.urlRelabelDebug` command-line options to `vmagent`. In this case `vmagent` logs metrics before and after the relabeling and then drops all the logged metrics instead of sending them to the corresponding `-remoteWrite.url`.

+All the files with relabeling configs can contain special placeholders in the form `%{ENV_VAR}`, which are replaced by the corresponding environment variable values.
+
You can read more about relabeling in the following articles:

* [How to use Relabeling in Prometheus and VictoriaMetrics](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2)
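As a hedged illustration of the `%{ENV_VAR}` placeholders added in the hunk above (hypothetical file contents, not part of the diff): a `-remoteWrite.relabelConfig` file can pull a value from the environment at load time.

```yaml
# relabel.yml - "%{DROP_INSTANCE}" is replaced with the value of
# the DROP_INSTANCE environment variable when the file is read.
- action: drop
  source_labels: [instance]
  regex: "%{DROP_INSTANCE}"
```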
@@ -428,9 +440,11 @@ scrape_configs:

Proxy can be configured with the following optional settings:

* `proxy_authorization` for generic token authorization. See [Prometheus docs for details on authorization section](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config)
-* `proxy_bearer_token` and `proxy_bearer_token_file` for Bearer token authorization
* `proxy_basic_auth` for Basic authorization. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
+* `proxy_bearer_token` and `proxy_bearer_token_file` for Bearer token authorization
+* `proxy_oauth2` for OAuth2 config. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#oauth2).
* `proxy_tls_config` for TLS config. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config).
+* `proxy_headers` for passing additional HTTP headers in requests to the proxy.

For example:
@@ -447,6 +461,8 @@ scrape_configs:
    key_file: /path/to/key
    ca_file: /path/to/ca
    server_name: real-server-name
+  proxy_headers:
+  - "Proxy-Auth: top-secret"
```
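For completeness, `headers` and `proxy_headers` can be combined in a single job - the first list is sent to the scrape target, the second to the proxy. A sketch based on the options documented above (the job name, proxy address and values are illustrative; the same combination appears in the test config later in this diff):

```yaml
scrape_configs:
- job_name: via_proxy
  proxy_url: http://proxy-addr:3128
  headers:
  - "TenantID: abc"          # sent to the scrape target
  proxy_headers:
  - "Proxy-Auth: top-secret" # sent to the proxy
```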

## Cardinality limiter
@@ -15,6 +15,7 @@ import (
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+   "github.com/VictoriaMetrics/fasthttp"
    "github.com/cespare/xxhash/v2"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/clientcredentials"
@@ -116,6 +117,9 @@ type HTTPClientConfig struct {
    BearerTokenFile string        `yaml:"bearer_token_file,omitempty"`
    OAuth2          *OAuth2Config `yaml:"oauth2,omitempty"`
    TLSConfig       *TLSConfig    `yaml:"tls_config,omitempty"`
+
+   // Headers contains optional HTTP headers, which must be sent in the request to the server
+   Headers []string `yaml:"headers,omitempty"`
}

// ProxyClientConfig represents proxy client config.
@@ -124,7 +128,11 @@ type ProxyClientConfig struct {
    BasicAuth       *BasicAuthConfig `yaml:"proxy_basic_auth,omitempty"`
    BearerToken     *Secret          `yaml:"proxy_bearer_token,omitempty"`
    BearerTokenFile string           `yaml:"proxy_bearer_token_file,omitempty"`
+   OAuth2          *OAuth2Config    `yaml:"proxy_oauth2,omitempty"`
    TLSConfig       *TLSConfig       `yaml:"proxy_tls_config,omitempty"`
+
+   // Headers contains optional HTTP headers, which must be sent in the request to the proxy
+   Headers []string `yaml:"proxy_headers,omitempty"`
}

// OAuth2Config represents OAuth2 configuration
@@ -257,9 +265,71 @@ type Config struct {
    authHeader         string
    authHeaderDeadline uint64

+   headers []keyValue

    authDigest string
}

+type keyValue struct {
+   key   string
+   value string
+}
+
+func parseHeaders(headers []string) ([]keyValue, error) {
+   if len(headers) == 0 {
+       return nil, nil
+   }
+   kvs := make([]keyValue, len(headers))
+   for i, h := range headers {
+       n := strings.IndexByte(h, ':')
+       if n < 0 {
+           return nil, fmt.Errorf(`missing ':' in header %q; expecting "key: value" format`, h)
+       }
+       kv := &kvs[i]
+       kv.key = strings.TrimSpace(h[:n])
+       kv.value = strings.TrimSpace(h[n+1:])
+   }
+   return kvs, nil
+}
+
+// HeadersNoAuthString returns string representation of ac headers
+func (ac *Config) HeadersNoAuthString() string {
+   if len(ac.headers) == 0 {
+       return ""
+   }
+   a := make([]string, len(ac.headers))
+   for i, h := range ac.headers {
+       a[i] = h.key + ": " + h.value + "\r\n"
+   }
+   return strings.Join(a, "")
+}
+
+// SetHeaders sets the configured ac headers to req.
+func (ac *Config) SetHeaders(req *http.Request, setAuthHeader bool) {
+   reqHeaders := req.Header
+   for _, h := range ac.headers {
+       reqHeaders.Set(h.key, h.value)
+   }
+   if setAuthHeader {
+       if ah := ac.GetAuthHeader(); ah != "" {
+           reqHeaders.Set("Authorization", ah)
+       }
+   }
+}
+
+// SetFasthttpHeaders sets the configured ac headers to req.
+func (ac *Config) SetFasthttpHeaders(req *fasthttp.Request, setAuthHeader bool) {
+   reqHeaders := &req.Header
+   for _, h := range ac.headers {
+       reqHeaders.Set(h.key, h.value)
+   }
+   if setAuthHeader {
+       if ah := ac.GetAuthHeader(); ah != "" {
+           reqHeaders.Set("Authorization", ah)
+       }
+   }
+}
+
// GetAuthHeader returns optional `Authorization: ...` http header.
func (ac *Config) GetAuthHeader() string {
    f := ac.getAuthHeader
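The `"key: value"` contract enforced by `parseHeaders` above is small enough to demo standalone. A self-contained re-implementation of the same rule for illustration (this is not the vmagent code itself, just a sketch of its behavior):

```go
package main

import (
    "fmt"
    "strings"
)

// parseHeader mirrors the rule above: split on the first ':',
// then trim whitespace around both the key and the value.
func parseHeader(h string) (key, value string, err error) {
    n := strings.IndexByte(h, ':')
    if n < 0 {
        return "", "", fmt.Errorf(`missing ':' in header %q`, h)
    }
    return strings.TrimSpace(h[:n]), strings.TrimSpace(h[n+1:]), nil
}

func main() {
    for _, h := range []string{"TenantID: abc", "X: y:z", "broken"} {
        k, v, err := parseHeader(h)
        if err != nil {
            fmt.Println(err) // "broken" has no ':' and is rejected
            continue
        }
        fmt.Printf("key=%q value=%q\n", k, v) // "X: y:z" keeps "y:z" as the value
    }
}
```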
@@ -281,8 +351,8 @@ func (ac *Config) GetAuthHeader() string {
// It is also used for comparing Config objects for equality. If two Config
// objects have the same string representation, then they are considered equal.
func (ac *Config) String() string {
-   return fmt.Sprintf("AuthDigest=%s, TLSRootCA=%s, TLSCertificate=%s, TLSServerName=%s, TLSInsecureSkipVerify=%v, TLSMinVersion=%d",
-       ac.authDigest, ac.tlsRootCAString(), ac.tlsCertDigest, ac.TLSServerName, ac.TLSInsecureSkipVerify, ac.TLSMinVersion)
+   return fmt.Sprintf("AuthDigest=%s, Headers=%s, TLSRootCA=%s, TLSCertificate=%s, TLSServerName=%s, TLSInsecureSkipVerify=%v, TLSMinVersion=%d",
+       ac.authDigest, ac.headers, ac.tlsRootCAString(), ac.tlsCertDigest, ac.TLSServerName, ac.TLSInsecureSkipVerify, ac.TLSMinVersion)
}

func (ac *Config) tlsRootCAString() string {
@@ -330,21 +400,26 @@ func (ac *Config) NewTLSConfig() *tls.Config {

// NewConfig creates auth config for the given hcc.
func (hcc *HTTPClientConfig) NewConfig(baseDir string) (*Config, error) {
-   return NewConfig(baseDir, hcc.Authorization, hcc.BasicAuth, hcc.BearerToken.String(), hcc.BearerTokenFile, hcc.OAuth2, hcc.TLSConfig)
+   return NewConfig(baseDir, hcc.Authorization, hcc.BasicAuth, hcc.BearerToken.String(), hcc.BearerTokenFile, hcc.OAuth2, hcc.TLSConfig, hcc.Headers)
}

// NewConfig creates auth config for the given pcc.
func (pcc *ProxyClientConfig) NewConfig(baseDir string) (*Config, error) {
-   return NewConfig(baseDir, pcc.Authorization, pcc.BasicAuth, pcc.BearerToken.String(), pcc.BearerTokenFile, nil, pcc.TLSConfig)
+   return NewConfig(baseDir, pcc.Authorization, pcc.BasicAuth, pcc.BearerToken.String(), pcc.BearerTokenFile, pcc.OAuth2, pcc.TLSConfig, pcc.Headers)
}

// NewConfig creates auth config for the given o.
func (o *OAuth2Config) NewConfig(baseDir string) (*Config, error) {
-   return NewConfig(baseDir, nil, nil, "", "", nil, o.TLSConfig)
+   return NewConfig(baseDir, nil, nil, "", "", nil, o.TLSConfig, nil)
+}
+
+// NewConfig creates auth config for the given ba.
+func (ba *BasicAuthConfig) NewConfig(baseDir string) (*Config, error) {
+   return NewConfig(baseDir, nil, ba, "", "", nil, nil, nil)
}

// NewConfig creates auth config from the given args.
-func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, bearerToken, bearerTokenFile string, o *OAuth2Config, tlsConfig *TLSConfig) (*Config, error) {
+func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, bearerToken, bearerTokenFile string, o *OAuth2Config, tlsConfig *TLSConfig, headers []string) (*Config, error) {
    var getAuthHeader func() string
    authDigest := ""
    if az != nil {
@@ -517,6 +592,10 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
            tlsMinVersion = v
        }
    }
+   parsedHeaders, err := parseHeaders(headers)
+   if err != nil {
+       return nil, err
+   }
    ac := &Config{
        TLSRootCA:     tlsRootCA,
        TLSServerName: tlsServerName,
@@ -527,6 +606,7 @@ func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, be
        tlsCertDigest: tlsCertDigest,

        getAuthHeader: getAuthHeader,
+       headers:       parsedHeaders,
        authDigest:    authDigest,
    }
    return ac, nil
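Downstream callers now pass the raw `headers` list as the new trailing argument of `NewConfig`. A minimal usage sketch mirroring the tests later in this diff (the import path is assumed from the repo layout; target URL and header are illustrative):

```go
package main

import (
    "log"
    "net/http"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)

func main() {
    // The trailing argument carries the new `headers` option ("key: value" strings).
    ac, err := promauth.NewConfig(".", nil, nil, "", "", nil, nil, []string{"TenantID: abc"})
    if err != nil {
        log.Fatalf("cannot create auth config: %s", err)
    }
    req, err := http.NewRequest("GET", "http://target:9100/metrics", nil)
    if err != nil {
        log.Fatalf("cannot create request: %s", err)
    }
    // The second argument controls whether the Authorization header is also set.
    ac.SetHeaders(req, true)
    log.Printf("headers: %v", req.Header)
}
```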
@@ -4,6 +4,8 @@ import (
    "net/http"
    "net/http/httptest"
    "testing"
+
+   "github.com/VictoriaMetrics/fasthttp"
)

func TestNewConfig(t *testing.T) {
@@ -116,18 +118,103 @@ func TestNewConfig(t *testing.T) {
                mock := httptest.NewServer(r)
                tt.args.oauth.TokenURL = mock.URL
            }
-           got, err := NewConfig(tt.args.baseDir, tt.args.az, tt.args.basicAuth, tt.args.bearerToken, tt.args.bearerTokenFile, tt.args.oauth, tt.args.tlsConfig)
+           got, err := NewConfig(tt.args.baseDir, tt.args.az, tt.args.basicAuth, tt.args.bearerToken, tt.args.bearerTokenFile, tt.args.oauth, tt.args.tlsConfig, nil)
            if (err != nil) != tt.wantErr {
                t.Errorf("NewConfig() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if got != nil {
-               ah := got.GetAuthHeader()
+               req, err := http.NewRequest("GET", "http://foo", nil)
+               if err != nil {
+                   t.Fatalf("unexpected error in http.NewRequest: %s", err)
+               }
+               got.SetHeaders(req, true)
+               ah := req.Header.Get("Authorization")
                if ah != tt.expectHeader {
-                   t.Fatalf("unexpected auth header; got %q; want %q", ah, tt.expectHeader)
+                   t.Fatalf("unexpected auth header from net/http request; got %q; want %q", ah, tt.expectHeader)
+               }
+               var fhreq fasthttp.Request
+               got.SetFasthttpHeaders(&fhreq, true)
+               ahb := fhreq.Header.Peek("Authorization")
+               if string(ahb) != tt.expectHeader {
+                   t.Fatalf("unexpected auth header from fasthttp request; got %q; want %q", ahb, tt.expectHeader)
                }
            }
        })
    }
}

+func TestParseHeadersSuccess(t *testing.T) {
+   f := func(headers []string) {
+       t.Helper()
+       headersParsed, err := parseHeaders(headers)
+       if err != nil {
+           t.Fatalf("unexpected error when parsing %s: %s", headers, err)
+       }
+       for i, h := range headersParsed {
+           s := h.key + ": " + h.value
+           if s != headers[i] {
+               t.Fatalf("unexpected header parsed; got %q; want %q", s, headers[i])
+           }
+       }
+   }
+   f(nil)
+   f([]string{"foo: bar"})
+   f([]string{"Foo: bar", "A-b-c: d-e-f"})
+}
+
+func TestParseHeadersFailure(t *testing.T) {
+   f := func(headers []string) {
+       t.Helper()
+       headersParsed, err := parseHeaders(headers)
+       if err == nil {
+           t.Fatalf("expecting non-nil error from parseHeaders(%s)", headers)
+       }
+       if headersParsed != nil {
+           t.Fatalf("expecting nil result from parseHeaders(%s)", headers)
+       }
+   }
+   f([]string{"foo"})
+   f([]string{"foo bar baz"})
+}
+
+func TestConfigHeaders(t *testing.T) {
+   f := func(headers []string, resultExpected string) {
+       t.Helper()
+       headersParsed, err := parseHeaders(headers)
+       if err != nil {
+           t.Fatalf("cannot parse headers: %s", err)
+       }
+       c, err := NewConfig("", nil, nil, "", "", nil, nil, headers)
+       if err != nil {
+           t.Fatalf("cannot create config: %s", err)
+       }
+       req, err := http.NewRequest("GET", "http://foo", nil)
+       if err != nil {
+           t.Fatalf("unexpected error in http.NewRequest: %s", err)
+       }
+       result := c.HeadersNoAuthString()
+       if result != resultExpected {
+           t.Fatalf("unexpected result from HeadersNoAuthString; got\n%s\nwant\n%s", result, resultExpected)
+       }
+       c.SetHeaders(req, false)
+       for _, h := range headersParsed {
+           v := req.Header.Get(h.key)
+           if v != h.value {
+               t.Fatalf("unexpected value for net/http header %q; got %q; want %q", h.key, v, h.value)
+           }
+       }
+       var fhreq fasthttp.Request
+       c.SetFasthttpHeaders(&fhreq, false)
+       for _, h := range headersParsed {
+           v := fhreq.Header.Peek(h.key)
+           if string(v) != h.value {
+               t.Fatalf("unexpected value for fasthttp header %q; got %q; want %q", h.key, v, h.value)
+           }
+       }
+   }
+   f(nil, "")
+   f([]string{"foo: bar"}, "foo: bar\r\n")
+   f([]string{"Foo-Bar: Baz s:sdf", "A:b", "X-Forwarded-For: A-B:c"}, "Foo-Bar: Baz s:sdf\r\nA: b\r\nX-Forwarded-For: A-B:c\r\n")
+}
@@ -48,8 +48,10 @@ type client struct {
    scrapeTimeoutSecondsStr string
    host                    string
    requestURI              string
-   getAuthHeader           func() string
+   setHeaders              func(req *http.Request)
-   getProxyAuthHeader      func() string
+   setProxyHeaders         func(req *http.Request)
+   setFasthttpHeaders      func(req *fasthttp.Request)
+   setFasthttpProxyHeaders func(req *fasthttp.Request)
    denyRedirects           bool
    disableCompression      bool
    disableKeepAlive        bool
@@ -65,7 +67,8 @@ func newClient(sw *ScrapeWork) *client {
    if isTLS {
        tlsCfg = sw.AuthConfig.NewTLSConfig()
    }
-   getProxyAuthHeader := func() string { return "" }
+   setProxyHeaders := func(req *http.Request) {}
+   setFasthttpProxyHeaders := func(req *fasthttp.Request) {}
    proxyURL := sw.ProxyURL
    if !isTLS && proxyURL.IsHTTPOrHTTPS() {
        // Send full sw.ScrapeURL in requests to a proxy host for non-TLS scrape targets
@@ -79,8 +82,11 @@ func newClient(sw *ScrapeWork) *client {
        tlsCfg = sw.ProxyAuthConfig.NewTLSConfig()
    }
    proxyURLOrig := proxyURL
-   getProxyAuthHeader = func() string {
-       return proxyURLOrig.GetAuthHeader(sw.ProxyAuthConfig)
+   setProxyHeaders = func(req *http.Request) {
+       proxyURLOrig.SetHeaders(sw.ProxyAuthConfig, req)
+   }
+   setFasthttpProxyHeaders = func(req *fasthttp.Request) {
+       proxyURLOrig.SetFasthttpHeaders(sw.ProxyAuthConfig, req)
    }
    proxyURL = &proxy.URL{}
}
@@ -148,8 +154,10 @@ func newClient(sw *ScrapeWork) *client {
        scrapeTimeoutSecondsStr: fmt.Sprintf("%.3f", sw.ScrapeTimeout.Seconds()),
        host:                    host,
        requestURI:              requestURI,
-       getAuthHeader:           sw.AuthConfig.GetAuthHeader,
+       setHeaders:              func(req *http.Request) { sw.AuthConfig.SetHeaders(req, true) },
-       getProxyAuthHeader:      getProxyAuthHeader,
+       setProxyHeaders:         setProxyHeaders,
+       setFasthttpHeaders:      func(req *fasthttp.Request) { sw.AuthConfig.SetFasthttpHeaders(req, true) },
+       setFasthttpProxyHeaders: setFasthttpProxyHeaders,
        denyRedirects:           sw.DenyRedirects,
        disableCompression:      sw.DisableCompression,
        disableKeepAlive:        sw.DisableKeepAlive,
@@ -173,12 +181,8 @@ func (c *client) GetStreamReader() (*streamReader, error) {
    // Set X-Prometheus-Scrape-Timeout-Seconds like Prometheus does, since it is used by some exporters such as PushProx.
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179#issuecomment-813117162
    req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", c.scrapeTimeoutSecondsStr)
-   if ah := c.getAuthHeader(); ah != "" {
-       req.Header.Set("Authorization", ah)
-   }
-   if ah := c.getProxyAuthHeader(); ah != "" {
-       req.Header.Set("Proxy-Authorization", ah)
-   }
+   c.setHeaders(req)
+   c.setProxyHeaders(req)
    resp, err := c.sc.Do(req)
    if err != nil {
        cancel()
|
||||||
// Set X-Prometheus-Scrape-Timeout-Seconds like Prometheus does, since it is used by some exporters such as PushProx.
|
// Set X-Prometheus-Scrape-Timeout-Seconds like Prometheus does, since it is used by some exporters such as PushProx.
|
||||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179#issuecomment-813117162
|
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179#issuecomment-813117162
|
||||||
req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", c.scrapeTimeoutSecondsStr)
|
req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", c.scrapeTimeoutSecondsStr)
|
||||||
if ah := c.getAuthHeader(); ah != "" {
|
c.setFasthttpHeaders(req)
|
||||||
req.Header.Set("Authorization", ah)
|
c.setFasthttpProxyHeaders(req)
|
||||||
}
|
|
||||||
if ah := c.getProxyAuthHeader(); ah != "" {
|
|
||||||
req.Header.Set("Proxy-Authorization", ah)
|
|
||||||
}
|
|
||||||
if !*disableCompression && !c.disableCompression {
|
if !*disableCompression && !c.disableCompression {
|
||||||
req.Header.Set("Accept-Encoding", "gzip")
|
req.Header.Set("Accept-Encoding", "gzip")
|
||||||
}
|
}
|
||||||
|
|
|
@@ -141,6 +141,9 @@ scrape_configs:
    - x
  authorization:
    type: foobar
+  headers:
+  - 'TenantID: fooBar'
+  - 'X: y:z'
  relabel_configs:
  - source_labels: [abc]
  static_configs:
@@ -149,6 +152,8 @@ scrape_configs:
  relabel_debug: true
  scrape_align_interval: 1h30m0s
  proxy_bearer_token_file: file.txt
+  proxy_headers:
+  - 'My-Auth-Header: top-secret'
`)
}
@@ -332,7 +337,7 @@ scrape_configs:
        jobNameOriginal: "blackbox",
    }}
    if !reflect.DeepEqual(sws, swsExpected) {
-       t.Fatalf("unexpected scrapeWork;\ngot\n%+v\nwant\n%+v", sws, swsExpected)
+       t.Fatalf("unexpected scrapeWork;\ngot\n%#v\nwant\n%#v", sws, swsExpected)
    }
}
@@ -1650,12 +1655,25 @@ scrape_configs:
        jobNameOriginal: "aaa",
    },
    })

+   ac, err := promauth.NewConfig(".", nil, nil, "", "", nil, nil, []string{"My-Auth: foo-Bar"})
+   if err != nil {
+       t.Fatalf("unexpected error when creating promauth.Config: %s", err)
+   }
+   proxyAC, err := promauth.NewConfig(".", nil, nil, "", "", nil, nil, []string{"Foo:bar"})
+   if err != nil {
+       t.Fatalf("unexpected error when creating promauth.Config for proxy: %s", err)
+   }
    f(`
scrape_configs:
- job_name: 'snmp'
  sample_limit: 100
  disable_keepalive: true
  disable_compression: true
+  headers:
+  - "My-Auth: foo-Bar"
+  proxy_headers:
+  - "Foo: bar"
  scrape_align_interval: 1s
  scrape_offset: 0.5s
  static_configs:
@@ -1727,8 +1745,8 @@ scrape_configs:
            Value: "snmp",
        },
    },
-   AuthConfig:      &promauth.Config{},
+   AuthConfig:      ac,
-   ProxyAuthConfig: &promauth.Config{},
+   ProxyAuthConfig: proxyAC,
    SampleLimit:        100,
    DisableKeepAlive:   true,
    DisableCompression: true,
@@ -16,7 +16,8 @@ func newAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFu
    default:
        return nil, fmt.Errorf("unexpected `role`: %q; must be one of `node`, `pod`, `service`, `endpoints`, `endpointslice` or `ingress`", role)
    }
-   ac, err := sdc.HTTPClientConfig.NewConfig(baseDir)
+   cc := &sdc.HTTPClientConfig
+   ac, err := cc.NewConfig(baseDir)
    if err != nil {
        return nil, fmt.Errorf("cannot parse auth config: %w", err)
    }
@@ -30,7 +31,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFu
    if err != nil {
        return nil, fmt.Errorf("cannot build kube config from the specified `kubeconfig_file` config option: %w", err)
    }
-   acNew, err := promauth.NewConfig(".", nil, kc.basicAuth, kc.token, kc.tokenFile, nil, kc.tlsConfig)
+   acNew, err := promauth.NewConfig(".", nil, kc.basicAuth, kc.token, kc.tokenFile, cc.OAuth2, kc.tlsConfig, cc.Headers)
    if err != nil {
        return nil, fmt.Errorf("cannot initialize auth config from `kubeconfig_file: %q`: %w", sdc.KubeConfigFile, err)
    }
@@ -57,7 +58,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFu
    tlsConfig := promauth.TLSConfig{
        CAFile: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
    }
-   acNew, err := promauth.NewConfig(".", nil, nil, "", "/var/run/secrets/kubernetes.io/serviceaccount/token", nil, &tlsConfig)
+   acNew, err := promauth.NewConfig(".", nil, nil, "", "/var/run/secrets/kubernetes.io/serviceaccount/token", cc.OAuth2, &tlsConfig, cc.Headers)
    if err != nil {
        return nil, fmt.Errorf("cannot initialize service account auth: %w; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err)
    }
@@ -207,8 +207,8 @@ type groupWatcher struct {
    selectors          []Selector
    attachNodeMetadata bool

-   getAuthHeader func() string
+   setHeaders func(req *http.Request)
    client *http.Client

    mu sync.Mutex
    m  map[string]*urlWatcher
@@ -235,9 +235,9 @@ func newGroupWatcher(apiServer string, ac *promauth.Config, namespaces []string,
        selectors:          selectors,
        attachNodeMetadata: attachNodeMetadata,

-       getAuthHeader: ac.GetAuthHeader,
+       setHeaders: func(req *http.Request) { ac.SetHeaders(req, true) },
        client: client,
        m:      make(map[string]*urlWatcher),
    }
}
@@ -407,9 +407,7 @@ func (gw *groupWatcher) doRequest(requestURL string) (*http.Response, error) {
    if err != nil {
        logger.Fatalf("cannot create a request for %q: %s", requestURL, err)
    }
-   if ah := gw.getAuthHeader(); ah != "" {
-       req.Header.Set("Authorization", ah)
-   }
+   gw.setHeaders(req)
    resp, err := gw.client.Do(req)
    if err != nil {
        return nil, err
@@ -81,7 +81,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
        port: sdc.Port,
    }
    if sdc.TLSConfig != nil {
-       ac, err := promauth.NewConfig(baseDir, nil, nil, "", "", nil, sdc.TLSConfig)
+       ac, err := promauth.NewConfig(baseDir, nil, nil, "", "", nil, sdc.TLSConfig, nil)
        if err != nil {
            return nil, err
        }
@@ -42,10 +42,10 @@ type Client struct {

    apiServer string

    hostPort string
-   getAuthHeader      func() string
+   setFasthttpHeaders      func(req *fasthttp.Request)
-   getProxyAuthHeader func() string
+   setFasthttpProxyHeaders func(req *fasthttp.Request)
    sendFullURL bool
}

// NewClient returns new Client for the given args.
@@ -70,7 +70,7 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
        tlsCfg = ac.NewTLSConfig()
    }
    sendFullURL := !isTLS && proxyURL.IsHTTPOrHTTPS()
-   getProxyAuthHeader := func() string { return "" }
+   setFasthttpProxyHeaders := func(req *fasthttp.Request) {}
    if sendFullURL {
        // Send full urls in requests to a proxy host for non-TLS apiServer
        // like net/http package from Go does.
@@ -82,8 +82,8 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
        tlsCfg = proxyAC.NewTLSConfig()
    }
    proxyURLOrig := proxyURL
-   getProxyAuthHeader = func() string {
-       return proxyURLOrig.GetAuthHeader(proxyAC)
+   setFasthttpProxyHeaders = func(req *fasthttp.Request) {
+       proxyURLOrig.SetFasthttpHeaders(proxyAC, req)
    }
    proxyURL = &proxy.URL{}
}
@@ -123,18 +123,18 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
        MaxConns: 64 * 1024,
        Dial:     dialFunc,
    }
-   getAuthHeader := func() string { return "" }
+   setFasthttpHeaders := func(req *fasthttp.Request) {}
    if ac != nil {
-       getAuthHeader = ac.GetAuthHeader
+       setFasthttpHeaders = func(req *fasthttp.Request) { ac.SetFasthttpHeaders(req, true) }
    }
    return &Client{
        hc:             hc,
        blockingClient: blockingClient,
        apiServer:      apiServer,
        hostPort:       hostPort,
-       getAuthHeader:      getAuthHeader,
+       setFasthttpHeaders:      setFasthttpHeaders,
-       getProxyAuthHeader: getProxyAuthHeader,
+       setFasthttpProxyHeaders: setFasthttpProxyHeaders,
        sendFullURL: sendFullURL,
    }, nil
}
@@ -202,12 +202,8 @@ func (c *Client) getAPIResponseWithParamsAndClient(client *fasthttp.HostClient,
    }
    req.Header.SetHost(c.hostPort)
    req.Header.Set("Accept-Encoding", "gzip")
-   if ah := c.getAuthHeader(); ah != "" {
-       req.Header.Set("Authorization", ah)
-   }
-   if ah := c.getProxyAuthHeader(); ah != "" {
-       req.Header.Set("Proxy-Authorization", ah)
-   }
+   c.setFasthttpHeaders(&req)
+   c.setFasthttpProxyHeaders(&req)
    if modifyRequest != nil {
        modifyRequest(&req)
    }
@@ -332,7 +332,10 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}, globalStopCh <-chan struct{})
            // Do not send staleness markers on graceful shutdown as Prometheus does.
            // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2013#issuecomment-1006994079
        default:
-           // Send staleness markers when the given target disappears.
+           // Send staleness markers to all the metrics scraped last time from the target
+           // when the given target disappears as Prometheus does.
+           // Use the current real timestamp for staleness markers, so queries
+           // stop returning data just after the time the target disappears.
            sw.sendStaleSeries(lastScrape, "", t, true)
        }
        if sw.seriesLimiter != nil {
@@ -491,7 +494,9 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
    }
    // body must be released only after wc is released, since wc refers to body.
    if !areIdenticalSeries {
-       sw.sendStaleSeries(lastScrape, bodyString, scrapeTimestamp, false)
+       // Send stale markers for disappeared metrics with the real scrape timestamp
+       // in order to guarantee that query doesn't return data after this time for the disappeared metrics.
+       sw.sendStaleSeries(lastScrape, bodyString, realTimestamp, false)
        sw.storeLastScrape(body.B)
    }
    sw.finalizeLastScrape()
@@ -599,7 +604,9 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
    wc.reset()
    writeRequestCtxPool.Put(wc)
    if !areIdenticalSeries {
-       sw.sendStaleSeries(lastScrape, bodyString, scrapeTimestamp, false)
+       // Send stale markers for disappeared metrics with the real scrape timestamp
+       // in order to guarantee that query doesn't return data after this time for the disappeared metrics.
+       sw.sendStaleSeries(lastScrape, bodyString, realTimestamp, false)
        sw.storeLastScrape(sbr.body)
    }
    sw.finalizeLastScrape()
@@ -6,6 +6,7 @@ import (
    "encoding/base64"
    "fmt"
    "net"
+   "net/http"
    "net/url"
    "strings"
    "time"
@@ -60,8 +61,26 @@ func (u *URL) String() string {
    return pu.String()
}

-// GetAuthHeader returns Proxy-Authorization auth header for the given u and ac.
-func (u *URL) GetAuthHeader(ac *promauth.Config) string {
+// SetHeaders sets headers to req according to u and ac configs.
+func (u *URL) SetHeaders(ac *promauth.Config, req *http.Request) {
+   ah := u.getAuthHeader(ac)
+   if ah != "" {
+       req.Header.Set("Proxy-Authorization", ah)
+   }
+   ac.SetHeaders(req, false)
+}
+
+// SetFasthttpHeaders sets headers to req according to u and ac configs.
+func (u *URL) SetFasthttpHeaders(ac *promauth.Config, req *fasthttp.Request) {
+   ah := u.getAuthHeader(ac)
+   if ah != "" {
+       req.Header.Set("Proxy-Authorization", ah)
+   }
+   ac.SetFasthttpHeaders(req, false)
+}
+
+// getAuthHeader returns Proxy-Authorization auth header for the given u and ac.
+func (u *URL) getAuthHeader(ac *promauth.Config) string {
    authHeader := ""
    if ac != nil {
        authHeader = ac.GetAuthHeader()
@@ -130,9 +149,10 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) {
    if isTLS {
        proxyConn = tls.Client(proxyConn, tlsCfg)
    }
-   authHeader := u.GetAuthHeader(ac)
+   authHeader := u.getAuthHeader(ac)
    if authHeader != "" {
        authHeader = "Proxy-Authorization: " + authHeader + "\r\n"
+       authHeader += ac.HeadersNoAuthString()
    }
    conn, err := sendConnectRequest(proxyConn, proxyAddr, addr, authHeader)
    if err != nil {
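With this change, the CONNECT preamble built for `sendConnectRequest` carries the proxy auth header followed by the `HeadersNoAuthString` output, each line terminated with `\r\n`. A rough, hypothetical wire-level view (abridged; the credentials and `My-Auth` header are invented, assuming `proxy_headers: ["My-Auth: TopSecret"]` plus Basic proxy auth):

```
CONNECT scrape-target:443 HTTP/1.1
Proxy-Authorization: Basic dXNlcjpwYXNz
My-Auth: TopSecret
```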