Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
commit 628e87e727
51 changed files with 491 additions and 1470 deletions
@@ -6,7 +6,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"net"
 	"net/http"
@@ -308,7 +307,7 @@ func readIn(readFor string, t *testing.T, insertTime time.Time) []test {
 		if filepath.Ext(path) != ".json" {
 			return nil
 		}
-		b, err := ioutil.ReadFile(path)
+		b, err := os.ReadFile(path)
 		s.noError(err)
 		item := test{}
 		s.noError(json.Unmarshal(b, &item))
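The change above sets the theme for most of this commit: the Go 1.16+ migration away from the deprecated `io/ioutil` package, whose helpers are documented as thin wrappers around `os` and `io`, so the renames are behavior-preserving (with one exception around `ReadDir`, noted further below). A minimal, self-contained sketch of the mapping — the file paths are placeholders, not paths from this repo:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.WriteFile -> os.WriteFile
	if err := os.WriteFile("/tmp/example.txt", []byte("hello"), 0600); err != nil {
		panic(err)
	}
	// ioutil.ReadFile -> os.ReadFile
	data, err := os.ReadFile("/tmp/example.txt")
	if err != nil {
		panic(err)
	}
	// ioutil.ReadAll -> io.ReadAll
	all, err := io.ReadAll(strings.NewReader(string(data)))
	if err != nil {
		panic(err)
	}
	// ioutil.TempFile -> os.CreateTemp
	f, err := os.CreateTemp("", "example")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	_ = f.Close()
	// ioutil.Discard -> io.Discard
	fmt.Fprintln(io.Discard, string(all))
}
```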
@@ -178,7 +178,7 @@ See [the list of supported service discovery types for Prometheus scrape targets
 
 ## scrape_config enhancements
 
-`vmagent` supports the following additional options in `scrape_configs` section:
+`vmagent` supports the following additional options in [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) section:
 
 * `headers` - a list of HTTP headers to send to scrape target with each scrape request. This can be used when the scrape target needs custom authorization and authentication. For example:
 
@@ -199,9 +199,12 @@ scrape_configs:
 * `relabel_debug: true` for enabling debug logging during relabeling of the discovered targets. See [these docs](#relabeling).
 * `metric_relabel_debug: true` for enabling debug logging during relabeling of the scraped metrics. See [these docs](#relabeling).
 
+See [scrape_configs docs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) for more details on all the supported options.
+
+
 ## Loading scrape configs from multiple files
 
-`vmagent` supports loading scrape configs from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file and from `https://config-server/scrape_config.yml` url:
+`vmagent` supports loading [scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file and from `https://config-server/scrape_config.yml` url:
 
 ```yml
 scrape_config_files:
@@ -210,7 +213,7 @@ scrape_config_files:
 - https://config-server/scrape_config.yml
 ```
 
-Every referred file can contain arbitrary number of [supported scrape configs](#how-to-collect-metrics-in-prometheus-format). There is no need in specifying top-level `scrape_configs` section in these files. For example:
+Every referred file can contain arbitrary number of [supported scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs). There is no need in specifying top-level `scrape_configs` section in these files. For example:
 
 ```yml
 - job_name: foo
@@ -338,85 +341,94 @@ The following articles contain useful information about Prometheus relabeling:
 
 ## Relabeling enhancements
 
-VictoriaMetrics provides the following additional relabeling actions on top of standard actions from the [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config):
-
-* `replace_all` replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurrences of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`):
-
-```yaml
-- action: replace_all
-  source_labels: ["__name__"]
-  target_label: "__name__"
-  regex: "-"
-  replacement: "_"
-```
-
-* `labelmap_all` replaces all of the occurrences of `regex` in all the label names with the `replacement`. For example, the following relabeling config replaces all the occurrences of `-` char in all the label names with `_` char (e.g. `foo-bar-baz` label name is transformed into `foo_bar_baz`):
-
-```yaml
-- action: labelmap_all
-  regex: "-"
-  replacement: "_"
-```
-
-* `keep_if_equal`: keeps the entry if all the label values from `source_labels` are equal, while dropping all the other entries. For example, the following relabeling config keeps targets if they contain equal values for `instance` and `host` labels, while dropping all the other targets:
-
-```yaml
-- action: keep_if_equal
-  source_labels: ["instance", "host"]
-```
-
-* `drop_if_equal`: drops the entry if all the label values from `source_labels` are equal, while keeping all the other entries. For example, the following relabeling config drops targets if they contain equal values for `instance` and `host` labels, while keeping all the other targets:
-
-```yaml
-- action: drop_if_equal
-  source_labels: ["instance", "host"]
-```
-
-* `keep_metrics`: keeps all the metrics with names matching the given `regex`, while dropping all the other metrics. For example, the following relabeling config keeps metrics with `foo` and `bar` names, while dropping all the other metrics:
-
-```yaml
-- action: keep_metrics
-  regex: "foo|bar"
-```
-
-* `drop_metrics`: drops all the metrics with names matching the given `regex`, while keeping all the other metrics. For example, the following relabeling config drops metrics with `foo` and `bar` names, while leaving all the other metrics:
-
-```yaml
-- action: drop_metrics
-  regex: "foo|bar"
-```
-
-* `graphite`: applies Graphite-style relabeling to metric name. See [these docs](#graphite-relabeling) for details.
-
-The `regex` value can be split into multiple lines for improved readability and maintainability. These lines are automatically joined with `|` char when parsed. For example, the following configs are equivalent:
-
-```yaml
-- action: keep_metrics
-  regex: "metric_a|metric_b|foo_.+"
-```
-
-```yaml
-- action: keep_metrics
-  regex:
-  - "metric_a"
-  - "metric_b"
-  - "foo_.+"
-```
-
-VictoriaMetrics components support an optional `if` filter in relabeling configs, which can be used for conditional relabeling. The `if` filter may contain arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). For example, the following relabeling rule drops metrics, which don't match `foo{bar="baz"}` series selector, while leaving the rest of metrics:
-
-```yaml
-- action: keep
-  if: 'foo{bar="baz"}'
-```
-
-This is equivalent to less clear Prometheus-compatible relabeling rule:
-
-```yaml
-- action: keep
-  source_labels: [__name__, bar]
-  regex: 'foo;baz'
-```
+* The `replacement` option can refer arbitrary labels via {% raw %}`{{label_name}}`{% endraw %} placeholders. Such placeholders are substituted with the corresponding label value. For example, the following relabeling rule sets `instance-job` label value to `host123-foo` when applied to the metric with `{instance="host123",job="foo"}` labels:
+
+{% raw %}
+```yaml
+- target_label: "instance-job"
+  replacement: "{{instance}}-{{job}}"
+```
+{% endraw %}
+
+* An optional `if` filter can be used for conditional relabeling. The `if` filter may contain arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). For example, the following relabeling rule drops metrics, which don't match `foo{bar="baz"}` series selector, while leaving the rest of metrics:
+
+```yaml
+- action: keep
+  if: 'foo{bar="baz"}'
+```
+
+This is equivalent to less clear Prometheus-compatible relabeling rule:
+
+```yaml
+- action: keep
+  source_labels: [__name__, bar]
+  regex: 'foo;baz'
+```
+
+* The `regex` value can be split into multiple lines for improved readability and maintainability. These lines are automatically joined with `|` char when parsed. For example, the following configs are equivalent:
+
+```yaml
+- action: keep_metrics
+  regex: "metric_a|metric_b|foo_.+"
+```
+
+```yaml
+- action: keep_metrics
+  regex:
+  - "metric_a"
+  - "metric_b"
+  - "foo_.+"
+```
+
+* VictoriaMetrics provides the following additional relabeling actions on top of standard actions from the [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config):
+
+* `replace_all` replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurrences of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`):
+
+```yaml
+- action: replace_all
+  source_labels: ["__name__"]
+  target_label: "__name__"
+  regex: "-"
+  replacement: "_"
+```
+
+* `labelmap_all` replaces all of the occurrences of `regex` in all the label names with the `replacement`. For example, the following relabeling config replaces all the occurrences of `-` char in all the label names with `_` char (e.g. `foo-bar-baz` label name is transformed into `foo_bar_baz`):
+
+```yaml
+- action: labelmap_all
+  regex: "-"
+  replacement: "_"
+```
+
+* `keep_if_equal`: keeps the entry if all the label values from `source_labels` are equal, while dropping all the other entries. For example, the following relabeling config keeps targets if they contain equal values for `instance` and `host` labels, while dropping all the other targets:
+
+```yaml
+- action: keep_if_equal
+  source_labels: ["instance", "host"]
+```
+
+* `drop_if_equal`: drops the entry if all the label values from `source_labels` are equal, while keeping all the other entries. For example, the following relabeling config drops targets if they contain equal values for `instance` and `host` labels, while keeping all the other targets:
+
+```yaml
+- action: drop_if_equal
+  source_labels: ["instance", "host"]
+```
+
+* `keep_metrics`: keeps all the metrics with names matching the given `regex`, while dropping all the other metrics. For example, the following relabeling config keeps metrics with `foo` and `bar` names, while dropping all the other metrics:
+
+```yaml
+- action: keep_metrics
+  regex: "foo|bar"
+```
+
+* `drop_metrics`: drops all the metrics with names matching the given `regex`, while keeping all the other metrics. For example, the following relabeling config drops metrics with `foo` and `bar` names, while leaving all the other metrics:
+
+```yaml
+- action: drop_metrics
+  regex: "foo|bar"
+```
+
+* `graphite`: applies Graphite-style relabeling to metric name. See [these docs](#graphite-relabeling) for details.
 
 ## Graphite relabeling
@@ -3,7 +3,7 @@ package remotewrite
 import (
 	"bytes"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -351,7 +351,7 @@ again:
 	}
 	metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_requests_total{url=%q, status_code="%d"}`, c.sanitizedURL, statusCode)).Inc()
 	if statusCode == 409 || statusCode == 400 {
-		body, err := ioutil.ReadAll(resp.Body)
+		body, err := io.ReadAll(resp.Body)
 		_ = resp.Body.Close()
 		if err != nil {
 			remoteWriteRejectedLogger.Errorf("sending a block with size %d bytes to %q was rejected (skipping the block): status code %d; "+
@@ -375,7 +375,7 @@ again:
 	if retryDuration > time.Minute {
 		retryDuration = time.Minute
 	}
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	_ = resp.Body.Close()
 	if err != nil {
 		logger.Errorf("cannot read response body from %q during retry #%d: %s", c.sanitizedURL, retriesCount, err)
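The response-handling hunks above all follow one pattern: drain the body with `io.ReadAll`, close it immediately, and only then check the read error. A self-contained sketch of that pattern (the URL and function name are placeholders, not code from this commit):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// readBody drains and closes an HTTP response body, mirroring the
// io.ReadAll-then-Close sequence used throughout this commit.
func readBody(resp *http.Response) ([]byte, error) {
	body, err := io.ReadAll(resp.Body)
	_ = resp.Body.Close()
	if err != nil {
		return nil, fmt.Errorf("cannot read response body: %w", err)
	}
	return body, nil
}

func main() {
	resp, err := http.Get("https://example.com")
	if err != nil {
		panic(err)
	}
	body, err := readBody(resp)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(body))
}
```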
@@ -4,8 +4,8 @@ import (
 	"crypto/md5"
 	"fmt"
 	"hash/fnv"
-	"io/ioutil"
 	"net/url"
+	"os"
 	"path/filepath"
 	"sort"
 	"strings"
@@ -214,7 +214,7 @@ func Parse(pathPatterns []string, validateTplFn ValidateTplFn, validateExpressio
 }
 
 func parseFile(path string) ([]Group, error) {
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return nil, fmt.Errorf("error reading alert rule file: %w", err)
 	}
@@ -3,7 +3,7 @@ package datasource
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -156,7 +156,7 @@ func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response,
 		return nil, fmt.Errorf("error getting response from %s: %w", req.URL.Redacted(), err)
 	}
 	if resp.StatusCode != http.StatusOK {
-		body, _ := ioutil.ReadAll(resp.Body)
+		body, _ := io.ReadAll(resp.Body)
 		_ = resp.Body.Close()
 		return nil, fmt.Errorf("unexpected response code %d for %s. Response body %s", resp.StatusCode, req.URL.Redacted(), body)
 	}
@@ -3,7 +3,6 @@ package main
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"net/url"
 	"os"
 	"testing"
@@ -87,7 +86,7 @@ groups:
 `
 )
 
-	f, err := ioutil.TempFile("", "")
+	f, err := os.CreateTemp("", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -154,7 +153,7 @@ groups:
 
 func writeToFile(t *testing.T, file, b string) {
 	t.Helper()
-	err := ioutil.WriteFile(file, []byte(b), 0644)
+	err := os.WriteFile(file, []byte(b), 0644)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"strings"
 	"time"
@@ -89,7 +89,7 @@ func (am *AlertManager) send(ctx context.Context, alerts []Alert) error {
 	defer func() { _ = resp.Body.Close() }()
 
 	if resp.StatusCode != http.StatusOK {
-		body, err := ioutil.ReadAll(resp.Body)
+		body, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return fmt.Errorf("failed to read response from %q: %w", am.addr, err)
 		}
@@ -4,8 +4,8 @@ import (
 	"crypto/md5"
 	"fmt"
 	"gopkg.in/yaml.v2"
-	"io/ioutil"
 	"net/url"
+	"os"
 	"path"
 	"path/filepath"
 	"strings"
@@ -104,7 +104,7 @@ func (cfg *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 }
 
 func parseConfig(path string) (*Config, error) {
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return nil, fmt.Errorf("error reading config file: %w", err)
 	}
@@ -2,7 +2,6 @@ package notifier
 
 import (
 	"fmt"
-	"io/ioutil"
 	"math/rand"
 	"net/http"
 	"net/http/httptest"
@@ -12,7 +11,7 @@ import (
 )
 
 func TestConfigWatcherReload(t *testing.T) {
-	f, err := ioutil.TempFile("", "")
+	f, err := os.CreateTemp("", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -34,7 +33,7 @@ static_configs:
 		t.Fatalf("expected to have 2 notifiers; got %d %#v", len(ns), ns)
 	}
 
-	f2, err := ioutil.TempFile("", "")
+	f2, err := os.CreateTemp("", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -61,7 +60,7 @@ func TestConfigWatcherStart(t *testing.T) {
 	consulSDServer := newFakeConsulServer()
 	defer consulSDServer.Close()
 
-	consulSDFile, err := ioutil.TempFile("", "")
+	consulSDFile, err := os.CreateTemp("", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -107,7 +106,7 @@ func TestConfigWatcherReloadConcurrent(t *testing.T) {
 	consulSDServer2 := newFakeConsulServer()
 	defer consulSDServer2.Close()
 
-	consulSDFile, err := ioutil.TempFile("", "")
+	consulSDFile, err := os.CreateTemp("", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -123,7 +122,7 @@ consul_sd_configs:
 - consul
 `, consulSDServer1.URL, consulSDServer2.URL))
 
-	staticAndConsulSDFile, err := ioutil.TempFile("", "")
+	staticAndConsulSDFile, err := os.CreateTemp("", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -175,7 +174,7 @@ consul_sd_configs:
 
 func writeToFile(t *testing.T, file, b string) {
 	t.Helper()
-	checkErr(t, ioutil.WriteFile(file, []byte(b), 0644))
+	checkErr(t, os.WriteFile(file, []byte(b), 0644))
 }
 
 func checkErr(t *testing.T, err error) {
@@ -5,7 +5,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"path"
 	"strings"
@@ -257,7 +257,7 @@ func (c *Client) send(ctx context.Context, data []byte) error {
 	}
 	defer func() { _ = resp.Body.Close() }()
 	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
-		body, _ := ioutil.ReadAll(resp.Body)
+		body, _ := io.ReadAll(resp.Body)
 		return fmt.Errorf("unexpected response code %d for %s. Response body %q",
 			resp.StatusCode, req.URL.Redacted(), body)
 	}
@@ -3,7 +3,7 @@ package remotewrite
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"math/rand"
 	"net/http"
 	"net/http/httptest"
@@ -96,7 +96,7 @@ func (rw *rwServer) handler(w http.ResponseWriter, r *http.Request) {
 		rw.err(w, fmt.Errorf("header read error: X-Prometheus-Remote-Write-Version is not 0.1.0 (%q)", h))
 	}
 
-	data, err := ioutil.ReadAll(r.Body)
+	data, err := io.ReadAll(r.Body)
 	if err != nil {
 		rw.err(w, fmt.Errorf("body read err: %w", err))
 		return
@@ -17,7 +17,7 @@ import (
 	"errors"
 	"fmt"
 	htmlTpl "html/template"
-	"io/ioutil"
+	"io"
 	"math"
 	"net"
 	"net/url"
@@ -71,7 +71,7 @@ func Load(pathPatterns []string, overwrite bool) error {
 		}
 	}
 	if len(tmpl.Templates()) > 0 {
-		err := tmpl.Execute(ioutil.Discard, nil)
+		err := tmpl.Execute(io.Discard, nil)
 		if err != nil {
 			return fmt.Errorf("failed to execute template: %w", err)
 		}
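Executing a parsed template against `io.Discard` (the replacement for `ioutil.Discard`) is a cheap way to validate that it renders without errors while throwing the output away. A small illustrative sketch of the idea, independent of this repo's template helpers:

```go
package main

import (
	"fmt"
	"io"
	"text/template"
)

func main() {
	tmpl, err := template.New("demo").Parse(`{{ .Name }} fired {{ .Count }} times`)
	if err != nil {
		panic(err)
	}
	// Render into io.Discard purely to catch execution-time errors
	// without keeping the output anywhere.
	data := map[string]interface{}{"Name": "alert", "Count": 3}
	if err := tmpl.Execute(io.Discard, data); err != nil {
		panic(err)
	}
	fmt.Println("template executed cleanly")
}
```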
@@ -4,8 +4,8 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
-	"io/ioutil"
 	"net/http"
+	"os"
 	"strings"
 )
 
@@ -38,7 +38,7 @@ func TLSConfig(certFile, keyFile, CAFile, serverName string, insecureSkipVerify
 
 	var rootCAs *x509.CertPool
 	if CAFile != "" {
-		pem, err := ioutil.ReadFile(CAFile)
+		pem, err := os.ReadFile(CAFile)
 		if err != nil {
 			return nil, fmt.Errorf("cannot read `ca_file` %q: %w", CAFile, err)
 		}
@@ -3,7 +3,7 @@ package opentsdb
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"log"
 	"net/http"
 	"strings"
@@ -115,7 +115,7 @@ func (c Client) FindMetrics(q string) ([]string, error) {
 		return nil, fmt.Errorf("Bad return from OpenTSDB: %q: %v", resp.StatusCode, resp)
 	}
 	defer func() { _ = resp.Body.Close() }()
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return nil, fmt.Errorf("could not retrieve metric data from %q: %s", q, err)
 	}
@@ -139,7 +139,7 @@ func (c Client) FindSeries(metric string) ([]Meta, error) {
 		return nil, fmt.Errorf("Bad return from OpenTSDB: %q: %v", resp.StatusCode, resp)
 	}
 	defer func() { _ = resp.Body.Close() }()
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return nil, fmt.Errorf("could not retrieve series data from %q: %s", q, err)
 	}
@@ -200,7 +200,7 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64, m
 		return Metric{}, nil
 	}
 	defer func() { _ = resp.Body.Close() }()
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		log.Println("couldn't read response body from OpenTSDB query...skipping")
 		return Metric{}, nil
@@ -6,7 +6,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"math"
 	"net/http"
 	"strings"
@@ -394,7 +393,7 @@ func do(req *http.Request) error {
 		_ = resp.Body.Close()
 	}()
 	if resp.StatusCode != http.StatusNoContent {
-		body, err := ioutil.ReadAll(resp.Body)
+		body, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err)
 		}
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"net/http"
 
@@ -148,7 +147,7 @@ func (c *vmNativeClient) do(req *http.Request, expSC int) (*http.Response, error
 	}
 
 	if resp.StatusCode != expSC {
-		body, err := ioutil.ReadAll(resp.Body)
+		body, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return nil, fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err)
 		}
@@ -2,7 +2,6 @@ package netstorage
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"sync"
 
@@ -109,7 +108,7 @@ func (tbf *tmpBlocksFile) WriteBlockRefData(b []byte) (tmpBlockAddr, error) {
 
 	// Slow path: flush the data from tbf.buf to file.
 	if tbf.f == nil {
-		f, err := ioutil.TempFile(tmpBlocksDir, "")
+		f, err := os.CreateTemp(tmpBlocksDir, "")
 		if err != nil {
 			return addr, err
 		}
@@ -4,7 +4,7 @@ import (
 	"crypto/rand"
 	"flag"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -413,7 +413,7 @@ func mustLoadRollupResultCacheKeyPrefix(path string) {
 		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
 		return
 	}
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		logger.Errorf("cannot load %s: %s; reset rollupResult cache", path, err)
 		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
@@ -24,6 +24,7 @@ The following tip changes can be tested by building VictoriaMetrics components f
 * FEATURE: return shorter error messages to Grafana and to other clients requesting [/api/v1/query](https://docs.victoriametrics.com/keyConcepts.html#instant-query) and [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) endpoints. This should simplify reading these errors by humans. The long error message with full context is still written to logs.
 * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): reduce the amounts of logging at `vmstorage` when `vmselect` connects/disconnects to `vmstorage`.
 * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve performance for heavy queries on systems with many CPU cores.
+* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add ability to use {% raw %}`{{label_name}}`{% endraw %} placeholders in the `replacement` option of relabeling rules. This simplifies constructing label values from multiple existing label values. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling-enhancements) for details.
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): generate additional per-target metrics - `scrape_series_limit`, `scrape_series_current` and `scrape_series_limit_samples_dropped` if series limit is set according to [these docs](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter). This simplifies alerting on targets with the exceeded series limit. See [these docs](https://docs.victoriametrics.com/vmagent.html#automatically-generated-metrics) for details on these metrics.
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for MX record types in [dns_sd_configs](https://docs.victoriametrics.com/sd_configs.html#dns_sd_configs) in the same way as Prometheus 2.38 [does](https://github.com/prometheus/prometheus/pull/10099).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `__meta_kubernetes_service_port_number` meta-label for `role: service` in [kubernetes_sd_configs](https://docs.victoriametrics.com/sd_configs.html#kubernetes_sd_configs) in the same way as Prometheus 2.38 [does](https://github.com/prometheus/prometheus/pull/11002).
@@ -36,6 +37,7 @@ The following tip changes can be tested by building VictoriaMetrics components f
 
 * BUGFIX: prevent from excess CPU usage when the storage enters [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
 * BUGFIX: improve performance for requests to [/api/v1/labels](https://docs.victoriametrics.com/url-examples.html#apiv1labels) and [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples.html#apiv1labelvalues) when the filter in the `match[]` query arg matches small number of time series. The performance for this case has been reduced in [v1.78.0](https://docs.victoriametrics.com/CHANGELOG.html#v1780). See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1533) issues.
+* BUGFIX: increase the default limit on the number of concurrent merges for small parts from 8 to 16. This should help resolving potential issues with heavy data ingestion. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2673#issuecomment-1218185978) from @lukepalmer .
 
 ## [v1.80.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.80.0)
@@ -300,8 +300,47 @@ All the node types - `vminsert`, `vmselect` and `vmstorage` - may be updated via
 Send `SIGINT` signal to the corresponding process, wait until it finishes and then start new version
 with new configs.
 
-Cluster should remain in working state if at least a single node of each type remains available during
-the update process. See [cluster availability](#cluster-availability) section for details.
+The following cluster update / upgrade approaches exist:
+
+* `No downtime` strategy. Gracefully restart every node in the cluster one-by-one with the updated config / upgraded binary.
+
+  It is recommended restarting the nodes in the following order:
+
+  1. Restart `vmstorage` nodes.
+  2. Restart `vminsert` nodes.
+  3. Restart `vmselect` nodes.
+
+  This strategy allows upgrading the cluster without downtime if the following conditions are met:
+
+  - The cluster has at least a pair of nodes of each type - `vminsert`, `vmselect` and `vmstorage`,
+    so it can continue accepting new data and serving incoming requests when a single node is temporarily unavailable
+    during its restart. See [cluster availability docs](#cluster-availability) for details.
+  - The cluster has enough compute resources (CPU, RAM, network bandwidth, disk IO) for processing
+    the current workload when a single node of any type (`vminsert`, `vmselect` or `vmstorage`)
+    is temporarily unavailable during its restart.
+  - The updated config / upgraded binary is compatible with the remaining components in the cluster.
+    See the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) for compatibility notes between different releases.
+
+  If at least a single condition isn't met, then the rolling restart may result in cluster unavailability
+  during the config update / version upgrade. In this case the following strategy is recommended.
+
+* `Minimum downtime` strategy:
+
+  1. Gracefully stop all the `vminsert` and `vmselect` nodes in parallel.
+  2. Gracefully restart all the `vmstorage` nodes in parallel.
+  3. Start all the `vminsert` and `vmselect` nodes in parallel.
+
+  The cluster is unavailable for data ingestion and querying when performing the steps above.
+  The downtime is minimized by restarting cluster nodes in parallel at every step above.
+  The `minimum downtime` strategy has the following benefits compared to the `no downtime` strategy:
+
+  - It allows performing config update / version upgrade with minimum disruption
+    when the previous config / version is incompatible with the new config / version.
+  - It allows performing config update / version upgrade with minimum disruption
+    when the cluster doesn't have enough compute resources (CPU, RAM, disk IO, network bandwidth)
+    for rolling upgrade.
+  - It allows minimizing the duration of config update / version upgrade for clusters with big number of nodes
+    or for clusters with big `vmstorage` nodes, which may take long time for graceful restart.
 
 ## Cluster availability
 
@@ -391,8 +430,8 @@ By default cluster components of VictoriaMetrics are tuned for an optimal resour
 - `-search.maxSamplesPerSeries` at `vmselect` limits the number of raw samples the query can process per each time series. `vmselect` sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage at `vmselect` in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
 - `-search.maxSamplesPerQuery` at `vmselect` limits the number of raw samples a single query can process. This allows limiting CPU usage at `vmselect` for heavy queries.
 - `-search.maxSeries` at `vmselect` limits the number of time series, which may be returned from [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers). This endpoint is used mostly by Grafana for auto-completion of metric names, label names and label values. Queries to this endpoint may take big amounts of CPU time and memory at `vmstorage` and `vmselect` when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxSeries` to quite low value in order limit CPU and memory usage.
-- `-search.maxTagKeys` at `vmselect` limits the number of items, which may be returned from [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names). This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory at `vmstorage` and `vmselect` when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagKeys` to quite low value in order to limit CPU and memory usage.
-- `-search.maxTagValues` at `vmselect` limits the number of items, which may be returned from [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values). This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory at `vmstorage` and `vmselect` when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage.
+- `-search.maxTagKeys` at `vmstorage` limits the number of items, which may be returned from [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names). This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory at `vmstorage` and `vmselect` when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagKeys` to quite low value in order to limit CPU and memory usage.
+- `-search.maxTagValues` at `vmstorage` limits the number of items, which may be returned from [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values). This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory at `vmstorage` and `vmselect` when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage.
 - `-storage.maxDailySeries` at `vmstorage` can be used for limiting the number of time series seen per day.
 - `-storage.maxHourlySeries` at `vmstorage` can be used for limiting the number of [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series).
 
@@ -335,10 +335,4 @@ The query engine may behave differently for some functions. Please see [this art
 
 Single-node VictoriaMetrics cannot be restarted / upgraded or downgraded without downtime, since it needs to be gracefully shut down and then started again. See [how to upgrade VictoriaMetrics](https://docs.victoriametrics.com/#how-to-upgrade-victoriametrics).
 
-[Cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) can be restarted / upgraded / downgraded without downtime if the following conditions are met:
-
-* If every component of the cluster - `vminsert`, `vmselect` and `vmstorage` - has at least 2 instances.
-* If the cluster has enough compute resources (CPU, RAM, disk IO, network bandwidth) to perform rolling restart of all its components.
-* If the version used for upgrade / downgrade is compatible with the currently running version. The [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) contains compatibility notes for the published releases.
-
-See [updating / reconfiguring cluster nodes](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#updating--reconfiguring-cluster-nodes) for details on cluster upgrade.
+[Cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) can be restarted / upgraded / downgraded without downtime according to [these instructions](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#updating--reconfiguring-cluster-nodes).
docs/vmagent.md (128 changed lines)
@@ -182,7 +182,7 @@ See [the list of supported service discovery types for Prometheus scrape targets
 
 ## scrape_config enhancements
 
-`vmagent` supports the following additional options in `scrape_configs` section:
+`vmagent` supports the following additional options in [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) section:
 
 * `headers` - a list of HTTP headers to send to scrape target with each scrape request. This can be used when the scrape target needs custom authorization and authentication. For example:
 
@@ -203,9 +203,12 @@ scrape_configs:
 * `relabel_debug: true` for enabling debug logging during relabeling of the discovered targets. See [these docs](#relabeling).
 * `metric_relabel_debug: true` for enabling debug logging during relabeling of the scraped metrics. See [these docs](#relabeling).
 
+See [scrape_configs docs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) for more details on all the supported options.
+
+
 ## Loading scrape configs from multiple files
 
-`vmagent` supports loading scrape configs from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file and from `https://config-server/scrape_config.yml` url:
+`vmagent` supports loading [scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs) from multiple files specified in the `scrape_config_files` section of `-promscrape.config` file. For example, the following `-promscrape.config` instructs `vmagent` loading scrape configs from all the `*.yml` files under `configs` directory, from `single_scrape_config.yml` local file and from `https://config-server/scrape_config.yml` url:
 
 ```yml
 scrape_config_files:
@@ -214,7 +217,7 @@ scrape_config_files:
 - https://config-server/scrape_config.yml
 ```
 
-Every referred file can contain arbitrary number of [supported scrape configs](#how-to-collect-metrics-in-prometheus-format). There is no need in specifying top-level `scrape_configs` section in these files. For example:
+Every referred file can contain arbitrary number of [supported scrape configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs). There is no need in specifying top-level `scrape_configs` section in these files. For example:
 
 ```yml
 - job_name: foo
@@ -342,85 +345,94 @@ The following articles contain useful information about Prometheus relabeling:
 
 ## Relabeling enhancements
 
-VictoriaMetrics provides the following additional relabeling actions on top of standard actions from the [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config):
-
-* `replace_all` replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurrences of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`):
-
-```yaml
-- action: replace_all
-  source_labels: ["__name__"]
-  target_label: "__name__"
-  regex: "-"
-  replacement: "_"
-```
-
-* `labelmap_all` replaces all of the occurrences of `regex` in all the label names with the `replacement`. For example, the following relabeling config replaces all the occurrences of `-` char in all the label names with `_` char (e.g. `foo-bar-baz` label name is transformed into `foo_bar_baz`):
-
-```yaml
-- action: labelmap_all
-  regex: "-"
-  replacement: "_"
-```
-
-* `keep_if_equal`: keeps the entry if all the label values from `source_labels` are equal, while dropping all the other entries. For example, the following relabeling config keeps targets if they contain equal values for `instance` and `host` labels, while dropping all the other targets:
-
-```yaml
-- action: keep_if_equal
-  source_labels: ["instance", "host"]
-```
-
-* `drop_if_equal`: drops the entry if all the label values from `source_labels` are equal, while keeping all the other entries. For example, the following relabeling config drops targets if they contain equal values for `instance` and `host` labels, while keeping all the other targets:
-
-```yaml
-- action: drop_if_equal
-  source_labels: ["instance", "host"]
-```
-
-* `keep_metrics`: keeps all the metrics with names matching the given `regex`, while dropping all the other metrics. For example, the following relabeling config keeps metrics with `foo` and `bar` names, while dropping all the other metrics:
-
-```yaml
-- action: keep_metrics
-  regex: "foo|bar"
-```
-
-* `drop_metrics`: drops all the metrics with names matching the given `regex`, while keeping all the other metrics. For example, the following relabeling config drops metrics with `foo` and `bar` names, while leaving all the other metrics:
-
-```yaml
-- action: drop_metrics
-  regex: "foo|bar"
-```
-
-* `graphite`: applies Graphite-style relabeling to metric name. See [these docs](#graphite-relabeling) for details.
-
-The `regex` value can be split into multiple lines for improved readability and maintainability. These lines are automatically joined with `|` char when parsed. For example, the following configs are equivalent:
-
-```yaml
-- action: keep_metrics
-  regex: "metric_a|metric_b|foo_.+"
-```
-
-```yaml
-- action: keep_metrics
-  regex:
-  - "metric_a"
-  - "metric_b"
-  - "foo_.+"
-```
-
-VictoriaMetrics components support an optional `if` filter in relabeling configs, which can be used for conditional relabeling. The `if` filter may contain arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). For example, the following relabeling rule drops metrics, which don't match `foo{bar="baz"}` series selector, while leaving the rest of metrics:
-
-```yaml
-- action: keep
-  if: 'foo{bar="baz"}'
-```
-
-This is equivalent to less clear Prometheus-compatible relabeling rule:
-
-```yaml
-- action: keep
-  source_labels: [__name__, bar]
-  regex: 'foo;baz'
-```
+* The `replacement` option can refer arbitrary labels via {% raw %}`{{label_name}}`{% endraw %} placeholders. Such placeholders are substituted with the corresponding label value. For example, the following relabeling rule sets `instance-job` label value to `host123-foo` when applied to the metric with `{instance="host123",job="foo"}` labels:
+
+{% raw %}
+```yaml
+- target_label: "instance-job"
+  replacement: "{{instance}}-{{job}}"
+```
+{% endraw %}
+
+* An optional `if` filter can be used for conditional relabeling. The `if` filter may contain arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). For example, the following relabeling rule drops metrics, which don't match `foo{bar="baz"}` series selector, while leaving the rest of metrics:
+
+```yaml
+- action: keep
+  if: 'foo{bar="baz"}'
+```
+
+This is equivalent to less clear Prometheus-compatible relabeling rule:
+
+```yaml
+- action: keep
+  source_labels: [__name__, bar]
+  regex: 'foo;baz'
+```
+
+* The `regex` value can be split into multiple lines for improved readability and maintainability. These lines are automatically joined with `|` char when parsed. For example, the following configs are equivalent:
+
+```yaml
+- action: keep_metrics
+  regex: "metric_a|metric_b|foo_.+"
+```
+
+```yaml
+- action: keep_metrics
+  regex:
+  - "metric_a"
+  - "metric_b"
+  - "foo_.+"
+```
+
+* VictoriaMetrics provides the following additional relabeling actions on top of standard actions from the [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config):
+
+* `replace_all` replaces all of the occurrences of `regex` in the values of `source_labels` with the `replacement` and stores the results in the `target_label`. For example, the following relabeling config replaces all the occurrences of `-` char in metric names with `_` char (e.g. `foo-bar-baz` metric name is transformed into `foo_bar_baz`):
+
+```yaml
+- action: replace_all
+  source_labels: ["__name__"]
+  target_label: "__name__"
+  regex: "-"
+  replacement: "_"
+```
+
+* `labelmap_all` replaces all of the occurrences of `regex` in all the label names with the `replacement`. For example, the following relabeling config replaces all the occurrences of `-` char in all the label names with `_` char (e.g. `foo-bar-baz` label name is transformed into `foo_bar_baz`):
+
+```yaml
+- action: labelmap_all
+  regex: "-"
+  replacement: "_"
+```
+
+* `keep_if_equal`: keeps the entry if all the label values from `source_labels` are equal, while dropping all the other entries. For example, the following relabeling config keeps targets if they contain equal values for `instance` and `host` labels, while dropping all the other targets:
+
+```yaml
+- action: keep_if_equal
+  source_labels: ["instance", "host"]
+```
+
+* `drop_if_equal`: drops the entry if all the label values from `source_labels` are equal, while keeping all the other entries. For example, the following relabeling config drops targets if they contain equal values for `instance` and `host` labels, while keeping all the other targets:
+
+```yaml
+- action: drop_if_equal
+  source_labels: ["instance", "host"]
+```
+
+* `keep_metrics`: keeps all the metrics with names matching the given `regex`, while dropping all the other metrics. For example, the following relabeling config keeps metrics with `foo` and `bar` names, while dropping all the other metrics:
+
+```yaml
+- action: keep_metrics
+  regex: "foo|bar"
+```
+
+* `drop_metrics`: drops all the metrics with names matching the given `regex`, while keeping all the other metrics. For example, the following relabeling config drops metrics with `foo` and `bar` names, while leaving all the other metrics:
+
+```yaml
+- action: drop_metrics
+  regex: "foo|bar"
+```
+
+* `graphite`: applies Graphite-style relabeling to metric name. See [these docs](#graphite-relabeling) for details.
 
 ## Graphite relabeling
@@ -4,7 +4,7 @@ import (
 	"encoding/json"
 	"encoding/xml"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"os"
@@ -130,7 +130,7 @@ func (cfg *Config) SignRequest(req *http.Request, payloadHash string) error {
 }
 
 func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) {
-	data, err := ioutil.ReadAll(resp.Body)
+	data, err := io.ReadAll(resp.Body)
 	_ = resp.Body.Close()
 	if err != nil {
 		return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err)
@@ -196,7 +196,7 @@ func (cfg *Config) getAPICredentials() (*credentials, error) {
 		SecretAccessKey: cfg.defaultSecretKey,
 	}
 	if len(cfg.webTokenPath) > 0 {
-		token, err := ioutil.ReadFile(cfg.webTokenPath)
+		token, err := os.ReadFile(cfg.webTokenPath)
 		if err != nil {
 			return nil, fmt.Errorf("cannot read webToken from path: %q, err: %w", cfg.webTokenPath, err)
 		}
@@ -3,7 +3,6 @@ package fsremote
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -218,7 +217,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
 	if err := fs.mkdirAll(path); err != nil {
 		return err
 	}
-	if err := ioutil.WriteFile(path, data, 0600); err != nil {
+	if err := os.WriteFile(path, data, 0600); err != nil {
 		return fmt.Errorf("cannot write %d bytes to %q: %w", len(data), path, err)
 	}
 	return nil
@@ -2,7 +2,6 @@ package cgroup
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"runtime"
 	"strconv"
@@ -80,7 +79,7 @@ func getCPUStat(statName string) (int64, error) {
 
 func getOnlineCPUCount() float64 {
 	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/685#issuecomment-674423728
-	data, err := ioutil.ReadFile("/sys/devices/system/cpu/online")
+	data, err := os.ReadFile("/sys/devices/system/cpu/online")
 	if err != nil {
 		return -1
 	}
@@ -2,7 +2,7 @@ package cgroup
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"path"
 	"strconv"
 	"strings"
@@ -23,11 +23,11 @@ func getStatGeneric(statName, sysfsPrefix, cgroupPath, cgroupGrepLine string) (i
 
 func getFileContents(statName, sysfsPrefix, cgroupPath, cgroupGrepLine string) (string, error) {
 	filepath := path.Join(sysfsPrefix, statName)
-	data, err := ioutil.ReadFile(filepath)
+	data, err := os.ReadFile(filepath)
 	if err == nil {
 		return string(data), nil
 	}
-	cgroupData, err := ioutil.ReadFile(cgroupPath)
+	cgroupData, err := os.ReadFile(cgroupPath)
 	if err != nil {
 		return "", err
 	}
@@ -36,7 +36,7 @@ func getFileContents(statName, sysfsPrefix, cgroupPath, cgroupGrepLine string) (
 		return "", fmt.Errorf("cannot find cgroup path for %q in %q: %w", cgroupGrepLine, cgroupPath, err)
 	}
 	filepath = path.Join(sysfsPrefix, subPath, statName)
-	data, err = ioutil.ReadFile(filepath)
+	data, err = os.ReadFile(filepath)
 	if err != nil {
 		return "", err
 	}
lib/fs/fs.go (15 changed lines)
@@ -3,7 +3,6 @@ package fs
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -269,20 +268,20 @@ func SymlinkRelative(srcPath, dstPath string) error {
 
 // CopyDirectory copies all the files in srcPath to dstPath.
 func CopyDirectory(srcPath, dstPath string) error {
-	fis, err := ioutil.ReadDir(srcPath)
+	des, err := os.ReadDir(srcPath)
 	if err != nil {
 		return err
 	}
 	if err := MkdirAllIfNotExist(dstPath); err != nil {
 		return err
 	}
-	for _, fi := range fis {
-		if !fi.Mode().IsRegular() {
+	for _, de := range des {
+		if !de.Type().IsRegular() {
 			// Skip non-files
 			continue
 		}
-		src := filepath.Join(srcPath, fi.Name())
-		dst := filepath.Join(dstPath, fi.Name())
+		src := filepath.Join(srcPath, de.Name())
+		dst := filepath.Join(dstPath, de.Name())
 		if err := copyFile(src, dst); err != nil {
 			return err
 		}
@@ -383,14 +382,14 @@ func ReadFileOrHTTP(path string) ([]byte, error) {
 		if err != nil {
 			return nil, fmt.Errorf("cannot fetch %q: %w", path, err)
 		}
-		data, err := ioutil.ReadAll(resp.Body)
+		data, err := io.ReadAll(resp.Body)
 		_ = resp.Body.Close()
 		if err != nil {
 			return nil, fmt.Errorf("cannot read %q: %s", path, err)
 		}
 		return data, nil
 	}
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return nil, fmt.Errorf("cannot read %q: %w", path, err)
 	}
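The `CopyDirectory` hunk above is the one place where the migration is more than a rename: `ioutil.ReadDir` returned `[]os.FileInfo` (stat'ing every entry up front), while `os.ReadDir` returns the cheaper `[]os.DirEntry`, so mode checks move from `fi.Mode()` to `de.Type()`. A minimal sketch of the new API, reading the current directory:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.ReadDir returns []os.DirEntry sorted by filename; it does not
	// stat every entry the way ioutil.ReadDir did.
	des, err := os.ReadDir(".")
	if err != nil {
		panic(err)
	}
	for _, de := range des {
		if !de.Type().IsRegular() {
			continue // skip directories, symlinks, etc.
		}
		// de.Info() performs the stat call lazily, only when details
		// such as size or timestamps are actually needed.
		fi, err := de.Info()
		if err != nil {
			continue
		}
		fmt.Printf("%s: %d bytes\n", de.Name(), fi.Size())
	}
}
```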
@@ -2,7 +2,7 @@ package fs
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"testing"
 )
 
@@ -18,7 +18,7 @@ func testReaderAt(t *testing.T, bufSize int) {
 	path := "TestReaderAt"
 	const fileSize = 8 * 1024 * 1024
 	data := make([]byte, fileSize)
-	if err := ioutil.WriteFile(path, data, 0600); err != nil {
+	if err := os.WriteFile(path, data, 0600); err != nil {
 		t.Fatalf("cannot create %q: %s", path, err)
 	}
 	defer MustRemoveAll(path)
@@ -2,7 +2,7 @@ package fs
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"testing"
 )
 
@@ -25,7 +25,7 @@ func benchmarkReaderAtMustReadAt(b *testing.B, isMmap bool) {
 	path := "BenchmarkReaderAtMustReadAt"
 	const fileSize = 8 * 1024 * 1024
 	data := make([]byte, fileSize)
-	if err := ioutil.WriteFile(path, data, 0600); err != nil {
+	if err := os.WriteFile(path, data, 0600); err != nil {
 		b.Fatalf("cannot create %q: %s", path, err)
 	}
 	defer MustRemoveAll(path)
@@ -3,7 +3,6 @@ package mergeset
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"sort"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
@@ -83,7 +82,7 @@ func (mr *metaindexRow) Unmarshal(src []byte) ([]byte, error) {
 func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, error) {
 	// It is ok to read all the metaindex in memory,
 	// since it is quite small.
-	compressedData, err := ioutil.ReadAll(r)
+	compressedData, err := io.ReadAll(r)
 	if err != nil {
 		return dst, fmt.Errorf("cannot read metaindex data: %w", err)
 	}
@@ -4,7 +4,7 @@ import (
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"path/filepath"
 	"strconv"
 	"strings"
@@ -124,7 +124,7 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
 
 	// Read other ph fields from metadata.
 	metadataPath := partPath + "/metadata.json"
-	metadata, err := ioutil.ReadFile(metadataPath)
+	metadata, err := os.ReadFile(metadataPath)
 	if err != nil {
 		return fmt.Errorf("cannot read %q: %w", metadataPath, err)
 	}
@@ -3,7 +3,6 @@ package mergeset
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sort"
@@ -1259,7 +1258,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
 	txnLock.RLock()
 	defer txnLock.RUnlock()
 
-	data, err := ioutil.ReadFile(txnPath)
+	data, err := os.ReadFile(txnPath)
 	if err != nil {
 		return fmt.Errorf("cannot read transaction file: %w", err)
 	}
@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"regexp"
 	"strconv"
@@ -221,14 +220,14 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
 	}
 
 	// Locate reader and writer chunks in the path.
-	fis, err := ioutil.ReadDir(path)
+	des, err := os.ReadDir(path)
 	if err != nil {
 		return nil, fmt.Errorf("cannot read contents of the directory %q: %w", path, err)
 	}
-	for _, fi := range fis {
-		fname := fi.Name()
+	for _, de := range des {
+		fname := de.Name()
 		filepath := path + "/" + fname
-		if fi.IsDir() {
+		if de.IsDir() {
 			logger.Errorf("skipping unknown directory %q", filepath)
 			continue
 		}
@@ -671,7 +670,7 @@ func (mi *metainfo) WriteToFile(path string) error {
 	if err != nil {
 		return fmt.Errorf("cannot marshal persistent queue metainfo %#v: %w", mi, err)
 	}
-	if err := ioutil.WriteFile(path, data, 0600); err != nil {
+	if err := os.WriteFile(path, data, 0600); err != nil {
 		return fmt.Errorf("cannot write persistent queue metainfo to %q: %w", path, err)
 	}
 	fs.MustSyncPath(path)
@@ -680,7 +679,7 @@ func (mi *metainfo) WriteToFile(path string) error {
 
 func (mi *metainfo) ReadFromFile(path string) error {
 	mi.Reset()
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return err

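The `tryOpeningQueue` hunk above is more than a rename: `ioutil.ReadDir` returned `[]os.FileInfo`, while `os.ReadDir` returns `[]fs.DirEntry`, which is cheaper because entries are not stat'ed up front. A minimal sketch of the difference (the directory path is arbitrary):

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// os.ReadDir returns []fs.DirEntry sorted by name; no per-entry stat.
	des, err := os.ReadDir("/tmp")
	if err != nil {
		log.Fatalf("cannot read directory: %s", err)
	}
	for _, de := range des {
		if de.IsDir() {
			continue // skip subdirectories, as tryOpeningQueue does
		}
		// Call Info() only when size or mod time is actually needed.
		fi, err := de.Info()
		if err != nil {
			log.Fatalf("cannot stat %q: %s", de.Name(), err)
		}
		fmt.Printf("%s: %d bytes\n", de.Name(), fi.Size())
	}
}
```

Since the queue code only needs `Name()` and `IsDir()`, the migration avoids a stat syscall per chunk file.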
@@ -2,7 +2,6 @@ package persistentqueue
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strconv"
 	"testing"
@@ -371,7 +370,7 @@ func TestQueueLimitedSize(t *testing.T) {
 }
 
 func mustCreateFile(path, contents string) {
-	if err := ioutil.WriteFile(path, []byte(contents), 0600); err != nil {
+	if err := os.WriteFile(path, []byte(contents), 0600); err != nil {
 		panic(fmt.Errorf("cannot create file %q with %d bytes contents: %w", path, len(contents), err))
 	}
 }

@@ -343,8 +343,10 @@ func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
 		graphiteMatchTemplate: graphiteMatchTemplate,
 		graphiteLabelRules:    graphiteLabelRules,
 
-		regexOriginal:                regexOriginalCompiled,
-		hasCaptureGroupInTargetLabel: strings.Contains(targetLabel, "$"),
-		hasCaptureGroupInReplacement: strings.Contains(replacement, "$"),
+		regexOriginal: regexOriginalCompiled,
+
+		hasCaptureGroupInTargetLabel:   strings.Contains(targetLabel, "$"),
+		hasCaptureGroupInReplacement:   strings.Contains(replacement, "$"),
+		hasLabelReferenceInReplacement: strings.Contains(replacement, "{{"),
 	}, nil
 }

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"strings"
 	"testing"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
@@ -188,6 +189,12 @@ func TestIfExpressionMismatch(t *testing.T) {
 }
 
 func parseMetricWithLabels(metricWithLabels string) ([]prompbmarshal.Label, error) {
+	stripDummyMetric := false
+	if strings.HasPrefix(metricWithLabels, "{") {
+		// Add a dummy metric name, since the parser needs it
+		metricWithLabels = "dummy_metric" + metricWithLabels
+		stripDummyMetric = true
+	}
 	// add a value to metricWithLabels, so it could be parsed by prometheus protocol parser.
 	s := metricWithLabels + " 123"
 	var rows prometheus.Rows
@@ -203,7 +210,7 @@ func parseMetricWithLabels(metricWithLabels string) ([]prompbmarshal.Label, erro
 	}
 	r := rows.Rows[0]
 	var lfs []prompbmarshal.Label
-	if r.Metric != "" {
+	if !stripDummyMetric {
 		lfs = append(lfs, prompbmarshal.Label{
 			Name:  "__name__",
 			Value: r.Metric,

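The test helper above works around the Prometheus text-format parser's requirement that every sample carries a metric name: a label-only selector such as `{foo="bar"}` gets a placeholder name prepended and a dummy sample value appended, and the placeholder is later stripped from the parsed result. A self-contained sketch of the same trick (the function name and constants here are illustrative, not part of the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeMetricSelector prepends a placeholder metric name to a label-only
// selector and appends a dummy value, so that a Prometheus text-format parser
// can consume it. The bool reports whether the metric name must be stripped
// from the parsed result afterwards.
func normalizeMetricSelector(metricWithLabels string) (string, bool) {
	stripDummyMetric := false
	if strings.HasPrefix(metricWithLabels, "{") {
		metricWithLabels = "dummy_metric" + metricWithLabels
		stripDummyMetric = true
	}
	return metricWithLabels + " 123", stripDummyMetric
}

func main() {
	line, fake := normalizeMetricSelector(`{foo="bar",job="vmagent"}`)
	fmt.Println(line, fake) // dummy_metric{foo="bar",job="vmagent"} 123 true

	line, fake = normalizeMetricSelector(`up{instance="localhost:8429"}`)
	fmt.Println(line, fake) // up{instance="localhost:8429"} 123 false
}
```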
@@ -28,9 +28,11 @@ type parsedRelabelConfig struct {
 	graphiteMatchTemplate *graphiteMatchTemplate
 	graphiteLabelRules    []graphiteLabelRule
 
-	regexOriginal                *regexp.Regexp
-	hasCaptureGroupInTargetLabel bool
-	hasCaptureGroupInReplacement bool
+	regexOriginal *regexp.Regexp
+
+	hasCaptureGroupInTargetLabel   bool
+	hasCaptureGroupInReplacement   bool
+	hasLabelReferenceInReplacement bool
 }
 
 // String returns human-readable representation for prc.

@@ -172,10 +174,16 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
 		return labels
 	case "replace":
 		// Store `replacement` at `target_label` if the `regex` matches `source_labels` joined with `separator`
+		replacement := prc.Replacement
 		bb := relabelBufPool.Get()
+		if prc.hasLabelReferenceInReplacement {
+			// Fill {{labelName}} references in the replacement
+			bb.B = fillLabelReferences(bb.B[:0], replacement, labels[labelsOffset:])
+			replacement = string(bb.B)
+		}
 		bb.B = concatLabelValues(bb.B[:0], src, prc.SourceLabels, prc.Separator)
 		if prc.Regex == defaultRegexForRelabelConfig && !prc.hasCaptureGroupInTargetLabel {
-			if prc.Replacement == "$1" {
+			if replacement == "$1" {
 				// Fast path for the rule that copies source label values to destination:
 				// - source_labels: [...]
 				//   target_label: foobar
@@ -188,7 +196,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
 			// - target_label: foobar
 			//   replacement: something-here
 			relabelBufPool.Put(bb)
-			labels = setLabelValue(labels, labelsOffset, prc.TargetLabel, prc.Replacement)
+			labels = setLabelValue(labels, labelsOffset, prc.TargetLabel, replacement)
 			return labels
 		}
 	}

@@ -203,7 +211,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
 		if prc.hasCaptureGroupInTargetLabel {
 			nameStr = prc.expandCaptureGroups(nameStr, sourceStr, match)
 		}
-		valueStr := prc.expandCaptureGroups(prc.Replacement, sourceStr, match)
+		valueStr := prc.expandCaptureGroups(replacement, sourceStr, match)
 		relabelBufPool.Put(bb)
 		return setLabelValue(labels, labelsOffset, nameStr, valueStr)
 	case "replace_all":

@@ -539,3 +547,25 @@ func labelsToString(labels []prompbmarshal.Label) string {
 	b = append(b, '}')
 	return string(b)
 }
+
+func fillLabelReferences(dst []byte, replacement string, labels []prompbmarshal.Label) []byte {
+	s := replacement
+	for len(s) > 0 {
+		n := strings.Index(s, "{{")
+		if n < 0 {
+			return append(dst, s...)
+		}
+		dst = append(dst, s[:n]...)
+		s = s[n+2:]
+		n = strings.Index(s, "}}")
+		if n < 0 {
+			dst = append(dst, "{{"...)
+			return append(dst, s...)
+		}
+		labelName := s[:n]
+		s = s[n+2:]
+		labelValue := GetLabelValueByName(labels, labelName)
+		dst = append(dst, labelValue...)
+	}
+	return dst
+}

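Taken together, the relabel hunks above let a `replace` rule's `replacement` contain `{{labelName}}` references that are expanded from the current label set before the usual `$1`-style regex expansion. A standalone sketch of the expansion loop, mirroring the function added in the diff (the `Label` type and sample values are stand-ins for `prompbmarshal.Label`):

```go
package main

import (
	"fmt"
	"strings"
)

// Label is a stand-in for prompbmarshal.Label.
type Label struct {
	Name, Value string
}

func getLabelValueByName(labels []Label, name string) string {
	for _, l := range labels {
		if l.Name == name {
			return l.Value
		}
	}
	return ""
}

// fillLabelReferences expands {{labelName}} references in replacement,
// leaving unmatched "{{" sequences intact.
func fillLabelReferences(dst []byte, replacement string, labels []Label) []byte {
	s := replacement
	for len(s) > 0 {
		n := strings.Index(s, "{{")
		if n < 0 {
			return append(dst, s...)
		}
		dst = append(dst, s[:n]...)
		s = s[n+2:]
		n = strings.Index(s, "}}")
		if n < 0 {
			dst = append(dst, "{{"...)
			return append(dst, s...)
		}
		labelName := s[:n]
		s = s[n+2:]
		dst = append(dst, getLabelValueByName(labels, labelName)...)
	}
	return dst
}

func main() {
	labels := []Label{{"job", "vmagent"}, {"instance", "host1:8429"}}
	out := fillLabelReferences(nil, "{{job}}@{{instance}}", labels)
	fmt.Println(string(out)) // vmagent@host1:8429
}
```

With this in place, a rule such as `replacement: "{{instance}}-suffix"` substitutes the current value of the `instance` label before any capture-group handling; an unknown label name expands to the empty string.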
File diff suppressed because it is too large
@@ -6,7 +6,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
@@ -198,7 +197,7 @@ func (c *client) GetStreamReader() (*streamReader, error) {
 	}
 	if resp.StatusCode != http.StatusOK {
 		metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_total{status_code="%d"}`, resp.StatusCode)).Inc()
-		respBody, _ := ioutil.ReadAll(resp.Body)
+		respBody, _ := io.ReadAll(resp.Body)
 		_ = resp.Body.Close()
 		cancel()
 		return nil, fmt.Errorf("unexpected status code returned when scraping %q: %d; expecting %d; response body: %q",

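The scrape-client hunk repeats a common Go HTTP idiom that recurs throughout this commit: on an unexpected status code, read the response body anyway (it usually carries the server's error text), close it, and surface both the code and the body in the returned error. A minimal sketch of that pattern (the URL and the body-size cap are hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func fetchOK(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Read (a bounded prefix of) the body: it usually explains the failure.
		respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 4*1024))
		return nil, fmt.Errorf("unexpected status code for %q: %d; want %d; response body: %q",
			url, resp.StatusCode, http.StatusOK, respBody)
	}
	return io.ReadAll(resp.Body)
}

func main() {
	data, err := fetchOK("http://localhost:8428/health")
	if err != nil {
		log.Fatalf("request failed: %s", err)
	}
	fmt.Printf("got %d bytes\n", len(data))
}
```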
@@ -3,7 +3,6 @@ package azure
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"net/url"
 	"os"
 	"strconv"
@@ -145,7 +144,7 @@ func getCloudEnvByName(name string) (*cloudEnvironmentEndpoints, error) {
 }
 
 func readCloudEndpointsFromFile(filePath string) (*cloudEnvironmentEndpoints, error) {
-	data, err := ioutil.ReadFile(filePath)
+	data, err := os.ReadFile(filePath)
 	if err != nil {
 		return nil, fmt.Errorf("cannot file %q: %w", filePath, err)
 	}

@@ -3,7 +3,6 @@ package consul
 import (
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strconv"
 	"strings"
@@ -109,7 +108,7 @@ func getToken(token *promauth.Secret) (string, error) {
 		return token.String(), nil
 	}
 	if tokenFile := os.Getenv("CONSUL_HTTP_TOKEN_FILE"); tokenFile != "" {
-		data, err := ioutil.ReadFile(tokenFile)
+		data, err := os.ReadFile(tokenFile)
 		if err != nil {
 			return "", fmt.Errorf("cannot read consul token file %q; probably, `token` arg is missing in `consul_sd_config`? error: %w", tokenFile, err)
 		}

@@ -3,7 +3,7 @@ package gce
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -94,7 +94,7 @@ func getAPIResponse(client *http.Client, apiURL, filter, pageToken string) ([]by
 }
 
 func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) {
-	data, err := ioutil.ReadAll(resp.Body)
+	data, err := io.ReadAll(resp.Body)
 	_ = resp.Body.Close()
 	if err != nil {
 		return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err)

@@ -6,9 +6,9 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
+	"os"
 	"reflect"
 	"strconv"
 	"strings"

@@ -66,7 +66,7 @@ func newAPIWatcher(apiServer string, ac *promauth.Config, sdc *SDConfig, swcFunc
 	namespaces := sdc.Namespaces.Names
 	if len(namespaces) == 0 {
 		if sdc.Namespaces.OwnNamespace {
-			namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
+			namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
 			if err != nil {
 				logger.Fatalf("cannot determine namespace for the current pod according to `own_namespace: true` option in kubernetes_sd_config: %s", err)
 			}
@@ -579,7 +579,7 @@ func (uw *urlWatcher) reloadObjects() string {
 		return ""
 	}
 	if resp.StatusCode != http.StatusOK {
-		body, _ := ioutil.ReadAll(resp.Body)
+		body, _ := io.ReadAll(resp.Body)
 		_ = resp.Body.Close()
 		logger.Errorf("unexpected status code for request to %q: %d; want %d; response: %q", requestURL, resp.StatusCode, http.StatusOK, body)
 		return ""
@@ -675,7 +675,7 @@ func (uw *urlWatcher) watchForUpdates() {
 			uw.staleResourceVersions.Inc()
 			uw.resourceVersion = ""
 		} else {
-			body, _ := ioutil.ReadAll(resp.Body)
+			body, _ := io.ReadAll(resp.Body)
 			_ = resp.Body.Close()
 			logger.Errorf("unexpected status code for request to %q: %d; want %d; response: %q", requestURL, resp.StatusCode, http.StatusOK, body)
 			backoffSleep()

@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"path"
@@ -138,7 +138,7 @@ func getCreds(cfg *apiConfig) (*apiCredentials, error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed query openstack identity api, url: %s, err: %w", apiURL.String(), err)
 	}
-	r, err := ioutil.ReadAll(resp.Body)
+	r, err := io.ReadAll(resp.Body)
 	_ = resp.Body.Close()
 	if err != nil {
 		return nil, fmt.Errorf("cannot read response from %q: %w", apiURL.String(), err)
@@ -168,7 +168,7 @@ func getCreds(cfg *apiConfig) (*apiCredentials, error) {
 
 // readResponseBody reads body from http.Response.
 func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) {
-	data, err := ioutil.ReadAll(resp.Body)
+	data, err := io.ReadAll(resp.Body)
 	_ = resp.Body.Close()
 	if err != nil {
 		return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err)

@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"sync"
@@ -235,7 +235,7 @@ func getAPIResponse(apiURL string, cfg *apiConfig) ([]byte, error) {
 
 // readResponseBody reads body from http.Response.
 func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) {
-	data, err := ioutil.ReadAll(resp.Body)
+	data, err := io.ReadAll(resp.Body)
 	_ = resp.Body.Close()
 	if err != nil {
 		return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err)

@@ -4,7 +4,7 @@ import (
 	"flag"
 	"fmt"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
-	"io/ioutil"
+	"io"
 	"math"
 	"math/bits"
 	"strconv"
@@ -406,7 +406,7 @@ func (sw *scrapeWork) getTargetResponse() ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	data, err := ioutil.ReadAll(sr)
+	data, err := io.ReadAll(sr)
 	sr.MustClose()
 	return data, err
 }

@@ -4,7 +4,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"regexp"
@@ -34,7 +34,7 @@ func Create(createSnapshotURL string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return "", err
 	}
@@ -72,7 +72,7 @@ func Delete(deleteSnapshotURL string, snapshotName string) error {
 	if err != nil {
 		return err
 	}
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return err
 	}

@@ -3,7 +3,6 @@ package storage
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"sort"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
@@ -128,7 +127,7 @@ func (mr *metaindexRow) Unmarshal(src []byte) ([]byte, error) {
 }
 
 func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, error) {
-	compressedData, err := ioutil.ReadAll(r)
+	compressedData, err := io.ReadAll(r)
 	if err != nil {
 		return dst, fmt.Errorf("cannot read metaindex rows: %w", err)
 	}

@@ -3,7 +3,6 @@ package storage
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -131,7 +130,7 @@ func (ph *partHeader) Reset() {
 
 func (ph *partHeader) readMinDedupInterval(partPath string) error {
 	filePath := partPath + "/min_dedup_interval"
-	data, err := ioutil.ReadFile(filePath)
+	data, err := os.ReadFile(filePath)
 	if err != nil {
 		if errors.Is(err, os.ErrNotExist) {
 			// The minimum dedup interval may not exist for old parts.

@@ -3,7 +3,6 @@ package storage
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sort"
@@ -871,7 +870,7 @@ func hasActiveMerges(pws []*partWrapper) bool {
 
 var (
 	bigMergeWorkersCount   = getDefaultMergeConcurrency(4)
-	smallMergeWorkersCount = getDefaultMergeConcurrency(8)
+	smallMergeWorkersCount = getDefaultMergeConcurrency(16)
 )
 
 func getDefaultMergeConcurrency(max int) int {
@@ -1743,7 +1742,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, txnPath str
 	txnLock.RLock()
 	defer txnLock.RUnlock()
 
-	data, err := ioutil.ReadFile(txnPath)
+	data, err := os.ReadFile(txnPath)
 	if err != nil {
 		return fmt.Errorf("cannot read transaction file: %w", err)
 	}

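One functional change hides among the mechanical renames above: the cap passed to `getDefaultMergeConcurrency` for small-merge workers is raised from 8 to 16. The body of `getDefaultMergeConcurrency` is not shown in this diff; the following is only a plausible minimal sketch, assuming the default is the available CPU count clamped to the given cap:

```go
package main

import (
	"fmt"
	"runtime"
)

// getDefaultMergeConcurrency is a sketch, NOT the code from this diff.
// Assumed behavior: use up to max workers, but no more than the number
// of available CPUs.
func getDefaultMergeConcurrency(max int) int {
	v := runtime.GOMAXPROCS(-1)
	if v > max {
		v = max
	}
	return v
}

func main() {
	// Under this assumption, the diff lets small merges use up to 16
	// concurrent workers on machines with 16+ CPUs, up from the old cap of 8.
	fmt.Println(getDefaultMergeConcurrency(4), getDefaultMergeConcurrency(16))
}
```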
@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"math"
 	"os"
 	"path/filepath"
@@ -845,7 +844,7 @@ func (s *Storage) mustLoadNextDayMetricIDs(date uint64) *byDateMetricIDEntry {
 		logger.Infof("nothing to load from %q", path)
 		return e
 	}
-	src, err := ioutil.ReadFile(path)
+	src, err := os.ReadFile(path)
 	if err != nil {
 		logger.Panicf("FATAL: cannot read %s: %s", path, err)
 	}
@@ -889,7 +888,7 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
 		logger.Infof("nothing to load from %q", path)
 		return hm
 	}
-	src, err := ioutil.ReadFile(path)
+	src, err := os.ReadFile(path)
 	if err != nil {
 		logger.Panicf("FATAL: cannot read %s: %s", path, err)
 	}
@@ -938,7 +937,7 @@ func (s *Storage) mustSaveNextDayMetricIDs(e *byDateMetricIDEntry) {
 	// Marshal e.v
 	dst = marshalUint64Set(dst, &e.v)
 
-	if err := ioutil.WriteFile(path, dst, 0644); err != nil {
+	if err := os.WriteFile(path, dst, 0644); err != nil {
 		logger.Panicf("FATAL: cannot write %d bytes to %q: %s", len(dst), path, err)
 	}
 	logger.Infof("saved %s to %q in %.3f seconds; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime).Seconds(), e.v.Len(), len(dst))
@@ -961,7 +960,7 @@ func (s *Storage) mustSaveHourMetricIDs(hm *hourMetricIDs, name string) {
 	// Marshal hm.m
 	dst = marshalUint64Set(dst, hm.m)
 
-	if err := ioutil.WriteFile(path, dst, 0644); err != nil {
+	if err := os.WriteFile(path, dst, 0644); err != nil {
 		logger.Panicf("FATAL: cannot write %d bytes to %q: %s", len(dst), path, err)
 	}
 	logger.Infof("saved %s to %q in %.3f seconds; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime).Seconds(), hm.m.Len(), len(dst))
@@ -1022,7 +1021,7 @@ func mustGetMinTimestampForCompositeIndex(metadataDir string, isEmptyDB bool) in
 }
 
 func loadMinTimestampForCompositeIndex(path string) (int64, error) {
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return 0, err
 	}