Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Aliaksandr Valialkin 2022-11-29 21:43:25 -08:00
commit d92da32041
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
745 changed files with 210187 additions and 14922 deletions


@ -317,7 +317,8 @@ Extra labels can be added to metrics collected by `vmagent` via the following me
## Automatically generated metrics
`vmagent` automatically generates the following metrics per each scrape of every [Prometheus-compatible target](#how-to-collect-metrics-in-prometheus-format):
`vmagent` automatically generates the following metrics per each scrape of every [Prometheus-compatible target](#how-to-collect-metrics-in-prometheus-format)
and attaches target-specific `instance` and `job` labels to these metrics:
* `up` - this metric exposes `1` value on successful scrape and `0` value on unsuccessful scrape. This allows monitoring
failing scrapes with the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html):
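```
up == 0
```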
@ -405,6 +406,9 @@ Extra labels can be added to metrics collected by `vmagent` via the following me
sum_over_time(scrape_series_limit_samples_dropped[1h]) > 0
```
If the target exports metrics with names clashing with the automatically generated metric names, then `vmagent` automatically
adds the `exported_` prefix to these metric names, so they don't clash with the automatically generated ones. For example, if the target itself exports an `up` metric, it is registered as `exported_up`.
## Relabeling


@ -835,7 +835,7 @@ The shortlist of configuration flags is the following:
-datasource.tlsServerName string
Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
-datasource.url string
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend and -datasource.showURL
-defaultTenant.graphite string
Default tenant for Graphite alerting groups. See https://docs.victoriametrics.com/vmalert.html#multitenancy .This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-defaultTenant.prometheus string


@ -14,7 +14,7 @@ import (
var (
addr = flag.String("datasource.url", "", "Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. "+
"E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.")
"E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend and -datasource.showURL")
appendTypePrefix = flag.Bool("datasource.appendTypePrefix", false, "Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.")
showDatasourceURL = flag.Bool("datasource.showURL", false, "Whether to show -datasource.url in the exported metrics. "+
"It is hidden by default, since it can contain sensitive info such as auth key")


@ -12,7 +12,6 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/dns"
@ -130,24 +129,24 @@ func parseConfig(path string) (*Config, error) {
return cfg, nil
}
func parseLabels(target string, metaLabels map[string]string, cfg *Config) (string, []prompbmarshal.Label, error) {
func parseLabels(target string, metaLabels *promutils.Labels, cfg *Config) (string, *promutils.Labels, error) {
labels := mergeLabels(target, metaLabels, cfg)
labels = cfg.parsedRelabelConfigs.Apply(labels, 0)
labels = promrelabel.RemoveMetaLabels(labels[:0], labels)
promrelabel.SortLabels(labels)
labels.Labels = cfg.parsedRelabelConfigs.Apply(labels.Labels, 0)
labels.RemoveMetaLabels()
labels.Sort()
// Remove references to already deleted labels, so GC could clean strings for label name and label value past len(labels).
// This should reduce memory usage when relabeling creates big number of temporary labels with long names and/or values.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
labels = append([]prompbmarshal.Label{}, labels...)
labels = labels.Clone()
if len(labels) == 0 {
if labels.Len() == 0 {
return "", nil, nil
}
schemeRelabeled := promrelabel.GetLabelValueByName(labels, "__scheme__")
schemeRelabeled := labels.Get("__scheme__")
if len(schemeRelabeled) == 0 {
schemeRelabeled = "http"
}
addressRelabeled := promrelabel.GetLabelValueByName(labels, "__address__")
addressRelabeled := labels.Get("__address__")
if len(addressRelabeled) == 0 {
return "", nil, nil
}
@ -155,7 +154,7 @@ func parseLabels(target string, metaLabels map[string]string, cfg *Config) (stri
return "", nil, nil
}
addressRelabeled = addMissingPort(schemeRelabeled, addressRelabeled)
alertsPathRelabeled := promrelabel.GetLabelValueByName(labels, "__alerts_path__")
alertsPathRelabeled := labels.Get("__alerts_path__")
if !strings.HasPrefix(alertsPathRelabeled, "/") {
alertsPathRelabeled = "/" + alertsPathRelabeled
}
@ -179,21 +178,12 @@ func addMissingPort(scheme, target string) string {
return target
}
func mergeLabels(target string, metaLabels map[string]string, cfg *Config) []prompbmarshal.Label {
func mergeLabels(target string, metaLabels *promutils.Labels, cfg *Config) *promutils.Labels {
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
m := make(map[string]string)
m["__address__"] = target
m["__scheme__"] = cfg.Scheme
m["__alerts_path__"] = path.Join("/", cfg.PathPrefix, alertManagerPath)
for k, v := range metaLabels {
m[k] = v
}
result := make([]prompbmarshal.Label, 0, len(m))
for k, v := range m {
result = append(result, prompbmarshal.Label{
Name: k,
Value: v,
})
}
return result
m := promutils.NewLabels(3 + metaLabels.Len())
m.Add("__address__", target)
m.Add("__scheme__", cfg.Scheme)
m.Add("__alerts_path__", path.Join("/", cfg.PathPrefix, alertManagerPath))
m.AddFrom(metaLabels)
return m
}


@ -9,6 +9,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/dns"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// configWatcher supports dynamic reload of Notifier objects
@ -123,7 +124,7 @@ func targetsFromLabels(labelsFn getLabels, cfg *Config, genFn AlertURLGenerator)
var errors []error
duplicates := make(map[string]struct{})
for _, labels := range metaLabels {
target := labels["__address__"]
target := labels.Get("__address__")
u, processedLabels, err := parseLabels(target, labels, cfg)
if err != nil {
errors = append(errors, err)
@ -156,7 +157,7 @@ func targetsFromLabels(labelsFn getLabels, cfg *Config, genFn AlertURLGenerator)
return targets, errors
}
type getLabels func() ([]map[string]string, error)
type getLabels func() ([]*promutils.Labels, error)
func (cw *configWatcher) start() error {
if len(cw.cfg.StaticConfigs) > 0 {
@ -182,8 +183,8 @@ func (cw *configWatcher) start() error {
}
if len(cw.cfg.ConsulSDConfigs) > 0 {
err := cw.add(TargetConsul, *consul.SDCheckInterval, func() ([]map[string]string, error) {
var labels []map[string]string
err := cw.add(TargetConsul, *consul.SDCheckInterval, func() ([]*promutils.Labels, error) {
var labels []*promutils.Labels
for i := range cw.cfg.ConsulSDConfigs {
sdc := &cw.cfg.ConsulSDConfigs[i]
targetLabels, err := sdc.GetLabels(cw.cfg.baseDir)
@ -200,8 +201,8 @@ func (cw *configWatcher) start() error {
}
if len(cw.cfg.DNSSDConfigs) > 0 {
err := cw.add(TargetDNS, *dns.SDCheckInterval, func() ([]map[string]string, error) {
var labels []map[string]string
err := cw.add(TargetDNS, *dns.SDCheckInterval, func() ([]*promutils.Labels, error) {
var labels []*promutils.Labels
for i := range cw.cfg.DNSSDConfigs {
sdc := &cw.cfg.DNSSDConfigs[i]
targetLabels, err := sdc.GetLabels(cw.cfg.baseDir)


@ -10,7 +10,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
var (
@ -159,7 +159,7 @@ func notifiersFromFlags(gen AlertURLGenerator) ([]Notifier, error) {
// list of labels added during discovery.
type Target struct {
Notifier
Labels []prompbmarshal.Label
Labels *promutils.Labels
}
// TargetType defines how the Target was discovered


@ -248,7 +248,7 @@
{% for _, n := range ns %}
<tr>
<td>
{% for _, l := range n.Labels %}
{% for _, l := range n.Labels.GetLabels() %}
<span class="ms-1 badge bg-primary">{%s l.Name %}={%s l.Value %}</span>
{% endfor %}
</td>


@ -824,7 +824,7 @@ func StreamListTargets(qw422016 *qt422016.Writer, r *http.Request, targets map[n
<td>
`)
//line app/vmalert/web.qtpl:251
for _, l := range n.Labels {
for _, l := range n.Labels.GetLabels() {
//line app/vmalert/web.qtpl:251
qw422016.N().S(`
<span class="ms-1 badge bg-primary">`)


@ -7,9 +7,12 @@ vmctl provides various useful actions with VictoriaMetrics components.
Features:
- migrate data from [Prometheus](#migrating-data-from-prometheus) to VictoriaMetrics using snapshot API
- migrate data from [Thanos](#migrating-data-from-thanos) to VictoriaMetrics
- migrate data from [Cortex](#migrating-data-from-cortex) to VictoriaMetrics
- migrate data from [Mimir](#migrating-data-from-mimir) to VictoriaMetrics
- migrate data from [InfluxDB](#migrating-data-from-influxdb-1x) to VictoriaMetrics
- migrate data from [OpenTSDB](#migrating-data-from-opentsdb) to VictoriaMetrics
- migrate data between [VictoriaMetrics](#migrating-data-from-victoriametrics) single or cluster version.
- migrate data by [Prometheus remote read protocol](#migrating-data-by-remote-read-protocol) to VictoriaMetrics
- [verify](#verifying-exported-blocks-from-victoriametrics) exported blocks from VictoriaMetrics single or cluster version.
To see the full list of supported modes
@ -28,6 +31,7 @@ COMMANDS:
influx Migrate timeseries from InfluxDB
prometheus Migrate timeseries from Prometheus
vm-native Migrate time series between VictoriaMetrics installations via native binary format
remote-read Migrate timeseries by Prometheus remote read protocol
verify-block Verifies correctness of data blocks exported via VictoriaMetrics Native format. See https://docs.victoriametrics.com/#how-to-export-data-in-native-format
```
@ -432,6 +436,64 @@ Found 2 blocks to import. Continue? [Y/n] y
2020/02/23 15:51:07 Total time: 7.153158218s
```
## Migrating data by remote read protocol
`vmctl` supports the `remote-read` mode for migrating data from databases which support the
[Prometheus remote read API](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/).
See `./vmctl remote-read --help` for details and the full list of flags.
To start the migration process configure the following flags:
1. `--remote-read-src-addr` - data source address to read from;
2. `--vm-addr` - VictoriaMetrics address to write to. For single-node VM it is usually equal to `--httpListenAddr`,
and for the cluster version it is the `--httpListenAddr` flag of the vminsert component (for example `http://<vminsert>:8480/insert/<accountID>/prometheus`);
3. `--remote-read-filter-time-start` - the time filter in RFC3339 format to select time series with timestamps equal to or higher than the provided value. E.g. '2020-01-01T20:07:00Z';
4. `--remote-read-filter-time-end` - the time filter in RFC3339 format to select time series with timestamps equal to or smaller than the provided value. E.g. '2020-01-01T20:07:00Z'. The current time is used when omitted;
5. `--remote-read-step-interval` - split the exported data into chunks. Valid values are `month`, `day`, `hour`, `minute`.
The importing process example for a local installation of Prometheus
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl remote-read \
--remote-read-src-addr=http://127.0.0.1:9091 \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-step-interval=hour \
--vm-addr=http://127.0.0.1:8428 \
--vm-concurrency=6
Split defined times into 8798 ranges to import. Continue? [Y/n]
VM worker 0:↘ 127177 samples/s
VM worker 1:↘ 140137 samples/s
VM worker 2:↘ 151606 samples/s
VM worker 3:↘ 130765 samples/s
VM worker 4:↘ 131904 samples/s
VM worker 5:↘ 132693 samples/s
Processing ranges: 8798 / 8798 [█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
2022/10/19 16:45:37 Import finished!
2022/10/19 16:45:37 VictoriaMetrics importer stats:
idle duration: 6m57.793987511s;
time spent while importing: 1m18.463744801s;
total samples: 25348208;
samples/s: 323056.31;
total bytes: 669.7 MB;
bytes/s: 8.5 MB;
import requests: 127;
import requests retries: 0;
2022/10/19 16:45:37 Total time: 1m19.406283424s
```
### Filtering
The filtering consists of two parts: by labels and time.
Filtering by time can be configured via flags `--remote-read-filter-time-start` and `--remote-read-filter-time-end`
in RFC3339 format.
Filtering by labels can be configured via flags `--remote-read-filter-label` and `--remote-read-filter-label-value`.
For example, `--remote-read-filter-label=tenant` and `--remote-read-filter-label-value="team-eu"` will select only series
with `tenant="team-eu"` label-value pair.
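For example, the following hypothetical run (reusing the source and destination addresses from the example above) would migrate one day of series with the `tenant="team-eu"` label-value pair:
```
./vmctl remote-read \
--remote-read-src-addr=http://127.0.0.1:9091 \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-filter-time-end=2021-10-19T00:00:00Z \
--remote-read-filter-label=tenant \
--remote-read-filter-label-value="team-eu" \
--remote-read-step-interval=hour \
--vm-addr=http://127.0.0.1:8428
```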
## Migrating data from Thanos
Thanos uses the same storage engine as Prometheus and the data layout on-disk should be the same. That means
@ -478,6 +540,187 @@ then import it into VM using `vmctl` in `prometheus` mode.
vmctl prometheus --prom-snapshot thanos-data --vm-addr http://victoria-metrics:8428
```
### Remote read protocol
Currently, Thanos doesn't support the streamed remote read protocol. It is [recommended](https://thanos.io/tip/thanos/integrations.md/#storeapi-as-prometheus-remote-read)
to use [thanos-remote-read](https://github.com/G-Research/thanos-remote-read), a proxy that allows exposing any Thanos
service (or anything that exposes gRPC StoreAPI, e.g. Querier) via the Prometheus remote read protocol.
If you want to migrate data, you should run the [thanos-remote-read](https://github.com/G-Research/thanos-remote-read) proxy
and define the Thanos store address, e.g. `./thanos-remote-read -store 127.0.0.1:19194`.
It is important to know that the `store` flag is the Thanos Store API gRPC endpoint.
Also, it is important to know that the thanos-remote-read proxy doesn't support the `STREAMED_XOR_CHUNKS` mode.
When you run the thanos-remote-read proxy, it serves HTTP on port `10080` by default.
The importing process example for a local installation of Thanos
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl remote-read \
--remote-read-src-addr=http://127.0.0.1:10080 \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-step-interval=hour \
--vm-addr=http://127.0.0.1:8428 \
--vm-concurrency=6
```
On the [thanos-remote-read](https://github.com/G-Research/thanos-remote-read) proxy side you will see logs like:
```
ts=2022-10-19T15:05:04.193916Z caller=main.go:278 level=info traceID=00000000000000000000000000000000 msg="thanos request" request="min_time:1666180800000 max_time:1666184399999 matchers:<type:RE value:\".*\" > aggregates:RAW "
ts=2022-10-19T15:05:04.468852Z caller=main.go:278 level=info traceID=00000000000000000000000000000000 msg="thanos request" request="min_time:1666184400000 max_time:1666187999999 matchers:<type:RE value:\".*\" > aggregates:RAW "
ts=2022-10-19T15:05:04.553914Z caller=main.go:278 level=info traceID=00000000000000000000000000000000 msg="thanos request" request="min_time:1666188000000 max_time:1666191364863 matchers:<type:RE value:\".*\" > aggregates:RAW "
```
And when the process finishes, you will see:
```
Split defined times into 8799 ranges to import. Continue? [Y/n]
VM worker 0:↓ 98183 samples/s
VM worker 1:↓ 114640 samples/s
VM worker 2:↓ 131710 samples/s
VM worker 3:↓ 114256 samples/s
VM worker 4:↓ 105671 samples/s
VM worker 5:↓ 124000 samples/s
Processing ranges: 8799 / 8799 [█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
2022/10/19 18:05:07 Import finished!
2022/10/19 18:05:07 VictoriaMetrics importer stats:
idle duration: 52m13.987637229s;
time spent while importing: 9m1.728983776s;
total samples: 70836111;
samples/s: 130759.32;
total bytes: 2.2 GB;
bytes/s: 4.0 MB;
import requests: 356;
import requests retries: 0;
2022/10/19 18:05:07 Total time: 9m2.607521618s
```
## Migrating data from Cortex
Cortex has an implementation of the Prometheus remote read protocol. That means
`vmctl` in `remote-read` mode may also be used for Cortex historical data migration.
These instructions may vary based on the details of your Cortex configuration.
Please read carefully and verify as you go.
### Remote read protocol
If you want to migrate data, you should check your Cortex configuration in the following section:
```yaml
api:
prometheus_http_prefix:
```
If you defined a custom Prometheus HTTP prefix, you should use it when you define the flag `--remote-read-src-addr=http://127.0.0.1:9009/{prometheus_http_prefix}`.
By default, Cortex uses the `prometheus` path prefix, so you should define the flag `--remote-read-src-addr=http://127.0.0.1:9009/prometheus`.
It is important to know that Cortex doesn't support the `STREAMED_XOR_CHUNKS` mode.
When you run Cortex, it serves HTTP on port `9009` by default.
The importing process example for a local installation of Cortex
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl remote-read \
--remote-read-src-addr=http://127.0.0.1:9009/prometheus \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-step-interval=hour \
--remote-read-src-check-alive=false \
--vm-addr=http://127.0.0.1:8428 \
--vm-concurrency=6
```
And when the process finishes, you will see the following:
```
Split defined times into 8842 ranges to import. Continue? [Y/n]
VM worker 0:↗ 3863 samples/s
VM worker 1:↗ 2686 samples/s
VM worker 2:↗ 2620 samples/s
VM worker 3:↗ 2705 samples/s
VM worker 4:↗ 2643 samples/s
VM worker 5:↗ 2593 samples/s
Processing ranges: 8842 / 8842 [█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
2022/10/21 12:09:49 Import finished!
2022/10/21 12:09:49 VictoriaMetrics importer stats:
idle duration: 0s;
time spent while importing: 3.82640757s;
total samples: 160232;
samples/s: 41875.31;
total bytes: 11.3 MB;
bytes/s: 3.0 MB;
import requests: 6;
import requests retries: 0;
2022/10/21 12:09:49 Total time: 4.71824253s
```
It is important to know that if you run your Cortex installation in multi-tenant mode, the remote read protocol
requires an authentication header like `X-Scope-OrgID`. You can define it via the flag `--remote-read-headers=X-Scope-OrgID:demo`.
## Migrating data from Mimir
Mimir has a similar implementation to Cortex and also supports the Prometheus remote read protocol. That means
`vmctl` in `remote-read` mode may also be used for Mimir historical data migration.
These instructions may vary based on the details of your Mimir configuration.
Please read carefully and verify as you go.
### Remote read protocol
If you want to migrate data, you should check your Mimir configuration in the following section:
```yaml
api:
prometheus_http_prefix:
```
If you defined a custom Prometheus HTTP prefix, you should use it when you define the flag `--remote-read-src-addr=http://127.0.0.1:9009/{prometheus_http_prefix}`.
By default, Mimir uses the `prometheus` path prefix, so you should define the flag `--remote-read-src-addr=http://127.0.0.1:9009/prometheus`.
Mimir supports both remote read modes, so you can use either the `SAMPLES` or the `STREAMED_XOR_CHUNKS` mode.
When you run Mimir, it serves HTTP on port `8080` by default.
The following example ran against a local installation in multi-tenant mode (3 Mimir instances) with nginx as a load balancer
exposing the single port `:9009`. That's why the example calls `:9009` instead of `:8080`.
The importing process example for this local installation of Mimir
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl remote-read \
--remote-read-src-addr=http://127.0.0.1:9009/prometheus \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-step-interval=hour \
--remote-read-src-check-alive=false \
--remote-read-headers=X-Scope-OrgID:demo \
--remote-read-use-stream=true \
--vm-addr=http://127.0.0.1:8428 \
--vm-concurrency=6
```
And when the process finishes, you will see the following:
```
Split defined times into 8847 ranges to import. Continue? [Y/n]
VM worker 0:→ 12176 samples/s
VM worker 1:→ 11918 samples/s
VM worker 2:→ 11261 samples/s
VM worker 3:→ 12861 samples/s
VM worker 4:→ 11096 samples/s
VM worker 5:→ 11575 samples/s
Processing ranges: 8847 / 8847 [█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
2022/10/21 17:22:23 Import finished!
2022/10/21 17:22:23 VictoriaMetrics importer stats:
idle duration: 0s;
time spent while importing: 15.379614356s;
total samples: 81243;
samples/s: 5282.51;
total bytes: 6.1 MB;
bytes/s: 397.8 kB;
import requests: 6;
import requests retries: 0;
2022/10/21 17:22:23 Total time: 16.287405248s
```
It is important to know that if you run your Mimir installation in multi-tenant mode, the remote read protocol
requires an authentication header like `X-Scope-OrgID`. You can define it via the flag `--remote-read-headers=X-Scope-OrgID:demo`.
## Migrating data from VictoriaMetrics
### Native protocol


@ -2,6 +2,7 @@ package main
import (
"fmt"
"time"
"github.com/urfave/cli/v2"
@ -400,6 +401,93 @@ var (
}
)
const (
remoteRead = "remote-read"
remoteReadUseStream = "remote-read-use-stream"
remoteReadConcurrency = "remote-read-concurrency"
remoteReadFilterTimeStart = "remote-read-filter-time-start"
remoteReadFilterTimeEnd = "remote-read-filter-time-end"
remoteReadFilterLabel = "remote-read-filter-label"
remoteReadFilterLabelValue = "remote-read-filter-label-value"
remoteReadStepInterval = "remote-read-step-interval"
remoteReadSrcAddr = "remote-read-src-addr"
remoteReadUser = "remote-read-user"
remoteReadPassword = "remote-read-password"
remoteReadHTTPTimeout = "remote-read-http-timeout"
remoteReadHeaders = "remote-read-headers"
)
var (
remoteReadFlags = []cli.Flag{
&cli.IntFlag{
Name: remoteReadConcurrency,
Usage: "Number of concurrently running remote read readers",
Value: 1,
},
&cli.TimestampFlag{
Name: remoteReadFilterTimeStart,
Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'",
Layout: time.RFC3339,
},
&cli.TimestampFlag{
Name: remoteReadFilterTimeEnd,
Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'",
Layout: time.RFC3339,
},
&cli.StringFlag{
Name: remoteReadFilterLabel,
Usage: "Prometheus label name to filter timeseries by. E.g. '__name__' will filter timeseries by name.",
Value: "__name__",
},
&cli.StringFlag{
Name: remoteReadFilterLabelValue,
Usage: fmt.Sprintf("Prometheus regular expression to filter label from %q flag.", remoteReadFilterLabelValue),
Value: ".*",
},
&cli.BoolFlag{
Name: remoteRead,
Usage: "Use Prometheus remote read protocol",
Value: false,
},
&cli.BoolFlag{
Name: remoteReadUseStream,
Usage: "Defines whether to use SAMPLES or STREAMED_XOR_CHUNKS mode. By default is uses SAMPLES mode. See https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/#streamed-chunks",
Value: false,
},
&cli.StringFlag{
Name: remoteReadStepInterval,
Usage: fmt.Sprintf("Split export data into chunks. Requires setting --%s. Valid values are %q,%q,%q,%q.", remoteReadFilterTimeStart, stepper.StepMonth, stepper.StepDay, stepper.StepHour, stepper.StepMinute),
Required: true,
},
&cli.StringFlag{
Name: remoteReadSrcAddr,
Usage: "Remote read address to perform read from.",
Required: true,
},
&cli.StringFlag{
Name: remoteReadUser,
Usage: "Remote read username for basic auth",
EnvVars: []string{"REMOTE_READ_USERNAME"},
},
&cli.StringFlag{
Name: remoteReadPassword,
Usage: "Remote read password for basic auth",
EnvVars: []string{"REMOTE_READ_PASSWORD"},
},
&cli.DurationFlag{
Name: remoteReadHTTPTimeout,
Usage: "Timeout defines timeout for HTTP write request to remote storage",
},
&cli.StringFlag{
Name: remoteReadHeaders,
Value: "",
Usage: "Optional HTTP headers to send with each request to the corresponding remote source storage \n" +
"For example, --remote-read-headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding remote source storage. \n" +
"Multiple headers must be delimited by '^^': --remote-read-headers='header1:value1^^header2:value2'",
},
}
)
func mergeFlags(flags ...[]cli.Flag) []cli.Flag {
var result []cli.Flag
for _, f := range flags {


@ -11,6 +11,7 @@ import (
"syscall"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
"github.com/urfave/cli/v2"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
@ -40,7 +41,7 @@ func main() {
Commands: []*cli.Command{
{
Name: "opentsdb",
Usage: "Migrate timeseries from OpenTSDB",
Usage: "Migrate time series from OpenTSDB",
Flags: mergeFlags(globalFlags, otsdbFlags, vmFlags),
Action: func(c *cli.Context) error {
fmt.Println("OpenTSDB import mode")
@ -75,7 +76,7 @@ func main() {
},
{
Name: "influx",
Usage: "Migrate timeseries from InfluxDB",
Usage: "Migrate time series from InfluxDB",
Flags: mergeFlags(globalFlags, influxFlags, vmFlags),
Action: func(c *cli.Context) error {
fmt.Println("InfluxDB import mode")
@ -114,9 +115,48 @@ func main() {
return processor.run(c.Bool(globalSilent), c.Bool(globalVerbose))
},
},
{
Name: "remote-read",
Usage: "Migrate time series via Prometheus remote-read protocol",
Flags: mergeFlags(globalFlags, remoteReadFlags, vmFlags),
Action: func(c *cli.Context) error {
rr, err := remoteread.NewClient(remoteread.Config{
Addr: c.String(remoteReadSrcAddr),
Username: c.String(remoteReadUser),
Password: c.String(remoteReadPassword),
Timeout: c.Duration(remoteReadHTTPTimeout),
UseStream: c.Bool(remoteReadUseStream),
Headers: c.String(remoteReadHeaders),
LabelName: c.String(remoteReadFilterLabel),
LabelValue: c.String(remoteReadFilterLabelValue),
})
if err != nil {
return fmt.Errorf("error create remote read client: %s", err)
}
vmCfg := initConfigVM(c)
importer, err := vm.NewImporter(vmCfg)
if err != nil {
return fmt.Errorf("failed to create VM importer: %s", err)
}
rmp := remoteReadProcessor{
src: rr,
dst: importer,
filter: remoteReadFilter{
timeStart: c.Timestamp(remoteReadFilterTimeStart),
timeEnd: c.Timestamp(remoteReadFilterTimeEnd),
chunk: c.String(remoteReadStepInterval),
},
cc: c.Int(remoteReadConcurrency),
}
return rmp.run(ctx, c.Bool(globalSilent), c.Bool(globalVerbose))
},
},
{
Name: "prometheus",
Usage: "Migrate timeseries from Prometheus",
Usage: "Migrate time series from Prometheus",
Flags: mergeFlags(globalFlags, promFlags, vmFlags),
Action: func(c *cli.Context) error {
fmt.Println("Prometheus import mode")


@ -9,6 +9,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
type prometheusProcessor struct {
@ -123,7 +124,15 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
var timestamps []int64
var values []float64
it := series.Iterator()
for it.Next() {
for {
typ := it.Next()
if typ == chunkenc.ValNone {
break
}
if typ != chunkenc.ValFloat {
// Skip unsupported values
continue
}
t, v := it.At()
timestamps = append(timestamps, t)
values = append(values, v)


@ -4,7 +4,7 @@ import (
"fmt"
"time"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
)

app/vmctl/remoteread.go (new file, 127 lines)

@ -0,0 +1,127 @@
package main
import (
"context"
"fmt"
"log"
"sync"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/cheggaaa/pb/v3"
)
type remoteReadProcessor struct {
filter remoteReadFilter
dst *vm.Importer
src *remoteread.Client
cc int
}
type remoteReadFilter struct {
timeStart *time.Time
timeEnd *time.Time
chunk string
}
func (rrp *remoteReadProcessor) run(ctx context.Context, silent, verbose bool) error {
rrp.dst.ResetStats()
if rrp.filter.timeEnd == nil {
t := time.Now().In(rrp.filter.timeStart.Location())
rrp.filter.timeEnd = &t
}
if rrp.cc < 1 {
rrp.cc = 1
}
ranges, err := stepper.SplitDateRange(*rrp.filter.timeStart, *rrp.filter.timeEnd, rrp.filter.chunk)
if err != nil {
return fmt.Errorf("failed to create date ranges for the given time filters: %v", err)
}
question := fmt.Sprintf("Selected time range %q - %q will be split into %d ranges according to %q step. Continue?",
rrp.filter.timeStart.String(), rrp.filter.timeEnd.String(), len(ranges), rrp.filter.chunk)
if !silent && !prompt(question) {
return nil
}
var bar *pb.ProgressBar
if !silent {
bar = barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing ranges"), len(ranges))
if err := barpool.Start(); err != nil {
return err
}
}
defer func() {
if !silent {
barpool.Stop()
}
log.Println("Import finished!")
log.Print(rrp.dst.Stats())
}()
rangeC := make(chan *remoteread.Filter)
errCh := make(chan error)
var wg sync.WaitGroup
wg.Add(rrp.cc)
for i := 0; i < rrp.cc; i++ {
go func() {
defer wg.Done()
for r := range rangeC {
if err := rrp.do(ctx, r); err != nil {
errCh <- fmt.Errorf("request failed for: %s", err)
return
}
if bar != nil {
bar.Increment()
}
}
}()
}
for _, r := range ranges {
select {
case infErr := <-errCh:
return fmt.Errorf("remote read error: %s", infErr)
case vmErr := <-rrp.dst.Errors():
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
case rangeC <- &remoteread.Filter{
StartTimestampMs: r[0].UnixMilli(),
EndTimestampMs: r[1].UnixMilli(),
}:
}
}
close(rangeC)
wg.Wait()
rrp.dst.Close()
close(errCh)
// drain import errors channel
for vmErr := range rrp.dst.Errors() {
if vmErr.Err != nil {
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
}
}
for err := range errCh {
return fmt.Errorf("import process failed: %s", err)
}
return nil
}
func (rrp *remoteReadProcessor) do(ctx context.Context, filter *remoteread.Filter) error {
return rrp.src.Read(ctx, filter, func(series *vm.TimeSeries) error {
if err := rrp.dst.Input(series); err != nil {
return fmt.Errorf(
"failed to read data for time range start: %d, end: %d, %s",
filter.StartTimestampMs, filter.EndTimestampMs, err)
}
return nil
})
}


@ -0,0 +1,349 @@
package remoteread
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
const (
defaultReadTimeout = 30 * time.Second
remoteReadPath = "/api/v1/read"
healthPath = "/-/healthy"
)
// StreamCallback is a callback function for processing time series
type StreamCallback func(series *vm.TimeSeries) error
// Client is an HTTP client for reading
// time series via remote read protocol.
type Client struct {
addr string
c *http.Client
user string
password string
useStream bool
headers []keyValue
matchers []*prompb.LabelMatcher
}
// Config is config for remote read.
type Config struct {
// Addr of remote storage
Addr string
// Timeout defines timeout for HTTP requests
// made by remote read client
Timeout time.Duration
// Username is the remote read username, optional.
Username string
// Password is the remote read password, optional.
Password string
// UseStream defines whether to use SAMPLES or STREAMED_XOR_CHUNKS mode
// see https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/#samples
// https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/#streamed-chunks
UseStream bool
// Headers optional HTTP headers to send with each request to the corresponding remote storage
Headers string
// LabelName and LabelValue stand for the label=~value pair used for read requests.
// Optional.
LabelName, LabelValue string
}
// Filter defines a list of filters applied to requested data
type Filter struct {
StartTimestampMs int64
EndTimestampMs int64
}
// NewClient returns client for
// reading time series via remote read protocol.
func NewClient(cfg Config) (*Client, error) {
if cfg.Addr == "" {
return nil, fmt.Errorf("config.Addr can't be empty")
}
if cfg.Timeout == 0 {
cfg.Timeout = defaultReadTimeout
}
var hdrs []string
if cfg.Headers != "" {
hdrs = strings.Split(cfg.Headers, "^^")
}
headers, err := parseHeaders(hdrs)
if err != nil {
return nil, err
}
var m *prompb.LabelMatcher
if cfg.LabelName != "" && cfg.LabelValue != "" {
m = &prompb.LabelMatcher{
Type: prompb.LabelMatcher_RE,
Name: cfg.LabelName,
Value: cfg.LabelValue,
}
}
c := &Client{
c: &http.Client{
Timeout: cfg.Timeout,
Transport: http.DefaultTransport.(*http.Transport).Clone(),
},
addr: strings.TrimSuffix(cfg.Addr, "/"),
user: cfg.Username,
password: cfg.Password,
useStream: cfg.UseStream,
headers: headers,
matchers: []*prompb.LabelMatcher{m},
}
return c, nil
}
// Read fetches data from the remote read source
func (c *Client) Read(ctx context.Context, filter *Filter, streamCb StreamCallback) error {
req := &prompb.ReadRequest{
Queries: []*prompb.Query{
{
StartTimestampMs: filter.StartTimestampMs,
EndTimestampMs: filter.EndTimestampMs - 1,
Matchers: c.matchers,
},
},
}
if c.useStream {
req.AcceptedResponseTypes = []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS}
}
data, err := proto.Marshal(req)
if err != nil {
return fmt.Errorf("unable to marshal read request: %w", err)
}
b := snappy.Encode(nil, data)
if err := c.fetch(ctx, b, streamCb); err != nil {
if errors.Is(err, context.Canceled) {
return fmt.Errorf("fetch request has ben cancelled")
}
return fmt.Errorf("error while fetching data from remote storage: %s", err)
}
return nil
}
func (c *Client) do(req *http.Request) (*http.Response, error) {
if c.user != "" {
req.SetBasicAuth(c.user, c.password)
}
for _, h := range c.headers {
req.Header.Add(h.key, h.value)
}
return c.c.Do(req)
}
// Ping checks the health of the read source
func (c *Client) Ping() error {
url := c.addr + healthPath
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return fmt.Errorf("cannot create request to %q: %s", url, err)
}
resp, err := c.do(req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("bad status code: %d", resp.StatusCode)
}
return nil
}
func (c *Client) fetch(ctx context.Context, data []byte, streamCb StreamCallback) error {
r := bytes.NewReader(data)
url := c.addr + remoteReadPath
req, err := http.NewRequest("POST", url, r)
if err != nil {
return fmt.Errorf("failed to create new HTTP request: %w", err)
}
req.Header.Add("Content-Encoding", "snappy")
req.Header.Add("Accept-Encoding", "snappy")
req.Header.Set("Content-Type", "application/x-protobuf")
if c.useStream {
req.Header.Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse")
}
req.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0")
resp, err := c.do(req.WithContext(ctx))
if err != nil {
return fmt.Errorf("error while sending request to %s: %w; Data len %d(%d)",
req.URL.Redacted(), err, len(data), r.Size())
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("unexpected response code %d for %s. Response body %q",
resp.StatusCode, req.URL.Redacted(), body)
}
if c.useStream {
return processStreamResponse(resp.Body, streamCb)
}
return processResponse(resp.Body, streamCb)
}
func processResponse(body io.ReadCloser, callback StreamCallback) error {
d, err := io.ReadAll(body)
if err != nil {
return fmt.Errorf("error reading response: %w", err)
}
uncompressed, err := snappy.Decode(nil, d)
if err != nil {
return fmt.Errorf("error decoding response: %w", err)
}
var readResp prompb.ReadResponse
err = proto.Unmarshal(uncompressed, &readResp)
if err != nil {
return fmt.Errorf("unable to unmarshal response body: %w", err)
}
// response could have no results for the given filter, but that
// shouldn't be treated as an error.
for _, res := range readResp.Results {
for _, ts := range res.Timeseries {
vmTs := convertSamples(ts.Samples, ts.Labels)
if err := callback(vmTs); err != nil {
return err
}
}
}
return nil
}
var bbPool bytesutil.ByteBufferPool
func processStreamResponse(body io.ReadCloser, callback StreamCallback) error {
bb := bbPool.Get()
defer func() { bbPool.Put(bb) }()
stream := remote.NewChunkedReader(body, remote.DefaultChunkedReadLimit, bb.B)
for {
res := &prompb.ChunkedReadResponse{}
err := stream.NextProto(res)
if err == io.EOF {
break
}
if err != nil {
return err
}
for _, series := range res.ChunkedSeries {
samples := make([]prompb.Sample, 0)
for _, chunk := range series.Chunks {
s, err := parseSamples(chunk.Data)
if err != nil {
return err
}
samples = append(samples, s...)
}
ts := convertSamples(samples, series.Labels)
if err := callback(ts); err != nil {
return err
}
}
}
return nil
}
func parseSamples(chunk []byte) ([]prompb.Sample, error) {
c, err := chunkenc.FromData(chunkenc.EncXOR, chunk)
if err != nil {
return nil, fmt.Errorf("error read chunk: %w", err)
}
var samples []prompb.Sample
it := c.Iterator(nil)
for {
typ := it.Next()
if typ == chunkenc.ValNone {
break
}
if typ != chunkenc.ValFloat {
// Skip unsupported values
continue
}
if it.Err() != nil {
return nil, fmt.Errorf("error iterate over chunks: %w", it.Err())
}
ts, v := it.At()
s := prompb.Sample{
Timestamp: ts,
Value: v,
}
samples = append(samples, s)
}
return samples, it.Err()
}
type keyValue struct {
key string
value string
}
func parseHeaders(headers []string) ([]keyValue, error) {
if len(headers) == 0 {
return nil, nil
}
kvs := make([]keyValue, len(headers))
for i, h := range headers {
n := strings.IndexByte(h, ':')
if n < 0 {
return nil, fmt.Errorf(`missing ':' in header %q; expecting "key: value" format`, h)
}
kv := &kvs[i]
kv.key = strings.TrimSpace(h[:n])
kv.value = strings.TrimSpace(h[n+1:])
}
return kvs, nil
}
func convertSamples(samples []prompb.Sample, labels []prompb.Label) *vm.TimeSeries {
labelPairs := make([]vm.LabelPair, 0, len(labels))
nameValue := ""
for _, label := range labels {
if label.Name == "__name__" {
nameValue = label.Value
continue
}
labelPairs = append(labelPairs, vm.LabelPair{Name: label.Name, Value: label.Value})
}
n := len(samples)
values := make([]float64, 0, n)
timestamps := make([]int64, 0, n)
for _, sample := range samples {
values = append(values, sample.Value)
timestamps = append(timestamps, sample.Timestamp)
}
return &vm.TimeSeries{
Name: nameValue,
LabelPairs: labelPairs,
Timestamps: timestamps,
Values: values,
}
}


@ -12,6 +12,8 @@ const (
StepDay string = "day"
// StepHour represents a one hour interval
StepHour string = "hour"
// StepMinute represents a one minute interval
StepMinute string = "minute"
)
// SplitDateRange splits start-end range in a subset of ranges respecting the given step
@ -42,8 +44,13 @@ func SplitDateRange(start, end time.Time, step string) ([][]time.Time, error) {
nextStep = func(t time.Time) (time.Time, time.Time) {
return t, t.Add(time.Hour * 1)
}
case StepMinute:
nextStep = func(t time.Time) (time.Time, time.Time) {
return t, t.Add(time.Minute * 1)
}
default:
return nil, fmt.Errorf("failed to parse step value, valid values are: '%s', '%s', '%s'. provided: '%s'", StepMonth, StepDay, StepHour, step)
return nil, fmt.Errorf("failed to parse step value, valid values are: '%s', '%s', '%s', '%s'. provided: '%s'",
StepMonth, StepDay, StepHour, StepMinute, step)
}
currentStep := start


@ -182,6 +182,8 @@ func (im *Importer) Errors() chan *ImportError { return im.errors }
// that need to be imported
func (im *Importer) Input(ts *TimeSeries) error {
select {
case <-im.close:
return fmt.Errorf("importer is closed")
case im.input <- ts:
return nil
case err := <-im.errors:
@ -197,6 +199,7 @@ func (im *Importer) Input(ts *TimeSeries) error {
func (im *Importer) Close() {
im.once.Do(func() {
close(im.close)
close(im.input)
im.wg.Wait()
close(im.errors)
})
@ -209,6 +212,10 @@ func (im *Importer) startWorker(bar *pb.ProgressBar, batchSize, significantFigur
for {
select {
case <-im.close:
for ts := range im.input {
ts = roundTimeseriesValue(ts, significantFigures, roundDigits)
batch = append(batch, ts)
}
exitErr := &ImportError{
Batch: batch,
}
@ -217,24 +224,17 @@ func (im *Importer) startWorker(bar *pb.ProgressBar, batchSize, significantFigur
}
im.errors <- exitErr
return
case ts := <-im.input:
case ts, ok := <-im.input:
if !ok {
continue
}
// init waitForBatch when first
// value was received
if waitForBatch.IsZero() {
waitForBatch = time.Now()
}
if significantFigures > 0 {
for i, v := range ts.Values {
ts.Values[i] = decimal.RoundToSignificantFigures(v, significantFigures)
}
}
if roundDigits < 100 {
for i, v := range ts.Values {
ts.Values[i] = decimal.RoundToDecimalDigits(v, roundDigits)
}
}
ts = roundTimeseriesValue(ts, significantFigures, roundDigits)
batch = append(batch, ts)
dataPoints += len(ts.Values)
@ -418,3 +418,18 @@ func byteCountSI(b int64) string {
return fmt.Sprintf("%.1f %cB",
float64(b)/float64(div), "kMGTPE"[exp])
}
func roundTimeseriesValue(ts *TimeSeries, significantFigures, roundDigits int) *TimeSeries {
if significantFigures > 0 {
for i, v := range ts.Values {
ts.Values[i] = decimal.RoundToSignificantFigures(v, significantFigures)
}
}
if roundDigits < 100 {
for i, v := range ts.Values {
ts.Values[i] = decimal.RoundToDecimalDigits(v, roundDigits)
}
}
return ts
}


@ -229,7 +229,7 @@ The shortlist of configuration flags include the following:
-datasource.tlsServerName string
Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
-datasource.url string
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend and -datasource.showURL
-enable.auth
enables auth with jwt token
-enable.rateLimit


@ -11,9 +11,10 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/go-kit/kit/log"
"github.com/oklog/ulid"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/model/labels"
promstorage "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
var prometheusDataPath = flag.String("prometheusDataPath", "", "Optional path to readonly historical Prometheus data")
@ -68,7 +69,7 @@ func Init(retentionMsecs int64) {
}
return m
}
pdb, err := tsdb.Open(*prometheusDataPath, l, nil, opts)
pdb, err := tsdb.Open(*prometheusDataPath, l, nil, opts, nil)
if err != nil {
logger.Panicf("FATAL: cannot open Prometheus data at -prometheusDataPath=%q: %s", *prometheusDataPath, err)
}
@ -179,7 +180,15 @@ func VisitSeries(sq *storage.SearchQuery, deadline searchutils.Deadline, f Serie
values = values[:0]
timestamps = timestamps[:0]
it := s.Iterator()
for it.Next() {
for {
typ := it.Next()
if typ == chunkenc.ValNone {
break
}
if typ != chunkenc.ValFloat {
// Skip unsupported values
continue
}
ts, v := it.At()
values = append(values, v)
timestamps = append(timestamps, ts)


@ -107,7 +107,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${ds}"
"uid": "$ds"
},
"expr": "sum(increase(vm_tenant_inserted_rows_total{job=~\"$job\", instance=~\"$instance\",accountID=~\"$account\", projectID=~\"$project\"}[1m])/60) by (accountID,projectID) ",
"interval": "",
@ -205,7 +205,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${ds}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_tenant_select_requests_total{job=~\"$job\", instance=~\"$instance.*\",accountID=~\"$account\", projectID=~\"$project\"}[$__rate_interval])) by (accountID,projectID) ",
@ -311,7 +311,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${ds}"
"uid": "$ds"
},
"expr": "sum(vm_tenant_active_timeseries{job=~\"$job\", instance=~\"$instance.*\",accountID=~\"$account\",projectID=~\"$project\"}) by(accountID,projectID)",
"format": "time_series",
@ -409,7 +409,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${ds}"
"uid": "$ds"
},
"expr": "sum(increase(vm_tenant_timeseries_created_total{job=~\"$job\", instance=~\"$instance\",accountID=~\"$account\", projectID=~\"$project\"}[1m])/60) by(accountID,projectID)",
"interval": "",
@ -504,7 +504,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${ds}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(vm_tenant_used_tenant_bytes{job=~\"$job\", instance=~\"$instance\",accountID=~\"$account\",projectID=~\"$project\"}) by(accountID,projectID)",
@ -579,6 +579,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{version=~\"^vm(insert|select|storage).*\"}, job)",
@ -604,6 +605,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{job=~\"$job\"}, instance)",
@ -629,6 +631,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"definition": "label_values(vm_tenant_active_timeseries{job=~\"$job\"},accountID)",
@ -654,6 +657,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"definition": "label_values(vm_tenant_active_timeseries{accountID=~\"$accountID\"},projectID)",
@ -687,4 +691,4 @@
"uid": "IZFqd3lMz",
"version": 7,
"weekStart": ""
}
}


@ -46,7 +46,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"gridPos": {
"h": 3,
@ -66,7 +66,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"description": "Number of objects at kubernetes cluster per each controller",
"fieldConfig": {
@ -118,7 +118,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "max(operator_controller_objects_count{job=~\"$job\",instance=~\"$instance\"}) by (controller)",
@ -133,7 +133,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"fieldConfig": {
"defaults": {
@ -184,7 +184,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"exemplar": false,
@ -207,7 +207,7 @@
"dashes": false,
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"fill": 1,
"fillGradient": 0,
@ -248,7 +248,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(controller_runtime_reconcile_total{job=~\"$job\",instance=~\"$instance\",result=~\"requeue_after|requeue|success\"}[$__rate_interval])) by(controller)",
@ -294,7 +294,7 @@
"dashes": false,
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"description": "",
"fill": 1,
@ -336,7 +336,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(operator_log_messages_total{job=~\"$job\",instance=~\"$instance\"}[$__rate_interval])) by (level)",
@ -395,7 +395,7 @@
"dashes": false,
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"description": "Non zero metrics indicates about error with CR object definition (typos or incorrect values) or errors with kubernetes API connection.",
"fill": 1,
@ -437,7 +437,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"exemplar": false,
@ -450,7 +450,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(controller_runtime_reconcile_total{job=~\"$job\",instance=~\"$instance\",result=\"error\"}[$__rate_interval])) by(controller) > 0",
@ -497,7 +497,7 @@
"dashes": false,
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"description": "Operator limits number of reconcilation events to 5 events per 2 seconds.\n For now, this limit is applied only for vmalert and vmagent controllers.\n It should reduce load at kubernetes cluster and increase operator performance.",
"fill": 1,
@ -538,7 +538,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(operator_reconcile_throttled_events_total[$__rate_interval])) by(controller)",
@ -584,7 +584,7 @@
"dashes": false,
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"description": "Number of objects waiting in the queue for reconciliation. Non-zero values indicate that operator cannot process CR objects changes with the given resources.",
"fill": 1,
@ -626,7 +626,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "max(workqueue_depth{job=~\"$job\",instance=~\"$instance\"}) by (name)",
@ -672,7 +672,7 @@
"dashes": false,
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"description": " For controllers with StatefulSet it's ok to see latency greater then 3 seconds. It could be vmalertmanager,vmcluster or vmagent in statefulMode.\n\n For other controllers, latency greater then 1 second may indicate issues with kubernetes cluster or operator's performance.\n ",
"fieldConfig": {
@ -721,7 +721,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99,sum(rate(controller_runtime_reconcile_time_seconds_bucket[$__rate_interval])) by(le,controller) )",
@ -780,7 +780,7 @@
"dashes": false,
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"fieldConfig": {
"defaults": {
@ -827,7 +827,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(go_memstats_sys_bytes{job=~\"$job\", instance=~\"$instance\"}) ",
@ -838,7 +838,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(go_memstats_heap_inuse_bytes{job=~\"$job\", instance=~\"$instance\"}) ",
@ -850,7 +850,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(go_memstats_stack_inuse_bytes{job=~\"$job\", instance=~\"$instance\"})",
@ -862,7 +862,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(process_resident_memory_bytes{job=~\"$job\", instance=~\"$instance\"})",
@ -909,7 +909,7 @@
"dashes": false,
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"fill": 1,
"fillGradient": 0,
@ -949,7 +949,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])",
@ -995,7 +995,7 @@
"dashes": false,
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"fill": 1,
"fillGradient": 0,
@ -1035,7 +1035,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(go_goroutines{job=~\"$job\", instance=~\"$instance\"})",
@ -1081,7 +1081,7 @@
"dashes": false,
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"fieldConfig": {
"defaults": {
@ -1128,7 +1128,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(go_gc_duration_seconds_sum{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))\n/\nsum(rate(go_gc_duration_seconds_count{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
@ -1203,7 +1203,7 @@
},
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"definition": "label_values(operator_log_messages_total,job)",
"hide": 0,
@ -1229,7 +1229,7 @@
},
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"definition": "label_values(operator_log_messages_total{job=~\"$job\"},instance)",
"hide": 0,
@ -1256,7 +1256,7 @@
},
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
"uid": "$ds"
},
"definition": "label_values(vm_app_version{job=\"$job\", instance=\"$instance\"}, version)",
"hide": 2,
@ -1286,4 +1286,4 @@
"uid": "1H179hunk",
"version": 1,
"weekStart": ""
}
}


@ -6,7 +6,7 @@
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "9.1.0"
"version": "9.2.6"
},
{
"type": "datasource",
@ -62,7 +62,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${ds}"
"uid": "$ds"
},
"enable": true,
"expr": "sum(ALERTS{alertgroup=\"vmcluster\",alertstate=\"firing\",show_at=\"dashboard\"}) by(alertname)",
@ -109,6 +109,7 @@
"collapsed": false,
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"gridPos": {
"h": 1,
@ -123,6 +124,7 @@
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"description": "How many datapoints are in storage",
@ -169,7 +171,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "9.1.0",
"pluginVersion": "9.2.6",
"targets": [
{
"datasource": {
@ -238,7 +240,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "9.1.0",
"pluginVersion": "9.2.6",
"targets": [
{
"datasource": {
@ -309,7 +311,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "9.1.0",
"pluginVersion": "9.2.6",
"targets": [
{
"datasource": {
@ -379,7 +381,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "9.1.0",
"pluginVersion": "9.2.6",
"targets": [
{
"datasource": {
@ -449,7 +451,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "9.1.0",
"pluginVersion": "9.2.6",
"targets": [
{
"datasource": {
@ -519,7 +521,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "9.1.0",
"pluginVersion": "9.2.6",
"targets": [
{
"datasource": {
@ -541,6 +543,7 @@
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"description": "Average disk usage per datapoint.",
@ -588,7 +591,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "9.1.0",
"pluginVersion": "9.2.6",
"targets": [
{
"datasource": {
@ -657,7 +660,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "9.1.0",
"pluginVersion": "9.2.6",
"targets": [
{
"datasource": {
@ -759,7 +762,7 @@
}
]
},
"pluginVersion": "9.1.0",
"pluginVersion": "9.2.6",
"targets": [
{
"datasource": {
@ -882,6 +885,7 @@
{
"collapsed": false,
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"gridPos": {
@ -1111,6 +1115,7 @@
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"description": "Shows the number of active time series with new data points inserted during the last hour across all storage nodes. High value may result in ingestion slowdown. \n\nSee following link for details:",
@ -1156,7 +1161,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -1265,7 +1271,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -1370,7 +1377,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -1475,7 +1483,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -1538,6 +1547,7 @@
{
"collapsed": true,
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"gridPos": {
@ -1593,7 +1603,7 @@
{
"targetBlank": true,
"title": "Drilldown",
"url": "/d/oS7Bi_0Wz?viewPanel=189&var-job=${__field.labels.job}&var-ds=$ds&var-instance=$instance&${__url_time_range}"
"url": "/d/oS7Bi_0Wz?viewPanel=189&var-job=${__field.labels.job}&var-ds=$ds&var-instance=$instance&${__url_time_range}"
}
],
"mappings": [],
@ -1602,7 +1612,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -1618,7 +1629,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 3
"y": 14
},
"id": 66,
"links": [],
@ -1704,7 +1715,7 @@
{
"targetBlank": true,
"title": "Drilldown",
"url": "/d/oS7Bi_0Wz?viewPanel=190&var-job=${__field.labels.job}&var-ds=$ds&var-instance=$instance&${__url_time_range}"
"url": "/d/oS7Bi_0Wz?viewPanel=190&var-job=${__field.labels.job}&var-ds=$ds&var-instance=$instance&${__url_time_range}"
}
],
"mappings": [],
@ -1713,7 +1724,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -1729,7 +1741,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 3
"y": 14
},
"id": 138,
"links": [],
@ -1814,7 +1826,7 @@
{
"targetBlank": true,
"title": "Drilldown",
"url": "/d/oS7Bi_0Wz?viewPanel=192&var-job=${__field.labels.job}&var-ds=$ds&var-instance=$instance&${__url_time_range}"
"url": "/d/oS7Bi_0Wz?viewPanel=192&var-job=${__field.labels.job}&var-ds=$ds&var-instance=$instance&${__url_time_range}"
}
],
"mappings": [],
@ -1823,7 +1835,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -1839,7 +1852,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 11
"y": 22
},
"id": 64,
"links": [],
@ -1929,7 +1942,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -1958,7 +1972,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 11
"y": 22
},
"id": 122,
"links": [],
@ -2066,7 +2080,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -2098,7 +2113,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 19
"y": 30
},
"id": 117,
"links": [],
@ -2186,7 +2201,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -2202,7 +2218,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 19
"y": 30
},
"id": 119,
"options": {
@ -2290,7 +2306,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -2306,7 +2323,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 27
"y": 38
},
"id": 68,
"links": [],
@ -2394,7 +2411,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -2410,7 +2428,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 27
"y": 38
},
"id": 120,
"options": {
@ -2498,7 +2516,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -2514,7 +2533,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 35
"y": 46
},
"id": 70,
"links": [],
@ -2564,6 +2583,7 @@
{
"collapsed": true,
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"gridPos": {
@ -2576,6 +2596,7 @@
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"description": "Shows the rate and total number of new series created over last 24h.\n\nHigh churn rate tightly connected with database performance and may result in unexpected OOM's or slow queries. It is recommended to always keep an eye on this metric to avoid unexpected cardinality \"explosions\".\n\nThe higher churn rate is, the more resources required to handle it. Consider to keep the churn rate as low as possible.\n\nTo investigate stats about most expensive series use `api/v1/status/tsdb` handler. More details here https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format\n\nGood references to read:\n* https://www.robustperception.io/cardinality-is-key\n* https://valyala.medium.com/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b",
@ -2918,6 +2939,7 @@
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"description": "Slow queries according to `search.logSlowQueryDuration` flag, which is `5s` by default.",
@ -3121,6 +3143,7 @@
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"description": "VictoriaMetrics limits the number of labels per each metric with `-maxLabelsPerTimeseries` command-line flag.\n\nThis prevents from ingesting metrics with too many labels. The value of `maxLabelsPerTimeseries` must be adjusted for your workload.\n\nWhen limit is exceeded (graph is > 0) - extra labels are dropped, which could result in unexpected identical time series.",
@ -3435,7 +3458,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${ds}"
"uid": "$ds"
},
"fieldConfig": {
"defaults": {
@ -3516,7 +3539,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${ds}"
"uid": "$ds"
},
"editorMode": "code",
"exemplar": false,
@ -3565,6 +3588,7 @@
{
"collapsed": true,
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"gridPos": {
@ -4026,6 +4050,7 @@
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"description": "The number of rows or bytes that vminesrt internal buffer contains at the moment.",
@ -4250,6 +4275,7 @@
{
"collapsed": true,
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"gridPos": {
@ -4410,7 +4436,7 @@
{
"targetBlank": true,
"title": "Drilldown",
"url": "/d/oS7Bi_0Wz?viewPanel=196&${__url_time_range}&${__all_variables}"
"url": "/d/oS7Bi_0Wz?viewPanel=196&${__url_time_range}${__all_variables}"
}
],
"mappings": [],
@ -4522,7 +4548,7 @@
{
"targetBlank": true,
"title": "Drilldown",
"url": "/d/oS7Bi_0Wz?viewPanel=192&var-job=$job_storage&var-ds=$ds&var-instance=$instance&${__url_time_range}"
"url": "/d/oS7Bi_0Wz?viewPanel=192&var-job=$job_storage&var-ds=$ds&var-instance=$instance&${__url_time_range}"
}
],
"mappings": [],
@ -4667,7 +4693,7 @@
{
"targetBlank": true,
"title": "Drilldown",
"url": "/d/oS7Bi_0Wz?viewPanel=189&var-job=$job_storage&var-ds=$ds&var-instance=$instance&${__url_time_range}"
"url": "/d/oS7Bi_0Wz?viewPanel=189&var-job=$job_storage&var-ds=$ds&var-instance=$instance&${__url_time_range}"
}
],
"mappings": [],
@ -5777,6 +5803,7 @@
{
"collapsed": true,
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"gridPos": {
@ -5835,7 +5862,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -5851,7 +5879,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 7
"y": 82
},
"id": 92,
"links": [],
@ -5941,7 +5969,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -5977,7 +6006,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 7
"y": 82
},
"id": 95,
"links": [],
@ -6074,7 +6103,7 @@
{
"targetBlank": true,
"title": "Drilldown",
"url": "/d/oS7Bi_0Wz?viewPanel=192&var-job=$job_select&var-ds=$ds&var-instance=$instance&${__url_time_range}"
"url": "/d/oS7Bi_0Wz?viewPanel=192&var-job=$job_select&var-ds=$ds&var-instance=$instance&${__url_time_range}"
}
],
"mappings": [],
@ -6083,7 +6112,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -6099,7 +6129,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 15
"y": 90
},
"id": 163,
"links": [],
@ -6218,7 +6248,7 @@
{
"targetBlank": true,
"title": "Drilldown",
"url": "/d/oS7Bi_0Wz?viewPanel=189&var-job=$job_select&var-ds=$ds&var-instance=$instance&${__url_time_range}"
"url": "/d/oS7Bi_0Wz?viewPanel=189&var-job=$job_select&var-ds=$ds&var-instance=$instance&${__url_time_range}"
}
],
"mappings": [],
@ -6227,7 +6257,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -6243,7 +6274,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 15
"y": 90
},
"id": 165,
"links": [],
@ -6367,7 +6398,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -6383,7 +6415,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 23
"y": 98
},
"id": 178,
"links": [],
@ -6474,7 +6506,8 @@
"mode": "absolute",
"steps": [
{
"color": "green"
"color": "green",
"value": null
},
{
"color": "red",
@ -6490,7 +6523,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 23
"y": 98
},
"id": 180,
"links": [],
@ -6597,7 +6630,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 31
"y": 106
},
"id": 179,
"links": [],
@ -6704,7 +6737,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 31
"y": 106
},
"id": 181,
"links": [],
@ -6822,7 +6855,7 @@
"h": 8,
"w": 24,
"x": 0,
"y": 39
"y": 114
},
"id": 93,
"links": [],
@ -6883,6 +6916,7 @@
{
"collapsed": true,
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"gridPos": {
@ -7182,7 +7216,7 @@
{
"targetBlank": true,
"title": "Drilldown",
"url": "/d/oS7Bi_0Wz?viewPanel=192&var-job=$job_insert&var-ds=$ds&var-instance=$instance&${__url_time_range}"
"url": "/d/oS7Bi_0Wz?viewPanel=192&var-job=$job_insert&var-ds=$ds&var-instance=$instance&${__url_time_range}"
}
],
"mappings": [],
@ -7326,7 +7360,7 @@
{
"targetBlank": true,
"title": "Drilldown",
"url": "/d/oS7Bi_0Wz?viewPanel=189&var-job=$job_insert&var-ds=$ds&var-instance=$instance&${__url_time_range}"
"url": "/d/oS7Bi_0Wz?viewPanel=189&var-job=$job_insert&var-ds=$ds&var-instance=$instance&${__url_time_range}"
}
],
"mappings": [],
@ -7867,7 +7901,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${ds}"
"uid": "$ds"
},
"gridPos": {
"h": 2,
@ -8327,6 +8361,7 @@
{
"current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{version=~\"^vminsert.*\"}, job)",
@ -8351,6 +8386,7 @@
{
"current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{version=~\"^vmselect.*\"}, job)",
@ -8375,6 +8411,7 @@
{
"current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{version=~\"^vmstorage.*\"}, job)",
@ -8399,6 +8436,7 @@
{
"current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{version=~\"^vm(insert|select|storage).*\"}, job)",
@ -8424,6 +8462,7 @@
"allValue": ".*",
"current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"definition": "label_values(vm_app_version{job=~\"$job\"}, instance)",
@ -8445,7 +8484,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "${ds}"
"uid": "$ds"
},
"filters": [],
"hide": 0,

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -54,6 +54,7 @@ groups:
for: 15m
labels:
severity: warning
show_at: dashboard
annotations:
dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=35&var-instance={{ $labels.instance }}"
summary: "Too many errors served for path {{ $labels.path }} (instance {{ $labels.instance }})"
@ -65,6 +66,7 @@ groups:
for: 15m
labels:
severity: warning
show_at: dashboard
annotations:
dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=59&var-instance={{ $labels.instance }}"
summary: "VictoriaMetrics on instance {{ $labels.instance }} is constantly hitting concurrent flushes limit"

View file

@ -2,7 +2,7 @@ version: '3.5'
services:
vmagent:
container_name: vmagent
image: victoriametrics/vmagent:v1.83.1
image: victoriametrics/vmagent:v1.84.0
depends_on:
- "vminsert"
ports:
@ -17,7 +17,7 @@ services:
grafana:
container_name: grafana
image: grafana/grafana:9.1.0
image: grafana/grafana:9.2.6
depends_on:
- "vmselect"
ports:
@ -32,7 +32,7 @@ services:
vmstorage-1:
container_name: vmstorage-1
image: victoriametrics/vmstorage:v1.83.1-cluster
image: victoriametrics/vmstorage:v1.84.0-cluster
ports:
- 8482
- 8400
@ -44,7 +44,7 @@ services:
restart: always
vmstorage-2:
container_name: vmstorage-2
image: victoriametrics/vmstorage:v1.83.1-cluster
image: victoriametrics/vmstorage:v1.84.0-cluster
ports:
- 8482
- 8400
@ -56,7 +56,7 @@ services:
restart: always
vminsert:
container_name: vminsert
image: victoriametrics/vminsert:v1.83.1-cluster
image: victoriametrics/vminsert:v1.84.0-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@ -68,7 +68,7 @@ services:
restart: always
vmselect:
container_name: vmselect
image: victoriametrics/vmselect:v1.83.1-cluster
image: victoriametrics/vmselect:v1.84.0-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@ -82,7 +82,7 @@ services:
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.83.1
image: victoriametrics/vmalert:v1.84.0
depends_on:
- "vmselect"
ports:

View file

@ -2,7 +2,7 @@ version: "3.5"
services:
vmagent:
container_name: vmagent
image: victoriametrics/vmagent:v1.83.1
image: victoriametrics/vmagent:v1.84.0
depends_on:
- "victoriametrics"
ports:
@ -18,7 +18,7 @@ services:
restart: always
victoriametrics:
container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.83.1
image: victoriametrics/victoria-metrics:v1.84.0
ports:
- 8428:8428
- 8089:8089
@ -40,7 +40,7 @@ services:
restart: always
grafana:
container_name: grafana
image: grafana/grafana:9.1.0
image: grafana/grafana:9.2.6
depends_on:
- "victoriametrics"
ports:
@ -56,7 +56,7 @@ services:
restart: always
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.83.1
image: victoriametrics/vmalert:v1.84.0
depends_on:
- "victoriametrics"
- "alertmanager"

View file

@ -110,6 +110,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
* [How ClickHouse inspired us to build a high performance time series database](https://www.youtube.com/watch?v=p9qjb_yoBro). See also [slides](https://docs.google.com/presentation/d/1SdFrwsyR-HMXfbzrY8xfDZH_Dg6E7E5NJ84tQozMn3w/edit?usp=sharing)
* [OSA Con 2022: Specifics of data analysis in Time Series Databases](https://www.youtube.com/watch?v=_zORxrgLtec)
* [OSMC 2022 | VictoriaMetrics: scaling to 100 million metrics per second](https://www.slideshare.net/NETWAYS/osmc-2022-victoriametrics-scaling-to-100-million-metrics-per-second-by-aliaksandr-valialkin)
* [CNCF Paris Meetup 2022-09-15 - VictoriaMetrics - The cost of scale in Prometheus ecosystem](https://www.youtube.com/watch?v=gcZYHpri2Hw). See also [slides](https://docs.google.com/presentation/d/1jhZuKnAXi15M-mdBP5a4ZAiyrMeHhYmzO8xcZ6pMyLc/edit?usp=sharing)
* [Comparing Thanos to VictoriaMetrics cluster](https://faun.pub/comparing-thanos-to-victoriametrics-cluster-b193bea1683)
* [Evaluation performance and correctness: VictoriaMetrics response](https://valyala.medium.com/evaluating-performance-and-correctness-victoriametrics-response-e27315627e87)

View file

@ -15,6 +15,11 @@ The following tip changes can be tested by building VictoriaMetrics components f
## tip
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): improve [service discovery](https://docs.victoriametrics.com/sd_configs.html) performance when discovering a big number of targets (10K and more).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `exported_` prefix to metric names exported by scrape targets if these metric names clash with [automatically generated metrics](https://docs.victoriametrics.com/vmagent.html#automatically-generated-metrics) such as `up`, `scrape_samples_scraped`, etc. This prevents from corruption of automatically generated metrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3406).
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve error message when the requested path cannot be properly parsed, so users could identify the issue and properly fix the path. Now the error message links to [url format docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3402).
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to copy data from sources via Prometheus `remote_read` protocol. See [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-by-remote-read-protocol). The related issues: [one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3132) and [two](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1101).
## [v1.84.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.84.0)

22
docs/operator/FAQ.md Normal file
View file

@ -0,0 +1,22 @@
---
sort: 15
---
# FAQ
## How to change VMStorage PVC storage class
With Helm chart deployment:
1. Update the PVCs manually
2. Run `kubectl delete statefulset --cascade=orphan {vmstorage-sts}` which will delete the sts but keep the pods
3. Update helm chart with the new storage class in the volumeClaimTemplate
4. Run the helm chart to recreate the sts with the updated value
With Operator deployment:
1. Update the PVCs manually
2. Run `kubectl delete vmcluster --cascade=orphan {cluster-name}`
3. Run `kubectl delete statefulset --cascade=orphan {vmstorage-sts}`
4. Update VMCluster spec to use new storage class
5. Apply the cluster configuration (see the sketch below)
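A minimal command sketch for the Operator flow; the `example-cluster` names and the `vmcluster.yaml` manifest below are placeholders, not real defaults:
```
# step 2: delete the VMCluster object but keep its pods
kubectl delete vmcluster example-cluster --cascade=orphan
# step 3: delete the vmstorage statefulset but keep its pods
kubectl delete statefulset vmstorage-example-cluster --cascade=orphan
# steps 4-5: set the new storage class in the VMCluster spec, then re-apply it
kubectl apply -f vmcluster.yaml
```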

View file

@ -6,18 +6,21 @@ sort: 5
## vmbackupmanager
## vmbackupmanager is proprietary software.
You can check the vmbackupmanager [documentation](https://docs.victoriametrics.com/vmbackupmanager.html). It contains a description of the service and its features. This documentation covers vmbackupmanager integration in vmoperator.
Before using it, you must have signed contract and accept EULA https://victoriametrics.com/assets/VM_EULA.pdf
### vmbackupmanager is a part of VictoriaMetrics Enterprise offer
*Before using it, you must have a signed contract and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf*
## Usage examples
`VMSingle` and `VMCluster` has built-in backup configuration, it uses `vmbackupmanager` - proprietary tool for backups.
It supports incremental backups (hours, daily, etc) with popular object storages (aws s3, google cloud storage).
You can enable it with the simple configuration, define secret
```yaml
`VMSingle` and `VMCluster` have built-in backup configuration; they use `vmbackupmanager`, a proprietary tool for backups.
It supports incremental backups (hourly, daily, weekly, monthly) with popular object storages (aws s3, google cloud storage).
The configuration example is the following:
```yaml
---
apiVersion: v1
kind: Secret
@ -63,74 +66,82 @@ spec:
credentialsSecret:
name: remote-storage-keys
key: credentials
```
```
NOTE: for cluster version operator adds suffix for `destination: "s3://your_bucket/folder"`, it becomes `"s3://your_bucket/folder/$(POD_NAME)"`.
NOTE: for the cluster version the operator adds a suffix to the destination: `"s3://your_bucket/folder"` becomes `"s3://your_bucket/folder/$(POD_NAME)"`.
It's needed to make consistent backups for each storage node.
You can read more about backup configuration options and mechanics [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmbackup)
Possible configuration options for backup crd can be found at [link](https://docs.victoriametrics.com/operator/api.html#vmbackup)
You can read more about backup configuration options and mechanics [here](https://docs.victoriametrics.com/vmbackup.html)
Possible configuration options for the backup CRD can be found [here](https://docs.victoriametrics.com/operator/api.html#vmbackup)
## Restoring backups
There are several ways to restore with [vmrestore](https://docs.victoriametrics.com/vmrestore.html) or [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html).
It can be done with [vmrestore](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmrestore)
There two ways:
First:
You have to stop `VMSingle` by scaling it replicas to zero and manually restore data to the database directory.
Steps:
1) edit `VMSingle` CRD, set replicaCount: 0
2) wait until database stops
3) ssh to some server, where you can mount `VMSingle` disk and mount it manually
4) restore files with `vmrestore`
5) umount disk
6) edit `VMSingle` CRD, set replicaCount: 1
7) wait database start
Second:
### Manually mounting disk
You have to stop `VMSingle` by scaling its replicas to zero and manually restore the data to the database directory.
1) add init container with vmrestore command to `VMSingle` CRD, example:
```yaml
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMSingle
metadata:
name: vmsingle-restored
namespace: monitoring-system
spec:
initContainers:
- name: vmrestore
image: victoriametrics/vmrestore:latest
volumeMounts:
- mountPath: /victoria-metrics-data
name: data
- mountPath: /etc/vm/creds
name: secret-remote-storage-keys
readOnly: true
args:
- -storageDataPath=/victoria-metrics-data
- -src=s3://your_bucket/folder/latest
- -credsFilePath=/etc/vm/creds/credentials
vmBackup:
# This is Enterprise Package feature you need to have signed contract to use it
# and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
acceptEULA: true
destination: "s3://your_bucket/folder"
extraArgs:
runOnStart: "true"
image:
repository: victoriametrics/vmbackupmanager
tag: v1.67.0-enterprise
credentialsSecret:
name: remote-storage-keys
key: credentials
Steps:
1. edit `VMSingle` CRD, set replicaCount: 0
2. wait until the database stops
3. ssh to some server where you can mount the `VMSingle` disk, and mount it manually
4. restore the files with `vmrestore` (see the sketch below)
5. umount the disk
6. edit `VMSingle` CRD, set replicaCount: 1
7. wait for the database to start
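A minimal `vmrestore` invocation sketch for step 4; it mirrors the flags from the init container example below, and the bucket and paths are placeholders:
```
./vmrestore \
  -src=s3://your_bucket/folder/latest \
  -storageDataPath=/victoria-metrics-data \
  -credsFilePath=/etc/vm/creds/credentials
```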
### Using VMRestore init container
```
2) apply it, and db will be restored from s3
3) remove initContainers and apply crd.
1. add init container with vmrestore command to `VMSingle` CRD, example:
```yaml
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMSingle
metadata:
name: vmsingle-restored
namespace: monitoring-system
spec:
initContainers:
- name: vmrestore
image: victoriametrics/vmrestore:latest
volumeMounts:
- mountPath: /victoria-metrics-data
name: data
- mountPath: /etc/vm/creds
name: secret-remote-storage-keys
readOnly: true
args:
- -storageDataPath=/victoria-metrics-data
- -src=s3://your_bucket/folder/latest
- -credsFilePath=/etc/vm/creds/credentials
vmBackup:
# This is Enterprise Package feature you need to have signed contract to use it
# and accept the EULA https://victoriametrics.com/assets/VM_EULA.pdf
acceptEULA: true
destination: "s3://your_bucket/folder"
extraArgs:
runOnStart: "true"
image:
repository: victoriametrics/vmbackupmanager
tag: v1.83.0-enterprise
credentialsSecret:
name: remote-storage-keys
key: credentials
```
2. apply it, and the database will be restored from s3
3. remove initContainers and apply the CRD.
Note that using `VMRestore` will require adjusting `src` for each pod because restore will be handled per-pod.
### Using VMBackupmanager init container
Using `VMBackupmanager` restore in a Kubernetes environment is described [here](https://docs.victoriametrics.com/vmbackupmanager.html#how-to-restore-in-kubernetes).
Advantages of using `VMBackupmanager` include:
- automatic adjustment of `src` for each pod when backup is requested
- graceful handling of the case when no restore is required - `VMBackupmanager` will exit with a successful status code and won't prevent the pod from starting

View file

@ -608,7 +608,7 @@ Additional information:
## TCP and UDP
**How to send data from OpenTSDB-compatible agents to VictoriaMetrics**
### How to send data from OpenTSDB-compatible agents to VictoriaMetrics
Turned off by default. Enable the OpenTSDB receiver in VictoriaMetrics by setting the `-opentsdbListenAddr` command-line flag.
*If run from Docker, the '-opentsdbListenAddr' port should be exposed.*
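For example, a minimal sketch of sending a single sample in the OpenTSDB telnet `put` format, assuming VictoriaMetrics was started with `-opentsdbListenAddr=:4242`:
```
# put <metric> <unix_timestamp> <value> <tag>=<value>
echo "put foo.bar.baz $(date +%s) 123 tag1=value1" | nc -N localhost 4242
```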
@ -656,7 +656,7 @@ Additional information:
* [OpenTSDB http put API](http://opentsdb.net/docs/build/html/api_http/put.html)
* [How to send data OpenTSDB data to VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents)
**How to send Graphite data to VictoriaMetrics**
### How to send Graphite data to VictoriaMetrics
Enable the Graphite receiver in VictoriaMetrics by setting the `-graphiteListenAddr` command-line flag.
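For example, a minimal sketch of sending a single sample in the Graphite plaintext format, assuming VictoriaMetrics was started with `-graphiteListenAddr=:2003`:
```
# <metric.path> <value> <unix_timestamp>
echo "foo.bar.baz 123 $(date +%s)" | nc -N localhost 2003
```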

View file

@ -321,7 +321,8 @@ Extra labels can be added to metrics collected by `vmagent` via the following me
## Automatically generated metrics
`vmagent` automatically generates the following metrics per each scrape of every [Prometheus-compatible target](#how-to-collect-metrics-in-prometheus-format):
`vmagent` automatically generates the following metrics per each scrape of every [Prometheus-compatible target](#how-to-collect-metrics-in-prometheus-format)
and attaches target-specific `instance` and `job` labels to these metrics:
* `up` - this metric exposes `1` value on successful scrape and `0` value on unsuccessful scrape. This allows monitoring
failing scrapes with the following [MetricsQL query](https://docs.victoriametrics.com/MetricsQL.html):
@ -409,6 +410,9 @@ Extra labels can be added to metrics collected by `vmagent` via the following me
sum_over_time(scrape_series_limit_samples_dropped[1h]) > 0
```
If the target exports metrics with names clashing with the automatically generated metric names, then `vmagent` automatically
adds the `exported_` prefix to these metric names, so they don't clash with the automatically generated ones.
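For illustration, a sketch of this renaming for a hypothetical target exposing its own `up` metric; the `job` and `instance` values below are placeholders:
```
# exposed by the target at /metrics:
up 1
http_requests_total 42

# written by vmagent after a successful scrape:
up{job="myjob", instance="host:8080"} 1            # generated by vmagent
exported_up{job="myjob", instance="host:8080"} 1   # the target's own `up`, renamed
http_requests_total{job="myjob", instance="host:8080"} 42
```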
## Relabeling

View file

@ -839,7 +839,7 @@ The shortlist of configuration flags is the following:
-datasource.tlsServerName string
Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
-datasource.url string
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend and -datasource.showURL
-defaultTenant.graphite string
Default tenant for Graphite alerting groups. See https://docs.victoriametrics.com/vmalert.html#multitenancy .This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
-defaultTenant.prometheus string

View file

@ -11,9 +11,12 @@ vmctl provides various useful actions with VictoriaMetrics components.
Features:
- migrate data from [Prometheus](#migrating-data-from-prometheus) to VictoriaMetrics using snapshot API
- migrate data from [Thanos](#migrating-data-from-thanos) to VictoriaMetrics
- migrate data from [Cortex](#migrating-data-from-cortex) to VictoriaMetrics
- migrate data from [Mimir](#migrating-data-from-mimir) to VictoriaMetrics
- migrate data from [InfluxDB](#migrating-data-from-influxdb-1x) to VictoriaMetrics
- migrate data from [OpenTSDB](#migrating-data-from-opentsdb) to VictoriaMetrics
- migrate data between [VictoriaMetrics](#migrating-data-from-victoriametrics) single or cluster version.
- migrate data by [Prometheus remote read protocol](#migrating-data-by-remote-read-protocol) to VictoriaMetrics
- [verify](#verifying-exported-blocks-from-victoriametrics) exported blocks from VictoriaMetrics single or cluster version.
To see the full list of supported modes
@ -32,6 +35,7 @@ COMMANDS:
influx Migrate timeseries from InfluxDB
prometheus Migrate timeseries from Prometheus
vm-native Migrate time series between VictoriaMetrics installations via native binary format
remote-read Migrate timeseries by Prometheus remote read protocol
verify-block Verifies correctness of data blocks exported via VictoriaMetrics Native format. See https://docs.victoriametrics.com/#how-to-export-data-in-native-format
```
@ -436,6 +440,64 @@ Found 2 blocks to import. Continue? [Y/n] y
2020/02/23 15:51:07 Total time: 7.153158218s
```
## Migrating data by remote read protocol
`vmctl` supports the `remote-read` mode for migrating data from databases which support the
[Prometheus remote read API](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/).
See `./vmctl remote-read --help` for details and the full list of flags.
To start the migration process, configure the following flags:
1. `--remote-read-src-addr` - the data source address to read from;
2. `--vm-addr` - the VictoriaMetrics address to write to. For single-node VM it is usually equal to `--httpListenAddr`,
and for the cluster version it is equal to the `--httpListenAddr` flag of the vminsert component (for example `http://<vminsert>:8480/insert/<accountID>/prometheus`);
3. `--remote-read-filter-time-start` - the time filter in RFC3339 format to select time series with timestamp equal or higher than the provided value. E.g. '2020-01-01T20:07:00Z';
4. `--remote-read-filter-time-end` - the time filter in RFC3339 format to select time series with timestamp equal or smaller than the provided value. E.g. '2020-01-01T20:07:00Z'. Current time is used when omitted;
5. `--remote-read-step-interval` - split the exported data into chunks. Valid values are `month, day, hour, minute`;
The importing process example for a local installation of Prometheus
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl remote-read \
--remote-read-src-addr=http://127.0.0.1:9091 \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-step-interval=hour \
--vm-addr=http://127.0.0.1:8428 \
--vm-concurrency=6
Split defined times into 8798 ranges to import. Continue? [Y/n]
VM worker 0:↘ 127177 samples/s
VM worker 1:↘ 140137 samples/s
VM worker 2:↘ 151606 samples/s
VM worker 3:↘ 130765 samples/s
VM worker 4:↘ 131904 samples/s
VM worker 5:↘ 132693 samples/s
Processing ranges: 8798 / 8798 [█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
2022/10/19 16:45:37 Import finished!
2022/10/19 16:45:37 VictoriaMetrics importer stats:
idle duration: 6m57.793987511s;
time spent while importing: 1m18.463744801s;
total samples: 25348208;
samples/s: 323056.31;
total bytes: 669.7 MB;
bytes/s: 8.5 MB;
import requests: 127;
import requests retries: 0;
2022/10/19 16:45:37 Total time: 1m19.406283424s
```
### Filtering
The filtering consists of two parts: by labels and time.
Filtering by time can be configured via flags `--remote-read-filter-time-start` and `--remote-read-filter-time-end`
in RFC3339 format.
Filtering by labels can be configured via flags `--remote-read-filter-label` and `--remote-read-filter-label-value`.
For example, `--remote-read-filter-label=tenant` and `--remote-read-filter-label-value="team-eu"` will select only series
with `tenant="team-eu"` label-value pair.
## Migrating data from Thanos
Thanos uses the same storage engine as Prometheus and the data layout on-disk should be the same. That means
@ -482,6 +544,187 @@ then import it into VM using `vmctl` in `prometheus` mode.
vmctl prometheus --prom-snapshot thanos-data --vm-addr http://victoria-metrics:8428
```
### Remote read protocol
Currently, Thanos doesn't support the streaming remote read protocol. It is [recommended](https://thanos.io/tip/thanos/integrations.md/#storeapi-as-prometheus-remote-read)
to use [thanos-remote-read](https://github.com/G-Research/thanos-remote-read), a proxy that allows exposing any Thanos
service (or anything that exposes the gRPC StoreAPI, e.g. Querier) via the Prometheus remote read protocol.
If you want to migrate data, you should run the [thanos-remote-read](https://github.com/G-Research/thanos-remote-read) proxy
and define the Thanos store address `./thanos-remote-read -store 127.0.0.1:19194`.
It is important to know that the `store` flag is the Thanos Store API gRPC endpoint.
Also, it is important to know that the thanos-remote-read proxy doesn't support the `STREAMED_XOR_CHUNKS` mode.
When you run the thanos-remote-read proxy, it exposes a port to serve HTTP, `10080` by default.
The importing process example for a local installation of Thanos
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl remote-read \
--remote-read-src-addr=http://127.0.0.1:10080 \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-step-interval=hour \
--vm-addr=http://127.0.0.1:8428 \
--vm-concurrency=6
```
On the [thanos-remote-read](https://github.com/G-Research/thanos-remote-read) proxy side you will see logs like:
```
ts=2022-10-19T15:05:04.193916Z caller=main.go:278 level=info traceID=00000000000000000000000000000000 msg="thanos request" request="min_time:1666180800000 max_time:1666184399999 matchers:<type:RE value:\".*\" > aggregates:RAW "
ts=2022-10-19T15:05:04.468852Z caller=main.go:278 level=info traceID=00000000000000000000000000000000 msg="thanos request" request="min_time:1666184400000 max_time:1666187999999 matchers:<type:RE value:\".*\" > aggregates:RAW "
ts=2022-10-19T15:05:04.553914Z caller=main.go:278 level=info traceID=00000000000000000000000000000000 msg="thanos request" request="min_time:1666188000000 max_time:1666191364863 matchers:<type:RE value:\".*\" > aggregates:RAW "
```
And when the process finishes, you will see the following:
```
Split defined times into 8799 ranges to import. Continue? [Y/n]
VM worker 0:↓ 98183 samples/s
VM worker 1:↓ 114640 samples/s
VM worker 2:↓ 131710 samples/s
VM worker 3:↓ 114256 samples/s
VM worker 4:↓ 105671 samples/s
VM worker 5:↓ 124000 samples/s
Processing ranges: 8799 / 8799 [█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
2022/10/19 18:05:07 Import finished!
2022/10/19 18:05:07 VictoriaMetrics importer stats:
idle duration: 52m13.987637229s;
time spent while importing: 9m1.728983776s;
total samples: 70836111;
samples/s: 130759.32;
total bytes: 2.2 GB;
bytes/s: 4.0 MB;
import requests: 356;
import requests retries: 0;
2022/10/19 18:05:07 Total time: 9m2.607521618s
```
## Migrating data from Cortex
Cortex has an implementation of the Prometheus remote read protocol. That means
`vmctl` in `remote-read` mode may also be used for Cortex historical data migration.
These instructions may vary based on the details of your Cortex configuration.
Please read carefully and verify as you go.
### Remote read protocol
If you want to migrate data, you should check your Cortex configuration in the following section:
```yaml
api:
prometheus_http_prefix:
```
If you defined some Prometheus HTTP prefix, you should use it when defining the flag `--remote-read-src-addr=http://127.0.0.1:9009/{prometheus_http_prefix}`.
By default, Cortex uses the `prometheus` path prefix, so you should define the flag `--remote-read-src-addr=http://127.0.0.1:9009/prometheus`.
It is important to know that Cortex doesn't support the `STREAMED_XOR_CHUNKS` mode.
When you run Cortex, it exposes a port to serve HTTP, `9009` by default.
The importing process example for a local installation of Cortex
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl remote-read \
--remote-read-src-addr=http://127.0.0.1:9009/prometheus \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-step-interval=hour \
--remote-read-src-check-alive=false \
--vm-addr=http://127.0.0.1:8428 \
--vm-concurrency=6
```
And when the process finishes, you will see the following:
```
Split defined times into 8842 ranges to import. Continue? [Y/n]
VM worker 0:↗ 3863 samples/s
VM worker 1:↗ 2686 samples/s
VM worker 2:↗ 2620 samples/s
VM worker 3:↗ 2705 samples/s
VM worker 4:↗ 2643 samples/s
VM worker 5:↗ 2593 samples/s
Processing ranges: 8842 / 8842 [█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
2022/10/21 12:09:49 Import finished!
2022/10/21 12:09:49 VictoriaMetrics importer stats:
idle duration: 0s;
time spent while importing: 3.82640757s;
total samples: 160232;
samples/s: 41875.31;
total bytes: 11.3 MB;
bytes/s: 3.0 MB;
import requests: 6;
import requests retries: 0;
2022/10/21 12:09:49 Total time: 4.71824253s
```
It is important to know that if you run your Cortex installation in multi-tenant mode, the remote read protocol
requires an authentication header like `X-Scope-OrgID`. You can define it via the flag `--remote-read-headers=X-Scope-OrgID:demo`.
## Migrating data from Mimir
Mimir has a similar implementation to Cortex and also supports the Prometheus remote read protocol. That means
`vmctl` in `remote-read` mode may also be used for Mimir historical data migration.
These instructions may vary based on the details of your Mimir configuration.
Please read carefully and verify as you go.
### Remote read protocol
If you want to migrate data, you should check your Mimir configuration in the following section:
```yaml
api:
prometheus_http_prefix:
```
If you defined some Prometheus HTTP prefix, you should use it when defining the flag `--remote-read-src-addr=http://127.0.0.1:9009/{prometheus_http_prefix}`.
By default, Mimir uses the `prometheus` path prefix, so you should define the flag `--remote-read-src-addr=http://127.0.0.1:9009/prometheus`.
Mimir supports both remote read modes, so you can use either the `STREAMED_XOR_CHUNKS` mode or the `SAMPLES` mode.
When you run Mimir, it exposes a port to serve HTTP, `8080` by default.
The next example uses a local installation in multi-tenant mode (3 Mimir instances) with nginx as a load balancer.
The load balancer exposes a single port `:9090`.
As you can see in the example, we call `:9009` instead of `:8080` because of the proxy.
The importing process example for a local installation of Mimir
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl remote-read \
--remote-read-src-addr=http://127.0.0.1:9009/prometheus \
--remote-read-filter-time-start=2021-10-18T00:00:00Z \
--remote-read-step-interval=hour \
--remote-read-src-check-alive=false \
--remote-read-headers=X-Scope-OrgID:demo \
--remote-read-use-stream=true \
--vm-addr=http://127.0.0.1:8428 \
--vm-concurrency=6
```
And when the process finishes, you will see the following:
```
Split defined times into 8847 ranges to import. Continue? [Y/n]
VM worker 0:→ 12176 samples/s
VM worker 1:→ 11918 samples/s
VM worker 2:→ 11261 samples/s
VM worker 3:→ 12861 samples/s
VM worker 4:→ 11096 samples/s
VM worker 5:→ 11575 samples/s
Processing ranges: 8847 / 8847 [█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████] 100.00%
2022/10/21 17:22:23 Import finished!
2022/10/21 17:22:23 VictoriaMetrics importer stats:
idle duration: 0s;
time spent while importing: 15.379614356s;
total samples: 81243;
samples/s: 5282.51;
total bytes: 6.1 MB;
bytes/s: 397.8 kB;
import requests: 6;
import requests retries: 0;
2022/10/21 17:22:23 Total time: 16.287405248s
```
It is important to know that if you run your Mimir installation in multi-tenant mode, the remote read protocol
requires an authentication header like `X-Scope-OrgID`. You can define it via the flag `--remote-read-headers=X-Scope-OrgID:demo`.
## Migrating data from VictoriaMetrics
### Native protocol

View file

@ -233,7 +233,7 @@ The shortlist of configuration flags include the following:
-datasource.tlsServerName string
Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
-datasource.url string
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend and -datasource.showURL
-enable.auth
enables auth with jwt token
-enable.rateLimit

27
go.mod
View file

@ -12,7 +12,7 @@ require (
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
github.com/VictoriaMetrics/fasthttp v1.1.0
github.com/VictoriaMetrics/metrics v1.23.0
github.com/VictoriaMetrics/metricsql v0.49.0
github.com/VictoriaMetrics/metricsql v0.49.1
github.com/aws/aws-sdk-go-v2 v1.17.1
github.com/aws/aws-sdk-go-v2/config v1.18.3
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42
@ -22,6 +22,7 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/go-kit/kit v0.12.0
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/googleapis/gax-go/v2 v2.7.0
github.com/influxdata/influxdb v1.10.0
@ -30,7 +31,7 @@ require (
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/oklog/ulid v1.3.1
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
github.com/prometheus/prometheus v0.40.4
github.com/urfave/cli/v2 v2.23.5
github.com/valyala/fastjson v1.6.3
github.com/valyala/fastrand v1.1.0
@ -51,6 +52,8 @@ require (
cloud.google.com/go/iam v0.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/aws/aws-sdk-go v1.44.149 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.3 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 // indirect
@ -67,33 +70,51 @@ require (
github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 // indirect
github.com/aws/smithy-go v1.13.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stretchr/testify v1.8.1 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/histogram v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 // indirect
go.opentelemetry.io/otel v1.11.1 // indirect
go.opentelemetry.io/otel/metric v0.33.0 // indirect
go.opentelemetry.io/otel/trace v1.11.1 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
go.uber.org/goleak v1.2.0 // indirect
golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/time v0.2.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

811
go.sum

File diff suppressed because it is too large Load diff

20
lib/bytesutil/itoa.go Normal file
View file

@ -0,0 +1,20 @@
package bytesutil
import (
"strconv"
)
// Itoa returns string representation of n.
//
// This function doesn't allocate memory on repeated calls for the same n.
func Itoa(n int) string {
bb := bbPool.Get()
b := bb.B[:0]
b = strconv.AppendInt(b, int64(n), 10)
s := InternString(ToUnsafeString(b))
bb.B = b
bbPool.Put(bb)
return s
}
var bbPool ByteBufferPool

View file

@ -0,0 +1,21 @@
package bytesutil
import (
"testing"
)
func TestItoa(t *testing.T) {
f := func(n int, resultExpected string) {
t.Helper()
for i := 0; i < 5; i++ {
result := Itoa(n)
if result != resultExpected {
t.Fatalf("unexpected result for Itoa(%d); got %q; want %q", n, result, resultExpected)
}
}
}
f(0, "0")
f(1, "1")
f(-123, "-123")
f(343432, "343432")
}

View file

@ -5,6 +5,7 @@ import (
"encoding/json"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"gopkg.in/yaml.v2"
)
@ -121,8 +122,8 @@ func TestIfExpressionMatch(t *testing.T) {
if err := yaml.UnmarshalStrict([]byte(ifExpr), &ie); err != nil {
t.Fatalf("unexpected error during unmarshal: %s", err)
}
labels := MustParseMetricWithLabels(metricWithLabels)
if !ie.Match(labels) {
labels := promutils.NewLabelsFromString(metricWithLabels)
if !ie.Match(labels.GetLabels()) {
t.Fatalf("unexpected mismatch of ifExpr=%s for %s", ifExpr, metricWithLabels)
}
}
@ -155,8 +156,8 @@ func TestIfExpressionMismatch(t *testing.T) {
if err := yaml.UnmarshalStrict([]byte(ifExpr), &ie); err != nil {
t.Fatalf("unexpected error during unmarshal: %s", err)
}
labels := MustParseMetricWithLabels(metricWithLabels)
if ie.Match(labels) {
labels := promutils.NewLabelsFromString(metricWithLabels)
if ie.Match(labels.GetLabels()) {
t.Fatalf("unexpected match of ifExpr=%s for %s", ifExpr, metricWithLabels)
}
}

View file

@ -9,6 +9,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil"
"github.com/cespare/xxhash/v2"
)
@ -48,8 +49,6 @@ func (prc *parsedRelabelConfig) String() string {
}
// Apply applies pcs to labels starting from the labelsOffset.
//
// If isFinalize is set, then FinalizeLabels is called on the labels[labelsOffset:].
func (pcs *ParsedConfigs) Apply(labels []prompbmarshal.Label, labelsOffset int) []prompbmarshal.Label {
var inStr string
relabelDebug := false
@ -111,32 +110,6 @@ func removeEmptyLabels(labels []prompbmarshal.Label, labelsOffset int) []prompbm
return dst
}
// RemoveMetaLabels removes all the `__meta_` labels from src and puts the rest of labels to dst.
//
// See https://www.robustperception.io/life-of-a-label fo details.
func RemoveMetaLabels(dst, src []prompbmarshal.Label) []prompbmarshal.Label {
for _, label := range src {
if strings.HasPrefix(label.Name, "__meta_") {
continue
}
dst = append(dst, label)
}
return dst
}
// RemoveLabelsWithDoubleDashPrefix removes labels with "__" prefix from src, appends the remaining lables to dst and returns the result.
func RemoveLabelsWithDoubleDashPrefix(dst, src []prompbmarshal.Label) []prompbmarshal.Label {
for _, label := range src {
name := label.Name
// A hack: do not delete __vm_filepath label, since it is used by internal logic for FileSDConfig.
if strings.HasPrefix(name, "__") && name != "__vm_filepath" {
continue
}
dst = append(dst, label)
}
return dst
}
// FinalizeLabels removes labels with "__" in the beginning (except of "__name__").
func FinalizeLabels(dst, src []prompbmarshal.Label) []prompbmarshal.Label {
for _, label := range src {
@ -164,7 +137,7 @@ func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset
}
switch prc.Action {
case "graphite":
metricName := GetLabelValueByName(src, "__name__")
metricName := getLabelValue(src, "__name__")
gm := graphiteMatchesPool.Get().(*graphiteMatches)
var ok bool
gm.a, ok = prc.graphiteMatchTemplate.Match(gm.a[:0], metricName)
@ -464,9 +437,9 @@ func areEqualLabelValues(labels []prompbmarshal.Label, labelNames []string) bool
logger.Panicf("BUG: expecting at least 2 labelNames; got %d", len(labelNames))
return false
}
labelValue := GetLabelValueByName(labels, labelNames[0])
labelValue := getLabelValue(labels, labelNames[0])
for _, labelName := range labelNames[1:] {
v := GetLabelValueByName(labels, labelName)
v := getLabelValue(labels, labelName)
if v != labelValue {
return false
}
@ -500,6 +473,15 @@ func setLabelValue(labels []prompbmarshal.Label, labelsOffset int, name, value s
return labels
}
func getLabelValue(labels []prompbmarshal.Label, name string) string {
for _, label := range labels {
if label.Name == name {
return label.Value
}
}
return ""
}
// GetLabelByName returns label with the given name from labels.
func GetLabelByName(labels []prompbmarshal.Label, name string) *prompbmarshal.Label {
for i := range labels {
@ -511,17 +493,6 @@ func GetLabelByName(labels []prompbmarshal.Label, name string) *prompbmarshal.La
return nil
}
// GetLabelValueByName returns value for label with the given name from labels.
//
// It returns empty string for non-existing label.
func GetLabelValueByName(labels []prompbmarshal.Label, name string) string {
label := GetLabelByName(labels, name)
if label == nil {
return ""
}
return label.Value
}
// CleanLabels sets label.Name and label.Value to an empty string for all the labels.
//
// This should help GC cleaning up label.Name and label.Value strings.
@ -563,6 +534,14 @@ func labelsToString(labels []prompbmarshal.Label) string {
return string(b)
}
// SortLabels sorts labels in alphabetical order.
func SortLabels(labels []prompbmarshal.Label) {
x := &promutils.Labels{
Labels: labels,
}
x.Sort()
}
func fillLabelReferences(dst []byte, replacement string, labels []prompbmarshal.Label) []byte {
s := replacement
for len(s) > 0 {
@ -579,7 +558,7 @@ func fillLabelReferences(dst []byte, replacement string, labels []prompbmarshal.
}
labelName := s[:n]
s = s[n+2:]
labelValue := GetLabelValueByName(labels, labelName)
labelValue := getLabelValue(labels, labelName)
dst = append(dst, labelValue...)
}
return dst

View file

@ -5,6 +5,7 @@ import (
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestSanitizeName(t *testing.T) {
@ -77,8 +78,8 @@ func TestApplyRelabelConfigs(t *testing.T) {
if err != nil {
t.Fatalf("cannot parse %q: %s", config, err)
}
labels := MustParseMetricWithLabels(metric)
resultLabels := pcs.Apply(labels, 0)
labels := promutils.NewLabelsFromString(metric)
resultLabels := pcs.Apply(labels.GetLabels(), 0)
if isFinalize {
resultLabels = FinalizeLabels(resultLabels[:0], resultLabels)
}
@ -725,8 +726,8 @@ func TestApplyRelabelConfigs(t *testing.T) {
func TestFinalizeLabels(t *testing.T) {
f := func(metric, resultExpected string) {
t.Helper()
labels := MustParseMetricWithLabels(metric)
resultLabels := FinalizeLabels(nil, labels)
labels := promutils.NewLabelsFromString(metric)
resultLabels := FinalizeLabels(nil, labels.GetLabels())
result := labelsToString(resultLabels)
if result != resultExpected {
t.Fatalf("unexpected result; got\n%s\nwant\n%s", result, resultExpected)
@ -738,27 +739,11 @@ func TestFinalizeLabels(t *testing.T) {
f(`{foo="bar",abc="def",__address__="foo.com"}`, `{abc="def",foo="bar"}`)
}
func TestRemoveMetaLabels(t *testing.T) {
f := func(metric, resultExpected string) {
t.Helper()
labels := MustParseMetricWithLabels(metric)
resultLabels := RemoveMetaLabels(nil, labels)
result := labelsToString(resultLabels)
if result != resultExpected {
t.Fatalf("unexpected result of RemoveMetaLabels;\ngot\n%s\nwant\n%s", result, resultExpected)
}
}
f(`{}`, `{}`)
f(`{foo="bar"}`, `{foo="bar"}`)
f(`{__meta_foo="bar"}`, `{}`)
f(`{__meta_foo="bdffr",foo="bar",__meta_xxx="basd"}`, `{foo="bar"}`)
}
func TestFillLabelReferences(t *testing.T) {
f := func(replacement, metric, resultExpected string) {
t.Helper()
labels := MustParseMetricWithLabels(metric)
result := fillLabelReferences(nil, replacement, labels)
labels := promutils.NewLabelsFromString(metric)
result := fillLabelReferences(nil, replacement, labels.GetLabels())
if string(result) != resultExpected {
t.Fatalf("unexpected result; got\n%q\nwant\n%q", result, resultExpected)
}

View file

@ -1,54 +0,0 @@
package promrelabel
import (
"sort"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
// SortLabels sorts labels.
func SortLabels(labels []prompbmarshal.Label) {
if len(labels) < 2 {
return
}
ls := labelsSorterPool.Get().(*labelsSorter)
*ls = labels
if !sort.IsSorted(ls) {
sort.Sort(ls)
}
*ls = nil
labelsSorterPool.Put(ls)
}
// SortLabelsStable sorts labels using stable sort.
func SortLabelsStable(labels []prompbmarshal.Label) {
if len(labels) < 2 {
return
}
ls := labelsSorterPool.Get().(*labelsSorter)
*ls = labels
if !sort.IsSorted(ls) {
sort.Stable(ls)
}
*ls = nil
labelsSorterPool.Put(ls)
}
var labelsSorterPool = &sync.Pool{
New: func() interface{} {
return &labelsSorter{}
},
}
type labelsSorter []prompbmarshal.Label
func (ls *labelsSorter) Len() int { return len(*ls) }
func (ls *labelsSorter) Swap(i, j int) {
a := *ls
a[i], a[j] = a[j], a[i]
}
func (ls *labelsSorter) Less(i, j int) bool {
a := *ls
return a[i].Name < a[j].Name
}

View file

@ -1,43 +0,0 @@
package promrelabel
import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
func TestSortLabels(t *testing.T) {
labels := []prompbmarshal.Label{
{
Name: "foo",
Value: "bar",
},
{
Name: "aa",
Value: "bb",
},
{
Name: "ba",
Value: "zz",
},
}
labelsExpected := []prompbmarshal.Label{
{
Name: "aa",
Value: "bb",
},
{
Name: "ba",
Value: "zz",
},
{
Name: "foo",
Value: "bar",
},
}
SortLabels(labels)
if !reflect.DeepEqual(labels, labelsExpected) {
t.Fatalf("unexpected sorted labels; got\n%v\nwant\n%v", labels, labelsExpected)
}
}

View file

@ -1,50 +0,0 @@
package promrelabel
import (
"fmt"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
)
// MustParseMetricWithLabels parses s, which can have the form `metric{labels}`.
//
// This function is indended mostly for tests.
func MustParseMetricWithLabels(metricWithLabels string) []prompbmarshal.Label {
stripDummyMetric := false
if strings.HasPrefix(metricWithLabels, "{") {
// Add a dummy metric name, since the parser needs it
metricWithLabels = "dummy_metric" + metricWithLabels
stripDummyMetric = true
}
// add a value to metricWithLabels, so it could be parsed by prometheus protocol parser.
s := metricWithLabels + " 123"
var rows prometheus.Rows
var err error
rows.UnmarshalWithErrLogger(s, func(s string) {
err = fmt.Errorf("error during metric parse: %s", s)
})
if err != nil {
logger.Panicf("BUG: cannot parse %q: %s", metricWithLabels, err)
}
if len(rows.Rows) != 1 {
logger.Panicf("BUG: unexpected number of rows parsed; got %d; want 1", len(rows.Rows))
}
r := rows.Rows[0]
var lfs []prompbmarshal.Label
if !stripDummyMetric {
lfs = append(lfs, prompbmarshal.Label{
Name: "__name__",
Value: r.Metric,
})
}
for _, tag := range r.Tags {
lfs = append(lfs, prompbmarshal.Label{
Name: tag.Key,
Value: tag.Value,
})
}
return lfs
}

View file

@ -62,9 +62,21 @@ func addMissingPort(addr string, isTLS bool) string {
return addr
}
if isTLS {
return addr + ":443"
return concatTwoStrings(addr, ":443")
}
return addr + ":80"
return concatTwoStrings(addr, ":80")
}
func concatTwoStrings(x, y string) string {
bb := bbPool.Get()
b := bb.B[:0]
b = append(b, x...)
b = append(b, y...)
s := bytesutil.ToUnsafeString(b)
s = bytesutil.InternString(s)
bb.B = b
bbPool.Put(bb)
return s
}
func newClient(sw *ScrapeWork) *client {

View file

@ -9,7 +9,6 @@ import (
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
@ -19,7 +18,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/azure"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
@ -230,23 +228,7 @@ func (cfg *Config) getJobNames() []string {
type GlobalConfig struct {
ScrapeInterval *promutils.Duration `yaml:"scrape_interval,omitempty"`
ScrapeTimeout *promutils.Duration `yaml:"scrape_timeout,omitempty"`
ExternalLabels map[string]string `yaml:"external_labels,omitempty"`
}
func (gc *GlobalConfig) getExternalLabels() []prompbmarshal.Label {
externalLabels := gc.ExternalLabels
if len(externalLabels) == 0 {
return nil
}
labels := make([]prompbmarshal.Label, 0, len(externalLabels))
for name, value := range externalLabels {
labels = append(labels, prompbmarshal.Label{
Name: name,
Value: value,
})
}
promrelabel.SortLabels(labels)
return labels
ExternalLabels *promutils.Labels `yaml:"external_labels,omitempty"`
}
// ScrapeConfig represents essential parts for `scrape_config` section of Prometheus config.
@ -301,8 +283,8 @@ type ScrapeConfig struct {
}
func (sc *ScrapeConfig) mustStart(baseDir string) {
swosFunc := func(metaLabels map[string]string) interface{} {
target := metaLabels["__address__"]
swosFunc := func(metaLabels *promutils.Labels) interface{} {
target := metaLabels.Get("__address__")
sw, err := sc.swc.getScrapeWork(target, nil, metaLabels)
if err != nil {
logger.Errorf("cannot create kubernetes_sd_config target %q for job_name %q: %s", target, sc.swc.jobName, err)
@ -367,7 +349,7 @@ type FileSDConfig struct {
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config
type StaticConfig struct {
Targets []string `yaml:"targets"`
Labels map[string]string `yaml:"labels,omitempty"`
Labels *promutils.Labels `yaml:"labels,omitempty"`
}
func loadStaticConfigs(path string) ([]StaticConfig, error) {
@ -723,7 +705,7 @@ func (cfg *Config) getFileSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
// Create a map for the previous scrape work.
swsMapPrev := make(map[string][]*ScrapeWork)
for _, sw := range prev {
filepath := promrelabel.GetLabelValueByName(sw.Labels, "__vm_filepath")
filepath := sw.Labels.Get("__vm_filepath")
if len(filepath) == 0 {
logger.Panicf("BUG: missing `__vm_filepath` label")
} else {
@ -960,7 +942,7 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
if (*streamParse || sc.StreamParse) && sc.SeriesLimit > 0 {
return nil, fmt.Errorf("cannot use stream parsing mode when `series_limit` is set for `job_name` %q", jobName)
}
externalLabels := globalCfg.getExternalLabels()
externalLabels := globalCfg.ExternalLabels
noStaleTracking := *noStaleMarkers
if sc.NoStaleMarkers != nil {
noStaleTracking = *sc.NoStaleMarkers
@ -1010,7 +992,7 @@ type scrapeWorkConfig struct {
honorLabels bool
honorTimestamps bool
denyRedirects bool
externalLabels []prompbmarshal.Label
externalLabels *promutils.Labels
relabelConfigs *promrelabel.ParsedConfigs
metricRelabelConfigs *promrelabel.ParsedConfigs
sampleLimit int
@ -1024,7 +1006,7 @@ type scrapeWorkConfig struct {
}
type targetLabelsGetter interface {
GetLabels(baseDir string) ([]map[string]string, error)
GetLabels(baseDir string) ([]*promutils.Labels, error)
}
func appendSDScrapeWork(dst []*ScrapeWork, sdc targetLabelsGetter, baseDir string, swc *scrapeWorkConfig, discoveryType string) ([]*ScrapeWork, bool) {
@ -1036,7 +1018,7 @@ func appendSDScrapeWork(dst []*ScrapeWork, sdc targetLabelsGetter, baseDir strin
return appendScrapeWorkForTargetLabels(dst, swc, targetLabels, discoveryType), true
}
func appendScrapeWorkForTargetLabels(dst []*ScrapeWork, swc *scrapeWorkConfig, targetLabels []map[string]string, discoveryType string) []*ScrapeWork {
func appendScrapeWorkForTargetLabels(dst []*ScrapeWork, swc *scrapeWorkConfig, targetLabels []*promutils.Labels, discoveryType string) []*ScrapeWork {
startTime := time.Now()
// Process targetLabels in parallel in order to reduce processing time for a big number of targetLabels.
type result struct {
@ -1045,11 +1027,11 @@ func appendScrapeWorkForTargetLabels(dst []*ScrapeWork, swc *scrapeWorkConfig, t
}
goroutines := cgroup.AvailableCPUs()
resultCh := make(chan result, len(targetLabels))
workCh := make(chan map[string]string, goroutines)
workCh := make(chan *promutils.Labels, goroutines)
for i := 0; i < goroutines; i++ {
go func() {
for metaLabels := range workCh {
target := metaLabels["__address__"]
target := metaLabels.Get("__address__")
sw, err := swc.getScrapeWork(target, nil, metaLabels)
if err != nil {
err = fmt.Errorf("skipping %s target %q for job_name %q because of error: %w", discoveryType, target, swc.jobName, err)
@ -1080,6 +1062,8 @@ func appendScrapeWorkForTargetLabels(dst []*ScrapeWork, swc *scrapeWorkConfig, t
}
func (sdc *FileSDConfig) appendScrapeWork(dst []*ScrapeWork, swsMapPrev map[string][]*ScrapeWork, baseDir string, swc *scrapeWorkConfig) []*ScrapeWork {
metaLabels := promutils.GetLabels()
defer promutils.PutLabels(metaLabels)
for _, file := range sdc.Files {
pathPattern := fs.GetFilepath(baseDir, file)
paths := []string{pathPattern}
@ -1112,10 +1096,9 @@ func (sdc *FileSDConfig) appendScrapeWork(dst []*ScrapeWork, swsMapPrev map[stri
pathShort = pathShort[1:]
}
}
metaLabels := map[string]string{
"__meta_filepath": pathShort,
"__vm_filepath": path, // This label is needed for internal promscrape logic
}
metaLabels.Reset()
metaLabels.Add("__meta_filepath", pathShort)
metaLabels.Add("__vm_filepath", path) // This label is needed for internal promscrape logic
for i := range stcs {
dst = stcs[i].appendScrapeWork(dst, swc, metaLabels)
}
@ -1124,7 +1107,7 @@ func (sdc *FileSDConfig) appendScrapeWork(dst []*ScrapeWork, swsMapPrev map[stri
return dst
}
func (stc *StaticConfig) appendScrapeWork(dst []*ScrapeWork, swc *scrapeWorkConfig, metaLabels map[string]string) []*ScrapeWork {
func (stc *StaticConfig) appendScrapeWork(dst []*ScrapeWork, swc *scrapeWorkConfig, metaLabels *promutils.Labels) []*ScrapeWork {
for _, target := range stc.Targets {
if target == "" {
// Do not return this error, since other targets may be valid
@ -1144,8 +1127,8 @@ func (stc *StaticConfig) appendScrapeWork(dst []*ScrapeWork, swc *scrapeWorkConf
return dst
}
func appendScrapeWorkKey(dst []byte, labels []prompbmarshal.Label) []byte {
for _, label := range labels {
func appendScrapeWorkKey(dst []byte, labels *promutils.Labels) []byte {
for _, label := range labels.GetLabels() {
// Do not use strconv.AppendQuote, since it is slow according to CPU profile.
dst = append(dst, label.Name...)
dst = append(dst, '=')
@ -1176,45 +1159,20 @@ func needSkipScrapeWork(key string, membersCount, replicasCount, memberNum int)
return true
}
type labelsContext struct {
labels []prompbmarshal.Label
}
func getLabelsContext() *labelsContext {
v := labelsContextPool.Get()
if v == nil {
return &labelsContext{}
}
return v.(*labelsContext)
}
func putLabelsContext(lctx *labelsContext) {
labels := lctx.labels
for i := range labels {
labels[i].Name = ""
labels[i].Value = ""
}
lctx.labels = lctx.labels[:0]
labelsContextPool.Put(lctx)
}
var labelsContextPool sync.Pool
var scrapeWorkKeyBufPool bytesutil.ByteBufferPool
func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabels map[string]string) (*ScrapeWork, error) {
lctx := getLabelsContext()
defer putLabelsContext(lctx)
func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabels *promutils.Labels) (*ScrapeWork, error) {
labels := promutils.GetLabels()
defer promutils.PutLabels(labels)
labels := mergeLabels(lctx.labels[:0], swc, target, extraLabels, metaLabels)
var originalLabels []prompbmarshal.Label
mergeLabels(labels, swc, target, extraLabels, metaLabels)
var originalLabels *promutils.Labels
if !*dropOriginalLabels {
originalLabels = append([]prompbmarshal.Label{}, labels...)
originalLabels = labels.Clone()
}
labels = swc.relabelConfigs.Apply(labels, 0)
labels.Labels = swc.relabelConfigs.Apply(labels.Labels, 0)
// Remove labels starting from "__meta_" prefix according to https://www.robustperception.io/life-of-a-label/
labels = promrelabel.RemoveMetaLabels(labels[:0], labels)
lctx.labels = labels
labels.RemoveMetaLabels()
// Verify whether the scrape work must be skipped because of `-promscrape.cluster.*` configs.
// Perform the verification on labels after the relabeling in order to guarantee that targets with the same set of labels
@ -1230,25 +1188,25 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
}
}
if !*dropOriginalLabels {
promrelabel.SortLabels(originalLabels)
originalLabels.Sort()
// Reduce memory usage by interning all the strings in originalLabels.
internLabelStrings(originalLabels)
originalLabels.InternStrings()
}
if len(labels) == 0 {
if labels.Len() == 0 {
// Drop target without labels.
droppedTargetsMap.Register(originalLabels)
return nil, nil
}
// See https://www.robustperception.io/life-of-a-label
scheme := promrelabel.GetLabelValueByName(labels, "__scheme__")
scheme := labels.Get("__scheme__")
if len(scheme) == 0 {
scheme = "http"
}
metricsPath := promrelabel.GetLabelValueByName(labels, "__metrics_path__")
metricsPath := labels.Get("__metrics_path__")
if len(metricsPath) == 0 {
metricsPath = "/metrics"
}
address := promrelabel.GetLabelValueByName(labels, "__address__")
address := labels.Get("__address__")
if len(address) == 0 {
// Drop target without scrape address.
droppedTargetsMap.Register(originalLabels)
@ -1271,7 +1229,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
address = addMissingPort(address, scheme == "https")
var at *auth.Token
tenantID := promrelabel.GetLabelValueByName(labels, "__tenant_id__")
tenantID := labels.Get("__tenant_id__")
if len(tenantID) > 0 {
newToken, err := auth.NewToken(tenantID)
if err != nil {
@ -1292,14 +1250,14 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
}
}
paramsStr := url.Values(params).Encode()
scrapeURL := fmt.Sprintf("%s://%s%s%s%s", scheme, address, metricsPath, optionalQuestion, paramsStr)
scrapeURL := getScrapeURL(scheme, address, metricsPath, optionalQuestion, paramsStr)
if _, err := url.Parse(scrapeURL); err != nil {
return nil, fmt.Errorf("invalid url %q for scheme=%q, target=%q, address=%q, metrics_path=%q for job=%q: %w",
scrapeURL, scheme, target, address, metricsPath, swc.jobName, err)
}
// Read __scrape_interval__ and __scrape_timeout__ from labels.
scrapeInterval := swc.scrapeInterval
if s := promrelabel.GetLabelValueByName(labels, "__scrape_interval__"); len(s) > 0 {
if s := labels.Get("__scrape_interval__"); len(s) > 0 {
d, err := promutils.ParseDuration(s)
if err != nil {
return nil, fmt.Errorf("cannot parse __scrape_interval__=%q: %w", s, err)
@ -1307,7 +1265,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
scrapeInterval = d
}
scrapeTimeout := swc.scrapeTimeout
if s := promrelabel.GetLabelValueByName(labels, "__scrape_timeout__"); len(s) > 0 {
if s := labels.Get("__scrape_timeout__"); len(s) > 0 {
d, err := promutils.ParseDuration(s)
if err != nil {
return nil, fmt.Errorf("cannot parse __scrape_timeout__=%q: %w", s, err)
@ -1317,7 +1275,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
// Read series_limit option from __series_limit__ label.
// See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
seriesLimit := swc.seriesLimit
if s := promrelabel.GetLabelValueByName(labels, "__series_limit__"); len(s) > 0 {
if s := labels.Get("__series_limit__"); len(s) > 0 {
n, err := strconv.Atoi(s)
if err != nil {
return nil, fmt.Errorf("cannot parse __series_limit__=%q: %w", s, err)
@ -1327,7 +1285,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
// Read stream_parse option from __stream_parse__ label.
// See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
streamParse := swc.streamParse
if s := promrelabel.GetLabelValueByName(labels, "__stream_parse__"); len(s) > 0 {
if s := labels.Get("__stream_parse__"); len(s) > 0 {
b, err := strconv.ParseBool(s)
if err != nil {
return nil, fmt.Errorf("cannot parse __stream_parse__=%q: %w", s, err)
@ -1335,22 +1293,19 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
streamParse = b
}
// Remove labels with "__" prefix according to https://www.robustperception.io/life-of-a-label/
labels = promrelabel.RemoveLabelsWithDoubleDashPrefix(labels[:0], labels)
// Remove references to deleted labels, so the GC can free strings for label names and values past len(labels)
labels.RemoveLabelsWithDoubleUnderscorePrefix()
// Add missing "instance" label according to https://www.robustperception.io/life-of-a-label
if labels.Get("instance") == "" {
labels.Add("instance", address)
}
// Remove references to deleted labels, so the GC can free strings for label names and values past len(labels.Labels).
// This should reduce memory usage when relabeling creates a big number of temporary labels with long names and/or values.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
labelsCopy := make([]prompbmarshal.Label, len(labels)+1)
labels = append(labelsCopy[:0], labels...)
// Add missing "instance" label according to https://www.robustperception.io/life-of-a-label
if promrelabel.GetLabelByName(labels, "instance") == nil {
labels = append(labels, prompbmarshal.Label{
Name: "instance",
Value: address,
})
}
promrelabel.SortLabels(labels)
labelsCopy := labels.Clone()
// Sort labels in alphabetical order of their names.
labelsCopy.Sort()
// Reduce memory usage by interning all the strings in labels.
internLabelStrings(labels)
labelsCopy.InternStrings()
sw := &ScrapeWork{
ScrapeURL: scrapeURL,
@ -1360,7 +1315,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
HonorTimestamps: swc.honorTimestamps,
DenyRedirects: swc.denyRedirects,
OriginalLabels: originalLabels,
Labels: labels,
Labels: labelsCopy,
ExternalLabels: swc.externalLabels,
ProxyURL: swc.proxyURL,
ProxyAuthConfig: swc.proxyAuthConfig,
@ -1381,19 +1336,26 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
return sw, nil
}
func internLabelStrings(labels []prompbmarshal.Label) {
for i := range labels {
label := &labels[i]
label.Name = bytesutil.InternString(label.Name)
label.Value = bytesutil.InternString(label.Value)
}
func getScrapeURL(scheme, address, metricsPath, optionalQuestion, paramsStr string) string {
bb := bbPool.Get()
b := bb.B[:0]
b = append(b, scheme...)
b = append(b, "://"...)
b = append(b, address...)
b = append(b, metricsPath...)
b = append(b, optionalQuestion...)
b = append(b, paramsStr...)
s := bytesutil.ToUnsafeString(b)
s = bytesutil.InternString(s)
bb.B = b
bbPool.Put(bb)
return s
}
func getParamsFromLabels(labels []prompbmarshal.Label, paramsOrig map[string][]string) map[string][]string {
func getParamsFromLabels(labels *promutils.Labels, paramsOrig map[string][]string) map[string][]string {
// See https://www.robustperception.io/life-of-a-label
m := make(map[string][]string)
for i := range labels {
label := &labels[i]
var m map[string][]string
for _, label := range labels.GetLabels() {
if !strings.HasPrefix(label.Name, "__param_") {
continue
}
@ -1402,79 +1364,36 @@ func getParamsFromLabels(labels []prompbmarshal.Label, paramsOrig map[string][]s
if p := paramsOrig[name]; len(p) > 1 {
values = append(values, p[1:]...)
}
if m == nil {
m = make(map[string][]string)
}
m[name] = values
}
return m
}
func mergeLabels(dst []prompbmarshal.Label, swc *scrapeWorkConfig, target string, extraLabels, metaLabels map[string]string) []prompbmarshal.Label {
if len(dst) > 0 {
logger.Panicf("BUG: len(dst) must be 0; got %d", len(dst))
func mergeLabels(dst *promutils.Labels, swc *scrapeWorkConfig, target string, extraLabels, metaLabels *promutils.Labels) {
if n := dst.Len(); n > 0 {
logger.Panicf("BUG: len(dst.Labels) must be 0; got %d", n)
}
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
dst = appendLabel(dst, "job", swc.jobName)
dst = appendLabel(dst, "__address__", target)
dst = appendLabel(dst, "__scheme__", swc.scheme)
dst = appendLabel(dst, "__metrics_path__", swc.metricsPath)
dst = appendLabel(dst, "__scrape_interval__", swc.scrapeIntervalString)
dst = appendLabel(dst, "__scrape_timeout__", swc.scrapeTimeoutString)
dst.Add("job", swc.jobName)
dst.Add("__address__", target)
dst.Add("__scheme__", swc.scheme)
dst.Add("__metrics_path__", swc.metricsPath)
dst.Add("__scrape_interval__", swc.scrapeIntervalString)
dst.Add("__scrape_timeout__", swc.scrapeTimeoutString)
for k, args := range swc.params {
if len(args) == 0 {
continue
}
k = "__param_" + k
v := args[0]
dst = appendLabel(dst, k, v)
dst.Add(k, v)
}
for k, v := range extraLabels {
dst = appendLabel(dst, k, v)
}
for k, v := range metaLabels {
dst = appendLabel(dst, k, v)
}
if len(dst) < 2 {
return dst
}
// Remove duplicate labels if any.
// Stable sorting is needed in order to preserve the order of labels with identical names.
// This allows removing all labels with duplicate names except the last one.
promrelabel.SortLabelsStable(dst)
prevName := dst[0].Name
hasDuplicateLabels := false
for _, label := range dst[1:] {
if label.Name == prevName {
hasDuplicateLabels = true
break
}
prevName = label.Name
}
if !hasDuplicateLabels {
return dst
}
prevName = dst[0].Name
tmp := dst[:1]
for _, label := range dst[1:] {
if label.Name == prevName {
tmp[len(tmp)-1] = label
} else {
tmp = append(tmp, label)
prevName = label.Name
}
}
tail := dst[len(tmp):]
for i := range tail {
label := &tail[i]
label.Name = ""
label.Value = ""
}
return tmp
}
func appendLabel(dst []prompbmarshal.Label, name, value string) []prompbmarshal.Label {
return append(dst, prompbmarshal.Label{
Name: name,
Value: value,
})
dst.AddFrom(extraLabels)
dst.AddFrom(metaLabels)
dst.RemoveDuplicates()
}
const (

View file
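Most of this file's churn is mechanical: `[]prompbmarshal.Label` plus free helper functions becomes the `*promutils.Labels` type with methods. A quick sketch of the calls used above, with the method set inferred from the call sites in this diff (so read it as a summary of the diff, not full API documentation):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

func main() {
	labels := promutils.NewLabels(8)
	labels.Add("job", "node-exporter")
	labels.Add("__address__", "host1:9100")
	labels.Add("job", "override") // duplicates are allowed until deduped

	labels.RemoveDuplicates()      // one label per name survives
	fmt.Println(labels.Get("job")) // override (assuming last-wins, as in the removed inline dedup)
	fmt.Println(labels.Len())      // 2

	snapshot := labels.Clone() // deep copy, detached from any pooled storage
	snapshot.Sort()            // alphabetical order by label name
	snapshot.InternStrings()   // share name/value strings across targets
	fmt.Println(snapshot.String())
}
```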

@ -10,7 +10,6 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/gce"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
@ -18,11 +17,13 @@ import (
)
func TestMergeLabels(t *testing.T) {
f := func(swc *scrapeWorkConfig, target string, extraLabels, metaLabels map[string]string, resultExpected string) {
f := func(swc *scrapeWorkConfig, target string, extraLabelsMap, metaLabelsMap map[string]string, resultExpected string) {
t.Helper()
var labels []prompbmarshal.Label
labels = mergeLabels(labels[:0], swc, target, extraLabels, metaLabels)
result := promLabelsString(labels)
extraLabels := promutils.NewLabelsFromMap(extraLabelsMap)
metaLabels := promutils.NewLabelsFromMap(metaLabelsMap)
labels := promutils.NewLabels(0)
mergeLabels(labels, swc, target, extraLabels, metaLabels)
result := labels.String()
if result != resultExpected {
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
}
@ -247,16 +248,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "host1:80",
},
{
Name: "job",
Value: "abc",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "host1:80",
"job": "abc",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "abc",
@ -266,16 +261,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "host2:443",
},
{
Name: "job",
Value: "abc",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "host2:443",
"job": "abc",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "abc",
@ -285,16 +274,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "host3:1234",
},
{
Name: "job",
Value: "abc",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "host3:1234",
"job": "abc",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "abc",
@ -304,16 +287,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "host4:1234",
},
{
Name: "job",
Value: "abc",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "host4:1234",
"job": "abc",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "abc",
@ -358,16 +335,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "8.8.8.8",
},
{
Name: "job",
Value: "blackbox",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "8.8.8.8",
"job": "blackbox",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "blackbox",
@ -770,8 +741,9 @@ func TestGetFileSDScrapeWorkSuccess(t *testing.T) {
// Remove `__vm_filepath` label, since its value depends on the current working dir.
for _, sw := range sws {
for j := range sw.Labels {
label := &sw.Labels[j]
labels := sw.Labels.GetLabels()
for j := range labels {
label := &labels[j]
if label.Name == "__vm_filepath" {
label.Value = ""
}
@ -799,24 +771,12 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__vm_filepath",
Value: "",
},
{
Name: "instance",
Value: "host1:80",
},
{
Name: "job",
Value: "foo",
},
{
Name: "qwe",
Value: "rty",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"__vm_filepath": "",
"instance": "host1:80",
"job": "foo",
"qwe": "rty",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -826,24 +786,12 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__vm_filepath",
Value: "",
},
{
Name: "instance",
Value: "host2:80",
},
{
Name: "job",
Value: "foo",
},
{
Name: "qwe",
Value: "rty",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"__vm_filepath": "",
"instance": "host2:80",
"job": "foo",
"qwe": "rty",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -853,24 +801,12 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__vm_filepath",
Value: "",
},
{
Name: "instance",
Value: "localhost:9090",
},
{
Name: "job",
Value: "foo",
},
{
Name: "yml",
Value: "test",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"__vm_filepath": "",
"instance": "localhost:9090",
"job": "foo",
"yml": "test",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -902,16 +838,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "foo.bar:1234",
},
{
Name: "job",
Value: "foo",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "foo.bar:1234",
"job": "foo",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -932,26 +862,14 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "foo.bar:1234",
},
{
Name: "job",
Value: "foo",
},
},
ExternalLabels: []prompbmarshal.Label{
{
Name: "datacenter",
Value: "foobar",
},
{
Name: "jobs",
Value: "xxx",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "foo.bar:1234",
"job": "foo",
}),
ExternalLabels: promutils.NewLabelsFromMap(map[string]string{
"datacenter": "foobar",
"jobs": "xxx",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -996,20 +914,11 @@ scrape_configs:
HonorLabels: true,
HonorTimestamps: false,
DenyRedirects: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "foo.bar:443",
},
{
Name: "job",
Value: "foo",
},
{
Name: "x",
Value: "y",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "foo.bar:443",
"job": "foo",
"x": "y",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
ProxyURL: proxy.MustNewURL("http://foo.bar"),
@ -1022,20 +931,11 @@ scrape_configs:
HonorLabels: true,
HonorTimestamps: false,
DenyRedirects: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "aaa:443",
},
{
Name: "job",
Value: "foo",
},
{
Name: "x",
Value: "y",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "aaa:443",
"job": "foo",
"x": "y",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
ProxyURL: proxy.MustNewURL("http://foo.bar"),
@ -1046,16 +946,10 @@ scrape_configs:
ScrapeInterval: 8 * time.Second,
ScrapeTimeout: 8 * time.Second,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "1.2.3.4:80",
},
{
Name: "job",
Value: "qwer",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "1.2.3.4:80",
"job": "qwer",
}),
AuthConfig: &promauth.Config{
TLSServerName: "foobar",
TLSInsecureSkipVerify: true,
@ -1068,16 +962,10 @@ scrape_configs:
ScrapeInterval: 8 * time.Second,
ScrapeTimeout: 8 * time.Second,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "foobar:80",
},
{
Name: "job",
Value: "asdf",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "foobar:80",
"job": "asdf",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "asdf",
@ -1124,24 +1012,12 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "hash",
Value: "82",
},
{
Name: "instance",
Value: "foo.bar:1234",
},
{
Name: "prefix:url",
Value: "http://foo.bar:1234/metrics",
},
{
Name: "url",
Value: "http://foo.bar:1234/metrics",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"hash": "82",
"instance": "foo.bar:1234",
"prefix:url": "http://foo.bar:1234/metrics",
"url": "http://foo.bar:1234/metrics",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -1180,16 +1056,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "fake.addr",
},
{
Name: "job",
Value: "https",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "fake.addr",
"job": "https",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -1221,16 +1091,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "foo.bar:1234",
},
{
Name: "job",
Value: "3",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "foo.bar:1234",
"job": "3",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -1251,16 +1115,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "foo.bar:1234",
},
{
Name: "job",
Value: "foo",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "foo.bar:1234",
"job": "foo",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -1277,16 +1135,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "foo.bar:1234",
},
{
Name: "job",
Value: "foo",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "foo.bar:1234",
"job": "foo",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -1303,16 +1155,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "foo.bar:1234",
},
{
Name: "job",
Value: "foo",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "foo.bar:1234",
"job": "foo",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",
@ -1343,42 +1189,18 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "foo",
Value: "bar",
},
{
Name: "instance",
Value: "pp:80",
},
{
Name: "job",
Value: "yyy",
},
},
ExternalLabels: []prompbmarshal.Label{
{
Name: "__address__",
Value: "aaasdf",
},
{
Name: "__param_a",
Value: "jlfd",
},
{
Name: "foo",
Value: "xx",
},
{
Name: "job",
Value: "foobar",
},
{
Name: "q",
Value: "qwe",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"foo": "bar",
"instance": "pp:80",
"job": "yyy",
}),
ExternalLabels: promutils.NewLabelsFromMap(map[string]string{
"__address__": "aaasdf",
"__param_a": "jlfd",
"foo": "xx",
"job": "foobar",
"q": "qwe",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "aaa",
@ -1434,16 +1256,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "192.168.1.2",
},
{
Name: "job",
Value: "snmp",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "192.168.1.2",
"job": "snmp",
}),
AuthConfig: ac,
ProxyAuthConfig: proxyAC,
SampleLimit: 100,
@ -1470,16 +1286,10 @@ scrape_configs:
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "foo.bar:1234",
},
{
Name: "job",
Value: "path wo slash",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "foo.bar:1234",
"job": "path wo slash",
}),
jobNameOriginal: "path wo slash",
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
@ -1505,16 +1315,10 @@ scrape_configs:
ScrapeOffset: time.Hour * 24 * 2,
HonorTimestamps: true,
NoStaleMarkers: true,
Labels: []prompbmarshal.Label{
{
Name: "instance",
Value: "foo.bar:1234",
},
{
Name: "job",
Value: "foo",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{
"instance": "foo.bar:1234",
"job": "foo",
}),
AuthConfig: &promauth.Config{},
ProxyAuthConfig: &promauth.Config{},
jobNameOriginal: "foo",

View file

@ -1,31 +1,42 @@
package promscrape
import (
"fmt"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func BenchmarkInternLabelStrings(b *testing.B) {
func BenchmarkGetScrapeWork(b *testing.B) {
swc := &scrapeWorkConfig{
jobName: "job-1",
scheme: "http",
metricsPath: "/metrics",
scrapeIntervalString: "30s",
scrapeTimeoutString: "10s",
}
target := "host1.com:1234"
extraLabels := promutils.NewLabelsFromMap(map[string]string{
"env": "prod",
"datacenter": "dc-foo",
})
metaLabels := promutils.NewLabelsFromMap(map[string]string{
"__meta_foo": "bar",
"__meta_kubernetes_namespace": "default",
"__address__": "foobar.com",
"__meta_sfdfdf_dsfds_fdfdfds_fdfdfd": "true",
})
b.ReportAllocs()
b.SetBytes(1)
b.RunParallel(func(pb *testing.PB) {
labels := []prompbmarshal.Label{
{
Name: "job",
Value: "node-exporter",
},
{
Name: "instance",
Value: "foo.bar.baz:1234",
},
{
Name: "__meta_kubernetes_namespace",
Value: "default",
},
}
for pb.Next() {
internLabelStrings(labels)
sw, err := swc.getScrapeWork(target, extraLabels, metaLabels)
if err != nil {
panic(fmt.Errorf("BUG: getScrapeWork returned non-nil error: %w", err))
}
if sw == nil {
panic(fmt.Errorf("BUG: getScrapeWork returned nil ScrapeWork"))
}
}
})
}

View file
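The rewritten benchmark above exercises `getScrapeWork`, which now leans on pooled label sets (`promutils.GetLabels` / `promutils.PutLabels`) instead of the removed hand-rolled `labelsContext` pool. A hedged sketch of the acquire/reset/release pattern as it appears in this diff (the `buildTargetKey` helper is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

func buildTargetKey(job, addr string) string {
	// Acquire a pooled label set and return it when done,
	// as getScrapeWork and FileSDConfig.appendScrapeWork do above.
	labels := promutils.GetLabels()
	defer promutils.PutLabels(labels)

	labels.Add("job", job)
	labels.Add("__address__", addr)
	key := labels.String() // allocates its own string, so it outlives the pool

	labels.Reset() // clears in place, as the file-SD loop above does between files
	return key
}

func main() {
	fmt.Println(buildTargetKey("node-exporter", "host1:9100"))
}
```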

@ -8,6 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
)
@ -43,7 +44,7 @@ type SDConfig struct {
}
// GetLabels returns Azure labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
ac, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %w", err)
@ -60,8 +61,8 @@ func (sdc *SDConfig) MustStop() {
configMap.Delete(sdc)
}
func appendMachineLabels(vms []virtualMachine, port int, sdc *SDConfig) []map[string]string {
ms := make([]map[string]string, 0, len(vms))
func appendMachineLabels(vms []virtualMachine, port int, sdc *SDConfig) []*promutils.Labels {
ms := make([]*promutils.Labels, 0, len(vms))
for i := range vms {
vm := &vms[i]
for _, ips := range vm.ipAddresses {
@ -69,36 +70,35 @@ func appendMachineLabels(vms []virtualMachine, port int, sdc *SDConfig) []map[st
continue
}
addr := discoveryutils.JoinHostPort(ips.privateIP, port)
m := map[string]string{
"__address__": addr,
"__meta_azure_subscription_id": sdc.SubscriptionID,
"__meta_azure_machine_id": vm.ID,
"__meta_azure_machine_name": vm.Name,
"__meta_azure_machine_location": vm.Location,
"__meta_azure_machine_private_ip": ips.privateIP,
}
m := promutils.NewLabels(16)
m.Add("__address__", addr)
m.Add("__meta_azure_subscription_id", sdc.SubscriptionID)
m.Add("__meta_azure_machine_id", vm.ID)
m.Add("__meta_azure_machine_name", vm.Name)
m.Add("__meta_azure_machine_location", vm.Location)
m.Add("__meta_azure_machine_private_ip", ips.privateIP)
if sdc.TenantID != "" {
m["__meta_azure_tenant_id"] = sdc.TenantID
m.Add("__meta_azure_tenant_id", sdc.TenantID)
}
// /subscriptions/SUBSCRIPTION_ID/resourceGroups/RESOURCE_GROUP/providers/PROVIDER/TYPE/NAME
idPath := strings.Split(vm.ID, "/")
if len(idPath) > 4 {
m["__meta_azure_machine_resource_group"] = idPath[4]
m.Add("__meta_azure_machine_resource_group", idPath[4])
}
if vm.Properties.StorageProfile.OsDisk.OsType != "" {
m["__meta_azure_machine_os_type"] = vm.Properties.StorageProfile.OsDisk.OsType
m.Add("__meta_azure_machine_os_type", vm.Properties.StorageProfile.OsDisk.OsType)
}
if vm.Properties.OsProfile.ComputerName != "" {
m["__meta_azure_machine_computer_name"] = vm.Properties.OsProfile.ComputerName
m.Add("__meta_azure_machine_computer_name", vm.Properties.OsProfile.ComputerName)
}
if ips.publicIP != "" {
m["__meta_azure_machine_public_ip"] = ips.publicIP
m.Add("__meta_azure_machine_public_ip", ips.publicIP)
}
if vm.scaleSet != "" {
m["__meta_azure_machine_scale_set"] = vm.scaleSet
m.Add("__meta_azure_machine_scale_set", vm.scaleSet)
}
for k, v := range vm.Tags {
m[discoveryutils.SanitizeLabelName("__meta_azure_machine_tag_"+k)] = v
m.Add(discoveryutils.SanitizeLabelName("__meta_azure_machine_tag_"+k), v)
}
ms = append(ms, m)
}

View file

@ -1,24 +1,17 @@
package azure
import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestAppendMachineLabels(t *testing.T) {
f := func(name string, vms []virtualMachine, expectedLabels [][]prompbmarshal.Label) {
f := func(name string, vms []virtualMachine, expectedLabels []*promutils.Labels) {
t.Run(name, func(t *testing.T) {
labelss := appendMachineLabels(vms, 80, &SDConfig{SubscriptionID: "some-id"})
var sortedLabelss [][]prompbmarshal.Label
for _, labels := range labelss {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
if !reflect.DeepEqual(sortedLabelss, expectedLabels) {
t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, expectedLabels)
}
discoveryutils.TestEqualLabelss(t, labelss, expectedLabels)
})
}
f("single vm", []virtualMachine{
@ -33,8 +26,8 @@ func TestAppendMachineLabels(t *testing.T) {
{privateIP: "10.10.10.1"},
},
},
}, [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
}, []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.10.10.1:80",
"__meta_azure_machine_id": "id-2",
"__meta_azure_subscription_id": "some-id",

View file

@ -4,6 +4,7 @@ import (
"fmt"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
)
@ -38,7 +39,7 @@ type SDConfig struct {
}
// GetLabels returns Consul labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
cfg, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %w", err)

View file

@ -7,12 +7,13 @@ import (
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// getServiceNodesLabels returns labels for Consul service nodes with given cfg.
func getServiceNodesLabels(cfg *apiConfig) []map[string]string {
func getServiceNodesLabels(cfg *apiConfig) []*promutils.Labels {
sns := cfg.consulWatcher.getServiceNodesSnapshot()
var ms []map[string]string
var ms []*promutils.Labels
for svc, sn := range sns {
for i := range sn {
ms = sn[i].appendTargetLabels(ms, svc, cfg.tagSeparator)
@ -71,38 +72,37 @@ func parseServiceNodes(data []byte) ([]ServiceNode, error) {
return sns, nil
}
func (sn *ServiceNode) appendTargetLabels(ms []map[string]string, serviceName, tagSeparator string) []map[string]string {
func (sn *ServiceNode) appendTargetLabels(ms []*promutils.Labels, serviceName, tagSeparator string) []*promutils.Labels {
var addr string
if sn.Service.Address != "" {
addr = discoveryutils.JoinHostPort(sn.Service.Address, sn.Service.Port)
} else {
addr = discoveryutils.JoinHostPort(sn.Node.Address, sn.Service.Port)
}
m := map[string]string{
"__address__": addr,
"__meta_consul_address": sn.Node.Address,
"__meta_consul_dc": sn.Node.Datacenter,
"__meta_consul_health": aggregatedStatus(sn.Checks),
"__meta_consul_namespace": sn.Service.Namespace,
"__meta_consul_partition": sn.Service.Partition,
"__meta_consul_node": sn.Node.Node,
"__meta_consul_service": serviceName,
"__meta_consul_service_address": sn.Service.Address,
"__meta_consul_service_id": sn.Service.ID,
"__meta_consul_service_port": strconv.Itoa(sn.Service.Port),
}
m := promutils.NewLabels(16)
m.Add("__address__", addr)
m.Add("__meta_consul_address", sn.Node.Address)
m.Add("__meta_consul_dc", sn.Node.Datacenter)
m.Add("__meta_consul_health", aggregatedStatus(sn.Checks))
m.Add("__meta_consul_namespace", sn.Service.Namespace)
m.Add("__meta_consul_partition", sn.Service.Partition)
m.Add("__meta_consul_node", sn.Node.Node)
m.Add("__meta_consul_service", serviceName)
m.Add("__meta_consul_service_address", sn.Service.Address)
m.Add("__meta_consul_service_id", sn.Service.ID)
m.Add("__meta_consul_service_port", strconv.Itoa(sn.Service.Port))
// We surround the separated list with the separator as well. This way regular expressions
// in relabeling rules don't have to consider tag positions.
m["__meta_consul_tags"] = tagSeparator + strings.Join(sn.Service.Tags, tagSeparator) + tagSeparator
m.Add("__meta_consul_tags", tagSeparator+strings.Join(sn.Service.Tags, tagSeparator)+tagSeparator)
for k, v := range sn.Node.Meta {
m[discoveryutils.SanitizeLabelName("__meta_consul_metadata_"+k)] = v
m.Add(discoveryutils.SanitizeLabelName("__meta_consul_metadata_"+k), v)
}
for k, v := range sn.Service.Meta {
m[discoveryutils.SanitizeLabelName("__meta_consul_service_metadata_"+k)] = v
m.Add(discoveryutils.SanitizeLabelName("__meta_consul_service_metadata_"+k), v)
}
for k, v := range sn.Node.TaggedAddresses {
m[discoveryutils.SanitizeLabelName("__meta_consul_tagged_address_"+k)] = v
m.Add(discoveryutils.SanitizeLabelName("__meta_consul_tagged_address_"+k), v)
}
ms = append(ms, m)
return ms

View file
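The comment about surrounding the tag list with the separator is worth unpacking: with tags `["primary","v1"]` and separator `,`, the label value becomes `,primary,v1,`, so a relabeling regex can anchor on `,primary,` without caring whether the tag sits first, last, or in the middle. A small sketch of that behavior:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	tags := []string{"primary", "v1"}
	tagSeparator := ","
	// Surround the joined list with the separator, as appendTargetLabels does above.
	v := tagSeparator + strings.Join(tags, tagSeparator) + tagSeparator
	fmt.Println(v) // ,primary,v1,

	// One pattern now matches the tag at any position in the list.
	re := regexp.MustCompile(`.*,primary,.*`)
	fmt.Println(re.MatchString(v)) // true
}
```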

@ -1,11 +1,10 @@
package consul
import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestParseServiceNodesFailure(t *testing.T) {
@ -108,12 +107,8 @@ func TestParseServiceNodesSuccess(t *testing.T) {
// Check sn.appendTargetLabels()
tagSeparator := ","
labelss := sn.appendTargetLabels(nil, "redis", tagSeparator)
var sortedLabelss [][]prompbmarshal.Label
for _, labels := range labelss {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
expectedLabelss := [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
expectedLabelss := []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.1.10.12:8000",
"__meta_consul_address": "10.1.10.12",
"__meta_consul_dc": "dc1",
@ -132,7 +127,5 @@ func TestParseServiceNodesSuccess(t *testing.T) {
"__meta_consul_tags": ",primary,",
}),
}
if !reflect.DeepEqual(sortedLabelss, expectedLabelss) {
t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, expectedLabelss)
}
discoveryutils.TestEqualLabelss(t, labelss, expectedLabelss)
}

View file

@ -10,6 +10,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
)
@ -30,7 +31,7 @@ type SDConfig struct {
}
// GetLabels returns Digital Ocean droplet labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
cfg, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %w", err)
@ -39,7 +40,6 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
if err != nil {
return nil, err
}
return addDropletLabels(droplets, cfg.port), nil
}
@ -115,8 +115,8 @@ func (r *listDropletResponse) nextURLPath() (string, error) {
return u.RequestURI(), nil
}
func addDropletLabels(droplets []droplet, defaultPort int) []map[string]string {
var ms []map[string]string
func addDropletLabels(droplets []droplet, defaultPort int) []*promutils.Labels {
var ms []*promutils.Labels
for _, droplet := range droplets {
if len(droplet.Networks.V4) == 0 {
continue
@ -127,27 +127,26 @@ func addDropletLabels(droplets []droplet, defaultPort int) []map[string]string {
publicIPv6 := droplet.getIPByNet("v6", "public")
addr := discoveryutils.JoinHostPort(publicIPv4, defaultPort)
m := map[string]string{
"__address__": addr,
"__meta_digitalocean_droplet_id": fmt.Sprintf("%d", droplet.ID),
"__meta_digitalocean_droplet_name": droplet.Name,
"__meta_digitalocean_image": droplet.Image.Slug,
"__meta_digitalocean_image_name": droplet.Image.Name,
"__meta_digitalocean_private_ipv4": privateIPv4,
"__meta_digitalocean_public_ipv4": publicIPv4,
"__meta_digitalocean_public_ipv6": publicIPv6,
"__meta_digitalocean_region": droplet.Region.Slug,
"__meta_digitalocean_size": droplet.SizeSlug,
"__meta_digitalocean_status": droplet.Status,
"__meta_digitalocean_vpc": droplet.VpcUUID,
}
m := promutils.NewLabels(16)
m.Add("__address__", addr)
m.Add("__meta_digitalocean_droplet_id", fmt.Sprintf("%d", droplet.ID))
m.Add("__meta_digitalocean_droplet_name", droplet.Name)
m.Add("__meta_digitalocean_image", droplet.Image.Slug)
m.Add("__meta_digitalocean_image_name", droplet.Image.Name)
m.Add("__meta_digitalocean_private_ipv4", privateIPv4)
m.Add("__meta_digitalocean_public_ipv4", publicIPv4)
m.Add("__meta_digitalocean_public_ipv6", publicIPv6)
m.Add("__meta_digitalocean_region", droplet.Region.Slug)
m.Add("__meta_digitalocean_size", droplet.SizeSlug)
m.Add("__meta_digitalocean_status", droplet.Status)
m.Add("__meta_digitalocean_vpc", droplet.VpcUUID)
if len(droplet.Features) > 0 {
features := fmt.Sprintf(",%s,", strings.Join(droplet.Features, ","))
m["__meta_digitalocean_features"] = features
m.Add("__meta_digitalocean_features", features)
}
if len(droplet.Tags) > 0 {
tags := fmt.Sprintf(",%s,", strings.Join(droplet.Tags, ","))
m["__meta_digitalocean_tags"] = tags
m.Add("__meta_digitalocean_tags", tags)
}
ms = append(ms, m)
}

View file

@ -1,11 +1,10 @@
package digitalocean
import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func Test_addDropletLabels(t *testing.T) {
@ -16,7 +15,7 @@ func Test_addDropletLabels(t *testing.T) {
tests := []struct {
name string
args args
want [][]prompbmarshal.Label
want []*promutils.Labels
}{
{
name: "base labels add test",
@ -62,8 +61,8 @@ func Test_addDropletLabels(t *testing.T) {
},
defaultPort: 9100,
},
want: [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "100.100.100.100:9100",
"__meta_digitalocean_droplet_id": "15",
"__meta_digitalocean_droplet_name": "ubuntu-1",
@ -85,14 +84,7 @@ func Test_addDropletLabels(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := addDropletLabels(tt.args.droplets, tt.args.defaultPort)
var sortedLabelss [][]prompbmarshal.Label
for _, labels := range got {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
if !reflect.DeepEqual(sortedLabelss, tt.want) {
t.Errorf("addTasksLabels() \ngot \n%v\n, \nwant \n%v\n", sortedLabelss, tt.want)
}
discoveryutils.TestEqualLabelss(t, got, tt.want)
})
}
}

View file

@ -11,6 +11,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// SDCheckInterval defines interval for targets refresh.
@ -30,7 +31,7 @@ type SDConfig struct {
}
// GetLabels returns DNS labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
if len(sdc.Names) == 0 {
return nil, fmt.Errorf("`names` cannot be empty in `dns_sd_config`")
}
@ -60,7 +61,7 @@ func (sdc *SDConfig) MustStop() {
// nothing to do
}
func getMXAddrLabels(ctx context.Context, sdc *SDConfig) []map[string]string {
func getMXAddrLabels(ctx context.Context, sdc *SDConfig) []*promutils.Labels {
port := 25
if sdc.Port != nil {
port = *sdc.Port
@ -81,7 +82,7 @@ func getMXAddrLabels(ctx context.Context, sdc *SDConfig) []map[string]string {
}
}(name)
}
var ms []map[string]string
var ms []*promutils.Labels
for range sdc.Names {
r := <-ch
if r.err != nil {
@ -99,7 +100,7 @@ func getMXAddrLabels(ctx context.Context, sdc *SDConfig) []map[string]string {
return ms
}
func getSRVAddrLabels(ctx context.Context, sdc *SDConfig) []map[string]string {
func getSRVAddrLabels(ctx context.Context, sdc *SDConfig) []*promutils.Labels {
type result struct {
name string
as []*net.SRV
@ -116,7 +117,7 @@ func getSRVAddrLabels(ctx context.Context, sdc *SDConfig) []map[string]string {
}
}(name)
}
var ms []map[string]string
var ms []*promutils.Labels
for range sdc.Names {
r := <-ch
if r.err != nil {
@ -134,7 +135,7 @@ func getSRVAddrLabels(ctx context.Context, sdc *SDConfig) []map[string]string {
return ms
}
func getAAddrLabels(ctx context.Context, sdc *SDConfig, lookupType string) ([]map[string]string, error) {
func getAAddrLabels(ctx context.Context, sdc *SDConfig, lookupType string) ([]*promutils.Labels, error) {
if sdc.Port == nil {
return nil, fmt.Errorf("missing `port` in `dns_sd_config` for `type: %s`", lookupType)
}
@ -155,7 +156,7 @@ func getAAddrLabels(ctx context.Context, sdc *SDConfig, lookupType string) ([]ma
}
}(name)
}
var ms []map[string]string
var ms []*promutils.Labels
for range sdc.Names {
r := <-ch
if r.err != nil {
@ -173,24 +174,22 @@ func getAAddrLabels(ctx context.Context, sdc *SDConfig, lookupType string) ([]ma
return ms, nil
}
func appendMXLabels(ms []map[string]string, name, target string, port int) []map[string]string {
func appendMXLabels(ms []*promutils.Labels, name, target string, port int) []*promutils.Labels {
addr := discoveryutils.JoinHostPort(target, port)
m := map[string]string{
"__address__": addr,
"__meta_dns_name": name,
"__meta_dns_mx_record_target": target,
}
m := promutils.NewLabels(3)
m.Add("__address__", addr)
m.Add("__meta_dns_name", name)
m.Add("__meta_dns_mx_record_target", target)
return append(ms, m)
}
func appendAddrLabels(ms []map[string]string, name, target string, port int) []map[string]string {
func appendAddrLabels(ms []*promutils.Labels, name, target string, port int) []*promutils.Labels {
addr := discoveryutils.JoinHostPort(target, port)
m := map[string]string{
"__address__": addr,
"__meta_dns_name": name,
"__meta_dns_srv_record_target": target,
"__meta_dns_srv_record_port": strconv.Itoa(port),
}
m := promutils.NewLabels(4)
m.Add("__address__", addr)
m.Add("__meta_dns_name", name)
m.Add("__meta_dns_srv_record_target", target)
m.Add("__meta_dns_srv_record_port", strconv.Itoa(port))
return append(ms, m)
}

View file
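All three lookup helpers above share one concurrency shape: fire a goroutine per DNS name, push each outcome into a buffered channel, then drain exactly `len(names)` results. A stripped-down sketch of that fan-out/fan-in shape (standalone stand-in types, not the actual resolver code from the file above):

```go
package main

import (
	"context"
	"fmt"
	"net"
)

type result struct {
	name  string
	addrs []string
	err   error
}

func lookupAll(ctx context.Context, names []string) []result {
	ch := make(chan result, len(names)) // buffered, so senders never block
	var r net.Resolver
	for _, name := range names {
		go func(name string) {
			addrs, err := r.LookupHost(ctx, name)
			ch <- result{name: name, addrs: addrs, err: err}
		}(name)
	}
	results := make([]result, 0, len(names))
	for range names {
		results = append(results, <-ch) // drain exactly len(names) results
	}
	return results
}

func main() {
	for _, r := range lookupAll(context.Background(), []string{"localhost"}) {
		fmt.Println(r.name, r.addrs, r.err)
	}
}
```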

@ -6,6 +6,7 @@ import (
"strconv"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// See https://github.com/moby/moby/blob/314759dc2f4745925d8dec6d15acc7761c6e5c92/docs/api/v1.41.yaml#L4024
@ -30,7 +31,7 @@ type container struct {
}
}
func getContainersLabels(cfg *apiConfig) ([]map[string]string, error) {
func getContainersLabels(cfg *apiConfig) ([]*promutils.Labels, error) {
networkLabels, err := getNetworksLabelsByNetworkID(cfg)
if err != nil {
return nil, err
@ -58,8 +59,8 @@ func parseContainers(data []byte) ([]container, error) {
return containers, nil
}
func addContainersLabels(containers []container, networkLabels map[string]map[string]string, defaultPort int, hostNetworkingHost string) []map[string]string {
var ms []map[string]string
func addContainersLabels(containers []container, networkLabels map[string]*promutils.Labels, defaultPort int, hostNetworkingHost string) []*promutils.Labels {
var ms []*promutils.Labels
for i := range containers {
c := &containers[i]
if len(c.Names) == 0 {
@ -71,16 +72,16 @@ func addContainersLabels(containers []container, networkLabels map[string]map[st
if p.Type != "tcp" {
continue
}
m := map[string]string{
"__address__": discoveryutils.JoinHostPort(n.IPAddress, p.PrivatePort),
"__meta_docker_network_ip": n.IPAddress,
"__meta_docker_port_private": strconv.Itoa(p.PrivatePort),
}
m := promutils.NewLabels(16)
m.Add("__address__", discoveryutils.JoinHostPort(n.IPAddress, p.PrivatePort))
m.Add("__meta_docker_network_ip", n.IPAddress)
m.Add("__meta_docker_port_private", strconv.Itoa(p.PrivatePort))
if p.PublicPort > 0 {
m["__meta_docker_port_public"] = strconv.Itoa(p.PublicPort)
m["__meta_docker_port_public_ip"] = p.IP
m.Add("__meta_docker_port_public", strconv.Itoa(p.PublicPort))
m.Add("__meta_docker_port_public_ip", p.IP)
}
addCommonLabels(m, c, networkLabels[n.NetworkID])
m.RemoveDuplicates()
ms = append(ms, m)
added = true
}
@ -90,11 +91,11 @@ func addContainersLabels(containers []container, networkLabels map[string]map[st
if c.HostConfig.NetworkMode != "host" {
addr = discoveryutils.JoinHostPort(n.IPAddress, defaultPort)
}
m := map[string]string{
"__address__": addr,
"__meta_docker_network_ip": n.IPAddress,
}
m := promutils.NewLabels(16)
m.Add("__address__", addr)
m.Add("__meta_docker_network_ip", n.IPAddress)
addCommonLabels(m, c, networkLabels[n.NetworkID])
m.RemoveDuplicates()
ms = append(ms, m)
}
}
@ -102,14 +103,12 @@ func addContainersLabels(containers []container, networkLabels map[string]map[st
return ms
}
func addCommonLabels(m map[string]string, c *container, networkLabels map[string]string) {
m["__meta_docker_container_id"] = c.ID
m["__meta_docker_container_name"] = c.Names[0]
m["__meta_docker_container_network_mode"] = c.HostConfig.NetworkMode
func addCommonLabels(m *promutils.Labels, c *container, networkLabels *promutils.Labels) {
m.Add("__meta_docker_container_id", c.ID)
m.Add("__meta_docker_container_name", c.Names[0])
m.Add("__meta_docker_container_network_mode", c.HostConfig.NetworkMode)
for k, v := range c.Labels {
m[discoveryutils.SanitizeLabelName("__meta_docker_container_label_"+k)] = v
}
for k, v := range networkLabels {
m[k] = v
m.Add(discoveryutils.SanitizeLabelName("__meta_docker_container_label_"+k), v)
}
m.AddFrom(networkLabels)
}

View file
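`addCommonLabels` now merges network labels via `AddFrom`, and the callers follow up with `RemoveDuplicates` because a container label and a network label may share a name. Based on the inline dedup that the removed `mergeLabels` code performed (keep the last occurrence per name), the merge presumably behaves like this sketch:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

func main() {
	m := promutils.NewLabels(4)
	m.Add("__meta_docker_network_ip", "172.17.0.2")

	networkLabels := promutils.NewLabelsFromMap(map[string]string{
		"__meta_docker_network_ip":   "172.17.0.3", // clashes with the label above
		"__meta_docker_network_name": "bridge",
	})
	m.AddFrom(networkLabels) // append everything; duplicates allowed for now
	m.RemoveDuplicates()     // one label per name survives (presumably the last added)

	fmt.Println(m.String())
}
```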

@ -3,6 +3,9 @@ package docker
import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func Test_parseContainers(t *testing.T) {
@ -314,7 +317,7 @@ func Test_addContainerLabels(t *testing.T) {
tests := []struct {
name string
c container
want []map[string]string
want []*promutils.Labels
wantErr bool
}{
{
@ -352,8 +355,8 @@ func Test_addContainerLabels(t *testing.T) {
},
},
},
want: []map[string]string{
{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "172.17.0.2:8012",
"__meta_docker_container_id": "90bc3b31aa13da5c0b11af2e228d54b38428a84e25d4e249ae9e9c95e51a0700",
"__meta_docker_container_label_com_docker_compose_config_hash": "c9f0bd5bb31921f94cff367d819a30a0cc08d4399080897a6c5cd74b983156ec",
@ -370,7 +373,7 @@ func Test_addContainerLabels(t *testing.T) {
"__meta_docker_network_ip": "172.17.0.2",
"__meta_docker_network_name": "bridge",
"__meta_docker_network_scope": "local",
},
}),
},
},
{
@ -408,8 +411,8 @@ func Test_addContainerLabels(t *testing.T) {
},
},
},
want: []map[string]string{
{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "foobar",
"__meta_docker_container_id": "90bc3b31aa13da5c0b11af2e228d54b38428a84e25d4e249ae9e9c95e51a0700",
"__meta_docker_container_label_com_docker_compose_config_hash": "c9f0bd5bb31921f94cff367d819a30a0cc08d4399080897a6c5cd74b983156ec",
@ -426,7 +429,7 @@ func Test_addContainerLabels(t *testing.T) {
"__meta_docker_network_ip": "172.17.0.2",
"__meta_docker_network_name": "bridge",
"__meta_docker_network_scope": "local",
},
}),
},
},
{
@ -475,8 +478,8 @@ func Test_addContainerLabels(t *testing.T) {
},
},
},
want: []map[string]string{
{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "172.17.0.2:8080",
"__meta_docker_container_id": "90bc3b31aa13da5c0b11af2e228d54b38428a84e25d4e249ae9e9c95e51a0700",
"__meta_docker_container_label_com_docker_compose_config_hash": "c9f0bd5bb31921f94cff367d819a30a0cc08d4399080897a6c5cd74b983156ec",
@ -496,21 +499,19 @@ func Test_addContainerLabels(t *testing.T) {
"__meta_docker_port_private": "8080",
"__meta_docker_port_public": "18081",
"__meta_docker_port_public_ip": "0.0.0.0",
},
}),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
labelsMap := addContainersLabels([]container{tt.c}, networkLabels, 8012, "foobar")
labelss := addContainersLabels([]container{tt.c}, networkLabels, 8012, "foobar")
if (err != nil) != tt.wantErr {
t.Errorf("addContainersLabels() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(labelsMap, tt.want) {
t.Errorf("addContainersLabels() \ngot %v, \nwant %v", labelsMap, tt.want)
}
discoveryutils.TestEqualLabelss(t, labelss, tt.want)
})
}
}

View file

@ -6,6 +6,7 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
)
@ -36,7 +37,7 @@ type Filter struct {
}
// GetLabels returns docker labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
cfg, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %w", err)

View file

@ -6,6 +6,7 @@ import (
"strconv"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// See https://docs.docker.com/engine/api/v1.40/#tag/Network
@ -18,7 +19,7 @@ type network struct {
Labels map[string]string
}
func getNetworksLabelsByNetworkID(cfg *apiConfig) (map[string]map[string]string, error) {
func getNetworksLabelsByNetworkID(cfg *apiConfig) (map[string]*promutils.Labels, error) {
networks, err := getNetworks(cfg)
if err != nil {
return nil, err
@ -42,18 +43,17 @@ func parseNetworks(data []byte) ([]network, error) {
return networks, nil
}
func getNetworkLabelsByNetworkID(networks []network) map[string]map[string]string {
ms := make(map[string]map[string]string)
func getNetworkLabelsByNetworkID(networks []network) map[string]*promutils.Labels {
ms := make(map[string]*promutils.Labels)
for _, network := range networks {
m := map[string]string{
"__meta_docker_network_id": network.ID,
"__meta_docker_network_name": network.Name,
"__meta_docker_network_internal": strconv.FormatBool(network.Internal),
"__meta_docker_network_ingress": strconv.FormatBool(network.Ingress),
"__meta_docker_network_scope": network.Scope,
}
m := promutils.NewLabels(8)
m.Add("__meta_docker_network_id", network.ID)
m.Add("__meta_docker_network_name", network.Name)
m.Add("__meta_docker_network_internal", strconv.FormatBool(network.Internal))
m.Add("__meta_docker_network_ingress", strconv.FormatBool(network.Ingress))
m.Add("__meta_docker_network_scope", network.Scope)
for k, v := range network.Labels {
m[discoveryutils.SanitizeLabelName("__meta_docker_network_label_"+k)] = v
m.Add(discoveryutils.SanitizeLabelName("__meta_docker_network_label_"+k), v)
}
ms[network.ID] = m
}
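This map-to-Labels conversion is the pattern repeated throughout the diff: plain map writes become Add calls on a preallocated promutils.Labels. A self-contained sketch of the API exactly as exercised above:

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

func main() {
	m := promutils.NewLabels(2) // capacity hint, not a hard limit
	m.Add("__meta_docker_network_id", "qs0hog6ldlei9ct11pr3c77v1")
	m.Add("__meta_docker_network_scope", "local")
	fmt.Println(m.Get("__meta_docker_network_scope")) // prints: local
}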

View file

@ -5,8 +5,8 @@ import (
"sort"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func Test_addNetworkLabels(t *testing.T) {
@ -16,7 +16,7 @@ func Test_addNetworkLabels(t *testing.T) {
tests := []struct {
name string
args args
want [][]prompbmarshal.Label
want []*promutils.Labels
}{
{
name: "ingress network",
@ -33,8 +33,8 @@ func Test_addNetworkLabels(t *testing.T) {
},
},
},
want: [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__meta_docker_network_id": "qs0hog6ldlei9ct11pr3c77v1",
"__meta_docker_network_ingress": "true",
"__meta_docker_network_internal": "false",
@ -52,14 +52,11 @@ func Test_addNetworkLabels(t *testing.T) {
networkIDs = append(networkIDs, networkID)
}
sort.Strings(networkIDs)
var sortedLabelss [][]prompbmarshal.Label
var labelss []*promutils.Labels
for _, networkID := range networkIDs {
labels := got[networkID]
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
if !reflect.DeepEqual(sortedLabelss, tt.want) {
t.Errorf("addNetworkLabels() \ngot %v, \nwant %v", sortedLabelss, tt.want)
labelss = append(labelss, got[networkID])
}
discoveryutils.TestEqualLabelss(t, labelss, tt.want)
})
}
}

View file

@ -6,6 +6,7 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
)
@ -36,7 +37,7 @@ type Filter struct {
}
// GetLabels returns dockerswarm labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
cfg, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %w", err)

View file

@ -6,6 +6,7 @@ import (
"strconv"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// See https://docs.docker.com/engine/api/v1.40/#tag/Network
@ -18,7 +19,7 @@ type network struct {
Labels map[string]string
}
func getNetworksLabelsByNetworkID(cfg *apiConfig) (map[string]map[string]string, error) {
func getNetworksLabelsByNetworkID(cfg *apiConfig) (map[string]*promutils.Labels, error) {
networks, err := getNetworks(cfg)
if err != nil {
return nil, err
@ -42,18 +43,17 @@ func parseNetworks(data []byte) ([]network, error) {
return networks, nil
}
func getNetworkLabelsByNetworkID(networks []network) map[string]map[string]string {
ms := make(map[string]map[string]string)
func getNetworkLabelsByNetworkID(networks []network) map[string]*promutils.Labels {
ms := make(map[string]*promutils.Labels)
for _, network := range networks {
m := map[string]string{
"__meta_dockerswarm_network_id": network.ID,
"__meta_dockerswarm_network_name": network.Name,
"__meta_dockerswarm_network_internal": strconv.FormatBool(network.Internal),
"__meta_dockerswarm_network_ingress": strconv.FormatBool(network.Ingress),
"__meta_dockerswarm_network_scope": network.Scope,
}
m := promutils.NewLabels(8)
m.Add("__meta_dockerswarm_network_id", network.ID)
m.Add("__meta_dockerswarm_network_name", network.Name)
m.Add("__meta_dockerswarm_network_internal", strconv.FormatBool(network.Internal))
m.Add("__meta_dockerswarm_network_ingress", strconv.FormatBool(network.Ingress))
m.Add("__meta_dockerswarm_network_scope", network.Scope)
for k, v := range network.Labels {
m[discoveryutils.SanitizeLabelName("__meta_dockerswarm_network_label_"+k)] = v
m.Add(discoveryutils.SanitizeLabelName("__meta_dockerswarm_network_label_"+k), v)
}
ms[network.ID] = m
}

View file

@ -5,8 +5,8 @@ import (
"sort"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func Test_addNetworkLabels(t *testing.T) {
@ -16,7 +16,7 @@ func Test_addNetworkLabels(t *testing.T) {
tests := []struct {
name string
args args
want [][]prompbmarshal.Label
want []*promutils.Labels
}{
{
name: "ingress network",
@ -33,8 +33,8 @@ func Test_addNetworkLabels(t *testing.T) {
},
},
},
want: [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__meta_dockerswarm_network_id": "qs0hog6ldlei9ct11pr3c77v1",
"__meta_dockerswarm_network_ingress": "true",
"__meta_dockerswarm_network_internal": "false",
@ -52,14 +52,11 @@ func Test_addNetworkLabels(t *testing.T) {
networkIDs = append(networkIDs, networkID)
}
sort.Strings(networkIDs)
var sortedLabelss [][]prompbmarshal.Label
var labelss []*promutils.Labels
for _, networkID := range networkIDs {
labels := got[networkID]
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
if !reflect.DeepEqual(sortedLabelss, tt.want) {
t.Errorf("addNetworkLabels() \ngot %v, \nwant %v", sortedLabelss, tt.want)
labelss = append(labelss, got[networkID])
}
discoveryutils.TestEqualLabelss(t, labelss, tt.want)
})
}
}

View file

@ -5,6 +5,7 @@ import (
"fmt"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// See https://docs.docker.com/engine/api/v1.40/#tag/Node
@ -37,7 +38,7 @@ type node struct {
}
}
func getNodesLabels(cfg *apiConfig) ([]map[string]string, error) {
func getNodesLabels(cfg *apiConfig) ([]*promutils.Labels, error) {
nodes, err := getNodes(cfg)
if err != nil {
return nil, err
@ -61,26 +62,25 @@ func parseNodes(data []byte) ([]node, error) {
return nodes, nil
}
func addNodeLabels(nodes []node, port int) []map[string]string {
var ms []map[string]string
func addNodeLabels(nodes []node, port int) []*promutils.Labels {
var ms []*promutils.Labels
for _, node := range nodes {
m := map[string]string{
"__address__": discoveryutils.JoinHostPort(node.Status.Addr, port),
"__meta_dockerswarm_node_address": node.Status.Addr,
"__meta_dockerswarm_node_availability": node.Spec.Availability,
"__meta_dockerswarm_node_engine_version": node.Description.Engine.EngineVersion,
"__meta_dockerswarm_node_hostname": node.Description.Hostname,
"__meta_dockerswarm_node_id": node.ID,
"__meta_dockerswarm_node_manager_address": node.ManagerStatus.Addr,
"__meta_dockerswarm_node_manager_leader": fmt.Sprintf("%t", node.ManagerStatus.Leader),
"__meta_dockerswarm_node_manager_reachability": node.ManagerStatus.Reachability,
"__meta_dockerswarm_node_platform_architecture": node.Description.Platform.Architecture,
"__meta_dockerswarm_node_platform_os": node.Description.Platform.OS,
"__meta_dockerswarm_node_role": node.Spec.Role,
"__meta_dockerswarm_node_status": node.Status.State,
}
m := promutils.NewLabels(16)
m.Add("__address__", discoveryutils.JoinHostPort(node.Status.Addr, port))
m.Add("__meta_dockerswarm_node_address", node.Status.Addr)
m.Add("__meta_dockerswarm_node_availability", node.Spec.Availability)
m.Add("__meta_dockerswarm_node_engine_version", node.Description.Engine.EngineVersion)
m.Add("__meta_dockerswarm_node_hostname", node.Description.Hostname)
m.Add("__meta_dockerswarm_node_id", node.ID)
m.Add("__meta_dockerswarm_node_manager_address", node.ManagerStatus.Addr)
m.Add("__meta_dockerswarm_node_manager_leader", fmt.Sprintf("%t", node.ManagerStatus.Leader))
m.Add("__meta_dockerswarm_node_manager_reachability", node.ManagerStatus.Reachability)
m.Add("__meta_dockerswarm_node_platform_architecture", node.Description.Platform.Architecture)
m.Add("__meta_dockerswarm_node_platform_os", node.Description.Platform.OS)
m.Add("__meta_dockerswarm_node_role", node.Spec.Role)
m.Add("__meta_dockerswarm_node_status", node.Status.State)
for k, v := range node.Spec.Labels {
m[discoveryutils.SanitizeLabelName("__meta_dockerswarm_node_label_"+k)] = v
m.Add(discoveryutils.SanitizeLabelName("__meta_dockerswarm_node_label_"+k), v)
}
ms = append(ms, m)
}
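The NewLabels(16) argument above is only a preallocation hint covering the thirteen fixed node labels; the set still grows when node.Spec.Labels is large. A hedged sketch of sizing the hint from known inputs (the helper name is illustrative, not from the source):

func buildNodeLabels(addr string, specLabels map[string]string) *promutils.Labels {
	// Reserve room for the fixed labels plus one slot per dynamic label,
	// so Add never needs to grow the underlying slice.
	m := promutils.NewLabels(13 + len(specLabels))
	m.Add("__address__", addr)
	for k, v := range specLabels {
		m.Add(discoveryutils.SanitizeLabelName("__meta_dockerswarm_node_label_"+k), v)
	}
	return m
}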

View file

@ -4,8 +4,8 @@ import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func Test_parseNodes(t *testing.T) {
@ -112,7 +112,7 @@ func Test_addNodeLabels(t *testing.T) {
tests := []struct {
name string
args args
want [][]prompbmarshal.Label
want []*promutils.Labels
}{
{
name: "add labels to one node",
@ -154,8 +154,8 @@ func Test_addNodeLabels(t *testing.T) {
},
port: 9100,
},
want: [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "172.31.40.97:9100",
"__meta_dockerswarm_node_address": "172.31.40.97",
"__meta_dockerswarm_node_availability": "active",
@ -175,14 +175,7 @@ func Test_addNodeLabels(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := addNodeLabels(tt.args.nodes, tt.args.port)
var sortedLabelss [][]prompbmarshal.Label
for _, labels := range got {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
if !reflect.DeepEqual(sortedLabelss, tt.want) {
t.Errorf("addNodeLabels() \ngot %v, \nwant %v", sortedLabelss, tt.want)
}
discoveryutils.TestEqualLabelss(t, got, tt.want)
})
}
}

View file

@ -6,8 +6,8 @@ import (
"net"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// https://docs.docker.com/engine/api/v1.40/#tag/Service
@ -46,7 +46,7 @@ type portConfig struct {
PublishedPort int
}
func getServicesLabels(cfg *apiConfig) ([]map[string]string, error) {
func getServicesLabels(cfg *apiConfig) ([]*promutils.Labels, error) {
services, err := getServices(cfg)
if err != nil {
return nil, err
@ -84,19 +84,18 @@ func getServiceMode(svc service) string {
return ""
}
func addServicesLabels(services []service, networksLabels map[string]map[string]string, port int) []map[string]string {
var ms []map[string]string
func addServicesLabels(services []service, networksLabels map[string]*promutils.Labels, port int) []*promutils.Labels {
var ms []*promutils.Labels
for _, service := range services {
commonLabels := map[string]string{
"__meta_dockerswarm_service_id": service.ID,
"__meta_dockerswarm_service_name": service.Spec.Name,
"__meta_dockerswarm_service_mode": getServiceMode(service),
"__meta_dockerswarm_service_task_container_hostname": service.Spec.TaskTemplate.ContainerSpec.Hostname,
"__meta_dockerswarm_service_task_container_image": service.Spec.TaskTemplate.ContainerSpec.Image,
"__meta_dockerswarm_service_updating_status": service.UpdateStatus.State,
}
commonLabels := promutils.NewLabels(10)
commonLabels.Add("__meta_dockerswarm_service_id", service.ID)
commonLabels.Add("__meta_dockerswarm_service_name", service.Spec.Name)
commonLabels.Add("__meta_dockerswarm_service_mode", getServiceMode(service))
commonLabels.Add("__meta_dockerswarm_service_task_container_hostname", service.Spec.TaskTemplate.ContainerSpec.Hostname)
commonLabels.Add("__meta_dockerswarm_service_task_container_image", service.Spec.TaskTemplate.ContainerSpec.Image)
commonLabels.Add("__meta_dockerswarm_service_updating_status", service.UpdateStatus.State)
for k, v := range service.Spec.Labels {
commonLabels[discoveryutils.SanitizeLabelName("__meta_dockerswarm_service_label_"+k)] = v
commonLabels.Add(discoveryutils.SanitizeLabelName("__meta_dockerswarm_service_label_"+k), v)
}
for _, vip := range service.Endpoint.VirtualIPs {
// skip services without virtual address.
@ -114,30 +113,22 @@ func addServicesLabels(services []service, networksLabels map[string]map[string]
if ep.Protocol != "tcp" {
continue
}
m := map[string]string{
"__address__": discoveryutils.JoinHostPort(ip.String(), ep.PublishedPort),
"__meta_dockerswarm_service_endpoint_port_name": ep.Name,
"__meta_dockerswarm_service_endpoint_port_publish_mode": ep.PublishMode,
}
for k, v := range commonLabels {
m[k] = v
}
for k, v := range networksLabels[vip.NetworkID] {
m[k] = v
}
m := promutils.NewLabels(24)
m.Add("__address__", discoveryutils.JoinHostPort(ip.String(), ep.PublishedPort))
m.Add("__meta_dockerswarm_service_endpoint_port_name", ep.Name)
m.Add("__meta_dockerswarm_service_endpoint_port_publish_mode", ep.PublishMode)
m.AddFrom(commonLabels)
m.AddFrom(networksLabels[vip.NetworkID])
m.RemoveDuplicates()
added = true
ms = append(ms, m)
}
if !added {
m := map[string]string{
"__address__": discoveryutils.JoinHostPort(ip.String(), port),
}
for k, v := range commonLabels {
m[k] = v
}
for k, v := range networksLabels[vip.NetworkID] {
m[k] = v
}
m := promutils.NewLabels(24)
m.Add("__address__", discoveryutils.JoinHostPort(ip.String(), port))
m.AddFrom(commonLabels)
m.AddFrom(networksLabels[vip.NetworkID])
m.RemoveDuplicates()
ms = append(ms, m)
}
}
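Merging label sets previously required explicit map copy loops; AddFrom plus RemoveDuplicates now expresses the same thing directly. A sketch of the pattern (function name and parameters are illustrative), assuming RemoveDuplicates leaves a single entry per label name, as the call sites above imply:

func mergeEndpointLabels(addr string, commonLabels, networkLabels *promutils.Labels) *promutils.Labels {
	m := promutils.NewLabels(24)
	m.Add("__address__", addr)
	m.AddFrom(commonLabels)  // service-level labels
	m.AddFrom(networkLabels) // network-level labels; names may collide
	m.RemoveDuplicates()     // collapse repeated names into one entry
	return m
}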

View file

@ -4,8 +4,8 @@ import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func Test_parseServicesResponse(t *testing.T) {
@ -172,27 +172,27 @@ func Test_parseServicesResponse(t *testing.T) {
func Test_addServicesLabels(t *testing.T) {
type args struct {
services []service
networksLabels map[string]map[string]string
networksLabels map[string]*promutils.Labels
port int
}
tests := []struct {
name string
args args
want [][]prompbmarshal.Label
want []*promutils.Labels
}{
{
name: "add 2 services with network labels join",
args: args{
port: 9100,
networksLabels: map[string]map[string]string{
"qs0hog6ldlei9ct11pr3c77v1": {
networksLabels: map[string]*promutils.Labels{
"qs0hog6ldlei9ct11pr3c77v1": promutils.NewLabelsFromMap(map[string]string{
"__meta_dockerswarm_network_id": "qs0hog6ldlei9ct11pr3c77v1",
"__meta_dockerswarm_network_ingress": "true",
"__meta_dockerswarm_network_internal": "false",
"__meta_dockerswarm_network_label_key1": "value1",
"__meta_dockerswarm_network_name": "ingress",
"__meta_dockerswarm_network_scope": "swarm",
},
}),
},
services: []service{
{
@ -259,8 +259,8 @@ func Test_addServicesLabels(t *testing.T) {
},
},
},
want: [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.0.0.3:0",
"__meta_dockerswarm_network_id": "qs0hog6ldlei9ct11pr3c77v1",
"__meta_dockerswarm_network_ingress": "true",
@ -282,13 +282,7 @@ func Test_addServicesLabels(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := addServicesLabels(tt.args.services, tt.args.networksLabels, tt.args.port)
var sortedLabelss [][]prompbmarshal.Label
for _, labels := range got {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
if !reflect.DeepEqual(sortedLabelss, tt.want) {
t.Errorf("addServicesLabels() \ngot %v, \nwant %v", sortedLabelss, tt.want)
}
discoveryutils.TestEqualLabelss(t, got, tt.want)
})
}
}

View file

@ -8,6 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// See https://docs.docker.com/engine/api/v1.40/#tag/Task
@ -39,7 +40,7 @@ type task struct {
Slot int
}
func getTasksLabels(cfg *apiConfig) ([]map[string]string, error) {
func getTasksLabels(cfg *apiConfig) ([]*promutils.Labels, error) {
tasks, err := getTasks(cfg)
if err != nil {
return nil, err
@ -76,18 +77,17 @@ func parseTasks(data []byte) ([]task, error) {
return tasks, nil
}
func addTasksLabels(tasks []task, nodesLabels, servicesLabels []map[string]string, networksLabels map[string]map[string]string, services []service, port int) []map[string]string {
var ms []map[string]string
func addTasksLabels(tasks []task, nodesLabels, servicesLabels []*promutils.Labels, networksLabels map[string]*promutils.Labels, services []service, port int) []*promutils.Labels {
var ms []*promutils.Labels
for _, task := range tasks {
commonLabels := map[string]string{
"__meta_dockerswarm_task_id": task.ID,
"__meta_dockerswarm_task_container_id": task.Status.ContainerStatus.ContainerID,
"__meta_dockerswarm_task_desired_state": task.DesiredState,
"__meta_dockerswarm_task_slot": strconv.Itoa(task.Slot),
"__meta_dockerswarm_task_state": task.Status.State,
}
commonLabels := promutils.NewLabels(8)
commonLabels.Add("__meta_dockerswarm_task_id", task.ID)
commonLabels.Add("__meta_dockerswarm_task_container_id", task.Status.ContainerStatus.ContainerID)
commonLabels.Add("__meta_dockerswarm_task_desired_state", task.DesiredState)
commonLabels.Add("__meta_dockerswarm_task_slot", strconv.Itoa(task.Slot))
commonLabels.Add("__meta_dockerswarm_task_state", task.Status.State)
for k, v := range task.Spec.ContainerSpec.Labels {
commonLabels[discoveryutils.SanitizeLabelName("__meta_dockerswarm_container_label_"+k)] = v
commonLabels.Add(discoveryutils.SanitizeLabelName("__meta_dockerswarm_container_label_"+k), v)
}
var svcPorts []portConfig
for i, v := range services {
@ -103,12 +103,11 @@ func addTasksLabels(tasks []task, nodesLabels, servicesLabels []map[string]strin
if port.Protocol != "tcp" {
continue
}
m := make(map[string]string, len(commonLabels)+2)
for k, v := range commonLabels {
m[k] = v
}
m["__address__"] = discoveryutils.JoinHostPort(commonLabels["__meta_dockerswarm_node_address"], port.PublishedPort)
m["__meta_dockerswarm_task_port_publish_mode"] = port.PublishMode
m := promutils.NewLabels(10)
m.AddFrom(commonLabels)
m.Add("__address__", discoveryutils.JoinHostPort(commonLabels.Get("__meta_dockerswarm_node_address"), port.PublishedPort))
m.Add("__meta_dockerswarm_task_port_publish_mode", port.PublishMode)
m.RemoveDuplicates()
ms = append(ms, m)
}
for _, na := range task.NetworksAttachments {
@ -124,27 +123,21 @@ func addTasksLabels(tasks []task, nodesLabels, servicesLabels []map[string]strin
if ep.Protocol != "tcp" {
continue
}
m := make(map[string]string, len(commonLabels)+len(networkLabels)+2)
for k, v := range commonLabels {
m[k] = v
}
for k, v := range networkLabels {
m[k] = v
}
m["__address__"] = discoveryutils.JoinHostPort(ip.String(), ep.PublishedPort)
m["__meta_dockerswarm_task_port_publish_mode"] = ep.PublishMode
m := promutils.NewLabels(20)
m.AddFrom(commonLabels)
m.AddFrom(networkLabels)
m.Add("__address__", discoveryutils.JoinHostPort(ip.String(), ep.PublishedPort))
m.Add("__meta_dockerswarm_task_port_publish_mode", ep.PublishMode)
m.RemoveDuplicates()
ms = append(ms, m)
added = true
}
if !added {
m := make(map[string]string, len(commonLabels)+len(networkLabels)+1)
for k, v := range commonLabels {
m[k] = v
}
for k, v := range networkLabels {
m[k] = v
}
m["__address__"] = discoveryutils.JoinHostPort(ip.String(), port)
m := promutils.NewLabels(20)
m.AddFrom(commonLabels)
m.AddFrom(networkLabels)
m.Add("__address__", discoveryutils.JoinHostPort(ip.String(), port))
m.RemoveDuplicates()
ms = append(ms, m)
}
}
@ -154,13 +147,13 @@ func addTasksLabels(tasks []task, nodesLabels, servicesLabels []map[string]strin
}
// addLabels copies labels into dst from the first src entry that contains the given `key: value` pair.
func addLabels(dst map[string]string, src []map[string]string, key, value string) {
func addLabels(dst *promutils.Labels, src []*promutils.Labels, key, value string) {
for _, m := range src {
if m[key] != value {
if m.Get(key) != value {
continue
}
for k, v := range m {
dst[k] = v
for _, label := range m.GetLabels() {
dst.Add(label.Name, label.Value)
}
return
}
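addLabels is a linear lookup-and-merge: it copies every label from the first src entry whose value for key matches, then stops. A hypothetical call attaching node labels to a task's label set:

func attachNodeLabels(dst *promutils.Labels, nodesLabels []*promutils.Labels, nodeID string) {
	// Copies every label from the first nodesLabels entry whose
	// __meta_dockerswarm_node_id equals nodeID; a no-op when none match.
	addLabels(dst, nodesLabels, "__meta_dockerswarm_node_id", nodeID)
}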

View file

@ -4,8 +4,8 @@ import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func Test_parseTasks(t *testing.T) {
@ -116,16 +116,16 @@ func Test_parseTasks(t *testing.T) {
func Test_addTasksLabels(t *testing.T) {
type args struct {
tasks []task
nodesLabels []map[string]string
servicesLabels []map[string]string
networksLabels map[string]map[string]string
nodesLabels []*promutils.Labels
servicesLabels []*promutils.Labels
networksLabels map[string]*promutils.Labels
services []service
port int
}
tests := []struct {
name string
args args
want [][]prompbmarshal.Label
want []*promutils.Labels
}{
{
name: "adds 1 task with nodes labels",
@ -159,8 +159,8 @@ func Test_addTasksLabels(t *testing.T) {
}},
},
},
nodesLabels: []map[string]string{
{
nodesLabels: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "172.31.40.97:9100",
"__meta_dockerswarm_node_address": "172.31.40.97",
"__meta_dockerswarm_node_availability": "active",
@ -171,11 +171,11 @@ func Test_addTasksLabels(t *testing.T) {
"__meta_dockerswarm_node_platform_os": "linux",
"__meta_dockerswarm_node_role": "manager",
"__meta_dockerswarm_node_status": "ready",
},
}),
},
},
want: [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "172.31.40.97:6379",
"__meta_dockerswarm_node_address": "172.31.40.97",
"__meta_dockerswarm_node_availability": "active",
@ -230,18 +230,18 @@ func Test_addTasksLabels(t *testing.T) {
PortStatus: struct{ Ports []portConfig }{}},
},
},
networksLabels: map[string]map[string]string{
"qs0hog6ldlei9ct11pr3c77v1": {
networksLabels: map[string]*promutils.Labels{
"qs0hog6ldlei9ct11pr3c77v1": promutils.NewLabelsFromMap(map[string]string{
"__meta_dockerswarm_network_id": "qs0hog6ldlei9ct11pr3c77v1",
"__meta_dockerswarm_network_ingress": "true",
"__meta_dockerswarm_network_internal": "false",
"__meta_dockerswarm_network_label_key1": "value1",
"__meta_dockerswarm_network_name": "ingress",
"__meta_dockerswarm_network_scope": "swarm",
},
}),
},
nodesLabels: []map[string]string{
{
nodesLabels: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "172.31.40.97:9100",
"__meta_dockerswarm_node_address": "172.31.40.97",
"__meta_dockerswarm_node_availability": "active",
@ -252,7 +252,7 @@ func Test_addTasksLabels(t *testing.T) {
"__meta_dockerswarm_node_platform_os": "linux",
"__meta_dockerswarm_node_role": "manager",
"__meta_dockerswarm_node_status": "ready",
},
}),
},
services: []service{
{
@ -320,10 +320,10 @@ func Test_addTasksLabels(t *testing.T) {
},
},
},
servicesLabels: []map[string]string{},
servicesLabels: []*promutils.Labels{},
},
want: [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.10.15.15:6379",
"__meta_dockerswarm_network_id": "qs0hog6ldlei9ct11pr3c77v1",
"__meta_dockerswarm_network_ingress": "true",
@ -353,13 +353,7 @@ func Test_addTasksLabels(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := addTasksLabels(tt.args.tasks, tt.args.nodesLabels, tt.args.servicesLabels, tt.args.networksLabels, tt.args.services, tt.args.port)
var sortedLabelss [][]prompbmarshal.Label
for _, labels := range got {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
if !reflect.DeepEqual(sortedLabelss, tt.want) {
t.Errorf("addTasksLabels() \ngot %v, \nwant %v", sortedLabelss, tt.want)
}
discoveryutils.TestEqualLabelss(t, got, tt.want)
})
}
}

View file

@ -7,6 +7,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/awsapi"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// SDCheckInterval defines interval for targets refresh.
@ -34,7 +35,7 @@ type SDConfig struct {
}
// GetLabels returns ec2 labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
cfg, err := getAPIConfig(sdc)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %w", err)

View file

@ -7,17 +7,18 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/awsapi"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// getInstancesLabels returns labels for ec2 instances obtained from the given cfg
func getInstancesLabels(cfg *apiConfig) ([]map[string]string, error) {
func getInstancesLabels(cfg *apiConfig) ([]*promutils.Labels, error) {
rs, err := getReservations(cfg)
if err != nil {
return nil, err
}
azMap := getAZMap(cfg)
region := cfg.awsConfig.GetRegion()
var ms []map[string]string
var ms []*promutils.Labels
for _, r := range rs {
for _, inst := range r.InstanceSet.Items {
ms = inst.appendTargetLabels(ms, r.OwnerID, region, cfg.port, azMap)
@ -135,32 +136,31 @@ func parseInstancesResponse(data []byte) (*InstancesResponse, error) {
return &v, nil
}
func (inst *Instance) appendTargetLabels(ms []map[string]string, ownerID, region string, port int, azMap map[string]string) []map[string]string {
func (inst *Instance) appendTargetLabels(ms []*promutils.Labels, ownerID, region string, port int, azMap map[string]string) []*promutils.Labels {
if len(inst.PrivateIPAddress) == 0 {
// Cannot scrape instance without private IP address
return ms
}
addr := discoveryutils.JoinHostPort(inst.PrivateIPAddress, port)
m := map[string]string{
"__address__": addr,
"__meta_ec2_architecture": inst.Architecture,
"__meta_ec2_ami": inst.ImageID,
"__meta_ec2_availability_zone": inst.Placement.AvailabilityZone,
"__meta_ec2_availability_zone_id": azMap[inst.Placement.AvailabilityZone],
"__meta_ec2_instance_id": inst.ID,
"__meta_ec2_instance_lifecycle": inst.Lifecycle,
"__meta_ec2_instance_state": inst.State.Name,
"__meta_ec2_instance_type": inst.Type,
"__meta_ec2_owner_id": ownerID,
"__meta_ec2_platform": inst.Platform,
"__meta_ec2_primary_subnet_id": inst.SubnetID,
"__meta_ec2_private_dns_name": inst.PrivateDNSName,
"__meta_ec2_private_ip": inst.PrivateIPAddress,
"__meta_ec2_public_dns_name": inst.PublicDNSName,
"__meta_ec2_public_ip": inst.PublicIPAddress,
"__meta_ec2_region": region,
"__meta_ec2_vpc_id": inst.VPCID,
}
m := promutils.NewLabels(24)
m.Add("__address__", addr)
m.Add("__meta_ec2_architecture", inst.Architecture)
m.Add("__meta_ec2_ami", inst.ImageID)
m.Add("__meta_ec2_availability_zone", inst.Placement.AvailabilityZone)
m.Add("__meta_ec2_availability_zone_id", azMap[inst.Placement.AvailabilityZone])
m.Add("__meta_ec2_instance_id", inst.ID)
m.Add("__meta_ec2_instance_lifecycle", inst.Lifecycle)
m.Add("__meta_ec2_instance_state", inst.State.Name)
m.Add("__meta_ec2_instance_type", inst.Type)
m.Add("__meta_ec2_owner_id", ownerID)
m.Add("__meta_ec2_platform", inst.Platform)
m.Add("__meta_ec2_primary_subnet_id", inst.SubnetID)
m.Add("__meta_ec2_private_dns_name", inst.PrivateDNSName)
m.Add("__meta_ec2_private_ip", inst.PrivateIPAddress)
m.Add("__meta_ec2_public_dns_name", inst.PublicDNSName)
m.Add("__meta_ec2_public_ip", inst.PublicIPAddress)
m.Add("__meta_ec2_region", region)
m.Add("__meta_ec2_vpc_id", inst.VPCID)
if len(inst.VPCID) > 0 {
subnets := make([]string, 0, len(inst.NetworkInterfaceSet.Items))
seenSubnets := make(map[string]bool, len(inst.NetworkInterfaceSet.Items))
@ -179,16 +179,16 @@ func (inst *Instance) appendTargetLabels(ms []map[string]string, ownerID, region
}
// We surround the separated list with the separator as well. This way regular expressions
// in relabeling rules don't have to consider tag positions.
m["__meta_ec2_subnet_id"] = "," + strings.Join(subnets, ",") + ","
m.Add("__meta_ec2_subnet_id", ","+strings.Join(subnets, ",")+",")
if len(ipv6Addrs) > 0 {
m["__meta_ec2_ipv6_addresses"] = "," + strings.Join(ipv6Addrs, ",") + ","
m.Add("__meta_ec2_ipv6_addresses", ","+strings.Join(ipv6Addrs, ",")+",")
}
}
for _, t := range inst.TagSet.Items {
if len(t.Key) == 0 || len(t.Value) == 0 {
continue
}
m[discoveryutils.SanitizeLabelName("__meta_ec2_tag_"+t.Key)] = t.Value
m.Add(discoveryutils.SanitizeLabelName("__meta_ec2_tag_"+t.Key), t.Value)
}
ms = append(ms, m)
return ms
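Wrapping the joined list in separators means relabeling regexps can anchor on ,<id>, without caring whether the id sits first, last, or in the middle. A self-contained illustration (the subnet ids are made up):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// __meta_ec2_subnet_id as built above: separator-wrapped on both ends.
	v := ",subnet-a,subnet-b,subnet-c,"
	re := regexp.MustCompile(`.*,subnet-b,.*`)
	fmt.Println(re.MatchString(v)) // true, regardless of position in the list
}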

View file

@ -4,8 +4,8 @@ import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestDescribeAvailabilityZonesResponse(t *testing.T) {
@ -241,12 +241,8 @@ func TestParseInstancesResponse(t *testing.T) {
labelss := inst.appendTargetLabels(nil, ownerID, "region-a", port, map[string]string{
"eu-west-2c": "foobar-zone",
})
var sortedLabelss [][]prompbmarshal.Label
for _, labels := range labelss {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
expectedLabels := [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
expectedLabels := []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "172.31.11.152:423",
"__meta_ec2_architecture": "x86_64",
"__meta_ec2_availability_zone": "eu-west-2c",
@ -269,7 +265,5 @@ func TestParseInstancesResponse(t *testing.T) {
"__meta_ec2_vpc_id": "vpc-f1eaad99",
}),
}
if !reflect.DeepEqual(sortedLabelss, expectedLabels) {
t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, expectedLabels)
}
discoveryutils.TestEqualLabelss(t, labelss, expectedLabels)
}

View file

@ -9,6 +9,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
)
@ -82,7 +83,7 @@ type DataCenterInfo struct {
}
// GetLabels returns Eureka labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
cfg, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %w", err)
@ -103,8 +104,8 @@ func (sdc *SDConfig) MustStop() {
configMap.Delete(sdc)
}
func addInstanceLabels(apps *applications) []map[string]string {
var ms []map[string]string
func addInstanceLabels(apps *applications) []*promutils.Labels {
var ms []*promutils.Labels
for _, app := range apps.Applications {
for _, instance := range app.Instances {
instancePort := 80
@ -112,38 +113,37 @@ func addInstanceLabels(apps *applications) []map[string]string {
instancePort = instance.Port.Port
}
targetAddress := discoveryutils.JoinHostPort(instance.HostName, instancePort)
m := map[string]string{
"__address__": targetAddress,
"instance": instance.InstanceID,
"__meta_eureka_app_name": app.Name,
"__meta_eureka_app_instance_hostname": instance.HostName,
"__meta_eureka_app_instance_homepage_url": instance.HomePageURL,
"__meta_eureka_app_instance_statuspage_url": instance.StatusPageURL,
"__meta_eureka_app_instance_healthcheck_url": instance.HealthCheckURL,
"__meta_eureka_app_instance_ip_addr": instance.IPAddr,
"__meta_eureka_app_instance_vip_address": instance.VipAddress,
"__meta_eureka_app_instance_secure_vip_address": instance.SecureVipAddress,
"__meta_eureka_app_instance_status": instance.Status,
"__meta_eureka_app_instance_country_id": strconv.Itoa(instance.CountryID),
"__meta_eureka_app_instance_id": instance.InstanceID,
}
m := promutils.NewLabels(24)
m.Add("__address__", targetAddress)
m.Add("instance", instance.InstanceID)
m.Add("__meta_eureka_app_name", app.Name)
m.Add("__meta_eureka_app_instance_hostname", instance.HostName)
m.Add("__meta_eureka_app_instance_homepage_url", instance.HomePageURL)
m.Add("__meta_eureka_app_instance_statuspage_url", instance.StatusPageURL)
m.Add("__meta_eureka_app_instance_healthcheck_url", instance.HealthCheckURL)
m.Add("__meta_eureka_app_instance_ip_addr", instance.IPAddr)
m.Add("__meta_eureka_app_instance_vip_address", instance.VipAddress)
m.Add("__meta_eureka_app_instance_secure_vip_address", instance.SecureVipAddress)
m.Add("__meta_eureka_app_instance_status", instance.Status)
m.Add("__meta_eureka_app_instance_country_id", strconv.Itoa(instance.CountryID))
m.Add("__meta_eureka_app_instance_id", instance.InstanceID)
if instance.Port.Port != 0 {
m["__meta_eureka_app_instance_port"] = strconv.Itoa(instance.Port.Port)
m["__meta_eureka_app_instance_port_enabled"] = strconv.FormatBool(instance.Port.Enabled)
m.Add("__meta_eureka_app_instance_port", strconv.Itoa(instance.Port.Port))
m.Add("__meta_eureka_app_instance_port_enabled", strconv.FormatBool(instance.Port.Enabled))
}
if instance.SecurePort.Port != 0 {
m["__meta_eureka_app_instance_secure_port"] = strconv.Itoa(instance.SecurePort.Port)
m["__meta_eureka_app_instance_secure_port_enabled"] = strconv.FormatBool(instance.SecurePort.Enabled)
m.Add("__meta_eureka_app_instance_secure_port", strconv.Itoa(instance.SecurePort.Port))
m.Add("__meta_eureka_app_instance_secure_port_enabled", strconv.FormatBool(instance.SecurePort.Enabled))
}
if len(instance.DataCenterInfo.Name) > 0 {
m["__meta_eureka_app_instance_datacenterinfo_name"] = instance.DataCenterInfo.Name
m.Add("__meta_eureka_app_instance_datacenterinfo_name", instance.DataCenterInfo.Name)
for _, tag := range instance.DataCenterInfo.Metadata.Items {
m[discoveryutils.SanitizeLabelName("__meta_eureka_app_instance_datacenterinfo_metadata_"+tag.XMLName.Local)] = tag.Content
m.Add(discoveryutils.SanitizeLabelName("__meta_eureka_app_instance_datacenterinfo_metadata_"+tag.XMLName.Local), tag.Content)
}
}
for _, tag := range instance.Metadata.Items {
m[discoveryutils.SanitizeLabelName("__meta_eureka_app_instance_metadata_"+tag.XMLName.Local)] = tag.Content
m.Add(discoveryutils.SanitizeLabelName("__meta_eureka_app_instance_metadata_"+tag.XMLName.Local), tag.Content)
}
ms = append(ms, m)
}

View file

@ -1,11 +1,10 @@
package eureka
import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func Test_addInstanceLabels(t *testing.T) {
@ -15,7 +14,7 @@ func Test_addInstanceLabels(t *testing.T) {
tests := []struct {
name string
args args
want [][]prompbmarshal.Label
want []*promutils.Labels
}{
{
name: "1 application",
@ -50,8 +49,8 @@ func Test_addInstanceLabels(t *testing.T) {
},
},
},
want: [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "host-1:9100",
"instance": "some-id",
"__meta_eureka_app_instance_hostname": "host-1",
@ -75,13 +74,7 @@ func Test_addInstanceLabels(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := addInstanceLabels(tt.args.applications)
var sortedLabelss [][]prompbmarshal.Label
for _, labels := range got {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
if !reflect.DeepEqual(sortedLabelss, tt.want) {
t.Fatalf("unexpected labels \ngot : %v, \nwant: %v", got, tt.want)
}
discoveryutils.TestEqualLabelss(t, got, tt.want)
})
}
}

View file

@ -4,6 +4,8 @@ import (
"flag"
"fmt"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// SDCheckInterval defines interval for targets refresh.
@ -60,7 +62,7 @@ func (z ZoneYAML) MarshalYAML() (interface{}, error) {
}
// GetLabels returns gce labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
cfg, err := getAPIConfig(sdc)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %w", err)

View file

@ -8,12 +8,13 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// getInstancesLabels returns labels for gce instances obtained from the given cfg
func getInstancesLabels(cfg *apiConfig) []map[string]string {
func getInstancesLabels(cfg *apiConfig) []*promutils.Labels {
insts := getInstances(cfg)
var ms []map[string]string
var ms []*promutils.Labels
for _, inst := range insts {
ms = inst.appendTargetLabels(ms, cfg.project, cfg.tagSeparator, cfg.port)
}
@ -88,7 +89,7 @@ type Instance struct {
NetworkInterfaces []NetworkInterface
Tags TagList
Metadata MetadataList
Labels discoveryutils.SortedLabels
Labels *promutils.Labels
}
// NetworkInterface is network interface from https://cloud.google.com/compute/docs/reference/rest/v1/instances/list
@ -131,42 +132,41 @@ func parseInstanceList(data []byte) (*InstanceList, error) {
return &il, nil
}
func (inst *Instance) appendTargetLabels(ms []map[string]string, project, tagSeparator string, port int) []map[string]string {
func (inst *Instance) appendTargetLabels(ms []*promutils.Labels, project, tagSeparator string, port int) []*promutils.Labels {
if len(inst.NetworkInterfaces) == 0 {
return ms
}
iface := inst.NetworkInterfaces[0]
addr := discoveryutils.JoinHostPort(iface.NetworkIP, port)
m := map[string]string{
"__address__": addr,
"__meta_gce_instance_id": inst.ID,
"__meta_gce_instance_status": inst.Status,
"__meta_gce_instance_name": inst.Name,
"__meta_gce_machine_type": inst.MachineType,
"__meta_gce_network": iface.Network,
"__meta_gce_private_ip": iface.NetworkIP,
"__meta_gce_project": project,
"__meta_gce_subnetwork": iface.Subnetwork,
"__meta_gce_zone": inst.Zone,
}
m := promutils.NewLabels(24)
m.Add("__address__", addr)
m.Add("__meta_gce_instance_id", inst.ID)
m.Add("__meta_gce_instance_status", inst.Status)
m.Add("__meta_gce_instance_name", inst.Name)
m.Add("__meta_gce_machine_type", inst.MachineType)
m.Add("__meta_gce_network", iface.Network)
m.Add("__meta_gce_private_ip", iface.NetworkIP)
m.Add("__meta_gce_project", project)
m.Add("__meta_gce_subnetwork", iface.Subnetwork)
m.Add("__meta_gce_zone", inst.Zone)
for _, iface := range inst.NetworkInterfaces {
m[discoveryutils.SanitizeLabelName("__meta_gce_interface_ipv4_"+iface.Name)] = iface.NetworkIP
m.Add(discoveryutils.SanitizeLabelName("__meta_gce_interface_ipv4_"+iface.Name), iface.NetworkIP)
}
if len(inst.Tags.Items) > 0 {
// We surround the separated list with the separator as well. This way regular expressions
// in relabeling rules don't have to consider tag positions.
m["__meta_gce_tags"] = tagSeparator + strings.Join(inst.Tags.Items, tagSeparator) + tagSeparator
m.Add("__meta_gce_tags", tagSeparator+strings.Join(inst.Tags.Items, tagSeparator)+tagSeparator)
}
for _, item := range inst.Metadata.Items {
m[discoveryutils.SanitizeLabelName("__meta_gce_metadata_"+item.Key)] = item.Value
m.Add(discoveryutils.SanitizeLabelName("__meta_gce_metadata_"+item.Key), item.Value)
}
for _, label := range inst.Labels {
m[discoveryutils.SanitizeLabelName("__meta_gce_label_"+label.Name)] = label.Value
for _, label := range inst.Labels.Labels {
m.Add(discoveryutils.SanitizeLabelName("__meta_gce_label_"+label.Name), label.Value)
}
if len(iface.AccessConfigs) > 0 {
ac := iface.AccessConfigs[0]
if ac.Type == "ONE_TO_ONE_NAT" {
m["__meta_gce_public_ip"] = ac.NatIP
m.Add("__meta_gce_public_ip", ac.NatIP)
}
}
ms = append(ms, m)

View file

@ -1,11 +1,10 @@
package gce
import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestParseInstanceListFailure(t *testing.T) {
@ -148,12 +147,8 @@ func TestParseInstanceListSuccess(t *testing.T) {
tagSeparator := ","
port := 80
labelss := inst.appendTargetLabels(nil, project, tagSeparator, port)
var sortedLabelss [][]prompbmarshal.Label
for _, labels := range labelss {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
expectedLabelss := [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
expectedLabelss := []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.11.2.7:80",
"__meta_gce_instance_id": "7897352091592122",
"__meta_gce_instance_name": "play-1m-1-vmagent",
@ -174,7 +169,5 @@ func TestParseInstanceListSuccess(t *testing.T) {
"__meta_gce_zone": "https://www.googleapis.com/compute/v1/projects/victoriametrics-test/zones/us-east1-b",
}),
}
if !reflect.DeepEqual(sortedLabelss, expectedLabelss) {
t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, expectedLabelss)
}
discoveryutils.TestEqualLabelss(t, labelss, expectedLabelss)
}

View file

@ -7,6 +7,7 @@ import (
"strconv"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/fasthttp"
"github.com/VictoriaMetrics/metrics"
)
@ -25,7 +26,7 @@ type apiConfig struct {
// https://prometheus.io/docs/prometheus/latest/http_sd/
type httpGroupTarget struct {
Targets []string `json:"targets"`
Labels map[string]string `json:"labels"`
Labels *promutils.Labels `json:"labels"`
}
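Using *promutils.Labels as a JSON field implies it implements encoding/json unmarshaling; under that assumption, decoding an HTTP SD response into the struct above looks like:

// data is a hypothetical HTTP SD payload per
// https://prometheus.io/docs/prometheus/latest/http_sd/
data := []byte(`[{"targets":["10.0.0.1:9100"],"labels":{"env":"dev"}}]`)
var groups []httpGroupTarget
if err := json.Unmarshal(data, &groups); err != nil {
	// malformed payload; surface the error to the caller
}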
func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {

View file

@ -3,6 +3,8 @@ package http
import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func Test_parseAPIResponse(t *testing.T) {
@ -28,7 +30,7 @@ func Test_parseAPIResponse(t *testing.T) {
},
want: []httpGroupTarget{
{
Labels: map[string]string{"label-1": "value-1"},
Labels: promutils.NewLabelsFromMap(map[string]string{"label-1": "value-1"}),
Targets: []string{"http://target-1:9100", "http://target-2:9150"},
},
},

View file

@ -6,6 +6,7 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
)
@ -25,7 +26,7 @@ type SDConfig struct {
}
// GetLabels returns http service discovery labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
cfg, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %w", err)
@ -37,17 +38,16 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
return addHTTPTargetLabels(hts, sdc.URL), nil
}
func addHTTPTargetLabels(src []httpGroupTarget, sourceURL string) []map[string]string {
ms := make([]map[string]string, 0, len(src))
func addHTTPTargetLabels(src []httpGroupTarget, sourceURL string) []*promutils.Labels {
ms := make([]*promutils.Labels, 0, len(src))
for _, targetGroup := range src {
labels := targetGroup.Labels
for _, target := range targetGroup.Targets {
m := make(map[string]string, len(labels))
for k, v := range labels {
m[k] = v
}
m["__address__"] = target
m["__meta_url"] = sourceURL
m := promutils.NewLabels(2 + labels.Len())
m.AddFrom(labels)
m.Add("__address__", target)
m.Add("__meta_url", sourceURL)
m.RemoveDuplicates()
ms = append(ms, m)
}
}
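For each group, every target gets its own copy of the group labels plus __address__ and __meta_url. A usage sketch with illustrative values:

groups := []httpGroupTarget{{
	Targets: []string{"10.0.0.1:9100", "10.0.0.2:9100"},
	Labels:  promutils.NewLabelsFromMap(map[string]string{"env": "dev"}),
}}
labelss := addHTTPTargetLabels(groups, "http://sd.example.com/targets")
// len(labelss) == 2; each entry carries env="dev" plus its own
// __address__ and the shared __meta_url.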

View file

@ -1,11 +1,10 @@
package http
import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func Test_addHTTPTargetLabels(t *testing.T) {
@ -15,7 +14,7 @@ func Test_addHTTPTargetLabels(t *testing.T) {
tests := []struct {
name string
args args
want [][]prompbmarshal.Label
want []*promutils.Labels
}{
{
name: "add ok",
@ -23,18 +22,18 @@ func Test_addHTTPTargetLabels(t *testing.T) {
src: []httpGroupTarget{
{
Targets: []string{"127.0.0.1:9100", "127.0.0.2:91001"},
Labels: map[string]string{"__meta_kubernetes_pod": "pod-1", "__meta_consul_dc": "dc-2"},
Labels: promutils.NewLabelsFromMap(map[string]string{"__meta_kubernetes_pod": "pod-1", "__meta_consul_dc": "dc-2"}),
},
},
},
want: [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
want: []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "127.0.0.1:9100",
"__meta_kubernetes_pod": "pod-1",
"__meta_consul_dc": "dc-2",
"__meta_url": "http://foo.bar/baz?aaa=bb",
}),
discoveryutils.GetSortedLabels(map[string]string{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "127.0.0.2:91001",
"__meta_kubernetes_pod": "pod-1",
"__meta_consul_dc": "dc-2",
@ -46,13 +45,7 @@ func Test_addHTTPTargetLabels(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := addHTTPTargetLabels(tt.args.src, "http://foo.bar/baz?aaa=bb")
var sortedLabelss [][]prompbmarshal.Label
for _, labels := range got {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(labels))
}
if !reflect.DeepEqual(sortedLabelss, tt.want) {
t.Errorf("addHTTPTargetLabels() \ngot \n%v\n, \nwant \n%v\n", sortedLabelss, tt.want)
}
discoveryutils.TestEqualLabelss(t, got, tt.want)
})
}
}

View file

@ -19,6 +19,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/metrics"
)
@ -37,7 +38,7 @@ type object interface {
key() string
// getTargetLabels must be called under gw.mu lock.
getTargetLabels(gw *groupWatcher) []map[string]string
getTargetLabels(gw *groupWatcher) []*promutils.Labels
}
// parseObjectFunc must parse object from the given data.
@ -136,8 +137,8 @@ func (aw *apiWatcher) updateScrapeWorks(uw *urlWatcher, swosByKey map[string][]i
aw.swosByURLWatcherLock.Unlock()
}
func (aw *apiWatcher) setScrapeWorks(uw *urlWatcher, key string, labels []map[string]string) {
swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels)
func (aw *apiWatcher) setScrapeWorks(uw *urlWatcher, key string, labelss []*promutils.Labels) {
swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labelss)
aw.swosByURLWatcherLock.Lock()
swosByKey := aw.swosByURLWatcher[uw]
if swosByKey == nil {
@ -163,7 +164,7 @@ func (aw *apiWatcher) removeScrapeWorks(uw *urlWatcher, key string) {
aw.swosByURLWatcherLock.Unlock()
}
func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss []map[string]string) []interface{} {
func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss []*promutils.Labels) []interface{} {
// Do not pre-allocate swos, since it is likely to remain empty because of relabeling
var swos []interface{}
for _, labels := range labelss {
@ -299,24 +300,31 @@ func (gw *groupWatcher) getScrapeWorkObjectsByAPIWatcherLocked(objectsByKey map[
var wg sync.WaitGroup
limiterCh := make(chan struct{}, cgroup.AvailableCPUs())
for key, o := range objectsByKey {
labels := o.getTargetLabels(gw)
labelss := o.getTargetLabels(gw)
wg.Add(1)
limiterCh <- struct{}{}
go func(key string, labels []map[string]string) {
go func(key string, labelss []*promutils.Labels) {
for aw, e := range swosByAPIWatcher {
swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels)
swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labelss)
e.mu.Lock()
e.swosByKey[key] = swos
e.mu.Unlock()
}
putLabelssToPool(labelss)
wg.Done()
<-limiterCh
}(key, labels)
}(key, labelss)
}
wg.Wait()
return swosByAPIWatcher
}
func putLabelssToPool(labelss []*promutils.Labels) {
for _, labels := range labelss {
promutils.PutLabels(labels)
}
}
func (gw *groupWatcher) getObjectByRoleLocked(role, namespace, name string) object {
if role == "node" {
// Node objects have no namespace
@ -764,10 +772,11 @@ func (uw *urlWatcher) updateObjectLocked(key string, o object) {
uw.objectsUpdated.Inc()
}
if len(uw.aws) > 0 {
labels := o.getTargetLabels(uw.gw)
labelss := o.getTargetLabels(uw.gw)
for aw := range uw.aws {
aw.setScrapeWorks(uw, key, labels)
aw.setScrapeWorks(uw, key, labelss)
}
putLabelssToPool(labelss)
}
uw.maybeUpdateDependedScrapeWorksLocked()
}
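getTargetLabels results are now pooled: promutils.GetLabels hands out a reusable label set and putLabelssToPool returns it once every apiWatcher has derived its ScrapeWork objects. The contract, sketched (it assumes derived objects copy whatever they keep and never retain the pooled set):

labels := promutils.GetLabels() // may reuse a previously released set
labels.Add("__address__", "10.0.0.1:9100")
// ... build scrape works from labels; copy anything that must outlive it ...
promutils.PutLabels(labels) // labels must not be touched after this call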

View file

@ -8,6 +8,8 @@ import (
"sync"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestGetAPIPathsWithNamespaces(t *testing.T) {
@ -919,10 +921,10 @@ func TestGetScrapeWorkObjects(t *testing.T) {
}
testAPIServer := httptest.NewServer(mux)
tc.sdc.APIServer = testAPIServer.URL
ac, err := newAPIConfig(tc.sdc, "", func(metaLabels map[string]string) interface{} {
ac, err := newAPIConfig(tc.sdc, "", func(metaLabels *promutils.Labels) interface{} {
var res []interface{}
for k := range metaLabels {
res = append(res, k)
for _, label := range metaLabels.Labels {
res = append(res, label.Name)
}
return res
})

View file

@ -1,7 +1,9 @@
package kubernetes
import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// ObjectMeta represents ObjectMeta from k8s API.
@ -11,8 +13,8 @@ type ObjectMeta struct {
Name string
Namespace string
UID string
Labels discoveryutils.SortedLabels
Annotations discoveryutils.SortedLabels
Labels *promutils.Labels
Annotations *promutils.Labels
OwnerReferences []OwnerReference
}
@ -26,15 +28,38 @@ type ListMeta struct {
ResourceVersion string
}
func (om *ObjectMeta) registerLabelsAndAnnotations(prefix string, m map[string]string) {
for _, lb := range om.Labels {
m[discoveryutils.SanitizeLabelName(prefix+"_label_"+lb.Name)] = lb.Value
m[discoveryutils.SanitizeLabelName(prefix+"_labelpresent_"+lb.Name)] = "true"
func (om *ObjectMeta) registerLabelsAndAnnotations(prefix string, m *promutils.Labels) {
bb := bbPool.Get()
b := bb.B
for _, lb := range om.Labels.GetLabels() {
b = appendThreeStrings(b[:0], prefix, "_label_", lb.Name)
labelName := bytesutil.ToUnsafeString(b)
m.Add(discoveryutils.SanitizeLabelName(labelName), lb.Value)
b = appendThreeStrings(b[:0], prefix, "_labelpresent_", lb.Name)
labelName = bytesutil.ToUnsafeString(b)
m.Add(discoveryutils.SanitizeLabelName(labelName), "true")
}
for _, a := range om.Annotations {
m[discoveryutils.SanitizeLabelName(prefix+"_annotation_"+a.Name)] = a.Value
m[discoveryutils.SanitizeLabelName(prefix+"_annotationpresent_"+a.Name)] = "true"
for _, a := range om.Annotations.GetLabels() {
b = appendThreeStrings(b[:0], prefix, "_annotation_", a.Name)
labelName := bytesutil.ToUnsafeString(b)
m.Add(discoveryutils.SanitizeLabelName(labelName), a.Value)
b = appendThreeStrings(b[:0], prefix, "_annotationpresent_", a.Name)
labelName = bytesutil.ToUnsafeString(b)
m.Add(discoveryutils.SanitizeLabelName(labelName), "true")
}
bb.B = b
bbPool.Put(bb)
}
var bbPool bytesutil.ByteBufferPool
func appendThreeStrings(dst []byte, a, b, c string) []byte {
dst = append(dst, a...)
dst = append(dst, b...)
dst = append(dst, c...)
return dst
}
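The buffer juggling above avoids one string allocation per generated label name: appendThreeStrings builds the name inside a pooled []byte and bytesutil.ToUnsafeString views it without copying. The view is only valid until b is appended to again, so it is consumed immediately; this is safe only if SanitizeLabelName returns a stable (copied or interned) string, which the code above assumes. Condensed restatement of one iteration:

b = appendThreeStrings(b[:0], prefix, "_label_", lb.Name)
// Zero-copy view into b; invalidated by the next append into b.
unsafeName := bytesutil.ToUnsafeString(b)
m.Add(discoveryutils.SanitizeLabelName(unsafeName), lb.Value)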
// OwnerReference represents OwnerReference from k8s API.

View file

@ -7,6 +7,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func (eps *Endpoints) key() string {
@ -91,13 +92,13 @@ type EndpointPort struct {
// getTargetLabels returns labels for each endpoint in eps.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#endpoints
func (eps *Endpoints) getTargetLabels(gw *groupWatcher) []map[string]string {
func (eps *Endpoints) getTargetLabels(gw *groupWatcher) []*promutils.Labels {
var svc *Service
if o := gw.getObjectByRoleLocked("service", eps.Metadata.Namespace, eps.Metadata.Name); o != nil {
svc = o.(*Service)
}
podPortsSeen := make(map[*Pod][]int)
var ms []map[string]string
var ms []*promutils.Labels
for _, ess := range eps.Subsets {
for _, epp := range ess.Ports {
ms = appendEndpointLabelsForAddresses(ms, gw, podPortsSeen, eps, ess.Addresses, epp, svc, "true")
@ -106,7 +107,7 @@ func (eps *Endpoints) getTargetLabels(gw *groupWatcher) []map[string]string {
}
// See https://kubernetes.io/docs/reference/labels-annotations-taints/#endpoints-kubernetes-io-over-capacity
// and https://github.com/kubernetes/kubernetes/pull/99975
switch eps.Metadata.Annotations.GetByName("endpoints.kubernetes.io/over-capacity") {
switch eps.Metadata.Annotations.Get("endpoints.kubernetes.io/over-capacity") {
case "truncated":
logger.Warnf(`the number of targets for "role: endpoints" %q exceeds 1000 and has been truncated; please use "role: endpointslice" instead`, eps.Metadata.key())
case "warning":
@ -129,14 +130,14 @@ func (eps *Endpoints) getTargetLabels(gw *groupWatcher) []map[string]string {
continue
}
addr := discoveryutils.JoinHostPort(p.Status.PodIP, cp.ContainerPort)
m := map[string]string{
"__address__": addr,
}
m := promutils.GetLabels()
m.Add("__address__", addr)
p.appendCommonLabels(m, gw)
p.appendContainerLabels(m, c, &cp)
if svc != nil {
svc.appendCommonLabels(m)
}
m.RemoveDuplicates()
ms = append(ms, m)
}
}
@ -144,8 +145,8 @@ func (eps *Endpoints) getTargetLabels(gw *groupWatcher) []map[string]string {
return ms
}
func appendEndpointLabelsForAddresses(ms []map[string]string, gw *groupWatcher, podPortsSeen map[*Pod][]int, eps *Endpoints,
eas []EndpointAddress, epp EndpointPort, svc *Service, ready string) []map[string]string {
func appendEndpointLabelsForAddresses(ms []*promutils.Labels, gw *groupWatcher, podPortsSeen map[*Pod][]int, eps *Endpoints,
eas []EndpointAddress, epp EndpointPort, svc *Service, ready string) []*promutils.Labels {
for _, ea := range eas {
var p *Pod
if ea.TargetRef.Name != "" {
@ -154,13 +155,14 @@ func appendEndpointLabelsForAddresses(ms []map[string]string, gw *groupWatcher,
}
}
m := getEndpointLabelsForAddressAndPort(gw, podPortsSeen, eps, ea, epp, p, svc, ready)
m.RemoveDuplicates()
ms = append(ms, m)
}
return ms
}
func getEndpointLabelsForAddressAndPort(gw *groupWatcher, podPortsSeen map[*Pod][]int, eps *Endpoints, ea EndpointAddress, epp EndpointPort,
p *Pod, svc *Service, ready string) map[string]string {
p *Pod, svc *Service, ready string) *promutils.Labels {
m := getEndpointLabels(eps.Metadata, ea, epp, ready)
if svc != nil {
svc.appendCommonLabels(m)
@ -188,26 +190,24 @@ func getEndpointLabelsForAddressAndPort(gw *groupWatcher, podPortsSeen map[*Pod]
return m
}
func getEndpointLabels(om ObjectMeta, ea EndpointAddress, epp EndpointPort, ready string) map[string]string {
func getEndpointLabels(om ObjectMeta, ea EndpointAddress, epp EndpointPort, ready string) *promutils.Labels {
addr := discoveryutils.JoinHostPort(ea.IP, epp.Port)
m := map[string]string{
"__address__": addr,
"__meta_kubernetes_namespace": om.Namespace,
"__meta_kubernetes_endpoints_name": om.Name,
"__meta_kubernetes_endpoint_ready": ready,
"__meta_kubernetes_endpoint_port_name": epp.Name,
"__meta_kubernetes_endpoint_port_protocol": epp.Protocol,
}
m := promutils.GetLabels()
m.Add("__address__", addr)
m.Add("__meta_kubernetes_namespace", om.Namespace)
m.Add("__meta_kubernetes_endpoints_name", om.Name)
m.Add("__meta_kubernetes_endpoint_ready", ready)
m.Add("__meta_kubernetes_endpoint_port_name", epp.Name)
m.Add("__meta_kubernetes_endpoint_port_protocol", epp.Protocol)
if ea.TargetRef.Kind != "" {
m["__meta_kubernetes_endpoint_address_target_kind"] = ea.TargetRef.Kind
m["__meta_kubernetes_endpoint_address_target_name"] = ea.TargetRef.Name
m.Add("__meta_kubernetes_endpoint_address_target_kind", ea.TargetRef.Kind)
m.Add("__meta_kubernetes_endpoint_address_target_name", ea.TargetRef.Name)
}
if ea.NodeName != "" {
m["__meta_kubernetes_endpoint_node_name"] = ea.NodeName
m.Add("__meta_kubernetes_endpoint_node_name", ea.NodeName)
}
if ea.Hostname != "" {
m["__meta_kubernetes_endpoint_hostname"] = ea.Hostname
m.Add("__meta_kubernetes_endpoint_hostname", ea.Hostname)
}
return m
}

View file

@ -4,8 +4,7 @@ import (
"bytes"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestParseEndpointsListFailure(t *testing.T) {
@@ -91,8 +90,8 @@ func TestParseEndpointsListSuccess(t *testing.T) {
}
sortedLabelss := getSortedLabelss(objectsByKey)
expectedLabelss := [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
expectedLabelss := []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "172.17.0.2:8443",
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
"__meta_kubernetes_endpoint_address_target_name": "coredns-6955765f44-lnp6t",
@@ -119,7 +118,7 @@ func TestGetEndpointsLabels(t *testing.T) {
containerPorts map[string][]ContainerPort
endpointPorts []EndpointPort
}
f := func(t *testing.T, args testArgs, wantLabels [][]prompbmarshal.Label) {
f := func(t *testing.T, args testArgs, wantLabels []*promutils.Labels) {
t.Helper()
eps := Endpoints{
Metadata: ObjectMeta{
@@ -175,12 +174,7 @@ func TestGetEndpointsLabels(t *testing.T) {
}
node := Node{
Metadata: ObjectMeta{
Labels: []prompbmarshal.Label{
{
Name: "node-label",
Value: "xyz",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{"node-label": "xyz"}),
},
}
for cn, ports := range args.containerPorts {
@@ -212,10 +206,11 @@ func TestGetEndpointsLabels(t *testing.T) {
},
}
gw.attachNodeMetadata = true
var sortedLabelss [][]prompbmarshal.Label
var sortedLabelss []*promutils.Labels
gotLabels := eps.getTargetLabels(&gw)
for _, lbs := range gotLabels {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(lbs))
lbs.Sort()
sortedLabelss = append(sortedLabelss, lbs)
}
if !areEqualLabelss(sortedLabelss, wantLabels) {
t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, wantLabels)
@@ -231,8 +226,8 @@ func TestGetEndpointsLabels(t *testing.T) {
Protocol: "foobar",
},
},
}, [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
}, []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.13.15.15:8081",
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
"__meta_kubernetes_endpoint_address_target_name": "test-pod",
@@ -272,8 +267,8 @@ func TestGetEndpointsLabels(t *testing.T) {
Protocol: "https",
},
},
}, [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
}, []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.13.15.15:8081",
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
"__meta_kubernetes_endpoint_address_target_name": "test-pod",
@@ -296,7 +291,7 @@ func TestGetEndpointsLabels(t *testing.T) {
"__meta_kubernetes_service_name": "test-eps",
"__meta_kubernetes_service_type": "service-type",
}),
discoveryutils.GetSortedLabels(map[string]string{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "192.168.15.1:8428",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_node_label_node_label": "xyz",
@@ -335,8 +330,8 @@ func TestGetEndpointsLabels(t *testing.T) {
Protocol: "xabc",
},
},
}, [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
}, []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.13.15.15:8428",
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
"__meta_kubernetes_endpoint_address_target_name": "test-pod",

View file

@@ -7,6 +7,7 @@ import (
"strconv"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func (eps *EndpointSlice) key() string {
@@ -37,16 +38,16 @@ func parseEndpointSlice(data []byte) (object, error) {
// getTargetLabels returns labels for eps.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#endpointslices
func (eps *EndpointSlice) getTargetLabels(gw *groupWatcher) []map[string]string {
func (eps *EndpointSlice) getTargetLabels(gw *groupWatcher) []*promutils.Labels {
// The associated service name is stored in kubernetes.io/service-name label.
// See https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetesioservice-name
svcName := eps.Metadata.Labels.GetByName("kubernetes.io/service-name")
svcName := eps.Metadata.Labels.Get("kubernetes.io/service-name")
var svc *Service
if o := gw.getObjectByRoleLocked("service", eps.Metadata.Namespace, svcName); o != nil {
svc = o.(*Service)
}
podPortsSeen := make(map[*Pod][]int)
var ms []map[string]string
var ms []*promutils.Labels
for _, ess := range eps.Endpoints {
var p *Pod
if o := gw.getObjectByRoleLocked("pod", ess.TargetRef.Namespace, ess.TargetRef.Name); o != nil {
@@ -55,6 +56,7 @@ func (eps *EndpointSlice) getTargetLabels(gw *groupWatcher) []map[string]string
for _, epp := range eps.Ports {
for _, addr := range ess.Addresses {
m := getEndpointSliceLabelsForAddressAndPort(gw, podPortsSeen, addr, eps, ess, epp, p, svc)
m.RemoveDuplicates()
ms = append(ms, m)
}
@@ -77,14 +79,14 @@ func (eps *EndpointSlice) getTargetLabels(gw *groupWatcher) []map[string]string
continue
}
addr := discoveryutils.JoinHostPort(p.Status.PodIP, cp.ContainerPort)
m := map[string]string{
"__address__": addr,
}
m := promutils.GetLabels()
m.Add("__address__", addr)
p.appendCommonLabels(m, gw)
p.appendContainerLabels(m, c, &cp)
if svc != nil {
svc.appendCommonLabels(m)
}
m.RemoveDuplicates()
ms = append(ms, m)
}
}
@@ -98,7 +100,7 @@ func (eps *EndpointSlice) getTargetLabels(gw *groupWatcher) []map[string]string
// p appended to seen Ports
// if TargetRef matches
func getEndpointSliceLabelsForAddressAndPort(gw *groupWatcher, podPortsSeen map[*Pod][]int, addr string, eps *EndpointSlice, ea Endpoint, epp EndpointPort,
p *Pod, svc *Service) map[string]string {
p *Pod, svc *Service) *promutils.Labels {
m := getEndpointSliceLabels(eps, addr, ea, epp)
if svc != nil {
svc.appendCommonLabels(m)
@@ -128,31 +130,30 @@ func getEndpointSliceLabelsForAddressAndPort(gw *groupWatcher, podPortsSeen map[
}
// getEndpointSliceLabels builds labels for the given EndpointSlice
func getEndpointSliceLabels(eps *EndpointSlice, addr string, ea Endpoint, epp EndpointPort) map[string]string {
func getEndpointSliceLabels(eps *EndpointSlice, addr string, ea Endpoint, epp EndpointPort) *promutils.Labels {
addr = discoveryutils.JoinHostPort(addr, epp.Port)
m := map[string]string{
"__address__": addr,
"__meta_kubernetes_namespace": eps.Metadata.Namespace,
"__meta_kubernetes_endpointslice_name": eps.Metadata.Name,
"__meta_kubernetes_endpointslice_address_type": eps.AddressType,
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": strconv.FormatBool(ea.Conditions.Ready),
"__meta_kubernetes_endpointslice_port_name": epp.Name,
"__meta_kubernetes_endpointslice_port_protocol": epp.Protocol,
"__meta_kubernetes_endpointslice_port": strconv.Itoa(epp.Port),
}
m := promutils.GetLabels()
m.Add("__address__", addr)
m.Add("__meta_kubernetes_namespace", eps.Metadata.Namespace)
m.Add("__meta_kubernetes_endpointslice_name", eps.Metadata.Name)
m.Add("__meta_kubernetes_endpointslice_address_type", eps.AddressType)
m.Add("__meta_kubernetes_endpointslice_endpoint_conditions_ready", strconv.FormatBool(ea.Conditions.Ready))
m.Add("__meta_kubernetes_endpointslice_port_name", epp.Name)
m.Add("__meta_kubernetes_endpointslice_port_protocol", epp.Protocol)
m.Add("__meta_kubernetes_endpointslice_port", strconv.Itoa(epp.Port))
if epp.AppProtocol != "" {
m["__meta_kubernetes_endpointslice_port_app_protocol"] = epp.AppProtocol
m.Add("__meta_kubernetes_endpointslice_port_app_protocol", epp.AppProtocol)
}
if ea.TargetRef.Kind != "" {
m["__meta_kubernetes_endpointslice_address_target_kind"] = ea.TargetRef.Kind
m["__meta_kubernetes_endpointslice_address_target_name"] = ea.TargetRef.Name
m.Add("__meta_kubernetes_endpointslice_address_target_kind", ea.TargetRef.Kind)
m.Add("__meta_kubernetes_endpointslice_address_target_name", ea.TargetRef.Name)
}
if ea.Hostname != "" {
m["__meta_kubernetes_endpointslice_endpoint_hostname"] = ea.Hostname
m.Add("__meta_kubernetes_endpointslice_endpoint_hostname", ea.Hostname)
}
for k, v := range ea.Topology {
m[discoveryutils.SanitizeLabelName("__meta_kubernetes_endpointslice_endpoint_topology_"+k)] = v
m[discoveryutils.SanitizeLabelName("__meta_kubernetes_endpointslice_endpoint_topology_present_"+k)] = "true"
m.Add(discoveryutils.SanitizeLabelName("__meta_kubernetes_endpointslice_endpoint_topology_"+k), v)
m.Add(discoveryutils.SanitizeLabelName("__meta_kubernetes_endpointslice_endpoint_topology_present_"+k), "true")
}
return m
}
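
Each topology entry yields a value label plus a "present" marker via Add. A hedged illustration with a hypothetical topology map, assuming discoveryutils.SanitizeLabelName rewrites characters such as '/', '.' and '-' to '_':

topology := map[string]string{"kubernetes.io/hostname": "node-1"} // stand-in for ea.Topology
for k, v := range topology {
	m.Add(discoveryutils.SanitizeLabelName("__meta_kubernetes_endpointslice_endpoint_topology_"+k), v)
	m.Add(discoveryutils.SanitizeLabelName("__meta_kubernetes_endpointslice_endpoint_topology_present_"+k), "true")
}
// expected labels:
//   __meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname="node-1"
//   __meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname="true"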

View file

@@ -4,8 +4,7 @@ import (
"bytes"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestParseEndpointSliceListFail(t *testing.T) {
@@ -165,8 +164,8 @@ func TestParseEndpointSliceListSuccess(t *testing.T) {
t.Fatalf("unexpected resource version; got %s; want %s", meta.ResourceVersion, expectedResourceVersion)
}
sortedLabelss := getSortedLabelss(objectsByKey)
expectedLabelss := [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
expectedLabelss := []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.244.0.3:53",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "coredns-66bff467f8-z8czk",
@@ -186,7 +185,7 @@ func TestParseEndpointSliceListSuccess(t *testing.T) {
"__meta_kubernetes_endpointslice_port_protocol": "UDP",
"__meta_kubernetes_namespace": "kube-system",
}),
discoveryutils.GetSortedLabels(map[string]string{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.244.0.3:9153",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "coredns-66bff467f8-z8czk",
@@ -206,7 +205,7 @@ func TestParseEndpointSliceListSuccess(t *testing.T) {
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_namespace": "kube-system",
}),
discoveryutils.GetSortedLabels(map[string]string{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "172.18.0.2:6443",
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
@@ -230,13 +229,13 @@ func TestGetEndpointsliceLabels(t *testing.T) {
containerPorts map[string][]ContainerPort
endpointPorts []EndpointPort
}
f := func(t *testing.T, args testArgs, wantLabels [][]prompbmarshal.Label) {
f := func(t *testing.T, args testArgs, wantLabels []*promutils.Labels) {
t.Helper()
eps := EndpointSlice{
Metadata: ObjectMeta{
Name: "test-eps",
Namespace: "default",
Labels: discoveryutils.GetSortedLabels(map[string]string{
Labels: promutils.NewLabelsFromMap(map[string]string{
"kubernetes.io/service-name": "test-svc",
}),
},
@@ -295,12 +294,7 @@ func TestGetEndpointsliceLabels(t *testing.T) {
}
node := Node{
Metadata: ObjectMeta{
Labels: []prompbmarshal.Label{
{
Name: "node-label",
Value: "xyz",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{"node-label": "xyz"}),
},
}
for cn, ports := range args.containerPorts {
@@ -332,10 +326,11 @@ func TestGetEndpointsliceLabels(t *testing.T) {
},
}
gw.attachNodeMetadata = true
var sortedLabelss [][]prompbmarshal.Label
var sortedLabelss []*promutils.Labels
gotLabels := eps.getTargetLabels(&gw)
for _, lbs := range gotLabels {
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(lbs))
lbs.Sort()
sortedLabelss = append(sortedLabelss, lbs)
}
if !areEqualLabelss(sortedLabelss, wantLabels) {
t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, wantLabels)
@@ -351,8 +346,8 @@ func TestGetEndpointsliceLabels(t *testing.T) {
Protocol: "foobar",
},
},
}, [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
}, []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.13.15.15:8081",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "test-pod",
@@ -399,8 +394,8 @@ func TestGetEndpointsliceLabels(t *testing.T) {
Protocol: "https",
},
},
}, [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
}, []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.13.15.15:8081",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "test-pod",
@@ -430,7 +425,7 @@ func TestGetEndpointsliceLabels(t *testing.T) {
"__meta_kubernetes_service_name": "test-svc",
"__meta_kubernetes_service_type": "service-type",
}),
discoveryutils.GetSortedLabels(map[string]string{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "192.168.15.1:8428",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_node_label_node_label": "xyz",
@@ -469,8 +464,8 @@ func TestGetEndpointsliceLabels(t *testing.T) {
Protocol: "xabc",
},
},
}, [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
}, []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "10.13.15.15:8428",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "test-pod",

View file

@@ -5,6 +5,8 @@ import (
"fmt"
"io"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func (ig *Ingress) key() string {
@@ -89,8 +91,8 @@ type HTTPIngressPath struct {
// getTargetLabels returns labels for ig.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ingress
func (ig *Ingress) getTargetLabels(gw *groupWatcher) []map[string]string {
var ms []map[string]string
func (ig *Ingress) getTargetLabels(gw *groupWatcher) []*promutils.Labels {
var ms []*promutils.Labels
for _, r := range ig.Spec.Rules {
paths := getIngressRulePaths(r.HTTP.Paths)
scheme := getSchemeForHost(r.Host, ig.Spec.TLS)
@@ -129,16 +131,15 @@ func matchesHostPattern(pattern, host string) bool {
return pattern == host
}
func getLabelsForIngressPath(ig *Ingress, scheme, host, path string) map[string]string {
m := map[string]string{
"__address__": host,
"__meta_kubernetes_namespace": ig.Metadata.Namespace,
"__meta_kubernetes_ingress_name": ig.Metadata.Name,
"__meta_kubernetes_ingress_scheme": scheme,
"__meta_kubernetes_ingress_host": host,
"__meta_kubernetes_ingress_path": path,
"__meta_kubernetes_ingress_class_name": ig.Spec.IngressClassName,
}
func getLabelsForIngressPath(ig *Ingress, scheme, host, path string) *promutils.Labels {
m := promutils.GetLabels()
m.Add("__address__", host)
m.Add("__meta_kubernetes_namespace", ig.Metadata.Namespace)
m.Add("__meta_kubernetes_ingress_name", ig.Metadata.Name)
m.Add("__meta_kubernetes_ingress_scheme", scheme)
m.Add("__meta_kubernetes_ingress_host", host)
m.Add("__meta_kubernetes_ingress_path", path)
m.Add("__meta_kubernetes_ingress_class_name", ig.Spec.IngressClassName)
ig.Metadata.registerLabelsAndAnnotations("__meta_kubernetes_ingress", m)
return m
}

View file

@@ -4,8 +4,7 @@ import (
"bytes"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestMatchesHostPattern(t *testing.T) {
@@ -103,8 +102,8 @@ func TestParseIngressListSuccess(t *testing.T) {
t.Fatalf("unexpected resource version; got %s; want %s", meta.ResourceVersion, expectedResourceVersion)
}
sortedLabelss := getSortedLabelss(objectsByKey)
expectedLabelss := [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
expectedLabelss := []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "foobar",
"__meta_kubernetes_ingress_annotation_kubectl_kubernetes_io_last_applied_configuration": `{"apiVersion":"networking.k8s.io/v1","kind":"Ingress","metadata":{"annotations":{},"name":"test-ingress","namespace":"default"},"spec":{"backend":{"serviceName":"testsvc","servicePort":80}}}` + "\n",
"__meta_kubernetes_ingress_annotationpresent_kubectl_kubernetes_io_last_applied_configuration": "true",

View file

@@ -6,6 +6,7 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
)
@@ -69,7 +70,7 @@ type Selector struct {
}
// ScrapeWorkConstructorFunc must construct a ScrapeWork object for the given metaLabels.
type ScrapeWorkConstructorFunc func(metaLabels map[string]string) interface{}
type ScrapeWorkConstructorFunc func(metaLabels *promutils.Labels) interface{}
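
Implementations of this callback now receive *promutils.Labels instead of a plain map. A minimal hedged sketch of a conforming implementation, using only the Get accessor seen elsewhere in this diff (the nil-skipping convention is an assumption; the concrete return value is opaque to this package, hence interface{}):

var newScrapeWork ScrapeWorkConstructorFunc = func(metaLabels *promutils.Labels) interface{} {
	addr := metaLabels.Get("__address__") // presumably returns "" for a missing label
	if addr == "" {
		return nil // hypothetical convention: the caller drops nil results
	}
	return addr // a real constructor would build a ScrapeWork-like object here
}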
// GetScrapeWorkObjects returns ScrapeWork objects for the given sdc.
//

View file

@@ -6,6 +6,7 @@ import (
"io"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// getNodesLabels returns labels for k8s nodes obtained from the given cfg
@@ -84,19 +85,18 @@ type NodeDaemonEndpoints struct {
// getTargetLabels returns labels for the given n.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#node
func (n *Node) getTargetLabels(gw *groupWatcher) []map[string]string {
func (n *Node) getTargetLabels(gw *groupWatcher) []*promutils.Labels {
addr := getNodeAddr(n.Status.Addresses)
if len(addr) == 0 {
// Skip node without address
return nil
}
addr = discoveryutils.JoinHostPort(addr, n.Status.DaemonEndpoints.KubeletEndpoint.Port)
m := map[string]string{
"__address__": addr,
"instance": n.Metadata.Name,
"__meta_kubernetes_node_name": n.Metadata.Name,
"__meta_kubernetes_node_provider_id": n.Spec.ProviderID,
}
m := promutils.GetLabels()
m.Add("__address__", addr)
m.Add("instance", n.Metadata.Name)
m.Add("__meta_kubernetes_node_name", n.Metadata.Name)
m.Add("__meta_kubernetes_node_provider_id", n.Spec.ProviderID)
n.Metadata.registerLabelsAndAnnotations("__meta_kubernetes_node", m)
addrTypesUsed := make(map[string]bool, len(n.Status.Addresses))
for _, a := range n.Status.Addresses {
@@ -104,9 +104,9 @@ func (n *Node) getTargetLabels(gw *groupWatcher) []map[string]string {
continue
}
addrTypesUsed[a.Type] = true
m[discoveryutils.SanitizeLabelName("__meta_kubernetes_node_address_"+a.Type)] = a.Address
m.Add(discoveryutils.SanitizeLabelName("__meta_kubernetes_node_address_"+a.Type), a.Address)
}
return []map[string]string{m}
return []*promutils.Labels{m}
}
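
For reference, a hedged sketch of what a caller observes for the node in the test below (values taken from the expected labels there; the full struct definitions sit outside this hunk):

labelss := n.getTargetLabels(gw) // n is a *Node, gw a *groupWatcher
m := labelss[0]                  // exactly one label set per node
_ = m.Get("__address__")         // "172.17.0.2:10250": node address joined with the kubelet port
_ = m.Get("instance")            // "m01": the node name doubles as the instance label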
func getNodeAddr(nas []NodeAddress) string {

View file

@@ -7,8 +7,7 @@ import (
"strconv"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestParseNodeListFailure(t *testing.T) {
@@ -242,8 +241,8 @@ func TestParseNodeListSuccess(t *testing.T) {
t.Fatalf("unexpected resource version; got %s; want %s", meta.ResourceVersion, expectedResourceVersion)
}
sortedLabelss := getSortedLabelss(objectsByKey)
expectedLabelss := [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
expectedLabelss := []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"instance": "m01",
"__address__": "172.17.0.2:10250",
"__meta_kubernetes_node_name": "m01",
@@ -288,7 +287,20 @@ func TestParseNodeListSuccess(t *testing.T) {
}
}
func getSortedLabelss(objectsByKey map[string]object) [][]prompbmarshal.Label {
func getSortedLabelss(objectsByKey map[string]object) []*promutils.Labels {
gw := newTestGroupWatcher()
var result []*promutils.Labels
for _, o := range objectsByKey {
labelss := o.getTargetLabels(gw)
for _, labels := range labelss {
labels.Sort()
result = append(result, labels)
}
}
return result
}
func newTestGroupWatcher() *groupWatcher {
var gw groupWatcher
gw.m = map[string]*urlWatcher{
"node": {
@@ -296,43 +308,31 @@ func getSortedLabelss(objectsByKey map[string]object) [][]prompbmarshal.Label {
objectsByKey: map[string]object{
"/test-node": &Node{
Metadata: ObjectMeta{
Labels: []prompbmarshal.Label{
{
Name: "node-label",
Value: "xyz",
},
},
Labels: promutils.NewLabelsFromMap(map[string]string{"node-label": "xyz"}),
},
},
},
},
}
gw.attachNodeMetadata = true
var result [][]prompbmarshal.Label
for _, o := range objectsByKey {
labelss := o.getTargetLabels(&gw)
for _, labels := range labelss {
result = append(result, discoveryutils.GetSortedLabels(labels))
}
}
return result
return &gw
}
func areEqualLabelss(a, b [][]prompbmarshal.Label) bool {
func areEqualLabelss(a, b []*promutils.Labels) bool {
sortLabelss(a)
sortLabelss(b)
return reflect.DeepEqual(a, b)
}
func sortLabelss(a [][]prompbmarshal.Label) {
func sortLabelss(a []*promutils.Labels) {
sort.Slice(a, func(i, j int) bool {
return marshalLabels(a[i]) < marshalLabels(a[j])
})
}
func marshalLabels(a []prompbmarshal.Label) string {
func marshalLabels(a *promutils.Labels) string {
var b []byte
for _, label := range a {
for _, label := range a.Labels {
b = strconv.AppendQuote(b, label.Name)
b = append(b, ':')
b = strconv.AppendQuote(b, label.Value)

View file

@@ -4,10 +4,11 @@ import (
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func (p *Pod) key() string {
@@ -98,18 +99,18 @@ type PodCondition struct {
// getTargetLabels returns labels for each port of the given p.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#pod
func (p *Pod) getTargetLabels(gw *groupWatcher) []map[string]string {
func (p *Pod) getTargetLabels(gw *groupWatcher) []*promutils.Labels {
if len(p.Status.PodIP) == 0 {
// Skip pod without IP
return nil
}
var ms []map[string]string
var ms []*promutils.Labels
ms = appendPodLabels(ms, gw, p, p.Spec.Containers, "false")
ms = appendPodLabels(ms, gw, p, p.Spec.InitContainers, "true")
return ms
}
func appendPodLabels(ms []map[string]string, gw *groupWatcher, p *Pod, cs []Container, isInit string) []map[string]string {
func appendPodLabels(ms []*promutils.Labels, gw *groupWatcher, p *Pod, cs []Container, isInit string) []*promutils.Labels {
for _, c := range cs {
for _, cp := range c.Ports {
ms = appendPodLabelsInternal(ms, gw, p, c, &cp, isInit)
@@ -121,53 +122,52 @@ func appendPodLabels(ms []map[string]string, gw *groupWatcher, p *Pod, cs []Cont
return ms
}
func appendPodLabelsInternal(ms []map[string]string, gw *groupWatcher, p *Pod, c Container, cp *ContainerPort, isInit string) []map[string]string {
func appendPodLabelsInternal(ms []*promutils.Labels, gw *groupWatcher, p *Pod, c Container, cp *ContainerPort, isInit string) []*promutils.Labels {
addr := p.Status.PodIP
if cp != nil {
addr = discoveryutils.JoinHostPort(addr, cp.ContainerPort)
}
m := map[string]string{
"__address__": addr,
"__meta_kubernetes_pod_container_init": isInit,
}
m := promutils.GetLabels()
m.Add("__address__", addr)
m.Add("__meta_kubernetes_pod_container_init", isInit)
p.appendCommonLabels(m, gw)
p.appendContainerLabels(m, c, cp)
return append(ms, m)
}
func (p *Pod) appendContainerLabels(m map[string]string, c Container, cp *ContainerPort) {
m["__meta_kubernetes_pod_container_image"] = c.Image
m["__meta_kubernetes_pod_container_name"] = c.Name
func (p *Pod) appendContainerLabels(m *promutils.Labels, c Container, cp *ContainerPort) {
m.Add("__meta_kubernetes_pod_container_image", c.Image)
m.Add("__meta_kubernetes_pod_container_name", c.Name)
if cp != nil {
m["__meta_kubernetes_pod_container_port_name"] = cp.Name
m["__meta_kubernetes_pod_container_port_number"] = strconv.Itoa(cp.ContainerPort)
m["__meta_kubernetes_pod_container_port_protocol"] = cp.Protocol
m.Add("__meta_kubernetes_pod_container_port_name", cp.Name)
m.Add("__meta_kubernetes_pod_container_port_number", bytesutil.Itoa(cp.ContainerPort))
m.Add("__meta_kubernetes_pod_container_port_protocol", cp.Protocol)
}
}
func (p *Pod) appendCommonLabels(m map[string]string, gw *groupWatcher) {
func (p *Pod) appendCommonLabels(m *promutils.Labels, gw *groupWatcher) {
if gw.attachNodeMetadata {
m["__meta_kubernetes_node_name"] = p.Spec.NodeName
m.Add("__meta_kubernetes_node_name", p.Spec.NodeName)
o := gw.getObjectByRoleLocked("node", p.Metadata.Namespace, p.Spec.NodeName)
if o != nil {
n := o.(*Node)
n.Metadata.registerLabelsAndAnnotations("__meta_kubernetes_node", m)
}
}
m["__meta_kubernetes_pod_name"] = p.Metadata.Name
m["__meta_kubernetes_pod_ip"] = p.Status.PodIP
m["__meta_kubernetes_pod_ready"] = getPodReadyStatus(p.Status.Conditions)
m["__meta_kubernetes_pod_phase"] = p.Status.Phase
m["__meta_kubernetes_pod_node_name"] = p.Spec.NodeName
m["__meta_kubernetes_pod_host_ip"] = p.Status.HostIP
m["__meta_kubernetes_pod_uid"] = p.Metadata.UID
m["__meta_kubernetes_namespace"] = p.Metadata.Namespace
m.Add("__meta_kubernetes_pod_name", p.Metadata.Name)
m.Add("__meta_kubernetes_pod_ip", p.Status.PodIP)
m.Add("__meta_kubernetes_pod_ready", getPodReadyStatus(p.Status.Conditions))
m.Add("__meta_kubernetes_pod_phase", p.Status.Phase)
m.Add("__meta_kubernetes_pod_node_name", p.Spec.NodeName)
m.Add("__meta_kubernetes_pod_host_ip", p.Status.HostIP)
m.Add("__meta_kubernetes_pod_uid", p.Metadata.UID)
m.Add("__meta_kubernetes_namespace", p.Metadata.Namespace)
if pc := getPodController(p.Metadata.OwnerReferences); pc != nil {
if pc.Kind != "" {
m["__meta_kubernetes_pod_controller_kind"] = pc.Kind
m.Add("__meta_kubernetes_pod_controller_kind", pc.Kind)
}
if pc.Name != "" {
m["__meta_kubernetes_pod_controller_name"] = pc.Name
m.Add("__meta_kubernetes_pod_controller_name", pc.Name)
}
}
p.Metadata.registerLabelsAndAnnotations("__meta_kubernetes_pod", m)
@@ -185,8 +185,10 @@ func getPodController(ors []OwnerReference) *OwnerReference {
func getPodReadyStatus(conds []PodCondition) string {
for _, c := range conds {
if c.Type == "Ready" {
return strings.ToLower(c.Status)
return toLowerConverter.Transform(c.Status)
}
}
return "unknown"
}
var toLowerConverter = bytesutil.NewFastStringTransformer(strings.ToLower)
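
strings.ToLower allocates on every call, while pod conditions cycle through a tiny set of status strings ("True", "False", "Unknown"), so memoizing the conversion pays off on hot scrape paths. A hedged usage sketch, assuming Transform returns a cached result for previously seen inputs:

var toUpper = bytesutil.NewFastStringTransformer(strings.ToUpper) // hypothetical sibling transformer
a := toUpper.Transform("ready") // first call computes "READY" and caches it
b := toUpper.Transform("ready") // later calls with the same input are served from the cache
_ = a == b                      // true; repeated inputs avoid repeated allocations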

View file

@@ -4,8 +4,7 @@ import (
"bytes"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestParsePodListFailure(t *testing.T) {
@@ -26,8 +25,7 @@ func TestParsePodListFailure(t *testing.T) {
f(`{"items":[{"metadata":{"labels":[1]}}]}`)
}
func TestParsePodListSuccess(t *testing.T) {
data := `
const testPodsList = `
{
"kind": "PodList",
"apiVersion": "v1",
@@ -229,7 +227,9 @@ func TestParsePodListSuccess(t *testing.T) {
]
}
`
r := bytes.NewBufferString(data)
func TestParsePodListSuccess(t *testing.T) {
r := bytes.NewBufferString(testPodsList)
objectsByKey, meta, err := parsePodList(r)
if err != nil {
t.Fatalf("unexpected error: %s", err)
@@ -239,8 +239,8 @@ func TestParsePodListSuccess(t *testing.T) {
t.Fatalf("unexpected resource version; got %s; want %s", meta.ResourceVersion, expectedResourceVersion)
}
sortedLabelss := getSortedLabelss(objectsByKey)
expectedLabelss := [][]prompbmarshal.Label{
discoveryutils.GetSortedLabels(map[string]string{
expectedLabelss := []*promutils.Labels{
promutils.NewLabelsFromMap(map[string]string{
"__address__": "172.17.0.2:1234",
"__meta_kubernetes_namespace": "kube-system",

View file

@@ -0,0 +1,35 @@
package kubernetes
import (
"bytes"
"fmt"
"testing"
)
func BenchmarkPodGetTargetLabels(b *testing.B) {
r := bytes.NewBufferString(testPodsList)
objectsByKey, _, err := parsePodList(r)
if err != nil {
panic(fmt.Errorf("BUG: unexpected error: %s", err))
}
var o object
for _, srcObject := range objectsByKey {
o = srcObject
break
}
if o == nil {
panic(fmt.Errorf("BUG: expecting at least a single pod object"))
}
gw := newTestGroupWatcher()
b.ReportAllocs()
b.SetBytes(1)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
labelss := o.getTargetLabels(gw)
if len(labelss) != 1 {
panic(fmt.Errorf("BUG: unexpected number of labelss returned: %d; want 1", len(labelss)))
}
putLabelssToPool(labelss)
}
})
}
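
If the package layout matches the upstream repo, the new benchmark can be exercised in isolation with something like:

go test -run='^$' -bench=BenchmarkPodGetTargetLabels -benchmem ./lib/promscrape/discovery/kubernetes/

putLabelssToPool presumably returns the produced label sets to the shared pool, so the loop measures steady-state reuse rather than per-iteration allocation; with b.SetBytes(1), the reported throughput effectively counts getTargetLabels calls per second.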

Some files were not shown because too many files have changed in this diff.