package promscrape

import (
	"encoding/json"
	"flag"
	"fmt"
	"net/url"
	"path/filepath"
	"slices"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fscore"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/azure"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consulagent"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/digitalocean"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/dns"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/docker"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/dockerswarm"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/ec2"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/eureka"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/gce"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/hetzner"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/http"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/kubernetes"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/kuma"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/nomad"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/openstack"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/ovhcloud"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/vultr"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/yandexcloud"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
	"github.com/VictoriaMetrics/metrics"
	"github.com/cespare/xxhash/v2"
	"gopkg.in/yaml.v2"
)

var (
	noStaleMarkers       = flag.Bool("promscrape.noStaleMarkers", false, "Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series")
	seriesLimitPerTarget = flag.Int("promscrape.seriesLimitPerTarget", 0, "Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent/#cardinality-limiter for more info")
	strictParse          = flag.Bool("promscrape.config.strictParse", true, "Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields")
	dryRun               = flag.Bool("promscrape.config.dryRun", false, "Checks -promscrape.config file for errors and unsupported fields and then exits. "+
		"Returns non-zero exit code on parsing errors and emits these errors to stderr. "+
		"See also -promscrape.config.strictParse command-line flag. "+
		"Pass -loggerLevel=ERROR if you don't need to see info messages in the output.")
	dropOriginalLabels = flag.Bool("promscrape.dropOriginalLabels", false, "Whether to drop original labels for scrape targets at /targets and /api/v1/targets pages. "+
		"This may be needed for reducing memory usage when original labels for a big number of scrape targets occupy big amounts of memory. "+
		"Note that this reduces debuggability for improper per-target relabeling configs")
	clusterMembersCount = flag.Int("promscrape.cluster.membersCount", 1, "The number of members in a cluster of scrapers. "+
		"Each member must have a unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . "+
		"Each member then scrapes roughly 1/N of all the targets. By default, cluster scraping is disabled, i.e. a single scraper scrapes all the targets. "+
		"See https://docs.victoriametrics.com/vmagent/#scraping-big-number-of-targets for more info")
	clusterMemberNum = flag.String("promscrape.cluster.memberNum", "0", "The number of the vmagent instance in the cluster of scrapers. "+
		"It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster. "+
		"Can be specified as the pod name of a Kubernetes StatefulSet - pod-name-Num, where Num is the numeric part of the pod name. "+
		"See also -promscrape.cluster.memberLabel . See https://docs.victoriametrics.com/vmagent/#scraping-big-number-of-targets for more info")
	clusterMemberLabel = flag.String("promscrape.cluster.memberLabel", "", "If non-empty, then the label with this name and the -promscrape.cluster.memberNum value "+
		"is added to all the scraped metrics. See https://docs.victoriametrics.com/vmagent/#scraping-big-number-of-targets for more info")
	clusterMemberURLTemplate = flag.String("promscrape.cluster.memberURLTemplate", "", "An optional template for the URL to access the vmagent instance with the given -promscrape.cluster.memberNum value. "+
		"Every %d occurrence in the template is substituted with -promscrape.cluster.memberNum at urls to vmagent instances responsible for scraping the given target "+
		"at the /service-discovery page. For example -promscrape.cluster.memberURLTemplate='http://vmagent-%d:8429/targets'. "+
		"See https://docs.victoriametrics.com/vmagent/#scraping-big-number-of-targets for more details")
	clusterReplicationFactor = flag.Int("promscrape.cluster.replicationFactor", 1, "The number of members in the cluster, which scrape the same targets. "+
		"If the replication factor is greater than 1, then the deduplication must be enabled at the remote storage side. "+
		"See https://docs.victoriametrics.com/vmagent/#scraping-big-number-of-targets for more info")
	clusterName = flag.String("promscrape.cluster.name", "", "Optional name of the cluster. If multiple vmagent clusters scrape the same targets, "+
		"then each cluster must have a unique name in order to properly de-duplicate samples received from these clusters. "+
		"See https://docs.victoriametrics.com/vmagent/#scraping-big-number-of-targets for more info")
	maxScrapeSize = flagutil.NewBytes("promscrape.maxScrapeSize", 16*1024*1024, "The maximum size of scrape response in bytes to process from Prometheus targets. "+
		"Bigger responses are rejected. See also the max_scrape_size option at https://docs.victoriametrics.com/sd_configs/#scrape_configs")
)
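
// Illustrative invocation wiring the cluster flags above together (a sketch of
// one possible setup, not the only valid combination):
//
//	vmagent -promscrape.config=prometheus.yml \
//		-promscrape.cluster.membersCount=3 \
//		-promscrape.cluster.memberNum=1 \
//		-promscrape.cluster.replicationFactor=2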

var clusterMemberID int

func mustInitClusterMemberID() {
	s := *clusterMemberNum
	// Special case for Kubernetes StatefulSet, where the pod name is formatted as some-pod-name-1 -
	// obtain memberNum from the last segment.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2359
	if idx := strings.LastIndexByte(s, '-'); idx >= 0 {
		s = s[idx+1:]
	}
	n, err := strconv.Atoi(s)
	if err != nil {
		logger.Fatalf("cannot parse -promscrape.cluster.memberNum=%q: %s", *clusterMemberNum, err)
	}
	if *clusterMembersCount < 1 {
		logger.Fatalf("-promscrape.cluster.membersCount can't be lower than 1: got %d", *clusterMembersCount)
	}
	if n < 0 || n >= *clusterMembersCount {
		logger.Fatalf("-promscrape.cluster.memberNum must be in the range [0..%d] according to -promscrape.cluster.membersCount=%d",
			*clusterMembersCount-1, *clusterMembersCount)
	}
	clusterMemberID = n
}
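
// A minimal sketch of the suffix-parsing rule implemented above (illustrative
// values):
//
//	s := "vmagent-3"
//	if idx := strings.LastIndexByte(s, '-'); idx >= 0 {
//		s = s[idx+1:] // "3"
//	}
//	n, _ := strconv.Atoi(s) // n == 3, so this pod becomes cluster member 3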

// Config represents essential parts from Prometheus config defined at https://prometheus.io/docs/prometheus/latest/configuration/configuration/
type Config struct {
	Global            GlobalConfig    `yaml:"global,omitempty"`
	ScrapeConfigs     []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
	ScrapeConfigFiles []string        `yaml:"scrape_config_files,omitempty"`

	// This is set to the directory from where the config has been loaded.
	baseDir string
}

func (cfg *Config) unmarshal(data []byte, isStrict bool) error {
	var err error
	data, err = envtemplate.ReplaceBytes(data)
	if err != nil {
		return fmt.Errorf("cannot expand environment variables: %w", err)
	}
	if isStrict {
		if err = yaml.UnmarshalStrict(data, cfg); err != nil {
			err = fmt.Errorf("%w; pass -promscrape.config.strictParse=false command-line flag for ignoring unknown fields in yaml config", err)
		}
	} else {
		err = yaml.Unmarshal(data, cfg)
	}
	return err
}
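
// Usage sketch, assuming the %{VAR} environment-variable syntax supported by
// lib/envtemplate (values are illustrative):
//
//	// with JOB=node exported in the environment:
//	var cfg Config
//	err := cfg.unmarshal([]byte("scrape_configs:\n- job_name: %{JOB}"), true)
//	// on success, cfg.ScrapeConfigs[0].JobName == "node"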

func (cfg *Config) marshal() []byte {
	data, err := yaml.Marshal(cfg)
	if err != nil {
		logger.Panicf("BUG: cannot marshal Config: %s", err)
	}
	return data
}

func (cfg *Config) mustStart() {
	startTime := time.Now()
	logger.Infof("starting service discovery routines...")
	for _, sc := range cfg.ScrapeConfigs {
		sc.mustStart(cfg.baseDir)
	}
	jobNames := cfg.getJobNames()
	tsmGlobal.registerJobNames(jobNames)
	logger.Infof("started %d service discovery routines in %.3f seconds", len(cfg.ScrapeConfigs), time.Since(startTime).Seconds())
}

// mustRestart restarts service discovery routines at cfg if they were changed compared to prevCfg.
//
// It returns true if at least a single scraper has been restarted.
func (cfg *Config) mustRestart(prevCfg *Config) bool {
	startTime := time.Now()
	prevScrapeCfgByName := make(map[string]*ScrapeConfig, len(prevCfg.ScrapeConfigs))
	for _, scPrev := range prevCfg.ScrapeConfigs {
		prevScrapeCfgByName[scPrev.JobName] = scPrev
	}

	// Restart all the scrape jobs on Global config change.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884
	needGlobalRestart := !areEqualGlobalConfigs(&cfg.Global, &prevCfg.Global)

	// Loop over the new jobs, start new ones and restart updated ones.
	var started, stopped, restarted int
	currentJobNames := make(map[string]struct{}, len(cfg.ScrapeConfigs))
	for i, sc := range cfg.ScrapeConfigs {
		currentJobNames[sc.JobName] = struct{}{}
		scPrev := prevScrapeCfgByName[sc.JobName]
		if scPrev == nil {
			// A new scrape config has appeared. Start it.
			sc.mustStart(cfg.baseDir)
			started++
			continue
		}
		if !needGlobalRestart && areEqualScrapeConfigs(scPrev, sc) {
			// The scrape config didn't change, so no need to restart it.
			// Use the reference to the previous job, so it could be stopped properly later.
			cfg.ScrapeConfigs[i] = scPrev
		} else {
			// The scrape config has been changed. Stop the previous scrape config and start the new one.
			scPrev.mustStop()
			sc.mustStart(cfg.baseDir)
			restarted++
		}
	}

	// Stop previous jobs which weren't found in the current configuration.
	for _, scPrev := range prevCfg.ScrapeConfigs {
		if _, ok := currentJobNames[scPrev.JobName]; !ok {
			scPrev.mustStop()
			stopped++
		}
	}
	jobNames := cfg.getJobNames()
	tsmGlobal.registerJobNames(jobNames)

	updated := started + stopped + restarted
	if updated == 0 {
		return false
	}
	logger.Infof("updated %d service discovery routines in %.3f seconds, started=%d, stopped=%d, restarted=%d",
		updated, time.Since(startTime).Seconds(), started, stopped, restarted)
	return true
}

func areEqualGlobalConfigs(a, b *GlobalConfig) bool {
	sa := a.marshalJSON()
	sb := b.marshalJSON()
	return string(sa) == string(sb)
}

func areEqualScrapeConfigs(a, b *ScrapeConfig) bool {
	sa := a.marshalJSON()
	sb := b.marshalJSON()
	return string(sa) == string(sb)
}

func (sc *ScrapeConfig) unmarshalJSON(data []byte) error {
	return json.Unmarshal(data, sc)
}

func (sc *ScrapeConfig) marshalJSON() []byte {
	data, err := json.Marshal(sc)
	if err != nil {
		logger.Panicf("BUG: cannot marshal ScrapeConfig: %s", err)
	}
	return data
}

func (gc *GlobalConfig) marshalJSON() []byte {
	data, err := json.Marshal(gc)
	if err != nil {
		logger.Panicf("BUG: cannot marshal GlobalConfig: %s", err)
	}
	return data
}

func (cfg *Config) mustStop() {
	startTime := time.Now()
	logger.Infof("stopping service discovery routines...")
	for _, sc := range cfg.ScrapeConfigs {
		sc.mustStop()
	}
	logger.Infof("stopped %d service discovery routines in %.3f seconds", len(cfg.ScrapeConfigs), time.Since(startTime).Seconds())
}

// getJobNames returns all the scrape job names from the cfg.
func (cfg *Config) getJobNames() []string {
	a := make([]string, 0, len(cfg.ScrapeConfigs))
	for _, sc := range cfg.ScrapeConfigs {
		a = append(a, sc.JobName)
	}
	return a
}

// GlobalConfig represents essential parts for `global` section of Prometheus config.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/
type GlobalConfig struct {
	ScrapeInterval *promutils.Duration `yaml:"scrape_interval,omitempty"`
	ScrapeTimeout  *promutils.Duration `yaml:"scrape_timeout,omitempty"`
	ExternalLabels *promutils.Labels   `yaml:"external_labels,omitempty"`
}
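
// A typical `global` section this struct accepts (illustrative values):
//
//	global:
//	  scrape_interval: 30s
//	  scrape_timeout: 10s
//	  external_labels:
//	    datacenter: dc1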

// ScrapeConfig represents essential parts for `scrape_config` section of Prometheus config.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
type ScrapeConfig struct {
	JobName        string              `yaml:"job_name"`
	ScrapeInterval *promutils.Duration `yaml:"scrape_interval,omitempty"`
	ScrapeTimeout  *promutils.Duration `yaml:"scrape_timeout,omitempty"`
	MaxScrapeSize  string              `yaml:"max_scrape_size,omitempty"`
	MetricsPath    string              `yaml:"metrics_path,omitempty"`
	HonorLabels    bool                `yaml:"honor_labels,omitempty"`

	// HonorTimestamps is set to false by default contrary to Prometheus, which sets it to true by default,
	// because of the issue with gaps on graphs when scraping cadvisor or similar targets, which export invalid timestamps.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4697#issuecomment-1654614799 for details.
	HonorTimestamps bool `yaml:"honor_timestamps,omitempty"`

	Scheme               string                      `yaml:"scheme,omitempty"`
	Params               map[string][]string         `yaml:"params,omitempty"`
	HTTPClientConfig     promauth.HTTPClientConfig   `yaml:",inline"`
	ProxyURL             *proxy.URL                  `yaml:"proxy_url,omitempty"`
	RelabelConfigs       []promrelabel.RelabelConfig `yaml:"relabel_configs,omitempty"`
	MetricRelabelConfigs []promrelabel.RelabelConfig `yaml:"metric_relabel_configs,omitempty"`
	SampleLimit          int                         `yaml:"sample_limit,omitempty"`

	// This silly option is needed for compatibility with Prometheus.
	// vmagent has supported the disable_compression option since the beginning, while Prometheus developers
	// decided to add the enable_compression option in https://github.com/prometheus/prometheus/pull/13166
	// That's why it needs to be supported too :(
	EnableCompression *bool `yaml:"enable_compression,omitempty"`

	AzureSDConfigs        []azure.SDConfig        `yaml:"azure_sd_configs,omitempty"`
	ConsulSDConfigs       []consul.SDConfig       `yaml:"consul_sd_configs,omitempty"`
	ConsulAgentSDConfigs  []consulagent.SDConfig  `yaml:"consulagent_sd_configs,omitempty"`
	DigitaloceanSDConfigs []digitalocean.SDConfig `yaml:"digitalocean_sd_configs,omitempty"`
	DNSSDConfigs          []dns.SDConfig          `yaml:"dns_sd_configs,omitempty"`
	DockerSDConfigs       []docker.SDConfig       `yaml:"docker_sd_configs,omitempty"`
	DockerSwarmSDConfigs  []dockerswarm.SDConfig  `yaml:"dockerswarm_sd_configs,omitempty"`
	EC2SDConfigs          []ec2.SDConfig          `yaml:"ec2_sd_configs,omitempty"`
	EurekaSDConfigs       []eureka.SDConfig       `yaml:"eureka_sd_configs,omitempty"`
	FileSDConfigs         []FileSDConfig          `yaml:"file_sd_configs,omitempty"`
	GCESDConfigs          []gce.SDConfig          `yaml:"gce_sd_configs,omitempty"`
	HetznerSDConfigs      []hetzner.SDConfig      `yaml:"hetzner_sd_configs,omitempty"`
	HTTPSDConfigs         []http.SDConfig         `yaml:"http_sd_configs,omitempty"`
	KubernetesSDConfigs   []kubernetes.SDConfig   `yaml:"kubernetes_sd_configs,omitempty"`
	KumaSDConfigs         []kuma.SDConfig         `yaml:"kuma_sd_configs,omitempty"`
	NomadSDConfigs        []nomad.SDConfig        `yaml:"nomad_sd_configs,omitempty"`
	OpenStackSDConfigs    []openstack.SDConfig    `yaml:"openstack_sd_configs,omitempty"`
	OVHCloudSDConfigs     []ovhcloud.SDConfig     `yaml:"ovhcloud_sd_configs,omitempty"`
	StaticConfigs         []StaticConfig          `yaml:"static_configs,omitempty"`
	VultrSDConfigs        []vultr.SDConfig        `yaml:"vultr_sd_configs,omitempty"`
	YandexCloudSDConfigs  []yandexcloud.SDConfig  `yaml:"yandexcloud_sd_configs,omitempty"`

	// These options are supported only by lib/promscrape.
	DisableCompression  bool                       `yaml:"disable_compression,omitempty"`
	DisableKeepAlive    bool                       `yaml:"disable_keepalive,omitempty"`
	StreamParse         bool                       `yaml:"stream_parse,omitempty"`
	ScrapeAlignInterval *promutils.Duration        `yaml:"scrape_align_interval,omitempty"`
	ScrapeOffset        *promutils.Duration        `yaml:"scrape_offset,omitempty"`
	SeriesLimit         *int                       `yaml:"series_limit,omitempty"`
	NoStaleMarkers      *bool                      `yaml:"no_stale_markers,omitempty"`
	ProxyClientConfig   promauth.ProxyClientConfig `yaml:",inline"`

	// This is set in loadConfig
	swc *scrapeWorkConfig
}
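
// A minimal scrape_config entry this struct accepts (illustrative values):
//
//	scrape_configs:
//	- job_name: node
//	  scrape_interval: 30s
//	  metrics_path: /metrics
//	  static_configs:
//	  - targets: ["localhost:9100"]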

func (sc *ScrapeConfig) mustStart(baseDir string) {
	swosFunc := func(metaLabels *promutils.Labels) any {
		target := metaLabels.Get("__address__")
		sw, err := sc.swc.getScrapeWork(target, nil, metaLabels)
		if err != nil {
			logger.Errorf("cannot create kubernetes_sd_config target %q for job_name=%s: %s", target, sc.swc.jobName, err)
			return nil
		}
		return sw
	}
	for i := range sc.KubernetesSDConfigs {
		sc.KubernetesSDConfigs[i].MustStart(baseDir, swosFunc)
	}
}

func (sc *ScrapeConfig) mustStop() {
	for i := range sc.AzureSDConfigs {
		sc.AzureSDConfigs[i].MustStop()
	}
	for i := range sc.ConsulSDConfigs {
		sc.ConsulSDConfigs[i].MustStop()
	}
	for i := range sc.ConsulAgentSDConfigs {
		sc.ConsulAgentSDConfigs[i].MustStop()
	}
	for i := range sc.DigitaloceanSDConfigs {
		sc.DigitaloceanSDConfigs[i].MustStop()
	}
	for i := range sc.DNSSDConfigs {
		sc.DNSSDConfigs[i].MustStop()
	}
	for i := range sc.DockerSDConfigs {
		sc.DockerSDConfigs[i].MustStop()
	}
	for i := range sc.DockerSwarmSDConfigs {
		sc.DockerSwarmSDConfigs[i].MustStop()
	}
	for i := range sc.EC2SDConfigs {
		sc.EC2SDConfigs[i].MustStop()
	}
	for i := range sc.EurekaSDConfigs {
		sc.EurekaSDConfigs[i].MustStop()
	}
	for i := range sc.GCESDConfigs {
		sc.GCESDConfigs[i].MustStop()
	}
	for i := range sc.HetznerSDConfigs {
		sc.HetznerSDConfigs[i].MustStop()
	}
	for i := range sc.HTTPSDConfigs {
		sc.HTTPSDConfigs[i].MustStop()
	}
	for i := range sc.KubernetesSDConfigs {
		sc.KubernetesSDConfigs[i].MustStop()
	}
	for i := range sc.KumaSDConfigs {
		sc.KumaSDConfigs[i].MustStop()
	}
	for i := range sc.NomadSDConfigs {
		sc.NomadSDConfigs[i].MustStop()
	}
	for i := range sc.OpenStackSDConfigs {
		sc.OpenStackSDConfigs[i].MustStop()
	}
	for i := range sc.OVHCloudSDConfigs {
		sc.OVHCloudSDConfigs[i].MustStop()
	}
	for i := range sc.VultrSDConfigs {
		sc.VultrSDConfigs[i].MustStop()
	}
	for i := range sc.YandexCloudSDConfigs {
		sc.YandexCloudSDConfigs[i].MustStop()
	}
}

// FileSDConfig represents file-based service discovery config.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config
type FileSDConfig struct {
	Files []string `yaml:"files"`
	// `refresh_interval` is ignored. See `-promscrape.fileSDCheckInterval`
}

// StaticConfig represents essential parts for `static_config` section of Prometheus config.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config
type StaticConfig struct {
	Targets []string          `yaml:"targets"`
	Labels  *promutils.Labels `yaml:"labels,omitempty"`
}

func loadStaticConfigs(path string) ([]StaticConfig, error) {
	data, err := fscore.ReadFileOrHTTP(path)
	if err != nil {
		return nil, fmt.Errorf("cannot read `static_configs` from %q: %w", path, err)
	}
	data, err = envtemplate.ReplaceBytes(data)
	if err != nil {
		return nil, fmt.Errorf("cannot expand environment vars in %q: %w", path, err)
	}
	var stcs []StaticConfig
	if err := yaml.UnmarshalStrict(data, &stcs); err != nil {
		return nil, fmt.Errorf("cannot unmarshal `static_configs` from %q: %w", path, err)
	}
	return stcs, nil
}
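
// The file consumed by loadStaticConfigs is a top-level YAML list of static
// configs, e.g. (illustrative values):
//
//	- targets: ["host1:9100", "host2:9100"]
//	  labels:
//	    env: prod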

// loadConfig loads Prometheus config from the given path.
func loadConfig(path string) (*Config, error) {
	data, err := fscore.ReadFileOrHTTP(path)
	if err != nil {
		return nil, fmt.Errorf("cannot read Prometheus config from %q: %w", path, err)
	}
	var c Config
	if err := c.parseData(data, path); err != nil {
		return nil, fmt.Errorf("cannot parse Prometheus config from %q: %w", path, err)
	}
	return &c, nil
}

func loadScrapeConfigFiles(baseDir string, scrapeConfigFiles []string, isStrict bool) ([]*ScrapeConfig, error) {
	var scrapeConfigs []*ScrapeConfig
	for _, filePath := range scrapeConfigFiles {
		filePath := fscore.GetFilepath(baseDir, filePath)
		paths := []string{filePath}
		if strings.Contains(filePath, "*") {
			ps, err := filepath.Glob(filePath)
			if err != nil {
				logger.Errorf("skipping pattern %q at `scrape_config_files` because of error: %s", filePath, err)
				continue
			}
			sort.Strings(ps)
			paths = ps
		}
		for _, path := range paths {
			data, err := fscore.ReadFileOrHTTP(path)
			if err != nil {
				logger.Errorf("skipping %q at `scrape_config_files` because of error: %s", path, err)
				continue
			}
			data, err = envtemplate.ReplaceBytes(data)
			if err != nil {
				logger.Errorf("skipping %q at `scrape_config_files` because of failure to expand environment vars: %s", path, err)
				continue
			}
			var scs []*ScrapeConfig
			if isStrict {
				if err = yaml.UnmarshalStrict(data, &scs); err != nil {
					return nil, fmt.Errorf("cannot unmarshal data from `scrape_config_files` %s: %w; "+
						"pass -promscrape.config.strictParse=false command-line flag for ignoring invalid scrape_config_files", path, err)
				}
			} else {
				if err = yaml.Unmarshal(data, &scs); err != nil {
					logger.Errorf("skipping %q at `scrape_config_files` because of failure to parse it: %s", path, err)
					continue
				}
			}
			scrapeConfigs = append(scrapeConfigs, scs...)
		}
	}
	return scrapeConfigs, nil
}
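
// The `scrape_config_files` section consumed by loadScrapeConfigFiles supports
// glob patterns, e.g. (illustrative paths; globbed files are loaded in sorted
// order, as implemented above):
//
//	scrape_config_files:
//	- configs/*.yml
//	- single_config.yml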

// IsDryRun returns true if -promscrape.config.dryRun command-line flag is set
func IsDryRun() bool {
	return *dryRun
}

func (cfg *Config) parseData(data []byte, path string) error {
	if err := cfg.unmarshal(data, *strictParse); err != nil {
		cfg.ScrapeConfigs = nil
		return fmt.Errorf("cannot unmarshal data: %w", err)
	}
	absPath, err := filepath.Abs(path)
	if err != nil {
		cfg.ScrapeConfigs = nil
		return fmt.Errorf("cannot obtain abs path for %q: %w", path, err)
	}
	cfg.baseDir = filepath.Dir(absPath)

	// Load cfg.ScrapeConfigFiles into cfg.ScrapeConfigs
	scs, err := loadScrapeConfigFiles(cfg.baseDir, cfg.ScrapeConfigFiles, *strictParse)
	if err != nil {
		return err
	}
	cfg.ScrapeConfigFiles = nil
	cfg.ScrapeConfigs = append(cfg.ScrapeConfigs, scs...)

	// Check that all the scrape configs have unique JobName
	m := make(map[string]struct{}, len(cfg.ScrapeConfigs))
	for _, sc := range cfg.ScrapeConfigs {
		jobName := sc.JobName
		if _, ok := m[jobName]; ok {
			cfg.ScrapeConfigs = nil
			return fmt.Errorf("duplicate `job_name` in `scrape_configs` loaded from %q: %q", path, jobName)
		}
		m[jobName] = struct{}{}
	}

	// Initialize cfg.ScrapeConfigs
	validScrapeConfigs := cfg.ScrapeConfigs[:0]
	for _, sc := range cfg.ScrapeConfigs {
		// Make a copy of sc in order to remove references to `data` memory.
		// This should prevent from memory leaks on config reload.
		sc = sc.clone()
		swc, err := getScrapeWorkConfig(sc, cfg.baseDir, &cfg.Global)
		if err != nil {
			logger.Errorf("skipping `scrape_config` for job_name=%s because of error: %s", sc.JobName, err)
			continue
		}
		sc.swc = swc
		validScrapeConfigs = append(validScrapeConfigs, sc)
	}
	tailScrapeConfigs := cfg.ScrapeConfigs[len(validScrapeConfigs):]
	cfg.ScrapeConfigs = validScrapeConfigs
	for i := range tailScrapeConfigs {
		tailScrapeConfigs[i] = nil
	}
	return nil
}

func (sc *ScrapeConfig) clone() *ScrapeConfig {
	data := sc.marshalJSON()
	var scCopy ScrapeConfig
	if err := scCopy.unmarshalJSON(data); err != nil {
		logger.Panicf("BUG: cannot unmarshal scrape config: %s", err)
	}
	return &scCopy
}

func getSWSByJob(sws []*ScrapeWork) map[string][]*ScrapeWork {
	m := make(map[string][]*ScrapeWork)
	for _, sw := range sws {
		m[sw.jobNameOriginal] = append(m[sw.jobNameOriginal], sw)
	}
	return m
}

// getAzureSDScrapeWork returns `azure_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getAzureSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.AzureSDConfigs {
			visitor(&sc.AzureSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "azure_sd_config", prev)
}

// getConsulSDScrapeWork returns `consul_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getConsulSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.ConsulSDConfigs {
			visitor(&sc.ConsulSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "consul_sd_config", prev)
}

// getConsulAgentSDScrapeWork returns `consulagent_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getConsulAgentSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.ConsulAgentSDConfigs {
			visitor(&sc.ConsulAgentSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "consulagent_sd_config", prev)
}

// getDigitalOceanDScrapeWork returns `digitalocean_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getDigitalOceanDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.DigitaloceanSDConfigs {
			visitor(&sc.DigitaloceanSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "digitalocean_sd_config", prev)
}

// getDNSSDScrapeWork returns `dns_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getDNSSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.DNSSDConfigs {
			visitor(&sc.DNSSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "dns_sd_config", prev)
}

// getDockerSDScrapeWork returns `docker_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getDockerSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.DockerSDConfigs {
			visitor(&sc.DockerSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "docker_sd_config", prev)
}

// getDockerSwarmSDScrapeWork returns `dockerswarm_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getDockerSwarmSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.DockerSwarmSDConfigs {
			visitor(&sc.DockerSwarmSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "dockerswarm_sd_config", prev)
}

// getEC2SDScrapeWork returns `ec2_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getEC2SDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.EC2SDConfigs {
			visitor(&sc.EC2SDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "ec2_sd_config", prev)
}

// getEurekaSDScrapeWork returns `eureka_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getEurekaSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.EurekaSDConfigs {
			visitor(&sc.EurekaSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "eureka_sd_config", prev)
}

// getFileSDScrapeWork returns `file_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getFileSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	dst := make([]*ScrapeWork, 0, len(prev))
	for _, sc := range cfg.ScrapeConfigs {
		for j := range sc.FileSDConfigs {
			sdc := &sc.FileSDConfigs[j]
			dst = sdc.appendScrapeWork(dst, cfg.baseDir, sc.swc)
		}
	}
	return dst
}

// getGCESDScrapeWork returns `gce_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getGCESDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.GCESDConfigs {
			visitor(&sc.GCESDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "gce_sd_config", prev)
}

// getHetznerSDScrapeWork returns `hetzner_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getHetznerSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.HetznerSDConfigs {
			visitor(&sc.HetznerSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "hetzner_sd_config", prev)
}

// getHTTPDScrapeWork returns `http_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getHTTPDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.HTTPSDConfigs {
			visitor(&sc.HTTPSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "http_sd_config", prev)
}

// getKubernetesSDScrapeWork returns `kubernetes_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getKubernetesSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	const discoveryType = "kubernetes_sd_config"
	swsPrevByJob := getSWSByJob(prev)
	dst := make([]*ScrapeWork, 0, len(prev))
	for _, sc := range cfg.ScrapeConfigs {
		dstLen := len(dst)
		ok := true
		for j := range sc.KubernetesSDConfigs {
			sdc := &sc.KubernetesSDConfigs[j]
			swos, err := sdc.GetScrapeWorkObjects()
			if err != nil {
				logger.Errorf("skipping %s targets for job_name=%s because of error: %s", discoveryType, sc.swc.jobName, err)
				ok = false
				break
			}
			for _, swo := range swos {
				sw := swo.(*ScrapeWork)
				dst = append(dst, sw)
			}
		}
		if !ok {
			dst = sc.appendPrevTargets(dst[:dstLen], swsPrevByJob, discoveryType)
		}
	}
	return dst
}

// getKumaSDScrapeWork returns `kuma_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getKumaSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.KumaSDConfigs {
			visitor(&sc.KumaSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "kuma_sd_config", prev)
}

// getNomadSDScrapeWork returns `nomad_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getNomadSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.NomadSDConfigs {
			visitor(&sc.NomadSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "nomad_sd_config", prev)
}

// getOpenStackSDScrapeWork returns `openstack_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getOpenStackSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.OpenStackSDConfigs {
			visitor(&sc.OpenStackSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "openstack_sd_config", prev)
}

// getOVHCloudSDScrapeWork returns `ovhcloud_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getOVHCloudSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.OVHCloudSDConfigs {
			visitor(&sc.OVHCloudSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "ovhcloud_sd_config", prev)
}

// getVultrSDScrapeWork returns `vultr_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getVultrSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.VultrSDConfigs {
			visitor(&sc.VultrSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "vultr_sd_config", prev)
}

// getYandexCloudSDScrapeWork returns `yandexcloud_sd_configs` ScrapeWork from cfg.
func (cfg *Config) getYandexCloudSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	visitConfigs := func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)) {
		for i := range sc.YandexCloudSDConfigs {
			visitor(&sc.YandexCloudSDConfigs[i])
		}
	}
	return cfg.getScrapeWorkGeneric(visitConfigs, "yandexcloud_sd_config", prev)
}

type targetLabelsGetter interface {
	GetLabels(baseDir string) ([]*promutils.Labels, error)
}

func (cfg *Config) getScrapeWorkGeneric(visitConfigs func(sc *ScrapeConfig, visitor func(sdc targetLabelsGetter)), discoveryType string, prev []*ScrapeWork) []*ScrapeWork {
	swsPrevByJob := getSWSByJob(prev)
	dst := make([]*ScrapeWork, 0, len(prev))
	for _, sc := range cfg.ScrapeConfigs {
		dstLen := len(dst)
		ok := true
		visitConfigs(sc, func(sdc targetLabelsGetter) {
			if !ok {
				return
			}
			targetLabels, err := sdc.GetLabels(cfg.baseDir)
			if err != nil {
				logger.Errorf("skipping %s targets for job_name=%s because of error: %s", discoveryType, sc.swc.jobName, err)
				ok = false
				return
			}
			dst = appendScrapeWorkForTargetLabels(dst, sc.swc, targetLabels, discoveryType)
		})
		if !ok {
			dst = sc.appendPrevTargets(dst[:dstLen], swsPrevByJob, discoveryType)
		}
	}
	return dst
}

func (sc *ScrapeConfig) appendPrevTargets(dst []*ScrapeWork, swsPrevByJob map[string][]*ScrapeWork, discoveryType string) []*ScrapeWork {
	swsPrev := swsPrevByJob[sc.swc.jobName]
	if len(swsPrev) == 0 {
		return dst
	}
	logger.Errorf("preserving the previous %s targets for job_name=%s because of temporary discovery error", discoveryType, sc.swc.jobName)
	return append(dst, swsPrev...)
}

// getStaticScrapeWork returns `static_configs` ScrapeWork from cfg.
func (cfg *Config) getStaticScrapeWork() []*ScrapeWork {
	var dst []*ScrapeWork
	for _, sc := range cfg.ScrapeConfigs {
		for j := range sc.StaticConfigs {
			stc := &sc.StaticConfigs[j]
			dst = stc.appendScrapeWork(dst, sc.swc, nil)
		}
	}
	return dst
}

func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConfig) (*scrapeWorkConfig, error) {
	jobName := sc.JobName
	if jobName == "" {
		return nil, fmt.Errorf("missing `job_name` field in `scrape_config`")
	}
	scrapeInterval := sc.ScrapeInterval.Duration()
	if scrapeInterval <= 0 {
		scrapeInterval = globalCfg.ScrapeInterval.Duration()
		if scrapeInterval <= 0 {
			scrapeInterval = defaultScrapeInterval
		}
	}
	scrapeTimeout := sc.ScrapeTimeout.Duration()
	if scrapeTimeout <= 0 {
		scrapeTimeout = globalCfg.ScrapeTimeout.Duration()
		if scrapeTimeout <= 0 {
			scrapeTimeout = defaultScrapeTimeout
		}
	}
	if scrapeTimeout > scrapeInterval {
		// Limit the `scrape_timeout` with `scrape_interval` like Prometheus does.
		// This guarantees that the scraper can miss only a single scrape if the target sometimes responds slowly.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1281#issuecomment-840538907
		scrapeTimeout = scrapeInterval
	}
	mss := maxScrapeSize.N
	if sc.MaxScrapeSize != "" {
		n, err := flagutil.ParseBytes(sc.MaxScrapeSize)
		if err != nil {
			return nil, fmt.Errorf("cannot parse `max_scrape_size` value %q for `job_name` %q: %w", sc.MaxScrapeSize, jobName, err)
		}
		if n > 0 {
			mss = n
		}
	}
	honorLabels := sc.HonorLabels
	honorTimestamps := sc.HonorTimestamps
	denyRedirects := false
	if sc.HTTPClientConfig.FollowRedirects != nil {
		denyRedirects = !*sc.HTTPClientConfig.FollowRedirects
	}
	metricsPath := sc.MetricsPath
	if metricsPath == "" {
		metricsPath = "/metrics"
	}
	scheme := strings.ToLower(sc.Scheme)
	if scheme == "" {
		scheme = "http"
	}
	if scheme != "http" && scheme != "https" {
		return nil, fmt.Errorf("unexpected `scheme` for `job_name` %q: %q; supported values: http or https", jobName, scheme)
	}
	params := sc.Params
	ac, err := sc.HTTPClientConfig.NewConfig(baseDir)
	if err != nil {
		return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %w", jobName, err)
	}
	proxyAC, err := sc.ProxyClientConfig.NewConfig(baseDir)
	if err != nil {
		return nil, fmt.Errorf("cannot parse proxy auth config for `job_name` %q: %w", jobName, err)
	}
	relabelConfigs, err := promrelabel.ParseRelabelConfigs(sc.RelabelConfigs)
	if err != nil {
		return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %w", jobName, err)
	}
	metricRelabelConfigs, err := promrelabel.ParseRelabelConfigs(sc.MetricRelabelConfigs)
	if err != nil {
		return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %w", jobName, err)
	}
	externalLabels := globalCfg.ExternalLabels
	noStaleTracking := *noStaleMarkers
	if sc.NoStaleMarkers != nil {
		noStaleTracking = *sc.NoStaleMarkers
	}
	seriesLimit := *seriesLimitPerTarget
	if sc.SeriesLimit != nil {
		seriesLimit = *sc.SeriesLimit
	}
	disableCompression := sc.DisableCompression
	if sc.EnableCompression != nil {
		disableCompression = !*sc.EnableCompression
	}
	swc := &scrapeWorkConfig{
		scrapeInterval:       scrapeInterval,
		scrapeIntervalString: scrapeInterval.String(),
		scrapeTimeout:        scrapeTimeout,
		scrapeTimeoutString:  scrapeTimeout.String(),
		maxScrapeSize:        mss,
		jobName:              jobName,
		metricsPath:          metricsPath,
		scheme:               scheme,
		params:               params,
		proxyURL:             sc.ProxyURL,
		proxyAuthConfig:      proxyAC,
		authConfig:           ac,
		honorLabels:          honorLabels,
		honorTimestamps:      honorTimestamps,
		denyRedirects:        denyRedirects,
		externalLabels:       externalLabels,
		relabelConfigs:       relabelConfigs,
		metricRelabelConfigs: metricRelabelConfigs,
		sampleLimit:          sc.SampleLimit,
		disableCompression:   disableCompression,
		disableKeepAlive:     sc.DisableKeepAlive,
		streamParse:          sc.StreamParse,
		scrapeAlignInterval:  sc.ScrapeAlignInterval.Duration(),
		scrapeOffset:         sc.ScrapeOffset.Duration(),
		seriesLimit:          seriesLimit,
		noStaleMarkers:       noStaleTracking,
	}
	return swc, nil
}
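
// The defaulting rules implemented above, as a config sketch (illustrative
// values; the max_scrape_size suffix is assumed to follow the byte syntax
// accepted by lib/flagutil):
//
//	global:
//	  scrape_interval: 30s    # used when a job omits scrape_interval
//	scrape_configs:
//	- job_name: app
//	  scrape_timeout: 45s     # capped to the effective 30s interval, like Prometheus does
//	  max_scrape_size: 32MiB  # overrides the -promscrape.maxScrapeSize default for this job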

type scrapeWorkConfig struct {
	scrapeInterval       time.Duration
	scrapeIntervalString string
	scrapeTimeout        time.Duration
	scrapeTimeoutString  string
	maxScrapeSize        int64
	jobName              string
	metricsPath          string
	scheme               string
	params               map[string][]string
	proxyURL             *proxy.URL
	proxyAuthConfig      *promauth.Config
	authConfig           *promauth.Config
	honorLabels          bool
	honorTimestamps      bool
	denyRedirects        bool
	externalLabels       *promutils.Labels
	relabelConfigs       *promrelabel.ParsedConfigs
	metricRelabelConfigs *promrelabel.ParsedConfigs
	sampleLimit          int
	disableCompression   bool
	disableKeepAlive     bool
	streamParse          bool
	scrapeAlignInterval  time.Duration
	scrapeOffset         time.Duration
	seriesLimit          int
	noStaleMarkers       bool
}

func appendScrapeWorkForTargetLabels(dst []*ScrapeWork, swc *scrapeWorkConfig, targetLabels []*promutils.Labels, discoveryType string) []*ScrapeWork {
	startTime := time.Now()
	// Process targetLabels in parallel in order to reduce processing time for a big number of targetLabels.
	type result struct {
		sw  *ScrapeWork
		err error
	}
	goroutines := cgroup.AvailableCPUs()
	resultCh := make(chan result, len(targetLabels))
	workCh := make(chan *promutils.Labels, goroutines)
	for i := 0; i < goroutines; i++ {
		go func() {
			for metaLabels := range workCh {
				target := metaLabels.Get("__address__")
				sw, err := swc.getScrapeWork(target, nil, metaLabels)
				if err != nil {
					err = fmt.Errorf("skipping %s target %q for job_name=%s because of error: %w", discoveryType, target, swc.jobName, err)
				}
				resultCh <- result{
					sw:  sw,
					err: err,
				}
			}
		}()
	}
	for _, metaLabels := range targetLabels {
		workCh <- metaLabels
	}
	close(workCh)
	for range targetLabels {
		r := <-resultCh
		if r.err != nil {
			logger.Errorf("%s", r.err)
			continue
		}
		if r.sw != nil {
			dst = append(dst, r.sw)
		}
	}
	metrics.GetOrCreateHistogram(fmt.Sprintf("vm_promscrape_target_relabel_duration_seconds{type=%q}", discoveryType)).UpdateDuration(startTime)
	return dst
}

func (sdc *FileSDConfig) appendScrapeWork(dst []*ScrapeWork, baseDir string, swc *scrapeWorkConfig) []*ScrapeWork {
	metaLabels := promutils.GetLabels()
	defer promutils.PutLabels(metaLabels)
	for _, file := range sdc.Files {
		pathPattern := fscore.GetFilepath(baseDir, file)
		paths := []string{pathPattern}
		if strings.Contains(pathPattern, "*") {
			var err error
			paths, err = filepath.Glob(pathPattern)
			if err != nil {
				// Do not return this error, since other files may contain valid scrape configs.
				logger.Errorf("skipping entry %q in `file_sd_config->files` for job_name=%s because of error: %s", file, swc.jobName, err)
				continue
			}
		}
		for _, path := range paths {
			stcs, err := loadStaticConfigs(path)
			if err != nil {
				// Do not return this error, since other paths may contain valid scrape configs.
				logger.Errorf("skipping file %s for job_name=%s at `file_sd_configs` because of error: %s", path, swc.jobName, err)
				continue
			}
			pathShort := path
			if strings.HasPrefix(pathShort, baseDir) {
				pathShort = path[len(baseDir):]
				if len(pathShort) > 0 && pathShort[0] == filepath.Separator {
					pathShort = pathShort[1:]
				}
			}
			metaLabels.Reset()
			metaLabels.Add("__meta_filepath", pathShort)
			for i := range stcs {
				dst = stcs[i].appendScrapeWork(dst, swc, metaLabels)
			}
		}
	}
	return dst
}
func (stc *StaticConfig) appendScrapeWork(dst []*ScrapeWork, swc *scrapeWorkConfig, metaLabels *promutils.Labels) []*ScrapeWork {
	for _, target := range stc.Targets {
		if target == "" {
			// Do not return this error, since other targets may be valid.
			logger.Errorf("skipping empty `static_configs` target for job_name=%s", swc.jobName)
			continue
		}
		sw, err := swc.getScrapeWork(target, stc.Labels, metaLabels)
		if err != nil {
			// Do not return this error, since other targets may be valid.
			logger.Errorf("skipping `static_configs` target %q for job_name=%s because of error: %s", target, swc.jobName, err)
			continue
		}
		if sw != nil {
			dst = append(dst, sw)
		}
	}
	return dst
}
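
// appendScrapeWorkKey appends the given labels to dst in `name=value,` form
// and returns the result. The output is used as the hash key for spreading
// scrape targets among `-promscrape.cluster.*` members.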
func appendScrapeWorkKey(dst []byte, labels *promutils.Labels) []byte {
	for _, label := range labels.GetLabels() {
		// Do not use strconv.AppendQuote, since it is slow according to CPU profile.
		dst = append(dst, label.Name...)
		dst = append(dst, '=')
		dst = append(dst, label.Value...)
		dst = append(dst, ',')
	}
	return dst
}
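
// getClusterMemberNumsForScrapeWork returns the member numbers, which must scrape
// the target identified by key, when scrape targets are spread among membersCount
// cluster members with the given replicasCount.
//
// The first member is picked by hashing key, while replicas go to the subsequent
// members, wrapping around modulo membersCount. For example, with membersCount=3
// and replicasCount=2, a key hashing to member 2 yields [2 0].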
func getClusterMemberNumsForScrapeWork(key string, membersCount, replicasCount int) []int {
	if membersCount <= 1 {
		return []int{0}
	}
	h := xxhash.Sum64(bytesutil.ToUnsafeBytes(key))
	idx := int(h % uint64(membersCount))
	if replicasCount < 1 {
		replicasCount = 1
	}
	memberNums := make([]int, replicasCount)
	for i := 0; i < replicasCount; i++ {
		memberNums[i] = idx
		idx++
		if idx >= membersCount {
			idx = 0
		}
	}
	return memberNums
}
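
// scrapeWorkKeyBufPool reuses byte buffers for the sharding keys built
// by appendScrapeWorkKey, so per-target key construction doesn't allocate.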
var scrapeWorkKeyBufPool bytesutil.ByteBufferPool
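
// getScrapeWork builds a ScrapeWork for the given target by merging extraLabels
// and metaLabels with the auto-generated labels and applying relabeling.
//
// It returns (nil, nil) when the target must be dropped: relabeling removed all its
// labels, the target belongs to another cluster member, or no scrape URL could be built.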
func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabels *promutils.Labels) (*ScrapeWork, error) {
	labels := promutils.GetLabels()
	defer promutils.PutLabels(labels)

	mergeLabels(labels, swc, target, extraLabels, metaLabels)
	var originalLabels *promutils.Labels
	if !*dropOriginalLabels {
		originalLabels = labels.Clone()
	}
	labels.Labels = swc.relabelConfigs.Apply(labels.Labels, 0)
	// Remove labels starting from "__meta_" prefix according to https://www.robustperception.io/life-of-a-label/
	labels.RemoveMetaLabels()

	if labels.Len() == 0 {
		// Drop target without labels.
		originalLabels = sortOriginalLabelsIfNeeded(originalLabels)
		droppedTargetsMap.Register(originalLabels, swc.relabelConfigs, targetDropReasonRelabeling, nil)
		return nil, nil
	}
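
	// Targets removed by relabeling rules (e.g. `action: drop`) end up with zero labels,
	// so they are registered in droppedTargetsMap above and can be inspected later.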

	// Verify whether the scrape work must be skipped because of `-promscrape.cluster.*` configs.
	// Perform the verification on labels after the relabeling in order to guarantee that targets with the same set of labels
	// go to the same vmagent shard.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1687#issuecomment-940629495
	if *clusterMembersCount > 1 {
		bb := scrapeWorkKeyBufPool.Get()
		bb.B = appendScrapeWorkKey(bb.B[:0], labels)
		memberNums := getClusterMemberNumsForScrapeWork(bytesutil.ToUnsafeString(bb.B), *clusterMembersCount, *clusterReplicationFactor)
		scrapeWorkKeyBufPool.Put(bb)
		if !slices.Contains(memberNums, clusterMemberID) {
			originalLabels = sortOriginalLabelsIfNeeded(originalLabels)
			droppedTargetsMap.Register(originalLabels, swc.relabelConfigs, targetDropReasonSharding, memberNums)
			return nil, nil
		}
	}
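
	// Example: with -promscrape.cluster.membersCount=3 and -promscrape.cluster.replicationFactor=2,
	// every target is scraped by exactly two of the three members, and the current member keeps
	// the target only when its own number (-promscrape.cluster.memberNum) is present in memberNums.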

	scrapeURL, address := promrelabel.GetScrapeURL(labels, swc.params)
	if scrapeURL == "" {
		// Drop target without URL.
		originalLabels = sortOriginalLabelsIfNeeded(originalLabels)
		droppedTargetsMap.Register(originalLabels, swc.relabelConfigs, targetDropReasonMissingScrapeURL, nil)
		return nil, nil
	}
	if _, err := url.Parse(scrapeURL); err != nil {
		return nil, fmt.Errorf("invalid target url=%q for job=%q: %w", scrapeURL, swc.jobName, err)
	}

	var at *auth.Token
	tenantID := labels.Get("__tenant_id__")
	if len(tenantID) > 0 {
		newToken, err := auth.NewToken(tenantID)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __tenant_id__=%q for job=%q: %w", tenantID, swc.jobName, err)
		}
		at = newToken
	}
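
	// The __tenant_id__ label is usually set via relabeling, e.g. to "42" or "42:7"
	// (accountID or accountID:projectID), so that samples scraped from the target
	// are routed to the given tenant.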

	// Read __scrape_interval__ and __scrape_timeout__ from labels.
	scrapeInterval := swc.scrapeInterval
	if s := labels.Get("__scrape_interval__"); len(s) > 0 {
		d, err := promutils.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __scrape_interval__=%q: %w", s, err)
		}
		scrapeInterval = d
	}
	scrapeTimeout := swc.scrapeTimeout
	if s := labels.Get("__scrape_timeout__"); len(s) > 0 {
		d, err := promutils.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __scrape_timeout__=%q: %w", s, err)
		}
		scrapeTimeout = d
	}
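
	// Example: a relabeling rule, which sets __scrape_interval__="15s" and __scrape_timeout__="5s"
	// on a target, overrides the job-level scrape_interval and scrape_timeout for that target only.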

	// Read series_limit option from __series_limit__ label.
	// See https://docs.victoriametrics.com/vmagent/#cardinality-limiter
	seriesLimit := swc.seriesLimit
	if s := labels.Get("__series_limit__"); len(s) > 0 {
		n, err := strconv.Atoi(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __series_limit__=%q: %w", s, err)
		}
		seriesLimit = n
	}
	// Read sample_limit option from __sample_limit__ label.
	// See https://docs.victoriametrics.com/vmagent/#automatically-generated-metrics
	sampleLimit := swc.sampleLimit
	if s := labels.Get("__sample_limit__"); len(s) > 0 {
		n, err := strconv.Atoi(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __sample_limit__=%q: %w", s, err)
		}
		sampleLimit = n
	}
	// Read stream_parse option from __stream_parse__ label.
	// See https://docs.victoriametrics.com/vmagent/#stream-parsing-mode
	streamParse := swc.streamParse
	if s := labels.Get("__stream_parse__"); len(s) > 0 {
		b, err := strconv.ParseBool(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __stream_parse__=%q: %w", s, err)
		}
		streamParse = b
	}
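
	// Example: setting __series_limit__="1000" via relabeling caps the number of unique series
	// scraped from that particular target, while __stream_parse__="true" enables stream parsing
	// mode for it; both override the corresponding job-level options.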

	// Remove labels with "__" prefix according to https://www.robustperception.io/life-of-a-label/
	labels.RemoveLabelsWithDoubleUnderscorePrefix()
	// Add missing "instance" label according to https://www.robustperception.io/life-of-a-label
	if labels.Get("instance") == "" {
		labels.Add("instance", address)
	}
	if *clusterMemberLabel != "" && *clusterMemberNum != "" {
		labels.Add(*clusterMemberLabel, *clusterMemberNum)
	}

	// Remove references to deleted labels, so the GC can free strings for label names and values past len(labels.Labels).
	// This should reduce memory usage when relabeling creates a big number of temporary labels with long names and/or values.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
	labelsCopy := labels.Clone()
	// Sort labels in alphabetical order of their names.
	labelsCopy.Sort()
	// Reduce memory usage by interning all the strings in labels.
	labelsCopy.InternStrings()

	originalLabels = sortOriginalLabelsIfNeeded(originalLabels)
	sw := &ScrapeWork{
		ScrapeURL:            scrapeURL,
		ScrapeInterval:       scrapeInterval,
		ScrapeTimeout:        scrapeTimeout,
		MaxScrapeSize:        swc.maxScrapeSize,
		HonorLabels:          swc.honorLabels,
		HonorTimestamps:      swc.honorTimestamps,
		DenyRedirects:        swc.denyRedirects,
		OriginalLabels:       originalLabels,
		Labels:               labelsCopy,
		ExternalLabels:       swc.externalLabels,
		ProxyURL:             swc.proxyURL,
		ProxyAuthConfig:      swc.proxyAuthConfig,
		AuthConfig:           swc.authConfig,
		RelabelConfigs:       swc.relabelConfigs,
		MetricRelabelConfigs: swc.metricRelabelConfigs,
		SampleLimit:          sampleLimit,
		DisableCompression:   swc.disableCompression,
		DisableKeepAlive:     swc.disableKeepAlive,
		StreamParse:          streamParse,
		ScrapeAlignInterval:  swc.scrapeAlignInterval,
		ScrapeOffset:         swc.scrapeOffset,
		SeriesLimit:          seriesLimit,
		NoStaleMarkers:       swc.noStaleMarkers,
		AuthToken:            at,

		jobNameOriginal: swc.jobName,
	}
	return sw, nil
}
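
// sortOriginalLabelsIfNeeded sorts and interns originalLabels in place and returns them.
// It is a no-op returning nil when originalLabels is nil, i.e. when *dropOriginalLabels is set.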
func sortOriginalLabelsIfNeeded(originalLabels *promutils.Labels) *promutils.Labels {
	if originalLabels == nil {
		return nil
	}
	originalLabels.Sort()
	// Reduce memory usage by interning all the strings in originalLabels.
	originalLabels.InternStrings()
	return originalLabels
}
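
// mergeLabels fills dst with the auto-generated target labels (job, __address__,
// __scheme__, __metrics_path__, __scrape_interval__, __scrape_timeout__ and the
// __param_* labels), then adds extraLabels and metaLabels and removes duplicate
// label names. dst must be empty on entry.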
func mergeLabels(dst *promutils.Labels, swc *scrapeWorkConfig, target string, extraLabels, metaLabels *promutils.Labels) {
	if n := dst.Len(); n > 0 {
		logger.Panicf("BUG: len(dst.Labels) must be 0; got %d", n)
	}
	// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
	dst.Add("job", swc.jobName)
	dst.Add("__address__", target)
	dst.Add("__scheme__", swc.scheme)
	dst.Add("__metrics_path__", swc.metricsPath)
	dst.Add("__scrape_interval__", swc.scrapeIntervalString)
	dst.Add("__scrape_timeout__", swc.scrapeTimeoutString)
	for k, args := range swc.params {
		if len(args) == 0 {
			continue
		}
		k = "__param_" + k
		v := args[0]
		dst.Add(k, v)
	}
	dst.AddFrom(extraLabels)
	dst.AddFrom(metaLabels)
	dst.RemoveDuplicates()
}
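
// Default scrape interval and timeout, matching the Prometheus defaults; they apply
// when scrape_interval and scrape_timeout aren't set in the scrape config.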
const (
	defaultScrapeInterval = time.Minute
	defaultScrapeTimeout  = 10 * time.Second
)