lib/promscrape: store ScrapeWork items by pointer in the slice returned from get*ScrapeWork()

This should prevent possible 'memory leaks': a retained pointer to a single ScrapeWork item in the slice
could keep the memory occupied by all the ScrapeWork items in the slice from being released
after they are no longer used.

See the related commit e205975716 and issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825
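
For illustration, a minimal standalone sketch of the retention problem (hypothetical type and function names, not code from this commit): holding a pointer into a []ScrapeWork slice pins the whole backing array, while holding one item of a []*ScrapeWork slice pins only that item.

	package main

	// ScrapeWork here is a stand-in for the real struct; the payload field
	// makes the per-item memory cost visible. Hypothetical example code.
	type ScrapeWork struct{ payload [4096]byte }

	// byValue returns a pointer into the backing array of a []ScrapeWork.
	// While the caller keeps this pointer, the GC must keep all 10000 items
	// alive, even though 9999 of them are no longer used.
	func byValue() *ScrapeWork {
		sws := make([]ScrapeWork, 10000)
		return &sws[0]
	}

	// byPointer returns a single item of a []*ScrapeWork. Each item is a
	// separate allocation, so once the slice itself becomes unreachable
	// the GC can reclaim the remaining 9999 items.
	func byPointer() *ScrapeWork {
		sws := make([]*ScrapeWork, 10000)
		for i := range sws {
			sws[i] = &ScrapeWork{}
		}
		return sws[0]
	}

	func main() {
		_ = byValue()
		_ = byPointer()
	}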
Aliaksandr Valialkin 2020-12-08 17:50:03 +02:00
parent 1906f841c9
commit 364f30a6e7
3 changed files with 76 additions and 79 deletions

lib/promscrape/config.go

@@ -174,8 +174,8 @@ func unmarshalMaybeStrict(data []byte, dst interface{}) error {
 	return err
 }
 
-func getSWSByJob(sws []ScrapeWork) map[string][]ScrapeWork {
-	m := make(map[string][]ScrapeWork)
+func getSWSByJob(sws []*ScrapeWork) map[string][]*ScrapeWork {
+	m := make(map[string][]*ScrapeWork)
 	for _, sw := range sws {
 		m[sw.jobNameOriginal] = append(m[sw.jobNameOriginal], sw)
 	}
@@ -183,9 +183,9 @@ func getSWSByJob(sws []ScrapeWork) map[string][]ScrapeWork {
 }
 
 // getKubernetesSDScrapeWork returns `kubernetes_sd_configs` ScrapeWork from cfg.
-func (cfg *Config) getKubernetesSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
+func (cfg *Config) getKubernetesSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	swsPrevByJob := getSWSByJob(prev)
-	dst := make([]ScrapeWork, 0, len(prev))
+	dst := make([]*ScrapeWork, 0, len(prev))
 	for i := range cfg.ScrapeConfigs {
 		sc := &cfg.ScrapeConfigs[i]
 		dstLen := len(dst)
@@ -211,9 +211,9 @@ func (cfg *Config) getKubernetesSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
 }
 
 // getOpenStackSDScrapeWork returns `openstack_sd_configs` ScrapeWork from cfg.
-func (cfg *Config) getOpenStackSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
+func (cfg *Config) getOpenStackSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	swsPrevByJob := getSWSByJob(prev)
-	dst := make([]ScrapeWork, 0, len(prev))
+	dst := make([]*ScrapeWork, 0, len(prev))
 	for i := range cfg.ScrapeConfigs {
 		sc := &cfg.ScrapeConfigs[i]
 		dstLen := len(dst)
@@ -239,9 +239,9 @@ func (cfg *Config) getOpenStackSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
 }
 
 // getDockerSwarmSDScrapeWork returns `dockerswarm_sd_configs` ScrapeWork from cfg.
-func (cfg *Config) getDockerSwarmSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
+func (cfg *Config) getDockerSwarmSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	swsPrevByJob := getSWSByJob(prev)
-	dst := make([]ScrapeWork, 0, len(prev))
+	dst := make([]*ScrapeWork, 0, len(prev))
 	for i := range cfg.ScrapeConfigs {
 		sc := &cfg.ScrapeConfigs[i]
 		dstLen := len(dst)
@@ -267,9 +267,9 @@ func (cfg *Config) getDockerSwarmSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
 }
 
 // getConsulSDScrapeWork returns `consul_sd_configs` ScrapeWork from cfg.
-func (cfg *Config) getConsulSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
+func (cfg *Config) getConsulSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	swsPrevByJob := getSWSByJob(prev)
-	dst := make([]ScrapeWork, 0, len(prev))
+	dst := make([]*ScrapeWork, 0, len(prev))
 	for i := range cfg.ScrapeConfigs {
 		sc := &cfg.ScrapeConfigs[i]
 		dstLen := len(dst)
@@ -295,9 +295,9 @@ func (cfg *Config) getConsulSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
 }
 
 // getEurekaSDScrapeWork returns `eureka_sd_configs` ScrapeWork from cfg.
-func (cfg *Config) getEurekaSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
+func (cfg *Config) getEurekaSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	swsPrevByJob := getSWSByJob(prev)
-	dst := make([]ScrapeWork, 0, len(prev))
+	dst := make([]*ScrapeWork, 0, len(prev))
 	for i := range cfg.ScrapeConfigs {
 		sc := &cfg.ScrapeConfigs[i]
 		dstLen := len(dst)
@@ -323,9 +323,9 @@ func (cfg *Config) getEurekaSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
 }
 
 // getDNSSDScrapeWork returns `dns_sd_configs` ScrapeWork from cfg.
-func (cfg *Config) getDNSSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
+func (cfg *Config) getDNSSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	swsPrevByJob := getSWSByJob(prev)
-	dst := make([]ScrapeWork, 0, len(prev))
+	dst := make([]*ScrapeWork, 0, len(prev))
 	for i := range cfg.ScrapeConfigs {
 		sc := &cfg.ScrapeConfigs[i]
 		dstLen := len(dst)
@@ -351,9 +351,9 @@ func (cfg *Config) getDNSSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
 }
 
 // getEC2SDScrapeWork returns `ec2_sd_configs` ScrapeWork from cfg.
-func (cfg *Config) getEC2SDScrapeWork(prev []ScrapeWork) []ScrapeWork {
+func (cfg *Config) getEC2SDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	swsPrevByJob := getSWSByJob(prev)
-	dst := make([]ScrapeWork, 0, len(prev))
+	dst := make([]*ScrapeWork, 0, len(prev))
 	for i := range cfg.ScrapeConfigs {
 		sc := &cfg.ScrapeConfigs[i]
 		dstLen := len(dst)
@@ -379,9 +379,9 @@ func (cfg *Config) getEC2SDScrapeWork(prev []ScrapeWork) []ScrapeWork {
 }
 
 // getGCESDScrapeWork returns `gce_sd_configs` ScrapeWork from cfg.
-func (cfg *Config) getGCESDScrapeWork(prev []ScrapeWork) []ScrapeWork {
+func (cfg *Config) getGCESDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	swsPrevByJob := getSWSByJob(prev)
-	dst := make([]ScrapeWork, 0, len(prev))
+	dst := make([]*ScrapeWork, 0, len(prev))
 	for i := range cfg.ScrapeConfigs {
 		sc := &cfg.ScrapeConfigs[i]
 		dstLen := len(dst)
@@ -407,19 +407,18 @@ func (cfg *Config) getGCESDScrapeWork(prev []ScrapeWork) []ScrapeWork {
 }
 
 // getFileSDScrapeWork returns `file_sd_configs` ScrapeWork from cfg.
-func (cfg *Config) getFileSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
+func (cfg *Config) getFileSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	// Create a map for the previous scrape work.
-	swsMapPrev := make(map[string][]ScrapeWork)
-	for i := range prev {
-		sw := &prev[i]
+	swsMapPrev := make(map[string][]*ScrapeWork)
+	for _, sw := range prev {
 		filepath := promrelabel.GetLabelValueByName(sw.Labels, "__vm_filepath")
 		if len(filepath) == 0 {
 			logger.Panicf("BUG: missing `__vm_filepath` label")
 		} else {
-			swsMapPrev[filepath] = append(swsMapPrev[filepath], *sw)
+			swsMapPrev[filepath] = append(swsMapPrev[filepath], sw)
 		}
 	}
-	dst := make([]ScrapeWork, 0, len(prev))
+	dst := make([]*ScrapeWork, 0, len(prev))
 	for i := range cfg.ScrapeConfigs {
 		sc := &cfg.ScrapeConfigs[i]
 		for j := range sc.FileSDConfigs {
@@ -431,8 +430,8 @@ func (cfg *Config) getFileSDScrapeWork(prev []ScrapeWork) []ScrapeWork {
 }
 
 // getStaticScrapeWork returns `static_configs` ScrapeWork from from cfg.
-func (cfg *Config) getStaticScrapeWork() []ScrapeWork {
-	var dst []ScrapeWork
+func (cfg *Config) getStaticScrapeWork() []*ScrapeWork {
+	var dst []*ScrapeWork
 	for i := range cfg.ScrapeConfigs {
 		sc := &cfg.ScrapeConfigs[i]
 		for j := range sc.StaticConfigs {
@@ -530,7 +529,7 @@ type scrapeWorkConfig struct {
 	streamParse bool
 }
 
-func appendKubernetesScrapeWork(dst []ScrapeWork, sdc *kubernetes.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]ScrapeWork, bool) {
+func appendKubernetesScrapeWork(dst []*ScrapeWork, sdc *kubernetes.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]*ScrapeWork, bool) {
 	targetLabels, err := kubernetes.GetLabels(sdc, baseDir)
 	if err != nil {
 		logger.Errorf("error when discovering kubernetes targets for `job_name` %q: %s; skipping it", swc.jobName, err)
@@ -539,7 +538,7 @@ func appendKubernetesScrapeWork(dst []ScrapeWork, sdc *kubernetes.SDConfig, base
 	return appendScrapeWorkForTargetLabels(dst, swc, targetLabels, "kubernetes_sd_config"), true
 }
 
-func appendOpenstackScrapeWork(dst []ScrapeWork, sdc *openstack.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]ScrapeWork, bool) {
+func appendOpenstackScrapeWork(dst []*ScrapeWork, sdc *openstack.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]*ScrapeWork, bool) {
 	targetLabels, err := openstack.GetLabels(sdc, baseDir)
 	if err != nil {
 		logger.Errorf("error when discovering openstack targets for `job_name` %q: %s; skipping it", swc.jobName, err)
@@ -548,7 +547,7 @@ func appendOpenstackScrapeWork(dst []ScrapeWork, sdc *openstack.SDConfig, baseDi
 	return appendScrapeWorkForTargetLabels(dst, swc, targetLabels, "openstack_sd_config"), true
 }
 
-func appendDockerSwarmScrapeWork(dst []ScrapeWork, sdc *dockerswarm.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]ScrapeWork, bool) {
+func appendDockerSwarmScrapeWork(dst []*ScrapeWork, sdc *dockerswarm.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]*ScrapeWork, bool) {
 	targetLabels, err := dockerswarm.GetLabels(sdc, baseDir)
 	if err != nil {
 		logger.Errorf("error when discovering dockerswarm targets for `job_name` %q: %s; skipping it", swc.jobName, err)
@@ -557,7 +556,7 @@ func appendDockerSwarmScrapeWork(dst []ScrapeWork, sdc *dockerswarm.SDConfig, ba
 	return appendScrapeWorkForTargetLabels(dst, swc, targetLabels, "dockerswarm_sd_config"), true
 }
 
-func appendConsulScrapeWork(dst []ScrapeWork, sdc *consul.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]ScrapeWork, bool) {
+func appendConsulScrapeWork(dst []*ScrapeWork, sdc *consul.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]*ScrapeWork, bool) {
 	targetLabels, err := consul.GetLabels(sdc, baseDir)
 	if err != nil {
 		logger.Errorf("error when discovering consul targets for `job_name` %q: %s; skipping it", swc.jobName, err)
@@ -566,7 +565,7 @@ func appendConsulScrapeWork(dst []ScrapeWork, sdc *consul.SDConfig, baseDir stri
 	return appendScrapeWorkForTargetLabels(dst, swc, targetLabels, "consul_sd_config"), true
 }
 
-func appendEurekaScrapeWork(dst []ScrapeWork, sdc *eureka.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]ScrapeWork, bool) {
+func appendEurekaScrapeWork(dst []*ScrapeWork, sdc *eureka.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]*ScrapeWork, bool) {
 	targetLabels, err := eureka.GetLabels(sdc, baseDir)
 	if err != nil {
 		logger.Errorf("error when discovering eureka targets for `job_name` %q: %s; skipping it", swc.jobName, err)
@@ -575,7 +574,7 @@ func appendEurekaScrapeWork(dst []ScrapeWork, sdc *eureka.SDConfig, baseDir stri
 	return appendScrapeWorkForTargetLabels(dst, swc, targetLabels, "eureka_sd_config"), true
 }
 
-func appendDNSScrapeWork(dst []ScrapeWork, sdc *dns.SDConfig, swc *scrapeWorkConfig) ([]ScrapeWork, bool) {
+func appendDNSScrapeWork(dst []*ScrapeWork, sdc *dns.SDConfig, swc *scrapeWorkConfig) ([]*ScrapeWork, bool) {
 	targetLabels, err := dns.GetLabels(sdc)
 	if err != nil {
 		logger.Errorf("error when discovering dns targets for `job_name` %q: %s; skipping it", swc.jobName, err)
@@ -584,7 +583,7 @@ func appendDNSScrapeWork(dst []ScrapeWork, sdc *dns.SDConfig, swc *scrapeWorkCon
 	return appendScrapeWorkForTargetLabels(dst, swc, targetLabels, "dns_sd_config"), true
 }
 
-func appendEC2ScrapeWork(dst []ScrapeWork, sdc *ec2.SDConfig, swc *scrapeWorkConfig) ([]ScrapeWork, bool) {
+func appendEC2ScrapeWork(dst []*ScrapeWork, sdc *ec2.SDConfig, swc *scrapeWorkConfig) ([]*ScrapeWork, bool) {
 	targetLabels, err := ec2.GetLabels(sdc)
 	if err != nil {
 		logger.Errorf("error when discovering ec2 targets for `job_name` %q: %s; skipping it", swc.jobName, err)
@@ -593,7 +592,7 @@ func appendEC2ScrapeWork(dst []ScrapeWork, sdc *ec2.SDConfig, swc *scrapeWorkCon
 	return appendScrapeWorkForTargetLabels(dst, swc, targetLabels, "ec2_sd_config"), true
 }
 
-func appendGCEScrapeWork(dst []ScrapeWork, sdc *gce.SDConfig, swc *scrapeWorkConfig) ([]ScrapeWork, bool) {
+func appendGCEScrapeWork(dst []*ScrapeWork, sdc *gce.SDConfig, swc *scrapeWorkConfig) ([]*ScrapeWork, bool) {
 	targetLabels, err := gce.GetLabels(sdc)
 	if err != nil {
 		logger.Errorf("error when discovering gce targets for `job_name` %q: %s; skippint it", swc.jobName, err)
@@ -602,7 +601,7 @@ func appendGCEScrapeWork(dst []ScrapeWork, sdc *gce.SDConfig, swc *scrapeWorkCon
 	return appendScrapeWorkForTargetLabels(dst, swc, targetLabels, "gce_sd_config"), true
 }
 
-func appendScrapeWorkForTargetLabels(dst []ScrapeWork, swc *scrapeWorkConfig, targetLabels []map[string]string, sectionName string) []ScrapeWork {
+func appendScrapeWorkForTargetLabels(dst []*ScrapeWork, swc *scrapeWorkConfig, targetLabels []map[string]string, sectionName string) []*ScrapeWork {
 	for _, metaLabels := range targetLabels {
 		target := metaLabels["__address__"]
 		var err error
@@ -615,7 +614,7 @@ func appendScrapeWorkForTargetLabels(dst []ScrapeWork, swc *scrapeWorkConfig, ta
 	return dst
 }
 
-func (sdc *FileSDConfig) appendScrapeWork(dst []ScrapeWork, swsMapPrev map[string][]ScrapeWork, baseDir string, swc *scrapeWorkConfig) []ScrapeWork {
+func (sdc *FileSDConfig) appendScrapeWork(dst []*ScrapeWork, swsMapPrev map[string][]*ScrapeWork, baseDir string, swc *scrapeWorkConfig) []*ScrapeWork {
 	for _, file := range sdc.Files {
 		pathPattern := getFilepath(baseDir, file)
 		paths := []string{pathPattern}
@@ -660,7 +659,7 @@ func (sdc *FileSDConfig) appendScrapeWork(dst []ScrapeWork, swsMapPrev map[strin
 	return dst
 }
 
-func (stc *StaticConfig) appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, metaLabels map[string]string) []ScrapeWork {
+func (stc *StaticConfig) appendScrapeWork(dst []*ScrapeWork, swc *scrapeWorkConfig, metaLabels map[string]string) []*ScrapeWork {
 	for _, target := range stc.Targets {
 		if target == "" {
 			// Do not return this error, since other targets may be valid
@@ -678,7 +677,7 @@ func (stc *StaticConfig) appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfi
 	return dst
 }
 
-func appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, target string, extraLabels, metaLabels map[string]string) ([]ScrapeWork, error) {
+func appendScrapeWork(dst []*ScrapeWork, swc *scrapeWorkConfig, target string, extraLabels, metaLabels map[string]string) ([]*ScrapeWork, error) {
 	labels := mergeLabels(swc.jobName, swc.scheme, target, swc.metricsPath, extraLabels, swc.externalLabels, metaLabels, swc.params)
 	var originalLabels []prompbmarshal.Label
 	if !*dropOriginalLabels {
@@ -744,7 +743,7 @@ func appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, target string, ex
 	}
 	// Reduce memory usage by interning all the strings in labels.
 	internLabelStrings(labels)
-	dst = append(dst, ScrapeWork{
+	dst = append(dst, &ScrapeWork{
 		ID:             atomic.AddUint64(&nextScrapeWorkID, 1),
 		ScrapeURL:      scrapeURL,
 		ScrapeInterval: swc.scrapeInterval,

lib/promscrape/config_test.go

@@ -94,7 +94,7 @@ scrape_configs:
 	}
 	sws := cfg.getStaticScrapeWork()
 	resetNonEssentialFields(sws)
-	swsExpected := []ScrapeWork{{
+	swsExpected := []*ScrapeWork{{
 		ScrapeURL:      "http://black:9115/probe?module=dns_udp_example&target=8.8.8.8",
 		ScrapeInterval: defaultScrapeInterval,
 		ScrapeTimeout:  defaultScrapeTimeout,
@@ -199,7 +199,7 @@ scrape_configs:
 	}
 }
 
-func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
+func getFileSDScrapeWork(data []byte, path string) ([]*ScrapeWork, error) {
 	var cfg Config
 	if err := cfg.parse(data, path); err != nil {
 		return nil, fmt.Errorf("cannot parse data: %w", err)
@@ -207,7 +207,7 @@ func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
 	return cfg.getFileSDScrapeWork(nil), nil
 }
 
-func getStaticScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
+func getStaticScrapeWork(data []byte, path string) ([]*ScrapeWork, error) {
 	var cfg Config
 	if err := cfg.parse(data, path); err != nil {
 		return nil, fmt.Errorf("cannot parse data: %w", err)
@@ -440,7 +440,7 @@ scrape_configs:
 `)
 }
 
-func resetNonEssentialFields(sws []ScrapeWork) {
+func resetNonEssentialFields(sws []*ScrapeWork) {
 	for i := range sws {
 		sws[i].ID = 0
 		sws[i].OriginalLabels = nil
@@ -448,7 +448,7 @@ func resetNonEssentialFields(sws []ScrapeWork) {
 }
 
 func TestGetFileSDScrapeWorkSuccess(t *testing.T) {
-	f := func(data string, expectedSws []ScrapeWork) {
+	f := func(data string, expectedSws []*ScrapeWork) {
 		t.Helper()
 		sws, err := getFileSDScrapeWork([]byte(data), "non-existing-file")
 		if err != nil {
@@ -457,8 +457,7 @@ func TestGetFileSDScrapeWorkSuccess(t *testing.T) {
 		resetNonEssentialFields(sws)
 
 		// Remove `__vm_filepath` label, since its value depends on the current working dir.
-		for i := range sws {
-			sw := &sws[i]
+		for _, sw := range sws {
 			for j := range sw.Labels {
 				label := &sw.Labels[j]
 				if label.Name == "__vm_filepath" {
@@ -475,14 +474,14 @@ scrape_configs:
 - job_name: foo
   static_configs:
   - targets: ["xxx"]
-`, []ScrapeWork{})
+`, []*ScrapeWork{})
 	f(`
 scrape_configs:
 - job_name: foo
   metrics_path: /abc/de
   file_sd_configs:
   - files: ["testdata/file_sd.json", "testdata/file_sd*.yml"]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://host1:80/abc/de",
 			ScrapeInterval: defaultScrapeInterval,
@@ -604,7 +603,7 @@ scrape_configs:
 }
 
 func TestGetStaticScrapeWorkSuccess(t *testing.T) {
-	f := func(data string, expectedSws []ScrapeWork) {
+	f := func(data string, expectedSws []*ScrapeWork) {
 		t.Helper()
 		sws, err := getStaticScrapeWork([]byte(data), "non-exsiting-file")
 		if err != nil {
@@ -621,7 +620,7 @@ scrape_configs:
 - job_name: foo
   static_configs:
   - targets: ["foo.bar:1234"]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://foo.bar:1234/metrics",
 			ScrapeInterval: defaultScrapeInterval,
@@ -663,7 +662,7 @@ scrape_configs:
 - job_name: foo
   static_configs:
   - targets: ["foo.bar:1234"]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://foo.bar:1234/metrics",
 			ScrapeInterval: defaultScrapeInterval,
@@ -733,7 +732,7 @@ scrape_configs:
     insecure_skip_verify: true
   static_configs:
   - targets: [1.2.3.4]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "https://foo.bar:443/foo/bar?p=x%26y&p=%3D",
 			ScrapeInterval: 543 * time.Second,
@@ -887,7 +886,7 @@ scrape_configs:
     x: [keep_me]
   static_configs:
   - targets: ["foo.bar:1234", "drop-this-target"]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://foo.bar:1234/metrics?x=keep_me",
 			ScrapeInterval: defaultScrapeInterval,
@@ -957,7 +956,7 @@ scrape_configs:
     replacement: b
   static_configs:
   - targets: ["foo.bar:1234"]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "mailto://foo.bar:1234/abc.de?a=b",
 			ScrapeInterval: defaultScrapeInterval,
@@ -1012,7 +1011,7 @@ scrape_configs:
     regex: ""
   static_configs:
   - targets: ["foo.bar:1234", "xyz"]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://foo.bar:1234/metrics",
 			ScrapeInterval: defaultScrapeInterval,
@@ -1051,7 +1050,7 @@ scrape_configs:
     target_label: abc
   static_configs:
   - targets: ["foo.bar:1234"]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://foo.bar:1234/metrics",
 			ScrapeInterval: defaultScrapeInterval,
@@ -1091,7 +1090,7 @@ scrape_configs:
     password_file: testdata/password.txt
   static_configs:
   - targets: ["foo.bar:1234"]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://foo.bar:1234/metrics",
 			ScrapeInterval: defaultScrapeInterval,
@@ -1130,7 +1129,7 @@ scrape_configs:
     bearer_token_file: testdata/password.txt
   static_configs:
   - targets: ["foo.bar:1234"]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://foo.bar:1234/metrics",
 			ScrapeInterval: defaultScrapeInterval,
@@ -1175,7 +1174,7 @@ scrape_configs:
     key_file: testdata/ssl-cert-snakeoil.key
   static_configs:
   - targets: ["foo.bar:1234"]
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://foo.bar:1234/metrics",
 			ScrapeInterval: defaultScrapeInterval,
@@ -1227,7 +1226,7 @@ scrape_configs:
       __param_a: c
       __address__: pp
       job: yyy
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://pp:80/metrics?a=c&a=xy",
 			ScrapeInterval: defaultScrapeInterval,
@@ -1290,7 +1289,7 @@ scrape_configs:
     target_label: instance
   - target_label: __address__
     replacement: 127.0.0.1:9116 # The SNMP exporter's real hostname:port.
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://127.0.0.1:9116/snmp?module=if_mib&target=192.168.1.2",
 			ScrapeInterval: defaultScrapeInterval,
@@ -1341,7 +1340,7 @@ scrape_configs:
   relabel_configs:
   - replacement: metricspath
     target_label: __metrics_path__
-`, []ScrapeWork{
+`, []*ScrapeWork{
 		{
 			ScrapeURL:      "http://foo.bar:1234/metricspath",
 			ScrapeInterval: defaultScrapeInterval,
@@ -1376,7 +1375,7 @@ scrape_configs:
 var defaultRegexForRelabelConfig = regexp.MustCompile("^(.*)$")
 
-func equalStaticConfigForScrapeWorks(a, b []ScrapeWork) bool {
+func equalStaticConfigForScrapeWorks(a, b []*ScrapeWork) bool {
 	if len(a) != len(b) {
 		return false
 	}

lib/promscrape/scraper.go

@@ -95,16 +95,16 @@ func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest)
 	}
 
 	scs := newScrapeConfigs(pushData)
-	scs.add("static_configs", 0, func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork { return cfg.getStaticScrapeWork() })
-	scs.add("file_sd_configs", *fileSDCheckInterval, func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork { return cfg.getFileSDScrapeWork(swsPrev) })
-	scs.add("kubernetes_sd_configs", *kubernetesSDCheckInterval, func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork { return cfg.getKubernetesSDScrapeWork(swsPrev) })
-	scs.add("openstack_sd_configs", *openstackSDCheckInterval, func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork { return cfg.getOpenStackSDScrapeWork(swsPrev) })
-	scs.add("consul_sd_configs", *consul.SDCheckInterval, func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork { return cfg.getConsulSDScrapeWork(swsPrev) })
-	scs.add("eureka_sd_configs", *eurekaSDCheckInterval, func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork { return cfg.getEurekaSDScrapeWork(swsPrev) })
-	scs.add("dns_sd_configs", *dnsSDCheckInterval, func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork { return cfg.getDNSSDScrapeWork(swsPrev) })
-	scs.add("ec2_sd_configs", *ec2SDCheckInterval, func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork { return cfg.getEC2SDScrapeWork(swsPrev) })
-	scs.add("gce_sd_configs", *gceSDCheckInterval, func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork { return cfg.getGCESDScrapeWork(swsPrev) })
-	scs.add("dockerswarm_sd_configs", *dockerswarmSDCheckInterval, func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork { return cfg.getDockerSwarmSDScrapeWork(swsPrev) })
+	scs.add("static_configs", 0, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getStaticScrapeWork() })
+	scs.add("file_sd_configs", *fileSDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getFileSDScrapeWork(swsPrev) })
+	scs.add("kubernetes_sd_configs", *kubernetesSDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getKubernetesSDScrapeWork(swsPrev) })
+	scs.add("openstack_sd_configs", *openstackSDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getOpenStackSDScrapeWork(swsPrev) })
+	scs.add("consul_sd_configs", *consul.SDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getConsulSDScrapeWork(swsPrev) })
+	scs.add("eureka_sd_configs", *eurekaSDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getEurekaSDScrapeWork(swsPrev) })
+	scs.add("dns_sd_configs", *dnsSDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getDNSSDScrapeWork(swsPrev) })
+	scs.add("ec2_sd_configs", *ec2SDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getEC2SDScrapeWork(swsPrev) })
+	scs.add("gce_sd_configs", *gceSDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getGCESDScrapeWork(swsPrev) })
+	scs.add("dockerswarm_sd_configs", *dockerswarmSDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getDockerSwarmSDScrapeWork(swsPrev) })
 
 	sighupCh := procutil.NewSighupChan()
@@ -171,7 +171,7 @@ func newScrapeConfigs(pushData func(wr *prompbmarshal.WriteRequest)) *scrapeConf
 	}
 }
 
-func (scs *scrapeConfigs) add(name string, checkInterval time.Duration, getScrapeWork func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork) {
+func (scs *scrapeConfigs) add(name string, checkInterval time.Duration, getScrapeWork func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork) {
 	atomic.AddInt32(&PendingScrapeConfigs, 1)
 	scfg := &scrapeConfig{
 		name: name,
@@ -204,7 +204,7 @@ func (scs *scrapeConfigs) stop() {
 type scrapeConfig struct {
 	name          string
 	pushData      func(wr *prompbmarshal.WriteRequest)
-	getScrapeWork func(cfg *Config, swsPrev []ScrapeWork) []ScrapeWork
+	getScrapeWork func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork
 	checkInterval time.Duration
 	cfgCh         chan *Config
 	stopCh        <-chan struct{}
@@ -222,7 +222,7 @@ func (scfg *scrapeConfig) run() {
 	}
 	cfg := <-scfg.cfgCh
 
-	var swsPrev []ScrapeWork
+	var swsPrev []*ScrapeWork
 	updateScrapeWork := func(cfg *Config) {
 		sws := scfg.getScrapeWork(cfg, swsPrev)
 		sg.update(sws)
@@ -286,15 +286,14 @@ func (sg *scraperGroup) stop() {
 	sg.wg.Wait()
 }
 
-func (sg *scraperGroup) update(sws []ScrapeWork) {
+func (sg *scraperGroup) update(sws []*ScrapeWork) {
 	sg.mLock.Lock()
 	defer sg.mLock.Unlock()
 
 	additionsCount := 0
 	deletionsCount := 0
 	swsMap := make(map[string][]prompbmarshal.Label, len(sws))
-	for i := range sws {
-		sw := &sws[i]
+	for _, sw := range sws {
 		key := sw.key()
 		originalLabels := swsMap[key]
 		if originalLabels != nil {