lib/promscrape: rename 'scrape_config->scrape_limit' to 'scrape_config->sample_limit'

The `scrape_config` block in the Prometheus config contains a `sample_limit` field,
while in `vmagent` this field was mistakenly named `scrape_limit`.
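
When the number of samples left after relabeling exceeds `sample_limit`, the whole
scrape result is dropped, `up` is set to 0 for the target and the
`vm_promscrape_scrapes_skipped_by_sample_limit_total` counter is incremented
(see the `scrapeInternal` diff below). For example, the following `scrape_config`
snippet now works identically in Prometheus and `vmagent`; the job name, limit
value and target are illustrative placeholders:

scrape_configs:
  - job_name: node-exporter      # illustrative job name
    sample_limit: 5000           # drop the whole scrape if >5000 samples remain after relabeling
    static_configs:
      - targets: ["localhost:9100"]   # illustrative target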
Aliaksandr Valialkin 2020-04-14 11:58:15 +03:00
parent 755f649c72
commit f58d15f27c
3 changed files with 9 additions and 10 deletions

lib/promscrape/config.go

@@ -61,7 +61,7 @@ type ScrapeConfig struct {
 	KubernetesSDConfigs  []KubernetesSDConfig        `yaml:"kubernetes_sd_configs"`
 	RelabelConfigs       []promrelabel.RelabelConfig `yaml:"relabel_configs"`
 	MetricRelabelConfigs []promrelabel.RelabelConfig `yaml:"metric_relabel_configs"`
-	ScrapeLimit          int                         `yaml:"scrape_limit"`
+	SampleLimit          int                         `yaml:"sample_limit"`

 	// This is set in loadConfig
 	swc *scrapeWorkConfig
@@ -264,7 +264,6 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
 	if err != nil {
 		return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %s", jobName, err)
 	}
-	scrapeLimit := sc.ScrapeLimit
 	swc := &scrapeWorkConfig{
 		scrapeInterval: scrapeInterval,
 		scrapeTimeout:  scrapeTimeout,
@@ -278,7 +277,7 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
 		externalLabels:       globalCfg.ExternalLabels,
 		relabelConfigs:       relabelConfigs,
 		metricRelabelConfigs: metricRelabelConfigs,
-		scrapeLimit:          scrapeLimit,
+		sampleLimit:          sc.SampleLimit,
 	}
 	return swc, nil
 }
@@ -296,7 +295,7 @@ type scrapeWorkConfig struct {
 	externalLabels       map[string]string
 	relabelConfigs       []promrelabel.ParsedRelabelConfig
 	metricRelabelConfigs []promrelabel.ParsedRelabelConfig
-	scrapeLimit          int
+	sampleLimit          int
 }

 func (sdc *KubernetesSDConfig) appendScrapeWork(dst []ScrapeWork, baseDir string, swc *scrapeWorkConfig) []ScrapeWork {
@@ -481,7 +480,7 @@ func appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, target string, ex
 		Labels:               labels,
 		AuthConfig:           swc.authConfig,
 		MetricRelabelConfigs: swc.metricRelabelConfigs,
-		ScrapeLimit:          swc.scrapeLimit,
+		SampleLimit:          swc.sampleLimit,
 	})
 	return dst, nil
 }

lib/promscrape/scrapework.go

@@ -54,7 +54,7 @@ type ScrapeWork struct {
 	MetricRelabelConfigs []promrelabel.ParsedRelabelConfig

 	// The maximum number of metrics to scrape after relabeling.
-	ScrapeLimit int
+	SampleLimit int
 }

 type scrapeWork struct {
@@ -124,7 +124,7 @@ var (
 	scrapeDuration              = metrics.NewHistogram("vm_promscrape_scrape_duration_seconds")
 	scrapeResponseSize          = metrics.NewHistogram("vm_promscrape_scrape_response_size_bytes")
 	scrapedSamples              = metrics.NewHistogram("vm_promscrape_scraped_samples")
-	scrapesSkippedByScrapeLimit = metrics.NewCounter("vm_promscrape_scrapes_skipped_by_scrape_limit_total")
+	scrapesSkippedBySampleLimit = metrics.NewCounter("vm_promscrape_scrapes_skipped_by_sample_limit_total")
 	scrapesFailed               = metrics.NewCounter("vm_promscrape_scrapes_failed_total")
 	pushDataDuration            = metrics.NewHistogram("vm_promscrape_push_data_duration_seconds")
 )
@@ -151,10 +151,10 @@ func (sw *scrapeWork) scrapeInternal(timestamp int64) error {
 		sw.addRowToTimeseries(&srcRows[i], timestamp)
 	}
 	sw.rows.Reset()
-	if sw.Config.ScrapeLimit > 0 && len(sw.writeRequest.Timeseries) > sw.Config.ScrapeLimit {
+	if sw.Config.SampleLimit > 0 && len(sw.writeRequest.Timeseries) > sw.Config.SampleLimit {
 		prompbmarshal.ResetWriteRequest(&sw.writeRequest)
 		up = 0
-		scrapesSkippedByScrapeLimit.Inc()
+		scrapesSkippedBySampleLimit.Inc()
 	}
 	samplesPostRelabeling := len(sw.writeRequest.Timeseries)
 	sw.addAutoTimeseries("up", float64(up), timestamp)

lib/promscrape/scrapework_test.go

@@ -243,7 +243,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
 		bar{a="b",c="d"} -3e4
 `, &ScrapeWork{
 		HonorLabels: true,
-		ScrapeLimit: 1,
+		SampleLimit: 1,
 	}, `
 		up 0 123
 		scrape_samples_scraped 2 123
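
As the updated test shows, `SampleLimit: 1` combined with a scrape body of two
samples causes the whole result to be dropped: `up` is written as 0, while the
auto-generated `scrape_samples_scraped` series still reports the 2 samples that
were read before the limit check.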