lib/promscrape: add ability to set disable_compression and disable_keepalive options in scrape_config section of the config passed to -promscrape.config

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/580
Aliaksandr Valialkin 2020-07-02 14:19:11 +03:00
parent 8da3f773ae
commit 4dd3de9286
6 changed files with 65 additions and 24 deletions


@@ -147,14 +147,20 @@ The following scrape types in [scrape_config](https://prometheus.io/docs/prometh
* `dns_sd_configs` - for scraping targets discovered from DNS records (SRV, A and AAAA).
See [dns_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config) for details.
File feature requests at [our issue tracker](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you need other service discovery mechanisms to be supported by `vmagent`.
`vmagent` also supports the following additional options in the `scrape_config` section (see the example below):
* `disable_compression: true` - for disabling response compression on a per-job basis. By default, `vmagent` requests compressed responses from scrape targets
in order to save network bandwidth.
* `disable_keepalive: true` - for disabling [HTTP keep-alive connections](https://en.wikipedia.org/wiki/HTTP_persistent_connection) on a per-job basis.
By default, `vmagent` uses keep-alive connections to scrape targets in order to reduce the overhead of re-establishing connections.
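For example, a minimal `scrape_config` entry enabling both options could look as follows (the job name and target address are hypothetical):
```yml
scrape_configs:
- job_name: 'node-exporter'   # hypothetical job name
  disable_compression: true   # ask this job's targets for uncompressed responses
  disable_keepalive: true     # open a new connection for every scrape of this job
  static_configs:
  - targets:
    - 'host123:9100'          # hypothetical target address
```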
Note that `vmagent` doesn't support the `refresh_interval` option in these scrape configs. Use the corresponding `-promscrape.*CheckInterval`
command-line flag instead. For example, `-promscrape.consulSDCheckInterval=60s` sets `refresh_interval` for all the `consul_sd_configs`
entries to 60s. Run `vmagent -help` in order to see the default values for the `-promscrape.*CheckInterval` flags.
### Adding labels to metrics
Labels can be added to metrics via the following mechanisms:


@@ -14,19 +14,24 @@ import (
var (
maxScrapeSize = flag.Int("promscrape.maxScrapeSize", 16*1024*1024, "The maximum size of scrape response in bytes to process from Prometheus targets. "+
"Bigger responses are rejected")
disableCompression = flag.Bool("promscrape.disableCompression", false, "Whether to disable sending 'Accept-Encoding: gzip' request headers to scrape targets. "+
"This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization")
disableKeepAlive = flag.Bool("promscrape.disableKeepAlive", false, "Whether to disable HTTP keep-alive connections when scraping targets. This may be useful when targets "+
"have no support for HTTP keep-alive connections. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets")
disableCompression = flag.Bool("promscrape.disableCompression", false, "Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. "+
"This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. "+
"It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control")
disableKeepAlive = flag.Bool("promscrape.disableKeepAlive", false, "Whether to disable HTTP keep-alive connections when scraping all the targets. "+
"This may be useful when targets have no support for HTTP keep-alive connections. "+
"It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control. "+
"Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets")
)
type client struct {
hc *fasthttp.HostClient
scrapeURL string
host string
requestURI string
authHeader string
scrapeURL string
host string
requestURI string
authHeader string
disableCompression bool
disableKeepAlive bool
}
func newClient(sw *ScrapeWork) *client {
@@ -61,10 +66,12 @@ func newClient(sw *ScrapeWork) *client {
return &client{
hc: hc,
scrapeURL: sw.ScrapeURL,
host: host,
requestURI: requestURI,
authHeader: sw.AuthConfig.Authorization,
scrapeURL: sw.ScrapeURL,
host: host,
requestURI: requestURI,
authHeader: sw.AuthConfig.Authorization,
disableCompression: sw.DisableCompression,
disableKeepAlive: sw.DisableKeepAlive,
}
}
@@ -72,10 +79,10 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
req := fasthttp.AcquireRequest()
req.SetRequestURI(c.requestURI)
req.SetHost(c.host)
if !*disableCompression {
if !*disableCompression && !c.disableCompression {
req.Header.Set("Accept-Encoding", "gzip")
}
if *disableKeepAlive {
if *disableKeepAlive || c.disableKeepAlive {
req.SetConnectionClose()
}
if c.authHeader != "" {

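The flag descriptions above distinguish the global `-promscrape.disableCompression` / `-promscrape.disableKeepAlive` command-line flags, which affect every target, from the per-`scrape_config` options, which affect only the jobs that set them. A rough sketch of that fine-grained control, with hypothetical job names and targets and both global flags left at their default `false`:
```yml
scrape_configs:
- job_name: 'app'                # hypothetical job: keeps default compression and keep-alive behavior
  static_configs:
  - targets:
    - 'app-host:8080'
- job_name: 'legacy-exporter'    # hypothetical job whose target copes badly with keep-alive connections
  disable_keepalive: true        # disable keep-alive for this job only; compression stays enabled
  static_configs:
  - targets:
    - 'legacy-host:9100'
```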

@@ -76,6 +76,10 @@ type ScrapeConfig struct {
MetricRelabelConfigs []promrelabel.RelabelConfig `yaml:"metric_relabel_configs"`
SampleLimit int `yaml:"sample_limit"`
// These options are supported only by lib/promscrape.
DisableCompression bool `yaml:"disable_compression"`
DisableKeepAlive bool `yaml:"disable_keepalive"`
// This is set in loadConfig
swc *scrapeWorkConfig
}
@@ -404,6 +408,8 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
relabelConfigs: relabelConfigs,
metricRelabelConfigs: metricRelabelConfigs,
sampleLimit: sc.SampleLimit,
disableCompression: sc.DisableCompression,
disableKeepAlive: sc.DisableKeepAlive,
}
return swc, nil
}
@@ -422,6 +428,8 @@ type scrapeWorkConfig struct {
relabelConfigs []promrelabel.ParsedRelabelConfig
metricRelabelConfigs []promrelabel.ParsedRelabelConfig
sampleLimit int
disableCompression bool
disableKeepAlive bool
}
func appendKubernetesScrapeWork(dst []ScrapeWork, sdc *kubernetes.SDConfig, baseDir string, swc *scrapeWorkConfig) ([]ScrapeWork, bool) {
@@ -602,6 +610,8 @@ func appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, target string, ex
AuthConfig: swc.authConfig,
MetricRelabelConfigs: swc.metricRelabelConfigs,
SampleLimit: swc.sampleLimit,
DisableCompression: swc.disableCompression,
DisableKeepAlive: swc.disableKeepAlive,
jobNameOriginal: swc.jobName,
})


@@ -1201,6 +1201,9 @@ scrape_configs:
f(`
scrape_configs:
- job_name: 'snmp'
sample_limit: 100
disable_keepalive: true
disable_compression: true
static_configs:
- targets:
- 192.168.1.2 # SNMP device.
@@ -1249,8 +1252,11 @@ scrape_configs:
Value: "snmp",
},
},
AuthConfig: &promauth.Config{},
jobNameOriginal: "snmp",
AuthConfig: &promauth.Config{},
SampleLimit: 100,
DisableKeepAlive: true,
DisableCompression: true,
jobNameOriginal: "snmp",
},
})
}


@@ -67,6 +67,12 @@ type ScrapeWork struct {
// The maximum number of metrics to scrape after relabeling.
SampleLimit int
// Whether to disable response compression when querying ScrapeURL.
DisableCompression bool
// Whether to disable HTTP keep-alive when querying ScrapeURL.
DisableKeepAlive bool
// The original 'job_name'
jobNameOriginal string
}
@@ -76,9 +82,9 @@ type ScrapeWork struct {
// it can be used for comparing two ScrapeWork objects for equality.
func (sw *ScrapeWork) key() string {
key := fmt.Sprintf("ScrapeURL=%s, ScrapeInterval=%s, ScrapeTimeout=%s, HonorLabels=%v, HonorTimestamps=%v, Labels=%s, "+
"AuthConfig=%s, MetricRelabelConfigs=%s, SampleLimit=%d",
"AuthConfig=%s, MetricRelabelConfigs=%s, SampleLimit=%d, DisableCompression=%v, DisableKeepAlive=%v",
sw.ScrapeURL, sw.ScrapeInterval, sw.ScrapeTimeout, sw.HonorLabels, sw.HonorTimestamps, sw.LabelsString(),
sw.AuthConfig.String(), sw.metricRelabelConfigsString(), sw.SampleLimit)
sw.AuthConfig.String(), sw.metricRelabelConfigsString(), sw.SampleLimit, sw.DisableCompression, sw.DisableKeepAlive)
return key
}