lib/promscrape: support Prometheus-like durations in scrape configs (#2169)

* lib/promscrape: support Prometheus-like durations in scrape configs

The change allows specifying duration values like `1d` and `1w`
for fields such as `scrape_interval` and `scrape_timeout` (see the sketch below).

https://github.com/VictoriaMetrics/VictoriaMetrics/issues/817#issuecomment-1033384766
Signed-off-by: hagen1778 <roman@victoriametrics.com>
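A minimal sketch (not part of this commit) of how the new parsing behaves, assuming the `lib/promutils` package added further down and the `gopkg.in/yaml.v2` decoder; the `cfg` struct and its field names are illustrative stand-ins, not the real promscrape types:

```go
package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
	"gopkg.in/yaml.v2"
)

// cfg mimics the shape of a scrape config section; it is an illustrative
// stand-in, not one of the real promscrape structs.
type cfg struct {
	ScrapeInterval promutils.Duration `yaml:"scrape_interval"`
	ScrapeTimeout  promutils.Duration `yaml:"scrape_timeout"`
}

func main() {
	// Prometheus-style units such as `1w` and `1d` are now accepted.
	data := []byte("scrape_interval: 1w\nscrape_timeout: 1d\n")
	var c cfg
	if err := yaml.Unmarshal(data, &c); err != nil {
		log.Fatalf("cannot parse config: %s", err)
	}
	fmt.Println(c.ScrapeInterval.Duration()) // 168h0m0s
	fmt.Println(c.ScrapeTimeout.Duration())  // 24h0m0s

	// ParseDuration can also be used directly, including compound values.
	d, err := promutils.ParseDuration("1w3d5h")
	if err != nil {
		log.Fatalf("unexpected error: %s", err)
	}
	fmt.Println(d) // 245h0m0s
}
```

The unmarshalling goes through `promutils.Duration.UnmarshalYAML`, which delegates to `metricsql.DurationValue`, so anything MetricsQL accepts as a duration should work here too.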

* lib/blockcache: make linter happy

Signed-off-by: hagen1778 <roman@victoriametrics.com>

* lib/promscrape: support Prometheus-like durations in scrape configs

* add support for the extra fields `scrape_align_interval` and `scrape_offset`;
* support Prometheus duration parsing for the `__scrape_interval__`
and `__scrape_timeout__` labels (see the sketch below);

Signed-off-by: hagen1778 <roman@victoriametrics.com>
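Not part of the diff either: a rough sketch of what switching the `__scrape_interval__` / `__scrape_timeout__` label parsing from `time.ParseDuration` to `promutils.ParseDuration` means in practice. The label value `1d` below is a hypothetical relabeling result; the real handling sits in `getScrapeWork` in the `lib/promscrape` hunks further down:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

func main() {
	// Hypothetical value of the __scrape_interval__ label, e.g. set via relabeling.
	labelValue := "1d"

	// Before this change the label went through time.ParseDuration,
	// which rejects Prometheus units such as `d` and `w`.
	if _, err := time.ParseDuration(labelValue); err != nil {
		fmt.Println("stdlib parser:", err)
	}

	// After the change promutils.ParseDuration is used, so the value is accepted
	// and overrides the job-level scrape interval.
	d, err := promutils.ParseDuration(labelValue)
	if err != nil {
		log.Fatalf("cannot parse __scrape_interval__=%q: %s", labelValue, err)
	}
	fmt.Println("effective scrape interval:", d) // 24h0m0s
}
```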

* wip

* wip

* docs/CHANGELOG.md: document the feature

Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
Roman Khavronenko 2022-02-11 16:17:00 +02:00 committed by GitHub
parent 3cb72ccc2a
commit e3adcbec6e
14 changed files with 191 additions and 83 deletions


@@ -17,6 +17,7 @@ import (
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )
 // Group contains list of Rules grouped into
@@ -25,7 +26,7 @@ type Group struct {
 Type datasource.Type `yaml:"type,omitempty"`
 File string
 Name string `yaml:"name"`
-Interval utils.PromDuration `yaml:"interval"`
+Interval promutils.Duration `yaml:"interval"`
 Rules []Rule `yaml:"rules"`
 Concurrency int `yaml:"concurrency"`
 // ExtraFilterLabels is a list label filters applied to every rule
@@ -129,7 +130,7 @@ type Rule struct {
 Record string `yaml:"record,omitempty"`
 Alert string `yaml:"alert,omitempty"`
 Expr string `yaml:"expr"`
-For utils.PromDuration `yaml:"for"`
+For promutils.Duration `yaml:"for"`
 Labels map[string]string `yaml:"labels,omitempty"`
 Annotations map[string]string `yaml:"annotations,omitempty"`


@@ -11,7 +11,7 @@ import (
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
-"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )
 func TestMain(m *testing.M) {
@@ -260,7 +260,7 @@ func TestGroup_Validate(t *testing.T) {
 Rules: []Rule{
 {
 Expr: "sumSeries(time('foo.bar',10))",
-For: utils.NewPromDuration(10 * time.Millisecond),
+For: promutils.NewDuration(10 * time.Millisecond),
 },
 {
 Expr: "sum(up == 0 ) by (host)",
@@ -275,7 +275,7 @@ func TestGroup_Validate(t *testing.T) {
 Rules: []Rule{
 {
 Expr: "sum(up == 0 ) by (host)",
-For: utils.NewPromDuration(10 * time.Millisecond),
+For: promutils.NewDuration(10 * time.Millisecond),
 },
 {
 Expr: "sumSeries(time('foo.bar',10))",
@@ -342,7 +342,7 @@ func TestHashRule(t *testing.T) {
 true,
 },
 {
-Rule{Alert: "alert", Expr: "up == 1", For: utils.NewPromDuration(time.Minute)},
+Rule{Alert: "alert", Expr: "up == 1", For: promutils.NewDuration(time.Minute)},
 Rule{Alert: "alert", Expr: "up == 1"},
 true,
 },


@@ -9,7 +9,7 @@ import (
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
-"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )
 func init() {
@@ -34,7 +34,7 @@ func TestUpdateWith(t *testing.T) {
 []config.Rule{{
 Alert: "foo",
 Expr: "up > 0",
-For: utils.NewPromDuration(time.Second),
+For: promutils.NewDuration(time.Second),
 Labels: map[string]string{
 "bar": "baz",
 },
@@ -46,7 +46,7 @@ func TestUpdateWith(t *testing.T) {
 []config.Rule{{
 Alert: "foo",
 Expr: "up > 10",
-For: utils.NewPromDuration(time.Second),
+For: promutils.NewDuration(time.Second),
 Labels: map[string]string{
 "baz": "bar",
 },


@@ -11,11 +11,11 @@ import (
 "strings"
 "time"
-"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )
 // Config contains list of supported configuration settings
@@ -38,7 +38,7 @@ type Config struct {
 RelabelConfigs []promrelabel.RelabelConfig `yaml:"relabel_configs,omitempty"`
 // The timeout used when sending alerts.
-Timeout utils.PromDuration `yaml:"timeout,omitempty"`
+Timeout promutils.Duration `yaml:"timeout,omitempty"`
 // Checksum stores the hash of yaml definition for the config.
 // May be used to detect any changes to the config file.
@@ -71,7 +71,7 @@ func (cfg *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 cfg.Scheme = "http"
 }
 if cfg.Timeout.Duration() == 0 {
-cfg.Timeout = utils.NewPromDuration(time.Second * 10)
+cfg.Timeout = promutils.NewDuration(time.Second * 10)
 }
 rCfg, err := promrelabel.ParseRelabelConfigs(cfg.RelabelConfigs, false)
 if err != nil {


@@ -27,7 +27,7 @@ import (
 textTpl "text/template"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
-"github.com/VictoriaMetrics/metricsql"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )
 // metric is private copy of datasource.Metric,
@@ -104,12 +104,12 @@ func InitTemplateFunc(externalURL *url.URL) {
 },
 // parseDuration parses a duration string such as "1h" into the number of seconds it represents
-"parseDuration": func(d string) (float64, error) {
-ms, err := metricsql.DurationValue(d, 0)
+"parseDuration": func(s string) (float64, error) {
+d, err := promutils.ParseDuration(s)
 if err != nil {
 return 0, err
 }
-return float64(ms) / 1000, nil
+return d.Seconds(), nil
 },
 /* Numbers */


@@ -8,7 +8,7 @@ import (
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
-"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )
 type fakeReplayQuerier struct {
@@ -83,7 +83,7 @@ func TestReplay(t *testing.T) {
 to: "2021-01-01T15:02:30.000Z",
 maxDP: 60,
 cfg: []config.Group{
-{Interval: utils.NewPromDuration(time.Minute), Rules: []config.Rule{{Record: "foo", Expr: "sum(up)"}}},
+{Interval: promutils.NewDuration(time.Minute), Rules: []config.Rule{{Record: "foo", Expr: "sum(up)"}}},
 },
 qb: &fakeReplayQuerier{
 registry: map[string]map[string]struct{}{


@@ -1,43 +0,0 @@
package utils
import (
"time"
"github.com/VictoriaMetrics/metricsql"
)
// PromDuration is Prometheus duration.
type PromDuration struct {
milliseconds int64
}
// NewPromDuration returns PromDuration for given d.
func NewPromDuration(d time.Duration) PromDuration {
return PromDuration{
milliseconds: d.Milliseconds(),
}
}
// MarshalYAML implements yaml.Marshaler interface.
func (pd PromDuration) MarshalYAML() (interface{}, error) {
return pd.Duration().String(), nil
}
// UnmarshalYAML implements yaml.Unmarshaler interface.
func (pd *PromDuration) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
}
ms, err := metricsql.DurationValue(s, 0)
if err != nil {
return err
}
pd.milliseconds = ms
return nil
}
// Duration returns duration for pd.
func (pd *PromDuration) Duration() time.Duration {
return time.Duration(pd.milliseconds) * time.Millisecond
}


@@ -10,6 +10,7 @@ import (
 "time"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 "github.com/VictoriaMetrics/metricsql"
 )
@@ -48,14 +49,14 @@ func GetTime(r *http.Request, argKey string, defaultMs int64) (int64, error) {
 return maxTimeMsecs, nil
 }
 // Try parsing duration relative to the current time
-d, err1 := metricsql.DurationValue(argValue, 0)
+d, err1 := promutils.ParseDuration(argValue)
 if err1 != nil {
 return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
 }
 if d > 0 {
 d = -d
 }
-t = time.Now().Add(time.Duration(d) * time.Millisecond)
+t = time.Now().Add(d)
 }
 secs = float64(t.UnixNano()) / 1e9
 }
@@ -91,11 +92,11 @@ func GetDuration(r *http.Request, argKey string, defaultValue int64) (int64, err
 secs, err := strconv.ParseFloat(argValue, 64)
 if err != nil {
 // Try parsing string format
-d, err := metricsql.DurationValue(argValue, 0)
+d, err := promutils.ParseDuration(argValue)
 if err != nil {
 return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
 }
-secs = float64(d) / 1000
+secs = d.Seconds()
 }
 msecs := int64(secs * 1e3)
 if msecs <= 0 || msecs > maxDurationMsecs {


@@ -31,6 +31,7 @@ The following tip changes can be tested by building VictoriaMetrics components f
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): show the total number of scrapes and the total number of scrape errors per target at `/targets` page. This information may be useful when debugging unreliable scrape targets.
 * FEATURE: vmagent and single-node VictoriaMetrics: disallow unknown fields at `-promscrape.config` file. Previously unknown fields were allowed. This could lead to long-living silent config errors. The previous behaviour can be returned by passing `-promscrape.config.strictParse=false` command-line flag.
 * FEATURE: add `__meta_kubernetes_endpointslice_label*` and `__meta_kubernetes_endpointslice_annotation*` labels for `role: endpointslice` targets in [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config) to be consistent with other `role` values. See [this issue](https://github.com/prometheus/prometheus/issues/10284).
+* FEATURE: vmagent: support Prometheus-like durations in `-promscrape.config`. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/817#issuecomment-1033384766).
 * BUGFIX: return proper results from `highestMax()` function at [Graphite render API](https://docs.victoriametrics.com/#graphite-render-api-usage). Previously it was incorrectly returning timeseries with min peaks instead of max peaks.
 * BUGFIX: properly limit indexdb cache sizes. Previously they could exceed values set via `-memory.allowedPercent` and/or `-memory.allowedBytes` when `indexdb` contained many data parts. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2007).


@@ -30,6 +30,7 @@ import (
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/http"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/kubernetes"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/openstack"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
 "github.com/VictoriaMetrics/metrics"
 xxhash "github.com/cespare/xxhash/v2"
@@ -105,9 +106,9 @@ func (cfg *Config) getJobNames() []string {
 //
 // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/
 type GlobalConfig struct {
-ScrapeInterval time.Duration `yaml:"scrape_interval,omitempty"`
-ScrapeTimeout time.Duration `yaml:"scrape_timeout,omitempty"`
+ScrapeInterval promutils.Duration `yaml:"scrape_interval,omitempty"`
+ScrapeTimeout promutils.Duration `yaml:"scrape_timeout,omitempty"`
 ExternalLabels map[string]string `yaml:"external_labels,omitempty"`
 }
 // ScrapeConfig represents essential parts for `scrape_config` section of Prometheus config.
@@ -115,8 +116,8 @@ type GlobalConfig struct {
 // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
 type ScrapeConfig struct {
 JobName string `yaml:"job_name"`
-ScrapeInterval time.Duration `yaml:"scrape_interval,omitempty"`
-ScrapeTimeout time.Duration `yaml:"scrape_timeout,omitempty"`
+ScrapeInterval promutils.Duration `yaml:"scrape_interval,omitempty"`
+ScrapeTimeout promutils.Duration `yaml:"scrape_timeout,omitempty"`
 MetricsPath string `yaml:"metrics_path,omitempty"`
 HonorLabels bool `yaml:"honor_labels,omitempty"`
 HonorTimestamps *bool `yaml:"honor_timestamps,omitempty"`
@@ -149,8 +150,8 @@ type ScrapeConfig struct {
 DisableCompression bool `yaml:"disable_compression,omitempty"`
 DisableKeepAlive bool `yaml:"disable_keepalive,omitempty"`
 StreamParse bool `yaml:"stream_parse,omitempty"`
-ScrapeAlignInterval time.Duration `yaml:"scrape_align_interval,omitempty"`
-ScrapeOffset time.Duration `yaml:"scrape_offset,omitempty"`
+ScrapeAlignInterval promutils.Duration `yaml:"scrape_align_interval,omitempty"`
+ScrapeOffset promutils.Duration `yaml:"scrape_offset,omitempty"`
 SeriesLimit int `yaml:"series_limit,omitempty"`
 ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"`
@@ -705,16 +706,16 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
 if jobName == "" {
 return nil, fmt.Errorf("missing `job_name` field in `scrape_config`")
 }
-scrapeInterval := sc.ScrapeInterval
+scrapeInterval := sc.ScrapeInterval.Duration()
 if scrapeInterval <= 0 {
-scrapeInterval = globalCfg.ScrapeInterval
+scrapeInterval = globalCfg.ScrapeInterval.Duration()
 if scrapeInterval <= 0 {
 scrapeInterval = defaultScrapeInterval
 }
 }
-scrapeTimeout := sc.ScrapeTimeout
+scrapeTimeout := sc.ScrapeTimeout.Duration()
 if scrapeTimeout <= 0 {
-scrapeTimeout = globalCfg.ScrapeTimeout
+scrapeTimeout = globalCfg.ScrapeTimeout.Duration()
 if scrapeTimeout <= 0 {
 scrapeTimeout = defaultScrapeTimeout
 }
@@ -788,8 +789,8 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
 disableCompression: sc.DisableCompression,
 disableKeepAlive: sc.DisableKeepAlive,
 streamParse: sc.StreamParse,
-scrapeAlignInterval: sc.ScrapeAlignInterval,
-scrapeOffset: sc.ScrapeOffset,
+scrapeAlignInterval: sc.ScrapeAlignInterval.Duration(),
+scrapeOffset: sc.ScrapeOffset.Duration(),
 seriesLimit: sc.SeriesLimit,
 }
 return swc, nil
@@ -1057,7 +1058,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
 // Read __scrape_interval__ and __scrape_timeout__ from labels.
 scrapeInterval := swc.scrapeInterval
 if s := promrelabel.GetLabelValueByName(labels, "__scrape_interval__"); len(s) > 0 {
-d, err := time.ParseDuration(s)
+d, err := promutils.ParseDuration(s)
 if err != nil {
 return nil, fmt.Errorf("cannot parse __scrape_interval__=%q: %w", s, err)
 }
@@ -1065,7 +1066,7 @@
 }
 scrapeTimeout := swc.scrapeTimeout
 if s := promrelabel.GetLabelValueByName(labels, "__scrape_timeout__"); len(s) > 0 {
-d, err := time.ParseDuration(s)
+d, err := promutils.ParseDuration(s)
 if err != nil {
 return nil, fmt.Errorf("cannot parse __scrape_timeout__=%q: %w", s, err)
 }


@@ -1648,6 +1648,59 @@
 ProxyAuthConfig: &promauth.Config{},
 },
 })
+f(`
+global:
+  scrape_timeout: 1d
+scrape_configs:
+- job_name: foo
+  scrape_interval: 1w
+  scrape_align_interval: 1d
+  scrape_offset: 2d
+  static_configs:
+  - targets: ["foo.bar:1234"]
+`, []*ScrapeWork{
+{
+ScrapeURL: "http://foo.bar:1234/metrics",
+ScrapeInterval: time.Hour * 24 * 7,
+ScrapeTimeout: time.Hour * 24,
+ScrapeAlignInterval: time.Hour * 24,
+ScrapeOffset: time.Hour * 24 * 2,
+HonorTimestamps: true,
+Labels: []prompbmarshal.Label{
+{
+Name: "__address__",
+Value: "foo.bar:1234",
+},
+{
+Name: "__metrics_path__",
+Value: "/metrics",
+},
+{
+Name: "__scheme__",
+Value: "http",
+},
+{
+Name: "__scrape_interval__",
+Value: "168h0m0s",
+},
+{
+Name: "__scrape_timeout__",
+Value: "24h0m0s",
+},
+{
+Name: "instance",
+Value: "foo.bar:1234",
+},
+{
+Name: "job",
+Value: "foo",
+},
+},
+AuthConfig: &promauth.Config{},
+ProxyAuthConfig: &promauth.Config{},
+jobNameOriginal: "foo",
+},
+})
 }
 func equalStaticConfigForScrapeWorks(a, b []*ScrapeWork) bool {

lib/promutils/duration.go (new file, 52 lines)

@@ -0,0 +1,52 @@
package promutils
import (
"time"
"github.com/VictoriaMetrics/metricsql"
)
// Duration is duration, which must be used in Prometheus-compatible yaml configs.
type Duration struct {
d time.Duration
}
// NewDuration returns Duration for given d.
func NewDuration(d time.Duration) Duration {
return Duration{
d: d,
}
}
// MarshalYAML implements yaml.Marshaler interface.
func (pd Duration) MarshalYAML() (interface{}, error) {
return pd.d.String(), nil
}
// UnmarshalYAML implements yaml.Unmarshaler interface.
func (pd *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
}
ms, err := metricsql.DurationValue(s, 0)
if err != nil {
return err
}
pd.d = time.Duration(ms) * time.Millisecond
return nil
}
// Duration returns duration for pd.
func (pd Duration) Duration() time.Duration {
return pd.d
}
// ParseDuration parses duration string in Prometheus format
func ParseDuration(s string) (time.Duration, error) {
ms, err := metricsql.DurationValue(s, 0)
if err != nil {
return 0, err
}
return time.Duration(ms) * time.Millisecond, nil
}


@@ -0,0 +1,42 @@
package promutils
import (
"testing"
"time"
)
func TestDuration(t *testing.T) {
if _, err := ParseDuration("foobar"); err == nil {
t.Fatalf("expecting error for invalid duration")
}
dNative, err := ParseDuration("1w")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if dNative != 7*24*time.Hour {
t.Fatalf("unexpected duration; got %s; want %s", dNative, 7*24*time.Hour)
}
d := NewDuration(dNative)
if d.Duration() != dNative {
t.Fatalf("unexpected duration; got %s; want %s", d.Duration(), dNative)
}
v, err := d.MarshalYAML()
if err != nil {
t.Fatalf("unexpected error in MarshalYAML(): %s", err)
}
sExpected := "168h0m0s"
if s := v.(string); s != sExpected {
t.Fatalf("unexpected value from MarshalYAML(); got %q; want %q", s, sExpected)
}
if err := d.UnmarshalYAML(func(v interface{}) error {
sp := v.(*string)
s := "1w3d5h"
*sp = s
return nil
}); err != nil {
t.Fatalf("unexpected error in UnmarshalYAML(): %s", err)
}
if dNative := d.Duration(); dNative != (10*24+5)*time.Hour {
t.Fatalf("unexpected value; got %s; want %s", dNative, (10*24+5)*time.Hour)
}
}


@@ -11,7 +11,7 @@ import (
 "time"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
-"github.com/VictoriaMetrics/metricsql"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )
 // partHeader represents part header.
@@ -140,11 +140,11 @@ func (ph *partHeader) readMinDedupInterval(partPath string) error {
 }
 return fmt.Errorf("cannot read %q: %w", filePath, err)
 }
-dedupInterval, err := metricsql.DurationValue(string(data), 0)
+dedupInterval, err := promutils.ParseDuration(string(data))
 if err != nil {
 return fmt.Errorf("cannot parse minimum dedup interval %q at %q: %w", data, filePath, err)
 }
-ph.MinDedupInterval = dedupInterval
+ph.MinDedupInterval = dedupInterval.Milliseconds()
 return nil
 }