lib/promscrape: reduce timestamp jitter when scraping targets

This should improve compression for timestamps
Aliaksandr Valialkin 2020-04-01 16:10:35 +03:00
parent 4c56acbafa
commit c189104be7


@@ -88,31 +88,32 @@ type scrapeWork struct {
 func (sw *scrapeWork) run(stopCh <-chan struct{}) {
 	// Randomize start time for the first scrape in order to spread load
 	// when scraping many targets.
-	randSleep := time.Duration(float64(sw.Config.ScrapeInterval) * rand.Float64())
+	scrapeInterval := sw.Config.ScrapeInterval
+	randSleep := time.Duration(float64(scrapeInterval) * rand.Float64())
 	timer := time.NewTimer(randSleep)
+	var timestamp int64
 	var ticker *time.Ticker
 	select {
 	case <-stopCh:
 		timer.Stop()
 		return
-	case t := <-timer.C:
-		ticker = time.NewTicker(sw.Config.ScrapeInterval)
-		timestamp := t.UnixNano() / 1e6
+	case <-timer.C:
+		ticker = time.NewTicker(scrapeInterval)
+		timestamp = time.Now().UnixNano() / 1e6
 		sw.scrapeAndLogError(timestamp)
 	}
 	defer ticker.Stop()
 	for {
-		startTime := time.Now()
+		timestamp += scrapeInterval.Milliseconds()
 		select {
 		case <-stopCh:
 			return
-		case t := <-ticker.C:
-			// Adjust t if it is from the past (i.e. stale tick)
-			// This can be the case if the previous scrape took longer than the scrape interval.
-			if t.Sub(startTime) < 0 {
-				t = startTime
+		case <-ticker.C:
+			t := time.Now().UnixNano() / 1e6
+			if d := t - timestamp; d > 0 && float64(d)/float64(scrapeInterval.Milliseconds()) > 0.1 {
+				// Too big jitter. Adjust timestamp
+				timestamp = t
 			}
-			timestamp := t.UnixNano() / 1e6
 			sw.scrapeAndLogError(timestamp)
 		}
 	}
 }