mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
lib/promscrape: exponentially increase retry interval on unsuccessful requests to scrape targets or to service discovery services
This should reduce CPU load at vmagent and at remote side when the remote side doesn't accept HTTP requests. Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1289
This commit is contained in:
parent
66c6976723
commit
d13906bf1f
2 changed files with 16 additions and 2 deletions
|
@ -299,6 +299,7 @@ var (
|
||||||
)
|
)
|
||||||
|
|
||||||
func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response, deadline time.Time) error {
|
func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response, deadline time.Time) error {
|
||||||
|
sleepTime := time.Second
|
||||||
for {
|
for {
|
||||||
// Use DoDeadline instead of Do even if hc.ReadTimeout is already set in order to guarantee the given deadline
|
// Use DoDeadline instead of Do even if hc.ReadTimeout is already set in order to guarantee the given deadline
|
||||||
// across multiple retries.
|
// across multiple retries.
|
||||||
|
@ -310,9 +311,15 @@ func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request,
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Retry request if the server closes the keep-alive connection unless deadline exceeds.
|
// Retry request if the server closes the keep-alive connection unless deadline exceeds.
|
||||||
if time.Since(deadline) >= 0 {
|
maxSleepTime := time.Until(deadline)
|
||||||
|
if sleepTime > maxSleepTime {
|
||||||
return fmt.Errorf("the server closes all the connection attempts: %w", err)
|
return fmt.Errorf("the server closes all the connection attempts: %w", err)
|
||||||
}
|
}
|
||||||
|
sleepTime += sleepTime
|
||||||
|
if sleepTime > maxSleepTime {
|
||||||
|
sleepTime = maxSleepTime
|
||||||
|
}
|
||||||
|
time.Sleep(sleepTime)
|
||||||
scrapeRetries.Inc()
|
scrapeRetries.Inc()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -222,6 +222,7 @@ func (c *Client) getAPIResponseWithParamsAndClient(client *fasthttp.HostClient,
|
||||||
}
|
}
|
||||||
|
|
||||||
func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response, deadline time.Time) error {
|
func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response, deadline time.Time) error {
|
||||||
|
sleepTime := time.Second
|
||||||
discoveryRequests.Inc()
|
discoveryRequests.Inc()
|
||||||
for {
|
for {
|
||||||
// Use DoDeadline instead of Do even if hc.ReadTimeout is already set in order to guarantee the given deadline
|
// Use DoDeadline instead of Do even if hc.ReadTimeout is already set in order to guarantee the given deadline
|
||||||
|
@ -234,9 +235,15 @@ func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request,
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Retry request if the server closes the keep-alive connection unless deadline exceeds.
|
// Retry request if the server closes the keep-alive connection unless deadline exceeds.
|
||||||
if time.Since(deadline) >= 0 {
|
maxSleepTime := time.Until(deadline)
|
||||||
|
if sleepTime > maxSleepTime {
|
||||||
return fmt.Errorf("the server closes all the connection attempts: %w", err)
|
return fmt.Errorf("the server closes all the connection attempts: %w", err)
|
||||||
}
|
}
|
||||||
|
sleepTime += sleepTime
|
||||||
|
if sleepTime > maxSleepTime {
|
||||||
|
sleepTime = maxSleepTime
|
||||||
|
}
|
||||||
|
time.Sleep(sleepTime)
|
||||||
discoveryRetries.Inc()
|
discoveryRetries.Inc()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue