lib/promscrape: prevent from too big deadline misses on scrape retries
The maximum deadline miss duration is reduced to 2x scrape_interval in the worst case. By default it is limited to scrape_interval configured for the given scrape target.
This commit is contained in:
parent 44a54b8b3d
commit 8b133e40d5

1 changed file with 9 additions and 3 deletions
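To make the worst-case bound from the description concrete, here is a small illustrative sketch. It is not part of the commit; the function name, the main wrapper, and the assumption that the request timeout (hc.ReadTimeout) equals the target's scrape_interval are mine:

package main

import (
	"fmt"
	"time"
)

// worstCaseFinishTime returns the latest unix timestamp at which a scrape
// started at `start` can finish, assuming the request timeout equals
// scrapeInterval and at most one retry fits in before the deadline.
func worstCaseFinishTime(start uint64, scrapeInterval time.Duration) uint64 {
	// Retry deadline, rounded down by one second (mirrors the diff below).
	deadline := start + uint64(scrapeInterval.Seconds()) - 1
	// A retry that begins just before the deadline may still run for the full
	// timeout, so completion is bounded by roughly start + 2*scrapeInterval.
	return deadline + uint64(scrapeInterval.Seconds())
}

func main() {
	// With a 30s scrape_interval, a scrape started at t=0 finishes by t=59,
	// i.e. within 2x scrape_interval in the worst case; without a retry it
	// stays within a single scrape_interval.
	fmt.Println(worstCaseFinishTime(0, 30*time.Second))
}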
@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/fasthttp"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -89,7 +90,7 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
 		req.Header.Set("Authorization", c.authHeader)
 	}
 	resp := fasthttp.AcquireResponse()
-	err := doRequestWithPossibleRetry(c.hc, req, resp)
+	err := doRequestWithPossibleRetry(c.hc, req, resp, c.hc.ReadTimeout)
 	statusCode := resp.StatusCode()
 	if err == nil && (statusCode == fasthttp.StatusMovedPermanently || statusCode == fasthttp.StatusFound) {
 		// Allow a single redirect.
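The timeout passed at the call site above is the HostClient's ReadTimeout. As a purely hypothetical sketch (the function and variable names are illustrative and not taken from this diff), the per-target scrape timeout is assumed to be stored there when the client is constructed, which is why it can double as the retry-deadline budget:

// Hypothetical sketch; assumes the fasthttp and time imports used elsewhere in this file.
func newHostClientSketch(targetAddr string, scrapeTimeout time.Duration) *fasthttp.HostClient {
	return &fasthttp.HostClient{
		Addr:        targetAddr,    // illustrative: host:port of the scrape target
		ReadTimeout: scrapeTimeout, // illustrative: the target's scrape timeout
	}
}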
@@ -144,7 +145,9 @@ var (
 	scrapesGunzipFailed = metrics.NewCounter(`vm_promscrape_scrapes_gunzip_failed_total`)
 )
 
-func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response) error {
+func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response, timeout time.Duration) error {
+	// Round deadline to the smallest value in order to protect from too big deadline misses on retry.
+	deadline := fasttime.UnixTimestamp() + uint64(timeout.Seconds()) - 1
 	attempts := 0
 again:
 	// There is no need in calling DoTimeout, since the timeout must be already set in hc.ReadTimeout.
@@ -155,7 +158,10 @@ again:
 	if err != fasthttp.ErrConnectionClosed {
 		return err
 	}
-	// Retry request if the server closed the keep-alive connection during the first attempt.
+	// Retry request if the server closes the keep-alive connection unless deadline exceeds.
+	if fasttime.UnixTimestamp() > deadline {
+		return fasthttp.ErrTimeout
+	}
 	attempts++
 	if attempts > 3 {
 		return fmt.Errorf("the server closed 3 subsequent connections: %w", err)
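Putting the hunks together, the retry helper after this change reads roughly as follows. This is a sketch assembled from the diff context above; the lines not visible in the hunks (the hc.Do call, the early return on success, and the closing goto) are assumptions:

func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response, timeout time.Duration) error {
	// Round deadline to the smallest value in order to protect from too big deadline misses on retry.
	deadline := fasttime.UnixTimestamp() + uint64(timeout.Seconds()) - 1
	attempts := 0
again:
	// There is no need in calling DoTimeout, since the timeout must be already set in hc.ReadTimeout.
	err := hc.Do(req, resp) // assumption: the request is issued here
	if err == nil {
		return nil
	}
	if err != fasthttp.ErrConnectionClosed {
		return err
	}
	// Retry request if the server closes the keep-alive connection unless deadline exceeds.
	if fasttime.UnixTimestamp() > deadline {
		return fasthttp.ErrTimeout
	}
	attempts++
	if attempts > 3 {
		return fmt.Errorf("the server closed 3 subsequent connections: %w", err)
	}
	goto again // assumption: retry until the deadline or the attempt limit is hit
}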