mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
lib/promscrape: read response body into memory in stream parsing mode before parsing it
This reduces scrape duration for targets returning big responses. The response body was already read into memory in stream parsing mode before this change, so this commit shouldn't increase memory usage.
This commit is contained in:
parent
ccad651a61
commit
74c00a8762
2 changed files with 50 additions and 33 deletions
|
@ -23,6 +23,7 @@ The following tip changes can be tested by building VictoriaMetrics components f
|
||||||
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add `vm-native-step-interval` command line flag for `vm-native` mode. New option allows splitting the import process into chunks by time interval. This helps migrate data sets with a high churn rate and provides better control over the process. See [feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2733).
|
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add `vm-native-step-interval` command line flag for `vm-native` mode. New option allows splitting the import process into chunks by time interval. This helps migrate data sets with a high churn rate and provides better control over the process. See [feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2733).
|
||||||
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add `top queries` tab, which shows various stats for recently executed queries. See [these docs](https://docs.victoriametrics.com/#top-queries) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2707).
|
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add `top queries` tab, which shows various stats for recently executed queries. See [these docs](https://docs.victoriametrics.com/#top-queries) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2707).
|
||||||
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `debug` mode to the alerting rule settings for printing additional information into logs during evaluation. See `debug` param in [alerting rule config](https://docs.victoriametrics.com/vmalert.html#alerting-rules).
|
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `debug` mode to the alerting rule settings for printing additional information into logs during evaluation. See `debug` param in [alerting rule config](https://docs.victoriametrics.com/vmalert.html#alerting-rules).
|
||||||
|
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): minimize the time needed for reading large responses from scrape targets in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). This should reduce scrape durations for such targets as [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) running in a big Kubernetes cluster.
|
||||||
|
|
||||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate `rate_over_sum(m[d])` as `sum_over_time(m[d])/d`. Previously the `sum_over_time(m[d])` could be improperly divided by smaller than `d` time range. See [rate_over_sum() docs](https://docs.victoriametrics.com/MetricsQL.html#rate_over_sum) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3045).
|
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate `rate_over_sum(m[d])` as `sum_over_time(m[d])/d`. Previously the `sum_over_time(m[d])` could be improperly divided by smaller than `d` time range. See [rate_over_sum() docs](https://docs.victoriametrics.com/MetricsQL.html#rate_over_sum) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3045).
|
||||||
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): properly calculate query results at `vmselect`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3067). The issue has been introduced in [v1.81.0](https://docs.victoriametrics.com/CHANGELOG.html#v1810).
|
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): properly calculate query results at `vmselect`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3067). The issue has been introduced in [v1.81.0](https://docs.victoriametrics.com/CHANGELOG.html#v1810).
|
||||||
|
|
|
@ -516,19 +516,35 @@ func (sw *scrapeWork) pushData(at *auth.Token, wr *prompbmarshal.WriteRequest) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// streamBodyReader buffers the whole response body of a scrape target in
// memory, so it can be parsed in stream mode without waiting on the network.
//
// Do not pool streamBodyReader objects and do not pre-allocate body,
// since this would increase memory usage when scraping big responses.
type streamBodyReader struct {
	// body is the whole response body read from the scrape target.
	body []byte

	// bodyLen is the length of body captured when it was read.
	bodyLen int

	// readOffset is the number of bytes of body already consumed by Read.
	readOffset int
}
|
func (sbr *streamBodyReader) Init(sr *streamReader) error {
|
||||||
|
sbr.body = nil
|
||||||
|
sbr.bodyLen = 0
|
||||||
|
sbr.readOffset = 0
|
||||||
|
// Read the whole response body in memory before parsing it in stream mode.
|
||||||
|
// This minimizes the time needed for reading response body from scrape target.
|
||||||
|
startTime := fasttime.UnixTimestamp()
|
||||||
|
body, err := io.ReadAll(sr)
|
||||||
|
if err != nil {
|
||||||
|
d := fasttime.UnixTimestamp() - startTime
|
||||||
|
return fmt.Errorf("cannot read stream body in %d seconds: %w", d, err)
|
||||||
|
}
|
||||||
|
sbr.body = body
|
||||||
|
sbr.bodyLen = len(body)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sbr *streamBodyReader) Read(b []byte) (int, error) {
|
func (sbr *streamBodyReader) Read(b []byte) (int, error) {
|
||||||
n, err := sbr.sr.Read(b)
|
if sbr.readOffset >= len(sbr.body) {
|
||||||
sbr.bodyLen += n
|
return 0, io.EOF
|
||||||
if sbr.captureBody {
|
|
||||||
sbr.body = append(sbr.body, b[:n]...)
|
|
||||||
}
|
}
|
||||||
return n, err
|
n := copy(b, sbr.body[sbr.readOffset:])
|
||||||
|
sbr.readOffset += n
|
||||||
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
|
func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
|
||||||
|
@ -536,17 +552,16 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
|
||||||
samplesPostRelabeling := 0
|
samplesPostRelabeling := 0
|
||||||
wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
|
wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
|
||||||
// Do not pool sbr and do not pre-allocate sbr.body in order to reduce memory usage when scraping big responses.
|
// Do not pool sbr and do not pre-allocate sbr.body in order to reduce memory usage when scraping big responses.
|
||||||
sbr := &streamBodyReader{
|
var sbr streamBodyReader
|
||||||
captureBody: !*noStaleMarkers,
|
|
||||||
}
|
|
||||||
|
|
||||||
sr, err := sw.GetStreamReader()
|
sr, err := sw.GetStreamReader()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = fmt.Errorf("cannot read data: %s", err)
|
err = fmt.Errorf("cannot read data: %s", err)
|
||||||
} else {
|
} else {
|
||||||
var mu sync.Mutex
|
var mu sync.Mutex
|
||||||
sbr.sr = sr
|
err = sbr.Init(sr)
|
||||||
err = parser.ParseStream(sbr, scrapeTimestamp, false, func(rows []parser.Row) error {
|
if err != nil {
|
||||||
|
err = parser.ParseStream(&sbr, scrapeTimestamp, false, func(rows []parser.Row) error {
|
||||||
mu.Lock()
|
mu.Lock()
|
||||||
defer mu.Unlock()
|
defer mu.Unlock()
|
||||||
samplesScraped += len(rows)
|
samplesScraped += len(rows)
|
||||||
|
@ -567,6 +582,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
|
||||||
wc.resetNoRows()
|
wc.resetNoRows()
|
||||||
return nil
|
return nil
|
||||||
}, sw.logError)
|
}, sw.logError)
|
||||||
|
}
|
||||||
sr.MustClose()
|
sr.MustClose()
|
||||||
}
|
}
|
||||||
lastScrape := sw.loadLastScrape()
|
lastScrape := sw.loadLastScrape()
|
||||||
|
|
Loading…
Reference in a new issue