all: add initial support for query tracing
See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#query-tracing

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1403
This commit is contained in:
parent d2567ccdd6
commit 41958ed5dd

41 changed files with 2068 additions and 925 deletions

README.md | 63

@@ -1365,6 +1365,69 @@ VictoriaMetrics returns TSDB stats at `/api/v1/status/tsdb` page in the way simi
 * `match[]=SELECTOR` where `SELECTOR` is an arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) for series to take into account during stats calculation. By default all the series are taken into account.
 * `extra_label=LABEL=VALUE`. See [these docs](#prometheus-querying-api-enhancements) for more details.
 
+## Query tracing
+
+VictoriaMetrics supports query tracing, which can be used for determining bottlenecks during query processing.
+
+Query tracing can be enabled for a specific query by passing `trace=1` query arg.
+In this case VictoriaMetrics puts query trace into `trace` field in the output JSON.
+
+For example, the following command:
+
+```bash
+curl http://localhost:8428/api/v1/query_range -d 'query=2*rand()' -d 'start=-1h' -d 'step=1m' -d 'trace=1' | jq -r '.trace'
+```
+
+would return the following trace:
+
+```json
+{
+  "duration_msec": 0.099,
+  "message": "/api/v1/query_range: start=1654034340000, end=1654037880000, step=60000, query=\"2*rand()\": series=1",
+  "children": [
+    {
+      "duration_msec": 0.034,
+      "message": "eval: query=2 * rand(), timeRange=[1654034340000..1654037880000], step=60000, mayCache=true: series=1, points=60, pointsPerSeries=60",
+      "children": [
+        {
+          "duration_msec": 0.032,
+          "message": "binary op \"*\": series=1",
+          "children": [
+            {
+              "duration_msec": 0.009,
+              "message": "eval: query=2, timeRange=[1654034340000..1654037880000], step=60000, mayCache=true: series=1, points=60, pointsPerSeries=60"
+            },
+            {
+              "duration_msec": 0.017,
+              "message": "eval: query=rand(), timeRange=[1654034340000..1654037880000], step=60000, mayCache=true: series=1, points=60, pointsPerSeries=60",
+              "children": [
+                {
+                  "duration_msec": 0.015,
+                  "message": "transform rand(): series=1"
+                }
+              ]
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "duration_msec": 0.004,
+      "message": "sort series by metric name and labels"
+    },
+    {
+      "duration_msec": 0.044,
+      "message": "generate /api/v1/query_range response for series=1, points=60"
+    }
+  ]
+}
+```
+
+All the durations and timestamps in traces are in milliseconds.
+
+Query tracing is allowed by default. It can be denied by passing `-denyQueryTracing` command-line flag to VictoriaMetrics.
+
+
 ## Cardinality limiter
 
 By default VictoriaMetrics doesn't limit the number of stored time series. The limit can be enforced by setting the following command-line flags:

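The `trace` object added to the README above is recursive: every node carries `duration_msec`, `message` and an optional `children` array. For readers consuming it programmatically, here is a minimal sketch of decoding that shape in Go; the `TraceNode` type and `printTrace` helper are illustrative only and are not part of VictoriaMetrics:

```go
package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

// TraceNode mirrors the shape of the `trace` field shown above.
type TraceNode struct {
    DurationMsec float64     `json:"duration_msec"`
    Message      string      `json:"message"`
    Children     []TraceNode `json:"children,omitempty"`
}

// printTrace renders the trace tree with one indent level per nesting depth.
func printTrace(n TraceNode, depth int) {
    fmt.Printf("%s%.3fms %s\n", strings.Repeat("  ", depth), n.DurationMsec, n.Message)
    for _, c := range n.Children {
        printTrace(c, depth+1)
    }
}

func main() {
    // A trimmed-down trace in the format returned when trace=1 is passed.
    data := []byte(`{"duration_msec":0.099,"message":"/api/v1/query_range: series=1","children":[{"duration_msec":0.034,"message":"eval: series=1"}]}`)
    var root TraceNode
    if err := json.Unmarshal(data, &root); err != nil {
        panic(err)
    }
    printTrace(root, 0)
}
```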
@@ -525,7 +525,7 @@ There are following non-required `replay` flags:
 (rules which depend on each other) rules. It is expected, that remote storage will be able to persist
 previously accepted data during the delay, so data will be available for the subsequent queries.
 Keep it equal or bigger than `-remoteWrite.flushInterval`.
-* `replay.disableProgressBar` - whether to disable progress bar which shows progress work.
+* `-replay.disableProgressBar` - whether to disable progress bar which shows progress work.
 Progress bar may generate a lot of log records, which is not formatted as standard VictoriaMetrics logger.
 It could break logs parsing by external system and generate additional load on it.

@@ -206,7 +206,7 @@ func MetricsIndexHandler(startTime time.Time, w http.ResponseWriter, r *http.Req
         return fmt.Errorf("cannot parse form values: %w", err)
     }
     jsonp := r.FormValue("jsonp")
-    metricNames, err := netstorage.GetLabelValues("__name__", deadline)
+    metricNames, err := netstorage.GetLabelValues(nil, "__name__", deadline)
     if err != nil {
         return fmt.Errorf(`cannot obtain metric names: %w`, err)
     }

@@ -227,7 +227,7 @@ func metricsFind(tr storage.TimeRange, label, qHead, qTail string, delimiter byt
     n := strings.IndexAny(qTail, "*{[")
     if n < 0 {
         query := qHead + qTail
-        suffixes, err := netstorage.GetTagValueSuffixes(tr, label, query, delimiter, deadline)
+        suffixes, err := netstorage.GetTagValueSuffixes(nil, tr, label, query, delimiter, deadline)
         if err != nil {
             return nil, err
         }

@@ -247,7 +247,7 @@ func metricsFind(tr storage.TimeRange, label, qHead, qTail string, delimiter byt
     }
     if n == len(qTail)-1 && strings.HasSuffix(qTail, "*") {
         query := qHead + qTail[:len(qTail)-1]
-        suffixes, err := netstorage.GetTagValueSuffixes(tr, label, query, delimiter, deadline)
+        suffixes, err := netstorage.GetTagValueSuffixes(nil, tr, label, query, delimiter, deadline)
         if err != nil {
             return nil, err
         }

@@ -55,7 +55,7 @@ func TagsDelSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Re
     }
     tfss := joinTagFilterss(tfs, etfs)
     sq := storage.NewSearchQuery(0, ct, tfss, 0)
-    n, err := netstorage.DeleteSeries(sq, deadline)
+    n, err := netstorage.DeleteSeries(nil, sq, deadline)
     if err != nil {
         return fmt.Errorf("cannot delete series for %q: %w", sq, err)
     }

@@ -190,7 +190,7 @@ func TagsAutoCompleteValuesHandler(startTime time.Time, w http.ResponseWriter, r
         // Escape special chars in tagPrefix as Graphite does.
         // See https://github.com/graphite-project/graphite-web/blob/3ad279df5cb90b211953e39161df416e54a84948/webapp/graphite/tags/base.py#L228
         filter := regexp.QuoteMeta(valuePrefix)
-        tagValues, err = netstorage.GetGraphiteTagValues(tag, filter, limit, deadline)
+        tagValues, err = netstorage.GetGraphiteTagValues(nil, tag, filter, limit, deadline)
         if err != nil {
             return err
         }

@@ -200,7 +200,7 @@ func TagsAutoCompleteValuesHandler(startTime time.Time, w http.ResponseWriter, r
         if err != nil {
             return err
         }
-        mns, err := netstorage.SearchMetricNames(sq, deadline)
+        mns, err := netstorage.SearchMetricNames(nil, sq, deadline)
         if err != nil {
             return fmt.Errorf("cannot fetch metric names for %q: %w", sq, err)
         }

@@ -276,7 +276,7 @@ func TagsAutoCompleteTagsHandler(startTime time.Time, w http.ResponseWriter, r *
         // Escape special chars in tagPrefix as Graphite does.
         // See https://github.com/graphite-project/graphite-web/blob/3ad279df5cb90b211953e39161df416e54a84948/webapp/graphite/tags/base.py#L181
         filter := regexp.QuoteMeta(tagPrefix)
-        labels, err = netstorage.GetGraphiteTags(filter, limit, deadline)
+        labels, err = netstorage.GetGraphiteTags(nil, filter, limit, deadline)
         if err != nil {
             return err
         }

@@ -286,7 +286,7 @@ func TagsAutoCompleteTagsHandler(startTime time.Time, w http.ResponseWriter, r *
         if err != nil {
             return err
         }
-        mns, err := netstorage.SearchMetricNames(sq, deadline)
+        mns, err := netstorage.SearchMetricNames(nil, sq, deadline)
         if err != nil {
             return fmt.Errorf("cannot fetch metric names for %q: %w", sq, err)
         }

@@ -353,7 +353,7 @@ func TagsFindSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.R
     if err != nil {
         return err
     }
-    mns, err := netstorage.SearchMetricNames(sq, deadline)
+    mns, err := netstorage.SearchMetricNames(nil, sq, deadline)
     if err != nil {
         return fmt.Errorf("cannot fetch metric names for %q: %w", sq, err)
     }

@@ -413,7 +413,7 @@ func TagValuesHandler(startTime time.Time, tagName string, w http.ResponseWriter
         return err
     }
     filter := r.FormValue("filter")
-    tagValues, err := netstorage.GetGraphiteTagValues(tagName, filter, limit, deadline)
+    tagValues, err := netstorage.GetGraphiteTagValues(nil, tagName, filter, limit, deadline)
     if err != nil {
         return err
     }

@@ -444,7 +444,7 @@ func TagsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) er
         return err
     }
     filter := r.FormValue("filter")
-    labels, err := netstorage.GetGraphiteTags(filter, limit, deadline)
+    labels, err := netstorage.GetGraphiteTags(nil, filter, limit, deadline)
     if err != nil {
         return err
     }

@@ -21,6 +21,7 @@ import (
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
     "github.com/VictoriaMetrics/metrics"
 )

@@ -97,6 +98,8 @@ var (
 func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
     startTime := time.Now()
     defer requestDuration.UpdateDuration(startTime)
+    tracerEnabled := searchutils.GetBool(r, "trace")
+    qt := querytracer.New(tracerEnabled)
 
     // Limit the number of concurrent queries.
     select {

@@ -112,6 +115,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
     t := timerpool.Get(d)
     select {
     case concurrencyCh <- struct{}{}:
+        qt.Printf("wait in queue because -search.maxConcurrentRequests=%d concurrent requests are executed", *maxConcurrentRequests)
         timerpool.Put(t)
         defer func() { <-concurrencyCh }()
     case <-t.C:

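The two hunks above show the tracer's whole surface as used by this commit: `querytracer.New` creates a root tracer (or a disabled one), `NewChild` opens a nested span, `Printf` records an event, and `Donef` closes a span with a summary message. The following is a minimal reconstruction inferred from these call sites, not the actual `lib/querytracer` implementation:

```go
package querytracer

import (
    "fmt"
    "time"
)

// Tracer records a tree of spans. A nil *Tracer is valid and records
// nothing, so disabled tracing costs almost nothing at call sites.
type Tracer struct {
    startTime time.Time
    doneTime  time.Time
    message   string
    children  []*Tracer
}

// New returns a root Tracer, or nil when tracing is disabled.
func New(enabled bool) *Tracer {
    if !enabled {
        return nil
    }
    return &Tracer{startTime: time.Now()}
}

// NewChild opens a nested span; safe to call on a nil Tracer.
func (t *Tracer) NewChild() *Tracer {
    if t == nil {
        return nil
    }
    child := &Tracer{startTime: time.Now()}
    t.children = append(t.children, child)
    return child
}

// Printf records an instantaneous event inside the span.
func (t *Tracer) Printf(format string, args ...interface{}) {
    if t == nil {
        return
    }
    now := time.Now()
    t.children = append(t.children, &Tracer{
        startTime: now,
        doneTime:  now,
        message:   fmt.Sprintf(format, args...),
    })
}

// Donef closes the span and attaches its summary message.
func (t *Tracer) Donef(format string, args ...interface{}) {
    if t == nil {
        return
    }
    t.message = fmt.Sprintf(format, args...)
    t.doneTime = time.Now()
}
```

The nil-receiver convention also explains why call sites elsewhere in this commit simply pass `nil` when no parent tracer is available.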
@@ -194,7 +198,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
         labelValuesRequests.Inc()
         labelName := s[:len(s)-len("/values")]
         httpserver.EnableCORS(w, r)
-        if err := prometheus.LabelValuesHandler(startTime, labelName, w, r); err != nil {
+        if err := prometheus.LabelValuesHandler(qt, startTime, labelName, w, r); err != nil {
             labelValuesErrors.Inc()
             sendPrometheusError(w, r, err)
             return true

@@ -222,7 +226,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
     case "/api/v1/query":
         queryRequests.Inc()
         httpserver.EnableCORS(w, r)
-        if err := prometheus.QueryHandler(startTime, w, r); err != nil {
+        if err := prometheus.QueryHandler(qt, startTime, w, r); err != nil {
             queryErrors.Inc()
             sendPrometheusError(w, r, err)
             return true

@@ -231,7 +235,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
     case "/api/v1/query_range":
         queryRangeRequests.Inc()
         httpserver.EnableCORS(w, r)
-        if err := prometheus.QueryRangeHandler(startTime, w, r); err != nil {
+        if err := prometheus.QueryRangeHandler(qt, startTime, w, r); err != nil {
             queryRangeErrors.Inc()
             sendPrometheusError(w, r, err)
             return true

@@ -240,7 +244,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
     case "/api/v1/series":
         seriesRequests.Inc()
         httpserver.EnableCORS(w, r)
-        if err := prometheus.SeriesHandler(startTime, w, r); err != nil {
+        if err := prometheus.SeriesHandler(qt, startTime, w, r); err != nil {
             seriesErrors.Inc()
             sendPrometheusError(w, r, err)
             return true

@@ -258,7 +262,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
     case "/api/v1/labels":
         labelsRequests.Inc()
         httpserver.EnableCORS(w, r)
-        if err := prometheus.LabelsHandler(startTime, w, r); err != nil {
+        if err := prometheus.LabelsHandler(qt, startTime, w, r); err != nil {
             labelsErrors.Inc()
             sendPrometheusError(w, r, err)
             return true

@@ -17,6 +17,7 @@ import (
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
     "github.com/VictoriaMetrics/metrics"
     "github.com/valyala/fastrand"

@@ -192,7 +193,8 @@ var resultPool sync.Pool
 // Data processing is immediately stopped if f returns non-nil error.
 //
 // rss becomes unusable after the call to RunParallel.
-func (rss *Results) RunParallel(f func(rs *Result, workerID uint) error) error {
+func (rss *Results) RunParallel(qt *querytracer.Tracer, f func(rs *Result, workerID uint) error) error {
+    qt = qt.NewChild()
     defer rss.mustClose()
 
     // Spin up local workers.

@@ -254,6 +256,7 @@ func (rss *Results) RunParallel(f func(rs *Result, workerID uint) error) error {
         close(workCh)
     }
     workChsWG.Wait()
+    qt.Donef("parallel process of fetched data: series=%d, samples=%d", seriesProcessedTotal, rowsProcessedTotal)
 
     return firstErr
 }

@@ -594,7 +597,9 @@ func (sbh *sortBlocksHeap) Pop() interface{} {
 }
 
 // DeleteSeries deletes time series matching the given tagFilterss.
-func DeleteSeries(sq *storage.SearchQuery, deadline searchutils.Deadline) (int, error) {
+func DeleteSeries(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline searchutils.Deadline) (int, error) {
+    qt = qt.NewChild()
+    defer qt.Donef("delete series: %s", sq)
     tr := storage.TimeRange{
         MinTimestamp: sq.MinTimestamp,
         MaxTimestamp: sq.MaxTimestamp,

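Every traced netstorage function below repeats the three-step pattern visible in `DeleteSeries` above: accept a `*querytracer.Tracer` as the first argument, open a child span immediately, and defer `Donef` with a summary. A short sketch of the pattern applied to a hypothetical helper (`countRows` and `fetchRows` are illustrative only, not part of the commit):

```go
package netstorageexample

import "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"

// fetchRows is an illustrative stand-in for a storage lookup.
func fetchRows(query string) ([]string, error) {
    return []string{"row1", "row2"}, nil
}

// countRows applies the tracing pattern used throughout this commit:
// tracer first, child span opened immediately, Donef deferred with a summary.
func countRows(qt *querytracer.Tracer, query string) (int, error) {
    qt = qt.NewChild()
    defer qt.Donef("count rows: query=%s", query)

    rows, err := fetchRows(query)
    if err != nil {
        return 0, err
    }
    qt.Printf("fetched %d rows", len(rows))
    return len(rows), nil
}
```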
@@ -607,11 +612,14 @@ func DeleteSeries(sq *storage.SearchQuery, deadline searchutils.Deadline) (int,
 }
 
 // GetLabelsOnTimeRange returns labels for the given tr until the given deadline.
-func GetLabelsOnTimeRange(tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
+func GetLabelsOnTimeRange(qt *querytracer.Tracer, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
+    qt = qt.NewChild()
+    defer qt.Donef("get labels on timeRange=%s", &tr)
     if deadline.Exceeded() {
         return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
     }
     labels, err := vmstorage.SearchTagKeysOnTimeRange(tr, *maxTagKeysPerSearch, deadline.Deadline())
+    qt.Printf("get %d labels", len(labels))
     if err != nil {
         return nil, fmt.Errorf("error during labels search on time range: %w", err)
     }

|
@ -623,15 +631,18 @@ func GetLabelsOnTimeRange(tr storage.TimeRange, deadline searchutils.Deadline) (
|
||||||
}
|
}
|
||||||
// Sort labels like Prometheus does
|
// Sort labels like Prometheus does
|
||||||
sort.Strings(labels)
|
sort.Strings(labels)
|
||||||
|
qt.Printf("sort %d labels", len(labels))
|
||||||
return labels, nil
|
return labels, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetGraphiteTags returns Graphite tags until the given deadline.
|
// GetGraphiteTags returns Graphite tags until the given deadline.
|
||||||
func GetGraphiteTags(filter string, limit int, deadline searchutils.Deadline) ([]string, error) {
|
func GetGraphiteTags(qt *querytracer.Tracer, filter string, limit int, deadline searchutils.Deadline) ([]string, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("get graphite tags: filter=%s, limit=%d", filter, limit)
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
labels, err := GetLabels(deadline)
|
labels, err := GetLabels(nil, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -672,11 +683,14 @@ func hasString(a []string, s string) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetLabels returns labels until the given deadline.
|
// GetLabels returns labels until the given deadline.
|
||||||
func GetLabels(deadline searchutils.Deadline) ([]string, error) {
|
func GetLabels(qt *querytracer.Tracer, deadline searchutils.Deadline) ([]string, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("get labels")
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
labels, err := vmstorage.SearchTagKeys(*maxTagKeysPerSearch, deadline.Deadline())
|
labels, err := vmstorage.SearchTagKeys(*maxTagKeysPerSearch, deadline.Deadline())
|
||||||
|
qt.Printf("get %d labels from global index", len(labels))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error during labels search: %w", err)
|
return nil, fmt.Errorf("error during labels search: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -688,12 +702,15 @@ func GetLabels(deadline searchutils.Deadline) ([]string, error) {
|
||||||
}
|
}
|
||||||
// Sort labels like Prometheus does
|
// Sort labels like Prometheus does
|
||||||
sort.Strings(labels)
|
sort.Strings(labels)
|
||||||
|
qt.Printf("sort %d labels", len(labels))
|
||||||
return labels, nil
|
return labels, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetLabelValuesOnTimeRange returns label values for the given labelName on the given tr
|
// GetLabelValuesOnTimeRange returns label values for the given labelName on the given tr
|
||||||
// until the given deadline.
|
// until the given deadline.
|
||||||
func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
|
func GetLabelValuesOnTimeRange(qt *querytracer.Tracer, labelName string, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("get values for label %s on a timeRange %s", labelName, &tr)
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
|
@ -702,23 +719,27 @@ func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline
|
||||||
}
|
}
|
||||||
// Search for tag values
|
// Search for tag values
|
||||||
labelValues, err := vmstorage.SearchTagValuesOnTimeRange([]byte(labelName), tr, *maxTagValuesPerSearch, deadline.Deadline())
|
labelValues, err := vmstorage.SearchTagValuesOnTimeRange([]byte(labelName), tr, *maxTagValuesPerSearch, deadline.Deadline())
|
||||||
|
qt.Printf("get %d label values", len(labelValues))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error during label values search on time range for labelName=%q: %w", labelName, err)
|
return nil, fmt.Errorf("error during label values search on time range for labelName=%q: %w", labelName, err)
|
||||||
}
|
}
|
||||||
// Sort labelValues like Prometheus does
|
// Sort labelValues like Prometheus does
|
||||||
sort.Strings(labelValues)
|
sort.Strings(labelValues)
|
||||||
|
qt.Printf("sort %d label values", len(labelValues))
|
||||||
return labelValues, nil
|
return labelValues, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetGraphiteTagValues returns tag values for the given tagName until the given deadline.
|
// GetGraphiteTagValues returns tag values for the given tagName until the given deadline.
|
||||||
func GetGraphiteTagValues(tagName, filter string, limit int, deadline searchutils.Deadline) ([]string, error) {
|
func GetGraphiteTagValues(qt *querytracer.Tracer, tagName, filter string, limit int, deadline searchutils.Deadline) ([]string, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("get graphite tag values for tagName=%s, filter=%s, limit=%d", tagName, filter, limit)
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
if tagName == "name" {
|
if tagName == "name" {
|
||||||
tagName = ""
|
tagName = ""
|
||||||
}
|
}
|
||||||
tagValues, err := GetLabelValues(tagName, deadline)
|
tagValues, err := GetLabelValues(nil, tagName, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -736,7 +757,9 @@ func GetGraphiteTagValues(tagName, filter string, limit int, deadline searchutil
|
||||||
|
|
||||||
// GetLabelValues returns label values for the given labelName
|
// GetLabelValues returns label values for the given labelName
|
||||||
// until the given deadline.
|
// until the given deadline.
|
||||||
func GetLabelValues(labelName string, deadline searchutils.Deadline) ([]string, error) {
|
func GetLabelValues(qt *querytracer.Tracer, labelName string, deadline searchutils.Deadline) ([]string, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("get values for label %s", labelName)
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
|
@ -745,18 +768,22 @@ func GetLabelValues(labelName string, deadline searchutils.Deadline) ([]string,
|
||||||
}
|
}
|
||||||
// Search for tag values
|
// Search for tag values
|
||||||
labelValues, err := vmstorage.SearchTagValues([]byte(labelName), *maxTagValuesPerSearch, deadline.Deadline())
|
labelValues, err := vmstorage.SearchTagValues([]byte(labelName), *maxTagValuesPerSearch, deadline.Deadline())
|
||||||
|
qt.Printf("get %d label values", len(labelValues))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error during label values search for labelName=%q: %w", labelName, err)
|
return nil, fmt.Errorf("error during label values search for labelName=%q: %w", labelName, err)
|
||||||
}
|
}
|
||||||
// Sort labelValues like Prometheus does
|
// Sort labelValues like Prometheus does
|
||||||
sort.Strings(labelValues)
|
sort.Strings(labelValues)
|
||||||
|
qt.Printf("sort %d label values", len(labelValues))
|
||||||
return labelValues, nil
|
return labelValues, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTagValueSuffixes returns tag value suffixes for the given tagKey and the given tagValuePrefix.
|
// GetTagValueSuffixes returns tag value suffixes for the given tagKey and the given tagValuePrefix.
|
||||||
//
|
//
|
||||||
// It can be used for implementing https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find
|
// It can be used for implementing https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find
|
||||||
func GetTagValueSuffixes(tr storage.TimeRange, tagKey, tagValuePrefix string, delimiter byte, deadline searchutils.Deadline) ([]string, error) {
|
func GetTagValueSuffixes(qt *querytracer.Tracer, tr storage.TimeRange, tagKey, tagValuePrefix string, delimiter byte, deadline searchutils.Deadline) ([]string, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("get tag value suffixes for tagKey=%s, tagValuePrefix=%s, timeRange=%s", tagKey, tagValuePrefix, &tr)
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
|
@ -774,7 +801,9 @@ func GetTagValueSuffixes(tr storage.TimeRange, tagKey, tagValuePrefix string, de
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetLabelEntries returns all the label entries until the given deadline.
|
// GetLabelEntries returns all the label entries until the given deadline.
|
||||||
func GetLabelEntries(deadline searchutils.Deadline) ([]storage.TagEntry, error) {
|
func GetLabelEntries(qt *querytracer.Tracer, deadline searchutils.Deadline) ([]storage.TagEntry, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("get label entries")
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
|
@ -782,6 +811,7 @@ func GetLabelEntries(deadline searchutils.Deadline) ([]storage.TagEntry, error)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error during label entries request: %w", err)
|
return nil, fmt.Errorf("error during label entries request: %w", err)
|
||||||
}
|
}
|
||||||
|
qt.Printf("get %d label entries", len(labelEntries))
|
||||||
|
|
||||||
// Substitute "" with "__name__"
|
// Substitute "" with "__name__"
|
||||||
for i := range labelEntries {
|
for i := range labelEntries {
|
||||||
|
@ -799,12 +829,15 @@ func GetLabelEntries(deadline searchutils.Deadline) ([]storage.TagEntry, error)
|
||||||
}
|
}
|
||||||
return labelEntries[i].Key > labelEntries[j].Key
|
return labelEntries[i].Key > labelEntries[j].Key
|
||||||
})
|
})
|
||||||
|
qt.Printf("sort %d label entries", len(labelEntries))
|
||||||
|
|
||||||
return labelEntries, nil
|
return labelEntries, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTSDBStatusForDate returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
|
// GetTSDBStatusForDate returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
|
||||||
func GetTSDBStatusForDate(deadline searchutils.Deadline, date uint64, topN, maxMetrics int) (*storage.TSDBStatus, error) {
|
func GetTSDBStatusForDate(qt *querytracer.Tracer, deadline searchutils.Deadline, date uint64, topN, maxMetrics int) (*storage.TSDBStatus, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("get tsdb stats for date=%d, topN=%d", date, topN)
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
|
@ -818,7 +851,9 @@ func GetTSDBStatusForDate(deadline searchutils.Deadline, date uint64, topN, maxM
|
||||||
// GetTSDBStatusWithFilters returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
|
// GetTSDBStatusWithFilters returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
|
||||||
//
|
//
|
||||||
// It accepts aribtrary filters on time series in sq.
|
// It accepts aribtrary filters on time series in sq.
|
||||||
func GetTSDBStatusWithFilters(deadline searchutils.Deadline, sq *storage.SearchQuery, topN int) (*storage.TSDBStatus, error) {
|
func GetTSDBStatusWithFilters(qt *querytracer.Tracer, deadline searchutils.Deadline, sq *storage.SearchQuery, topN int) (*storage.TSDBStatus, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("get tsdb stats: %s, topN=%d", sq, topN)
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
|
@ -839,7 +874,9 @@ func GetTSDBStatusWithFilters(deadline searchutils.Deadline, sq *storage.SearchQ
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSeriesCount returns the number of unique series.
|
// GetSeriesCount returns the number of unique series.
|
||||||
func GetSeriesCount(deadline searchutils.Deadline) (uint64, error) {
|
func GetSeriesCount(qt *querytracer.Tracer, deadline searchutils.Deadline) (uint64, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("get series count")
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return 0, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return 0, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
|
@ -871,7 +908,9 @@ var ssPool sync.Pool
|
||||||
// Data processing is immediately stopped if f returns non-nil error.
|
// Data processing is immediately stopped if f returns non-nil error.
|
||||||
// It is the responsibility of f to call b.UnmarshalData before reading timestamps and values from the block.
|
// It is the responsibility of f to call b.UnmarshalData before reading timestamps and values from the block.
|
||||||
// It is the responsibility of f to filter blocks according to the given tr.
|
// It is the responsibility of f to filter blocks according to the given tr.
|
||||||
func ExportBlocks(sq *storage.SearchQuery, deadline searchutils.Deadline, f func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error) error {
|
func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline searchutils.Deadline, f func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error) error {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("export blocks: %s", sq)
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return fmt.Errorf("timeout exceeded before starting data export: %s", deadline.String())
|
return fmt.Errorf("timeout exceeded before starting data export: %s", deadline.String())
|
||||||
}
|
}
|
||||||
|
@ -893,7 +932,7 @@ func ExportBlocks(sq *storage.SearchQuery, deadline searchutils.Deadline, f func
|
||||||
sr := getStorageSearch()
|
sr := getStorageSearch()
|
||||||
defer putStorageSearch(sr)
|
defer putStorageSearch(sr)
|
||||||
startTime := time.Now()
|
startTime := time.Now()
|
||||||
sr.Init(vmstorage.Storage, tfss, tr, sq.MaxMetrics, deadline.Deadline())
|
sr.Init(qt, vmstorage.Storage, tfss, tr, sq.MaxMetrics, deadline.Deadline())
|
||||||
indexSearchDuration.UpdateDuration(startTime)
|
indexSearchDuration.UpdateDuration(startTime)
|
||||||
|
|
||||||
// Start workers that call f in parallel on available CPU cores.
|
// Start workers that call f in parallel on available CPU cores.
|
||||||
|
@ -926,6 +965,7 @@ func ExportBlocks(sq *storage.SearchQuery, deadline searchutils.Deadline, f func
|
||||||
|
|
||||||
// Feed workers with work
|
// Feed workers with work
|
||||||
blocksRead := 0
|
blocksRead := 0
|
||||||
|
samples := 0
|
||||||
for sr.NextMetricBlock() {
|
for sr.NextMetricBlock() {
|
||||||
blocksRead++
|
blocksRead++
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
|
@ -938,13 +978,16 @@ func ExportBlocks(sq *storage.SearchQuery, deadline searchutils.Deadline, f func
|
||||||
if err := xw.mn.Unmarshal(sr.MetricBlockRef.MetricName); err != nil {
|
if err := xw.mn.Unmarshal(sr.MetricBlockRef.MetricName); err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal metricName for block #%d: %w", blocksRead, err)
|
return fmt.Errorf("cannot unmarshal metricName for block #%d: %w", blocksRead, err)
|
||||||
}
|
}
|
||||||
sr.MetricBlockRef.BlockRef.MustReadBlock(&xw.b, true)
|
br := sr.MetricBlockRef.BlockRef
|
||||||
|
br.MustReadBlock(&xw.b, true)
|
||||||
|
samples += br.RowsCount()
|
||||||
workCh <- xw
|
workCh <- xw
|
||||||
}
|
}
|
||||||
close(workCh)
|
close(workCh)
|
||||||
|
|
||||||
// Wait for workers to finish.
|
// Wait for workers to finish.
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
qt.Printf("export blocks=%d, samples=%d", blocksRead, samples)
|
||||||
|
|
||||||
// Check errors.
|
// Check errors.
|
||||||
err = sr.Error()
|
err = sr.Error()
|
||||||
|
@ -977,7 +1020,9 @@ var exportWorkPool = &sync.Pool{
|
||||||
}
|
}
|
||||||
|
|
||||||
// SearchMetricNames returns all the metric names matching sq until the given deadline.
|
// SearchMetricNames returns all the metric names matching sq until the given deadline.
|
||||||
func SearchMetricNames(sq *storage.SearchQuery, deadline searchutils.Deadline) ([]storage.MetricName, error) {
|
func SearchMetricNames(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline searchutils.Deadline) ([]storage.MetricName, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("fetch metric names: %s", sq)
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting to search metric names: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting to search metric names: %s", deadline.String())
|
||||||
}
|
}
|
||||||
|
@ -995,7 +1040,7 @@ func SearchMetricNames(sq *storage.SearchQuery, deadline searchutils.Deadline) (
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
mns, err := vmstorage.SearchMetricNames(tfss, tr, sq.MaxMetrics, deadline.Deadline())
|
mns, err := vmstorage.SearchMetricNames(qt, tfss, tr, sq.MaxMetrics, deadline.Deadline())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot find metric names: %w", err)
|
return nil, fmt.Errorf("cannot find metric names: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -1005,7 +1050,9 @@ func SearchMetricNames(sq *storage.SearchQuery, deadline searchutils.Deadline) (
|
||||||
// ProcessSearchQuery performs sq until the given deadline.
|
// ProcessSearchQuery performs sq until the given deadline.
|
||||||
//
|
//
|
||||||
// Results.RunParallel or Results.Cancel must be called on the returned Results.
|
// Results.RunParallel or Results.Cancel must be called on the returned Results.
|
||||||
func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline searchutils.Deadline) (*Results, error) {
|
func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, fetchData bool, deadline searchutils.Deadline) (*Results, error) {
|
||||||
|
qt = qt.NewChild()
|
||||||
|
defer qt.Donef("fetch matching series: %s, fetchData=%v", sq, fetchData)
|
||||||
if deadline.Exceeded() {
|
if deadline.Exceeded() {
|
||||||
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
|
||||||
}
|
}
|
||||||
|
@ -1028,7 +1075,7 @@ func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline search
|
||||||
|
|
||||||
sr := getStorageSearch()
|
sr := getStorageSearch()
|
||||||
startTime := time.Now()
|
startTime := time.Now()
|
||||||
maxSeriesCount := sr.Init(vmstorage.Storage, tfss, tr, sq.MaxMetrics, deadline.Deadline())
|
maxSeriesCount := sr.Init(qt, vmstorage.Storage, tfss, tr, sq.MaxMetrics, deadline.Deadline())
|
||||||
indexSearchDuration.UpdateDuration(startTime)
|
indexSearchDuration.UpdateDuration(startTime)
|
||||||
m := make(map[string][]blockRef, maxSeriesCount)
|
m := make(map[string][]blockRef, maxSeriesCount)
|
||||||
orderedMetricNames := make([]string, 0, maxSeriesCount)
|
orderedMetricNames := make([]string, 0, maxSeriesCount)
|
||||||
|
@ -1085,6 +1132,7 @@ func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline search
|
||||||
putStorageSearch(sr)
|
putStorageSearch(sr)
|
||||||
return nil, fmt.Errorf("cannot finalize temporary file: %w", err)
|
return nil, fmt.Errorf("cannot finalize temporary file: %w", err)
|
||||||
}
|
}
|
||||||
|
qt.Printf("fetch unique series=%d, blocks=%d, samples=%d, bytes=%d", len(m), blocksRead, samples, tbf.Len())
|
||||||
|
|
||||||
var rss Results
|
var rss Results
|
||||||
rss.tr = tr
|
rss.tr = tr
|
||||||
|
|
|
@@ -124,6 +124,11 @@ func (tbf *tmpBlocksFile) WriteBlockRefData(b []byte) (tmpBlockAddr, error) {
     return addr, nil
 }
 
+// Len() returns the tbf size in bytes.
+func (tbf *tmpBlocksFile) Len() uint64 {
+    return tbf.offset
+}
+
 func (tbf *tmpBlocksFile) Finalize() error {
     if tbf.f == nil {
         return nil

@@ -5,6 +5,7 @@
     "time"
 
     "github.com/valyala/quicktemplate"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 ) %}
 

|
@ -125,8 +126,12 @@
|
||||||
}
|
}
|
||||||
{% endfunc %}
|
{% endfunc %}
|
||||||
|
|
||||||
{% func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
|
{% func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) %}
|
||||||
{
|
{
|
||||||
|
{% code
|
||||||
|
lines := 0
|
||||||
|
bytesTotal := 0
|
||||||
|
%}
|
||||||
"status":"success",
|
"status":"success",
|
||||||
"data":{
|
"data":{
|
||||||
"resultType":"matrix",
|
"resultType":"matrix",
|
||||||
|
@ -134,18 +139,30 @@
|
||||||
{% code bb, ok := <-resultsCh %}
|
{% code bb, ok := <-resultsCh %}
|
||||||
{% if ok %}
|
{% if ok %}
|
||||||
{%z= bb.B %}
|
{%z= bb.B %}
|
||||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
{% code
|
||||||
|
lines++
|
||||||
|
bytesTotal += len(bb.B)
|
||||||
|
quicktemplate.ReleaseByteBuffer(bb)
|
||||||
|
%}
|
||||||
{% for bb := range resultsCh %}
|
{% for bb := range resultsCh %}
|
||||||
,{%z= bb.B %}
|
,{%z= bb.B %}
|
||||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
{% code
|
||||||
|
lines++
|
||||||
|
bytesTotal += len(bb.B)
|
||||||
|
quicktemplate.ReleaseByteBuffer(bb)
|
||||||
|
%}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
{% code
|
||||||
|
qt.Donef("export format=promapi: lines=%d, bytes=%d", lines, bytesTotal)
|
||||||
|
%}
|
||||||
|
{%= dumpQueryTrace(qt) %}
|
||||||
}
|
}
|
||||||
{% endfunc %}
|
{% endfunc %}
|
||||||
|
|
||||||
{% func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
|
{% func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) %}
|
||||||
{% for bb := range resultsCh %}
|
{% for bb := range resultsCh %}
|
||||||
{%z= bb.B %}
|
{%z= bb.B %}
|
||||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||||
|
|
|
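The template hunk above counts emitted lines and bytes, closes the tracer via `qt.Donef`, and appends the trace to the exported JSON through `dumpQueryTrace`. As a hedged sketch of what such a serialization step could look like, continuing the reconstructed `querytracer` package from earlier (add `encoding/json` to its imports; `marshalTrace` is hypothetical, the real helper in this commit is the `dumpQueryTrace` template function):

```go
// marshalTrace converts a finished tracer tree into the JSON shape shown in
// the README above: duration_msec, message and optional children. A nil
// tracer yields no output, matching disabled tracing.
func marshalTrace(t *Tracer) ([]byte, error) {
    if t == nil {
        return nil, nil
    }
    type node struct {
        DurationMsec float64 `json:"duration_msec"`
        Message      string  `json:"message"`
        Children     []node  `json:"children,omitempty"`
    }
    var convert func(t *Tracer) node
    convert = func(t *Tracer) node {
        n := node{
            DurationMsec: float64(t.doneTime.Sub(t.startTime).Microseconds()) / 1e3,
            Message:      t.message,
        }
        for _, c := range t.children {
            n.Children = append(n.Children, convert(c))
        }
        return n
    }
    return json.Marshal(convert(t))
}
```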
@@ -11,570 +11,588 @@ import (
     "strings"
     "time"
 
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
     "github.com/valyala/quicktemplate"
 )
 
-//line app/vmselect/prometheus/export.qtpl:13
+//line app/vmselect/prometheus/export.qtpl:14
 import (
     qtio422016 "io"
 
     qt422016 "github.com/valyala/quicktemplate"
 )
 
-//line app/vmselect/prometheus/export.qtpl:13
+//line app/vmselect/prometheus/export.qtpl:14
 var (
     _ = qtio422016.Copy
     _ = qt422016.AcquireByteBuffer
 )
 
-//line app/vmselect/prometheus/export.qtpl:13
+//line app/vmselect/prometheus/export.qtpl:14
 func StreamExportCSVLine(qw422016 *qt422016.Writer, xb *exportBlock, fieldNames []string) {
-//line app/vmselect/prometheus/export.qtpl:14
-    if len(xb.timestamps) == 0 || len(fieldNames) == 0 {
-//line app/vmselect/prometheus/export.qtpl:14
-        return
-//line app/vmselect/prometheus/export.qtpl:14
-    }
 //line app/vmselect/prometheus/export.qtpl:15
-    for i, timestamp := range xb.timestamps {
+    if len(xb.timestamps) == 0 || len(fieldNames) == 0 {
+//line app/vmselect/prometheus/export.qtpl:15
+        return
+//line app/vmselect/prometheus/export.qtpl:15
+    }
 //line app/vmselect/prometheus/export.qtpl:16
+    for i, timestamp := range xb.timestamps {
+//line app/vmselect/prometheus/export.qtpl:17
         value := xb.values[i]
 
-//line app/vmselect/prometheus/export.qtpl:17
+//line app/vmselect/prometheus/export.qtpl:18
         streamexportCSVField(qw422016, xb.mn, fieldNames[0], timestamp, value)
-//line app/vmselect/prometheus/export.qtpl:18
+//line app/vmselect/prometheus/export.qtpl:19
         for _, fieldName := range fieldNames[1:] {
-//line app/vmselect/prometheus/export.qtpl:18
+//line app/vmselect/prometheus/export.qtpl:19
             qw422016.N().S(`,`)
-//line app/vmselect/prometheus/export.qtpl:20
-            streamexportCSVField(qw422016, xb.mn, fieldName, timestamp, value)
 //line app/vmselect/prometheus/export.qtpl:21
-        }
+            streamexportCSVField(qw422016, xb.mn, fieldName, timestamp, value)
 //line app/vmselect/prometheus/export.qtpl:22
+        }
+//line app/vmselect/prometheus/export.qtpl:23
         qw422016.N().S(`
 `)
-//line app/vmselect/prometheus/export.qtpl:23
-    }
 //line app/vmselect/prometheus/export.qtpl:24
+    }
+//line app/vmselect/prometheus/export.qtpl:25
 }
 
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
 func WriteExportCSVLine(qq422016 qtio422016.Writer, xb *exportBlock, fieldNames []string) {
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
     qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
     StreamExportCSVLine(qw422016, xb, fieldNames)
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
     qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
 }
 
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
 func ExportCSVLine(xb *exportBlock, fieldNames []string) string {
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
     qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
     WriteExportCSVLine(qb422016, xb, fieldNames)
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
     qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
    qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
     return qs422016
-//line app/vmselect/prometheus/export.qtpl:24
+//line app/vmselect/prometheus/export.qtpl:25
 }
 
-//line app/vmselect/prometheus/export.qtpl:26
-func streamexportCSVField(qw422016 *qt422016.Writer, mn *storage.MetricName, fieldName string, timestamp int64, value float64) {
 //line app/vmselect/prometheus/export.qtpl:27
-    if fieldName == "__value__" {
+func streamexportCSVField(qw422016 *qt422016.Writer, mn *storage.MetricName, fieldName string, timestamp int64, value float64) {
 //line app/vmselect/prometheus/export.qtpl:28
-        qw422016.N().F(value)
+    if fieldName == "__value__" {
 //line app/vmselect/prometheus/export.qtpl:29
-        return
+        qw422016.N().F(value)
 //line app/vmselect/prometheus/export.qtpl:30
-    }
-//line app/vmselect/prometheus/export.qtpl:31
-    if fieldName == "__timestamp__" {
-//line app/vmselect/prometheus/export.qtpl:32
-        qw422016.N().DL(timestamp)
-//line app/vmselect/prometheus/export.qtpl:33
         return
-//line app/vmselect/prometheus/export.qtpl:34
+//line app/vmselect/prometheus/export.qtpl:31
     }
+//line app/vmselect/prometheus/export.qtpl:32
+    if fieldName == "__timestamp__" {
+//line app/vmselect/prometheus/export.qtpl:33
+        qw422016.N().DL(timestamp)
+//line app/vmselect/prometheus/export.qtpl:34
+        return
 //line app/vmselect/prometheus/export.qtpl:35
-    if strings.HasPrefix(fieldName, "__timestamp__:") {
+    }
 //line app/vmselect/prometheus/export.qtpl:36
+    if strings.HasPrefix(fieldName, "__timestamp__:") {
+//line app/vmselect/prometheus/export.qtpl:37
        timeFormat := fieldName[len("__timestamp__:"):]
 
-//line app/vmselect/prometheus/export.qtpl:37
-        switch timeFormat {
 //line app/vmselect/prometheus/export.qtpl:38
-        case "unix_s":
+        switch timeFormat {
 //line app/vmselect/prometheus/export.qtpl:39
-            qw422016.N().DL(timestamp / 1000)
+        case "unix_s":
 //line app/vmselect/prometheus/export.qtpl:40
-        case "unix_ms":
+            qw422016.N().DL(timestamp / 1000)
 //line app/vmselect/prometheus/export.qtpl:41
-            qw422016.N().DL(timestamp)
+        case "unix_ms":
 //line app/vmselect/prometheus/export.qtpl:42
-        case "unix_ns":
+            qw422016.N().DL(timestamp)
 //line app/vmselect/prometheus/export.qtpl:43
-            qw422016.N().DL(timestamp * 1e6)
+        case "unix_ns":
 //line app/vmselect/prometheus/export.qtpl:44
+            qw422016.N().DL(timestamp * 1e6)
+//line app/vmselect/prometheus/export.qtpl:45
         case "rfc3339":
-//line app/vmselect/prometheus/export.qtpl:46
+//line app/vmselect/prometheus/export.qtpl:47
             bb := quicktemplate.AcquireByteBuffer()
             bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], time.RFC3339)
 
-//line app/vmselect/prometheus/export.qtpl:49
+//line app/vmselect/prometheus/export.qtpl:50
             qw422016.N().Z(bb.B)
-//line app/vmselect/prometheus/export.qtpl:51
+//line app/vmselect/prometheus/export.qtpl:52
             quicktemplate.ReleaseByteBuffer(bb)
 
-//line app/vmselect/prometheus/export.qtpl:53
-        default:
 //line app/vmselect/prometheus/export.qtpl:54
+        default:
+//line app/vmselect/prometheus/export.qtpl:55
             if strings.HasPrefix(timeFormat, "custom:") {
-//line app/vmselect/prometheus/export.qtpl:56
+//line app/vmselect/prometheus/export.qtpl:57
                 layout := timeFormat[len("custom:"):]
                 bb := quicktemplate.AcquireByteBuffer()
                 bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], layout)
 
-//line app/vmselect/prometheus/export.qtpl:60
-                if bytes.ContainsAny(bb.B, `"`+",\n") {
 //line app/vmselect/prometheus/export.qtpl:61
-                    qw422016.E().QZ(bb.B)
+                if bytes.ContainsAny(bb.B, `"`+",\n") {
 //line app/vmselect/prometheus/export.qtpl:62
-                } else {
+                    qw422016.E().QZ(bb.B)
 //line app/vmselect/prometheus/export.qtpl:63
-                    qw422016.N().Z(bb.B)
+                } else {
 //line app/vmselect/prometheus/export.qtpl:64
+                    qw422016.N().Z(bb.B)
+//line app/vmselect/prometheus/export.qtpl:65
                 }
-//line app/vmselect/prometheus/export.qtpl:66
+//line app/vmselect/prometheus/export.qtpl:67
                 quicktemplate.ReleaseByteBuffer(bb)
 
-//line app/vmselect/prometheus/export.qtpl:68
-            } else {
-//line app/vmselect/prometheus/export.qtpl:68
-                qw422016.N().S(`Unsupported timeFormat=`)
 //line app/vmselect/prometheus/export.qtpl:69
-                qw422016.N().S(timeFormat)
+            } else {
+//line app/vmselect/prometheus/export.qtpl:69
+                qw422016.N().S(`Unsupported timeFormat=`)
 //line app/vmselect/prometheus/export.qtpl:70
-            }
+                qw422016.N().S(timeFormat)
 //line app/vmselect/prometheus/export.qtpl:71
             }
 //line app/vmselect/prometheus/export.qtpl:72
-        return
+        }
 //line app/vmselect/prometheus/export.qtpl:73
-    }
+        return
 //line app/vmselect/prometheus/export.qtpl:74
+    }
+//line app/vmselect/prometheus/export.qtpl:75
     v := mn.GetTagValue(fieldName)
 
-//line app/vmselect/prometheus/export.qtpl:75
-    if bytes.ContainsAny(v, `"`+",\n") {
 //line app/vmselect/prometheus/export.qtpl:76
-        qw422016.N().QZ(v)
+    if bytes.ContainsAny(v, `"`+",\n") {
 //line app/vmselect/prometheus/export.qtpl:77
-    } else {
+        qw422016.N().QZ(v)
 //line app/vmselect/prometheus/export.qtpl:78
-        qw422016.N().Z(v)
+    } else {
 //line app/vmselect/prometheus/export.qtpl:79
-    }
+        qw422016.N().Z(v)
 //line app/vmselect/prometheus/export.qtpl:80
+    }
+//line app/vmselect/prometheus/export.qtpl:81
 }
 
-//line app/vmselect/prometheus/export.qtpl:80
+//line app/vmselect/prometheus/export.qtpl:81
 func writeexportCSVField(qq422016 qtio422016.Writer, mn *storage.MetricName, fieldName string, timestamp int64, value float64) {
-//line app/vmselect/prometheus/export.qtpl:80
+//line app/vmselect/prometheus/export.qtpl:81
     qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/export.qtpl:80
+//line app/vmselect/prometheus/export.qtpl:81
     streamexportCSVField(qw422016, mn, fieldName, timestamp, value)
-//line app/vmselect/prometheus/export.qtpl:80
+//line app/vmselect/prometheus/export.qtpl:81
     qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/export.qtpl:80
+//line app/vmselect/prometheus/export.qtpl:81
 }
 
-//line app/vmselect/prometheus/export.qtpl:80
+//line app/vmselect/prometheus/export.qtpl:81
|
||||||
func exportCSVField(mn *storage.MetricName, fieldName string, timestamp int64, value float64) string {
|
func exportCSVField(mn *storage.MetricName, fieldName string, timestamp int64, value float64) string {
|
||||||
//line app/vmselect/prometheus/export.qtpl:80
|
//line app/vmselect/prometheus/export.qtpl:81
|
||||||
qb422016 := qt422016.AcquireByteBuffer()
|
qb422016 := qt422016.AcquireByteBuffer()
|
||||||
//line app/vmselect/prometheus/export.qtpl:80
|
//line app/vmselect/prometheus/export.qtpl:81
|
||||||
writeexportCSVField(qb422016, mn, fieldName, timestamp, value)
|
writeexportCSVField(qb422016, mn, fieldName, timestamp, value)
|
||||||
//line app/vmselect/prometheus/export.qtpl:80
|
//line app/vmselect/prometheus/export.qtpl:81
|
||||||
qs422016 := string(qb422016.B)
|
qs422016 := string(qb422016.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:80
|
//line app/vmselect/prometheus/export.qtpl:81
|
||||||
qt422016.ReleaseByteBuffer(qb422016)
|
qt422016.ReleaseByteBuffer(qb422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:80
|
//line app/vmselect/prometheus/export.qtpl:81
|
||||||
return qs422016
|
return qs422016
|
||||||
//line app/vmselect/prometheus/export.qtpl:80
|
//line app/vmselect/prometheus/export.qtpl:81
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:82
|
//line app/vmselect/prometheus/export.qtpl:83
|
||||||
func StreamExportPrometheusLine(qw422016 *qt422016.Writer, xb *exportBlock) {
|
func StreamExportPrometheusLine(qw422016 *qt422016.Writer, xb *exportBlock) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:83
|
|
||||||
if len(xb.timestamps) == 0 {
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:83
|
|
||||||
return
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:83
|
|
||||||
}
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:84
|
//line app/vmselect/prometheus/export.qtpl:84
|
||||||
|
if len(xb.timestamps) == 0 {
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:84
|
||||||
|
return
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:84
|
||||||
|
}
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:85
|
||||||
bb := quicktemplate.AcquireByteBuffer()
|
bb := quicktemplate.AcquireByteBuffer()
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:85
|
//line app/vmselect/prometheus/export.qtpl:86
|
||||||
writeprometheusMetricName(bb, xb.mn)
|
writeprometheusMetricName(bb, xb.mn)
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:86
|
//line app/vmselect/prometheus/export.qtpl:87
|
||||||
for i, ts := range xb.timestamps {
|
for i, ts := range xb.timestamps {
|
||||||
//line app/vmselect/prometheus/export.qtpl:87
|
//line app/vmselect/prometheus/export.qtpl:88
|
||||||
qw422016.N().Z(bb.B)
|
qw422016.N().Z(bb.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:87
|
|
||||||
qw422016.N().S(` `)
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:88
|
//line app/vmselect/prometheus/export.qtpl:88
|
||||||
|
qw422016.N().S(` `)
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:89
|
||||||
qw422016.N().F(xb.values[i])
|
qw422016.N().F(xb.values[i])
|
||||||
//line app/vmselect/prometheus/export.qtpl:88
|
//line app/vmselect/prometheus/export.qtpl:89
|
||||||
qw422016.N().S(` `)
|
qw422016.N().S(` `)
|
||||||
//line app/vmselect/prometheus/export.qtpl:89
|
//line app/vmselect/prometheus/export.qtpl:90
|
||||||
qw422016.N().DL(ts)
|
qw422016.N().DL(ts)
|
||||||
//line app/vmselect/prometheus/export.qtpl:89
|
//line app/vmselect/prometheus/export.qtpl:90
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
`)
|
`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:90
|
|
||||||
}
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:91
|
//line app/vmselect/prometheus/export.qtpl:91
|
||||||
|
}
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:92
|
||||||
quicktemplate.ReleaseByteBuffer(bb)
|
quicktemplate.ReleaseByteBuffer(bb)
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
func WriteExportPrometheusLine(qq422016 qtio422016.Writer, xb *exportBlock) {
|
func WriteExportPrometheusLine(qq422016 qtio422016.Writer, xb *exportBlock) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
StreamExportPrometheusLine(qw422016, xb)
|
StreamExportPrometheusLine(qw422016, xb)
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
qt422016.ReleaseWriter(qw422016)
|
qt422016.ReleaseWriter(qw422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
func ExportPrometheusLine(xb *exportBlock) string {
|
func ExportPrometheusLine(xb *exportBlock) string {
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
qb422016 := qt422016.AcquireByteBuffer()
|
qb422016 := qt422016.AcquireByteBuffer()
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
WriteExportPrometheusLine(qb422016, xb)
|
WriteExportPrometheusLine(qb422016, xb)
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
qs422016 := string(qb422016.B)
|
qs422016 := string(qb422016.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
qt422016.ReleaseByteBuffer(qb422016)
|
qt422016.ReleaseByteBuffer(qb422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
return qs422016
|
return qs422016
|
||||||
//line app/vmselect/prometheus/export.qtpl:92
|
//line app/vmselect/prometheus/export.qtpl:93
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:94
|
//line app/vmselect/prometheus/export.qtpl:95
|
||||||
func StreamExportJSONLine(qw422016 *qt422016.Writer, xb *exportBlock) {
|
func StreamExportJSONLine(qw422016 *qt422016.Writer, xb *exportBlock) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:95
|
//line app/vmselect/prometheus/export.qtpl:96
|
||||||
if len(xb.timestamps) == 0 {
|
if len(xb.timestamps) == 0 {
|
||||||
//line app/vmselect/prometheus/export.qtpl:95
|
//line app/vmselect/prometheus/export.qtpl:96
|
||||||
return
|
return
|
||||||
//line app/vmselect/prometheus/export.qtpl:95
|
//line app/vmselect/prometheus/export.qtpl:96
|
||||||
}
|
}
|
||||||
//line app/vmselect/prometheus/export.qtpl:95
|
//line app/vmselect/prometheus/export.qtpl:96
|
||||||
qw422016.N().S(`{"metric":`)
|
qw422016.N().S(`{"metric":`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:97
|
//line app/vmselect/prometheus/export.qtpl:98
|
||||||
streammetricNameObject(qw422016, xb.mn)
|
streammetricNameObject(qw422016, xb.mn)
|
||||||
//line app/vmselect/prometheus/export.qtpl:97
|
//line app/vmselect/prometheus/export.qtpl:98
|
||||||
qw422016.N().S(`,"values":[`)
|
qw422016.N().S(`,"values":[`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:99
|
|
||||||
if len(xb.values) > 0 {
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:100
|
//line app/vmselect/prometheus/export.qtpl:100
|
||||||
|
if len(xb.values) > 0 {
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:101
|
||||||
values := xb.values
|
values := xb.values
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:101
|
|
||||||
qw422016.N().F(values[0])
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:102
|
//line app/vmselect/prometheus/export.qtpl:102
|
||||||
|
qw422016.N().F(values[0])
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:103
|
||||||
values = values[1:]
|
values = values[1:]
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:103
|
//line app/vmselect/prometheus/export.qtpl:104
|
||||||
for _, v := range values {
|
for _, v := range values {
|
||||||
//line app/vmselect/prometheus/export.qtpl:103
|
//line app/vmselect/prometheus/export.qtpl:104
|
||||||
qw422016.N().S(`,`)
|
qw422016.N().S(`,`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:104
|
|
||||||
if math.IsNaN(v) {
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:104
|
|
||||||
qw422016.N().S(`null`)
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:104
|
|
||||||
} else {
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:104
|
|
||||||
qw422016.N().F(v)
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:104
|
|
||||||
}
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:105
|
//line app/vmselect/prometheus/export.qtpl:105
|
||||||
|
if math.IsNaN(v) {
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:105
|
||||||
|
qw422016.N().S(`null`)
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:105
|
||||||
|
} else {
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:105
|
||||||
|
qw422016.N().F(v)
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:105
|
||||||
|
}
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:106
|
||||||
}
|
}
|
||||||
//line app/vmselect/prometheus/export.qtpl:106
|
//line app/vmselect/prometheus/export.qtpl:107
|
||||||
}
|
}
|
||||||
//line app/vmselect/prometheus/export.qtpl:106
|
//line app/vmselect/prometheus/export.qtpl:107
|
||||||
qw422016.N().S(`],"timestamps":[`)
|
qw422016.N().S(`],"timestamps":[`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:109
|
|
||||||
if len(xb.timestamps) > 0 {
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:110
|
//line app/vmselect/prometheus/export.qtpl:110
|
||||||
|
if len(xb.timestamps) > 0 {
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:111
|
||||||
timestamps := xb.timestamps
|
timestamps := xb.timestamps
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:111
|
|
||||||
qw422016.N().DL(timestamps[0])
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:112
|
//line app/vmselect/prometheus/export.qtpl:112
|
||||||
|
qw422016.N().DL(timestamps[0])
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:113
|
||||||
timestamps = timestamps[1:]
|
timestamps = timestamps[1:]
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:113
|
|
||||||
for _, ts := range timestamps {
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:113
|
|
||||||
qw422016.N().S(`,`)
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:114
|
//line app/vmselect/prometheus/export.qtpl:114
|
||||||
qw422016.N().DL(ts)
|
for _, ts := range timestamps {
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:114
|
||||||
|
qw422016.N().S(`,`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:115
|
//line app/vmselect/prometheus/export.qtpl:115
|
||||||
|
qw422016.N().DL(ts)
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:116
|
||||||
}
|
}
|
||||||
//line app/vmselect/prometheus/export.qtpl:116
|
//line app/vmselect/prometheus/export.qtpl:117
|
||||||
}
|
}
|
||||||
//line app/vmselect/prometheus/export.qtpl:116
|
//line app/vmselect/prometheus/export.qtpl:117
|
||||||
qw422016.N().S(`]}`)
|
qw422016.N().S(`]}`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:118
|
//line app/vmselect/prometheus/export.qtpl:119
|
||||||
qw422016.N().S(`
|
qw422016.N().S(`
|
||||||
`)
|
`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
func WriteExportJSONLine(qq422016 qtio422016.Writer, xb *exportBlock) {
|
func WriteExportJSONLine(qq422016 qtio422016.Writer, xb *exportBlock) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
StreamExportJSONLine(qw422016, xb)
|
StreamExportJSONLine(qw422016, xb)
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
qt422016.ReleaseWriter(qw422016)
|
qt422016.ReleaseWriter(qw422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
func ExportJSONLine(xb *exportBlock) string {
|
func ExportJSONLine(xb *exportBlock) string {
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
qb422016 := qt422016.AcquireByteBuffer()
|
qb422016 := qt422016.AcquireByteBuffer()
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
WriteExportJSONLine(qb422016, xb)
|
WriteExportJSONLine(qb422016, xb)
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
qs422016 := string(qb422016.B)
|
qs422016 := string(qb422016.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
qt422016.ReleaseByteBuffer(qb422016)
|
qt422016.ReleaseByteBuffer(qb422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
return qs422016
|
return qs422016
|
||||||
//line app/vmselect/prometheus/export.qtpl:119
|
//line app/vmselect/prometheus/export.qtpl:120
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:121
|
//line app/vmselect/prometheus/export.qtpl:122
|
||||||
func StreamExportPromAPILine(qw422016 *qt422016.Writer, xb *exportBlock) {
|
func StreamExportPromAPILine(qw422016 *qt422016.Writer, xb *exportBlock) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:121
|
//line app/vmselect/prometheus/export.qtpl:122
|
||||||
qw422016.N().S(`{"metric":`)
|
qw422016.N().S(`{"metric":`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:123
|
//line app/vmselect/prometheus/export.qtpl:124
|
||||||
streammetricNameObject(qw422016, xb.mn)
|
streammetricNameObject(qw422016, xb.mn)
|
||||||
//line app/vmselect/prometheus/export.qtpl:123
|
//line app/vmselect/prometheus/export.qtpl:124
|
||||||
qw422016.N().S(`,"values":`)
|
qw422016.N().S(`,"values":`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:124
|
//line app/vmselect/prometheus/export.qtpl:125
|
||||||
streamvaluesWithTimestamps(qw422016, xb.values, xb.timestamps)
|
streamvaluesWithTimestamps(qw422016, xb.values, xb.timestamps)
|
||||||
//line app/vmselect/prometheus/export.qtpl:124
|
//line app/vmselect/prometheus/export.qtpl:125
|
||||||
qw422016.N().S(`}`)
|
qw422016.N().S(`}`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
func WriteExportPromAPILine(qq422016 qtio422016.Writer, xb *exportBlock) {
|
func WriteExportPromAPILine(qq422016 qtio422016.Writer, xb *exportBlock) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
StreamExportPromAPILine(qw422016, xb)
|
StreamExportPromAPILine(qw422016, xb)
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
qt422016.ReleaseWriter(qw422016)
|
qt422016.ReleaseWriter(qw422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
func ExportPromAPILine(xb *exportBlock) string {
|
func ExportPromAPILine(xb *exportBlock) string {
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
qb422016 := qt422016.AcquireByteBuffer()
|
qb422016 := qt422016.AcquireByteBuffer()
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
WriteExportPromAPILine(qb422016, xb)
|
WriteExportPromAPILine(qb422016, xb)
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
qs422016 := string(qb422016.B)
|
qs422016 := string(qb422016.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
qt422016.ReleaseByteBuffer(qb422016)
|
qt422016.ReleaseByteBuffer(qb422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
return qs422016
|
return qs422016
|
||||||
//line app/vmselect/prometheus/export.qtpl:126
|
//line app/vmselect/prometheus/export.qtpl:127
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:128
|
//line app/vmselect/prometheus/export.qtpl:129
|
||||||
func StreamExportPromAPIResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
func StreamExportPromAPIResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:128
|
//line app/vmselect/prometheus/export.qtpl:129
|
||||||
qw422016.N().S(`{"status":"success","data":{"resultType":"matrix","result":[`)
|
qw422016.N().S(`{`)
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:132
|
||||||
|
lines := 0
|
||||||
|
bytesTotal := 0
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:134
|
//line app/vmselect/prometheus/export.qtpl:134
|
||||||
|
qw422016.N().S(`"status":"success","data":{"resultType":"matrix","result":[`)
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:139
|
||||||
bb, ok := <-resultsCh
|
bb, ok := <-resultsCh
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:135
|
//line app/vmselect/prometheus/export.qtpl:140
|
||||||
if ok {
|
if ok {
|
||||||
//line app/vmselect/prometheus/export.qtpl:136
|
//line app/vmselect/prometheus/export.qtpl:141
|
||||||
qw422016.N().Z(bb.B)
|
qw422016.N().Z(bb.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:137
|
//line app/vmselect/prometheus/export.qtpl:143
|
||||||
|
lines++
|
||||||
|
bytesTotal += len(bb.B)
|
||||||
quicktemplate.ReleaseByteBuffer(bb)
|
quicktemplate.ReleaseByteBuffer(bb)
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:138
|
//line app/vmselect/prometheus/export.qtpl:147
|
||||||
for bb := range resultsCh {
|
for bb := range resultsCh {
|
||||||
//line app/vmselect/prometheus/export.qtpl:138
|
//line app/vmselect/prometheus/export.qtpl:147
|
||||||
qw422016.N().S(`,`)
|
qw422016.N().S(`,`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:139
|
//line app/vmselect/prometheus/export.qtpl:148
|
||||||
qw422016.N().Z(bb.B)
|
qw422016.N().Z(bb.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:140
|
//line app/vmselect/prometheus/export.qtpl:150
|
||||||
|
lines++
|
||||||
|
bytesTotal += len(bb.B)
|
||||||
quicktemplate.ReleaseByteBuffer(bb)
|
quicktemplate.ReleaseByteBuffer(bb)
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:141
|
//line app/vmselect/prometheus/export.qtpl:154
|
||||||
}
|
}
|
||||||
//line app/vmselect/prometheus/export.qtpl:142
|
//line app/vmselect/prometheus/export.qtpl:155
|
||||||
}
|
}
|
||||||
//line app/vmselect/prometheus/export.qtpl:142
|
//line app/vmselect/prometheus/export.qtpl:155
|
||||||
qw422016.N().S(`]}}`)
|
qw422016.N().S(`]}`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:159
|
||||||
|
qt.Donef("export format=promapi: lines=%d, bytes=%d", lines, bytesTotal)
|
||||||
|
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:161
|
||||||
|
streamdumpQueryTrace(qw422016, qt)
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:161
|
||||||
|
qw422016.N().S(`}`)
|
||||||
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
func WriteExportPromAPIResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
func WriteExportPromAPIResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
StreamExportPromAPIResponse(qw422016, resultsCh)
|
StreamExportPromAPIResponse(qw422016, resultsCh, qt)
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
qt422016.ReleaseWriter(qw422016)
|
qt422016.ReleaseWriter(qw422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
|
func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) string {
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
qb422016 := qt422016.AcquireByteBuffer()
|
qb422016 := qt422016.AcquireByteBuffer()
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
WriteExportPromAPIResponse(qb422016, resultsCh)
|
WriteExportPromAPIResponse(qb422016, resultsCh, qt)
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
qs422016 := string(qb422016.B)
|
qs422016 := string(qb422016.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
qt422016.ReleaseByteBuffer(qb422016)
|
qt422016.ReleaseByteBuffer(qb422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
return qs422016
|
return qs422016
|
||||||
//line app/vmselect/prometheus/export.qtpl:146
|
//line app/vmselect/prometheus/export.qtpl:163
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:148
|
//line app/vmselect/prometheus/export.qtpl:165
|
||||||
func StreamExportStdResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
func StreamExportStdResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:149
|
//line app/vmselect/prometheus/export.qtpl:166
|
||||||
for bb := range resultsCh {
|
for bb := range resultsCh {
|
||||||
//line app/vmselect/prometheus/export.qtpl:150
|
//line app/vmselect/prometheus/export.qtpl:167
|
||||||
qw422016.N().Z(bb.B)
|
qw422016.N().Z(bb.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:151
|
//line app/vmselect/prometheus/export.qtpl:168
|
||||||
quicktemplate.ReleaseByteBuffer(bb)
|
quicktemplate.ReleaseByteBuffer(bb)
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:152
|
//line app/vmselect/prometheus/export.qtpl:169
|
||||||
}
|
}
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
func WriteExportStdResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
func WriteExportStdResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
StreamExportStdResponse(qw422016, resultsCh)
|
StreamExportStdResponse(qw422016, resultsCh, qt)
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
qt422016.ReleaseWriter(qw422016)
|
qt422016.ReleaseWriter(qw422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
|
func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) string {
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
qb422016 := qt422016.AcquireByteBuffer()
|
qb422016 := qt422016.AcquireByteBuffer()
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
WriteExportStdResponse(qb422016, resultsCh)
|
WriteExportStdResponse(qb422016, resultsCh, qt)
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
qs422016 := string(qb422016.B)
|
qs422016 := string(qb422016.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
qt422016.ReleaseByteBuffer(qb422016)
|
qt422016.ReleaseByteBuffer(qb422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
return qs422016
|
return qs422016
|
||||||
//line app/vmselect/prometheus/export.qtpl:153
|
//line app/vmselect/prometheus/export.qtpl:170
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:155
|
//line app/vmselect/prometheus/export.qtpl:172
|
||||||
func streamprometheusMetricName(qw422016 *qt422016.Writer, mn *storage.MetricName) {
|
func streamprometheusMetricName(qw422016 *qt422016.Writer, mn *storage.MetricName) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:156
|
//line app/vmselect/prometheus/export.qtpl:173
|
||||||
qw422016.N().Z(mn.MetricGroup)
|
qw422016.N().Z(mn.MetricGroup)
|
||||||
//line app/vmselect/prometheus/export.qtpl:157
|
//line app/vmselect/prometheus/export.qtpl:174
|
||||||
if len(mn.Tags) > 0 {
|
if len(mn.Tags) > 0 {
|
||||||
//line app/vmselect/prometheus/export.qtpl:157
|
//line app/vmselect/prometheus/export.qtpl:174
|
||||||
qw422016.N().S(`{`)
|
qw422016.N().S(`{`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:159
|
//line app/vmselect/prometheus/export.qtpl:176
|
||||||
tags := mn.Tags
|
tags := mn.Tags
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:160
|
//line app/vmselect/prometheus/export.qtpl:177
|
||||||
qw422016.N().Z(tags[0].Key)
|
qw422016.N().Z(tags[0].Key)
|
||||||
//line app/vmselect/prometheus/export.qtpl:160
|
//line app/vmselect/prometheus/export.qtpl:177
|
||||||
qw422016.N().S(`=`)
|
qw422016.N().S(`=`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:160
|
//line app/vmselect/prometheus/export.qtpl:177
|
||||||
qw422016.N().QZ(tags[0].Value)
|
qw422016.N().QZ(tags[0].Value)
|
||||||
//line app/vmselect/prometheus/export.qtpl:161
|
//line app/vmselect/prometheus/export.qtpl:178
|
||||||
tags = tags[1:]
|
tags = tags[1:]
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:162
|
//line app/vmselect/prometheus/export.qtpl:179
|
||||||
for i := range tags {
|
for i := range tags {
|
||||||
//line app/vmselect/prometheus/export.qtpl:163
|
//line app/vmselect/prometheus/export.qtpl:180
|
||||||
tag := &tags[i]
|
tag := &tags[i]
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:163
|
//line app/vmselect/prometheus/export.qtpl:180
|
||||||
qw422016.N().S(`,`)
|
qw422016.N().S(`,`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:164
|
//line app/vmselect/prometheus/export.qtpl:181
|
||||||
qw422016.N().Z(tag.Key)
|
qw422016.N().Z(tag.Key)
|
||||||
//line app/vmselect/prometheus/export.qtpl:164
|
//line app/vmselect/prometheus/export.qtpl:181
|
||||||
qw422016.N().S(`=`)
|
qw422016.N().S(`=`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:164
|
//line app/vmselect/prometheus/export.qtpl:181
|
||||||
qw422016.N().QZ(tag.Value)
|
qw422016.N().QZ(tag.Value)
|
||||||
//line app/vmselect/prometheus/export.qtpl:165
|
//line app/vmselect/prometheus/export.qtpl:182
|
||||||
}
|
}
|
||||||
//line app/vmselect/prometheus/export.qtpl:165
|
//line app/vmselect/prometheus/export.qtpl:182
|
||||||
qw422016.N().S(`}`)
|
qw422016.N().S(`}`)
|
||||||
//line app/vmselect/prometheus/export.qtpl:167
|
//line app/vmselect/prometheus/export.qtpl:184
|
||||||
}
|
}
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
func writeprometheusMetricName(qq422016 qtio422016.Writer, mn *storage.MetricName) {
|
func writeprometheusMetricName(qq422016 qtio422016.Writer, mn *storage.MetricName) {
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
streamprometheusMetricName(qw422016, mn)
|
streamprometheusMetricName(qw422016, mn)
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
qt422016.ReleaseWriter(qw422016)
|
qt422016.ReleaseWriter(qw422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
}
|
}
|
||||||
|
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
func prometheusMetricName(mn *storage.MetricName) string {
|
func prometheusMetricName(mn *storage.MetricName) string {
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
qb422016 := qt422016.AcquireByteBuffer()
|
qb422016 := qt422016.AcquireByteBuffer()
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
writeprometheusMetricName(qb422016, mn)
|
writeprometheusMetricName(qb422016, mn)
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
qs422016 := string(qb422016.B)
|
qs422016 := string(qb422016.B)
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
qt422016.ReleaseByteBuffer(qb422016)
|
qt422016.ReleaseByteBuffer(qb422016)
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
return qs422016
|
return qs422016
|
||||||
//line app/vmselect/prometheus/export.qtpl:168
|
//line app/vmselect/prometheus/export.qtpl:185
|
||||||
}
|
}
|
||||||
|
|
|
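
The regenerated writer above threads a `*querytracer.Tracer` through the `promapi` response: it counts lines and bytes, finishes the trace via `qt.Donef`, and appends the trace via `streamdumpQueryTrace` before the closing brace. A minimal sketch of how this writer can be driven from inside the same `prometheus` package; the `querytracer.New(enabled)` constructor is an assumption (it is not part of this excerpt), while everything else uses only identifiers visible above:

```go
// Hypothetical example, not part of this commit. Assumes the imports already
// present in this package plus "io".
func exportPromAPIExampleSketch(w io.Writer) {
	resultsCh := make(chan *quicktemplate.ByteBuffer, 1)
	bb := quicktemplate.AcquireByteBuffer()
	// Placeholder payload; real lines are produced by StreamExportPromAPILine.
	bb.B = append(bb.B, `{"metric":{"__name__":"up"},"values":[[1654034340,"1"]]}`...)
	resultsCh <- bb
	close(resultsCh)

	qt := querytracer.New(true) // assumed constructor signature
	// The writer drains resultsCh, stamps the trace with qt.Donef and
	// appends the "trace" member before the final `}`.
	WriteExportPromAPIResponse(w, resultsCh, qt)
}
```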
@ -1,7 +1,12 @@
|
||||||
{% stripspace %}
|
{% stripspace %}
|
||||||
|
|
||||||
|
{% import (
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
|
||||||
|
) %}
|
||||||
|
|
||||||
LabelValuesResponse generates response for /api/v1/label/<labelName>/values .
|
LabelValuesResponse generates response for /api/v1/label/<labelName>/values .
|
||||||
See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
|
See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
|
||||||
{% func LabelValuesResponse(labelValues []string) %}
|
{% func LabelValuesResponse(labelValues []string, qt *querytracer.Tracer, qtDone func()) %}
|
||||||
{
|
{
|
||||||
"status":"success",
|
"status":"success",
|
||||||
"data":[
|
"data":[
|
||||||
|
@ -10,6 +15,11 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-va
|
||||||
{% if i+1 < len(labelValues) %},{% endif %}
|
{% if i+1 < len(labelValues) %},{% endif %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
]
|
]
|
||||||
|
{% code
|
||||||
|
qt.Printf("generate response for %d label values", len(labelValues))
|
||||||
|
qtDone()
|
||||||
|
%}
|
||||||
|
{%= dumpQueryTrace(qt) %}
|
||||||
}
|
}
|
||||||
{% endfunc %}
|
{% endfunc %}
|
||||||
{% endstripspace %}
|
{% endstripspace %}
|
||||||
|
|
|
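
The template now receives both the tracer and a `qtDone` callback: the callback lets the caller finalize the parent trace only after the whole `data` array has been rendered, so the measured duration covers response generation, and `dumpQueryTrace` then appends the finished trace before the closing `}`. A hypothetical caller-side wiring (handler code is not part of this excerpt; `qt.Donef` is taken from calls visible elsewhere in this commit):

```go
// Hypothetical sketch; labelName, labelValues, bw and qt come from the
// surrounding handler in the real code.
func writeTracedLabelValues(bw io.Writer, labelValues []string, labelName string, qt *querytracer.Tracer) {
	qtDone := func() {
		// Stamp the parent trace once the response body is fully generated.
		qt.Donef("/api/v1/label/%s/values", labelName)
	}
	WriteLabelValuesResponse(bw, labelValues, qt, qtDone)
}
```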

@@ -1,67 +1,80 @@
// Code generated by qtc from "label_values_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.

//line app/vmselect/prometheus/label_values_response.qtpl:3
package prometheus

//line app/vmselect/prometheus/label_values_response.qtpl:3
import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

// LabelValuesResponse generates response for /api/v1/label/<labelName>/values .See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values

//line app/vmselect/prometheus/label_values_response.qtpl:9
import (
	qtio422016 "io"

	qt422016 "github.com/valyala/quicktemplate"
)

//line app/vmselect/prometheus/label_values_response.qtpl:9
var (
	_ = qtio422016.Copy
	_ = qt422016.AcquireByteBuffer
)

//line app/vmselect/prometheus/label_values_response.qtpl:9
func StreamLabelValuesResponse(qw422016 *qt422016.Writer, labelValues []string, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/label_values_response.qtpl:9
	qw422016.N().S(`{"status":"success","data":[`)
//line app/vmselect/prometheus/label_values_response.qtpl:13
	for i, labelValue := range labelValues {
//line app/vmselect/prometheus/label_values_response.qtpl:14
		qw422016.N().Q(labelValue)
//line app/vmselect/prometheus/label_values_response.qtpl:15
		if i+1 < len(labelValues) {
//line app/vmselect/prometheus/label_values_response.qtpl:15
			qw422016.N().S(`,`)
//line app/vmselect/prometheus/label_values_response.qtpl:15
		}
//line app/vmselect/prometheus/label_values_response.qtpl:16
	}
//line app/vmselect/prometheus/label_values_response.qtpl:16
	qw422016.N().S(`]`)
//line app/vmselect/prometheus/label_values_response.qtpl:19
	qt.Printf("generate response for %d label values", len(labelValues))
	qtDone()

//line app/vmselect/prometheus/label_values_response.qtpl:22
	streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/label_values_response.qtpl:22
	qw422016.N().S(`}`)
//line app/vmselect/prometheus/label_values_response.qtpl:24
}

//line app/vmselect/prometheus/label_values_response.qtpl:24
func WriteLabelValuesResponse(qq422016 qtio422016.Writer, labelValues []string, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/label_values_response.qtpl:24
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/label_values_response.qtpl:24
	StreamLabelValuesResponse(qw422016, labelValues, qt, qtDone)
//line app/vmselect/prometheus/label_values_response.qtpl:24
	qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/label_values_response.qtpl:24
}

//line app/vmselect/prometheus/label_values_response.qtpl:24
func LabelValuesResponse(labelValues []string, qt *querytracer.Tracer, qtDone func()) string {
//line app/vmselect/prometheus/label_values_response.qtpl:24
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/label_values_response.qtpl:24
	WriteLabelValuesResponse(qb422016, labelValues, qt, qtDone)
//line app/vmselect/prometheus/label_values_response.qtpl:24
	qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/label_values_response.qtpl:24
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/label_values_response.qtpl:24
	return qs422016
//line app/vmselect/prometheus/label_values_response.qtpl:24
}

@@ -1,7 +1,12 @@
 {% stripspace %}

+{% import (
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
+) %}
+
 LabelsResponse generates response for /api/v1/labels .
 See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
-{% func LabelsResponse(labels []string) %}
+{% func LabelsResponse(labels []string, qt *querytracer.Tracer, qtDone func()) %}
 {
 "status":"success",
 "data":[
@@ -10,6 +15,11 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
 {% if i+1 < len(labels) %},{% endif %}
 {% endfor %}
 ]
+{% code
+qt.Printf("generate response for %d labels", len(labels))
+qtDone()
+%}
+{%= dumpQueryTrace(qt) %}
 }
 {% endfunc %}
 {% endstripspace %}

@@ -1,67 +1,80 @@
// Code generated by qtc from "labels_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.

//line app/vmselect/prometheus/labels_response.qtpl:3
package prometheus

//line app/vmselect/prometheus/labels_response.qtpl:3
import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

// LabelsResponse generates response for /api/v1/labels .See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names

//line app/vmselect/prometheus/labels_response.qtpl:9
import (
	qtio422016 "io"

	qt422016 "github.com/valyala/quicktemplate"
)

//line app/vmselect/prometheus/labels_response.qtpl:9
var (
	_ = qtio422016.Copy
	_ = qt422016.AcquireByteBuffer
)

//line app/vmselect/prometheus/labels_response.qtpl:9
func StreamLabelsResponse(qw422016 *qt422016.Writer, labels []string, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/labels_response.qtpl:9
	qw422016.N().S(`{"status":"success","data":[`)
//line app/vmselect/prometheus/labels_response.qtpl:13
	for i, label := range labels {
//line app/vmselect/prometheus/labels_response.qtpl:14
		qw422016.N().Q(label)
//line app/vmselect/prometheus/labels_response.qtpl:15
		if i+1 < len(labels) {
//line app/vmselect/prometheus/labels_response.qtpl:15
			qw422016.N().S(`,`)
//line app/vmselect/prometheus/labels_response.qtpl:15
		}
//line app/vmselect/prometheus/labels_response.qtpl:16
	}
//line app/vmselect/prometheus/labels_response.qtpl:16
	qw422016.N().S(`]`)
//line app/vmselect/prometheus/labels_response.qtpl:19
	qt.Printf("generate response for %d labels", len(labels))
	qtDone()

//line app/vmselect/prometheus/labels_response.qtpl:22
	streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/labels_response.qtpl:22
	qw422016.N().S(`}`)
//line app/vmselect/prometheus/labels_response.qtpl:24
}

//line app/vmselect/prometheus/labels_response.qtpl:24
func WriteLabelsResponse(qq422016 qtio422016.Writer, labels []string, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/labels_response.qtpl:24
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/labels_response.qtpl:24
	StreamLabelsResponse(qw422016, labels, qt, qtDone)
//line app/vmselect/prometheus/labels_response.qtpl:24
	qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/labels_response.qtpl:24
}

//line app/vmselect/prometheus/labels_response.qtpl:24
func LabelsResponse(labels []string, qt *querytracer.Tracer, qtDone func()) string {
//line app/vmselect/prometheus/labels_response.qtpl:24
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/labels_response.qtpl:24
	WriteLabelsResponse(qb422016, labels, qt, qtDone)
//line app/vmselect/prometheus/labels_response.qtpl:24
	qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/labels_response.qtpl:24
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/labels_response.qtpl:24
	return qs422016
//line app/vmselect/prometheus/labels_response.qtpl:24
}
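
Both generated files above call `streamdumpQueryTrace(qw422016, qt)`, whose definition lives in another file of this commit and is not shown in this excerpt. A plausible shape, sketched under the assumption that the tracer exposes a `ToJSON`-style serializer (the method name is a guess, not taken from this diff):

```go
// Assumed sketch of the shared helper; the real implementation ships in a
// separate file of this commit.
func streamdumpQueryTrace(qw422016 *qt422016.Writer, qt *querytracer.Tracer) {
	traceJSON := qt.ToJSON() // assumed method for serializing a finished trace
	if traceJSON == "" {
		return // tracing disabled for this request
	}
	qw422016.N().S(`,"trace":`)
	qw422016.N().S(traceJSON)
}
```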

@@ -23,6 +23,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 	"github.com/VictoriaMetrics/metrics"
 	"github.com/valyala/fastjson/fastfloat"
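
Several call sites below pass a nil `*querytracer.Tracer` while the callees invoke tracer methods unconditionally, which only works if every `Tracer` method degrades to a no-op on a nil receiver. That contract is implied by this diff rather than shown in it; the sketch below states the assumption explicitly, using only methods visible in this commit:

```go
// Assumed nil-safety contract implied by the nil arguments below; not part
// of this commit. A handler that wants tracing would instead construct a
// non-nil tracer and pass it down.
func nilTracerIsNoOp() {
	var qt *querytracer.Tracer // nil: tracing disabled for this request
	qt.Printf("not recorded")  // must be a no-op on a nil receiver
	qtChild := qt.NewChild()   // presumably yields a nil child
	qtChild.Donef("not recorded")
}
```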

@@ -85,7 +86,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
 		return err
 	}
 	sq := storage.NewSearchQuery(start, end, tagFilterss, *maxFederateSeries)
-	rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
+	rss, err := netstorage.ProcessSearchQuery(nil, sq, true, deadline)
 	if err != nil {
 		return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 	}
@@ -93,7 +94,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
 	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
 	bw := bufferedwriter.Get(w)
 	defer bufferedwriter.Put(bw)
-	err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
+	err = rss.RunParallel(nil, func(rs *netstorage.Result, workerID uint) error {
 		if err := bw.Error(); err != nil {
 			return err
 		}
@@ -148,12 +149,12 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
 	}
 	doneCh := make(chan error, 1)
 	if !reduceMemUsage {
-		rss, err := netstorage.ProcessSearchQuery(sq, true, ep.deadline)
+		rss, err := netstorage.ProcessSearchQuery(nil, sq, true, ep.deadline)
 		if err != nil {
 			return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 		}
 		go func() {
-			err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
+			err := rss.RunParallel(nil, func(rs *netstorage.Result, workerID uint) error {
 				if err := bw.Error(); err != nil {
 					return err
 				}
@@ -171,7 +172,7 @@
 		}()
 	} else {
 		go func() {
-			err := netstorage.ExportBlocks(sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
+			err := netstorage.ExportBlocks(nil, sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
 				if err := bw.Error(); err != nil {
 					return err
 				}
@@ -232,7 +233,7 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
 	_, _ = bw.Write(trBuf)

 	// Marshal native blocks.
-	err = netstorage.ExportBlocks(sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
+	err = netstorage.ExportBlocks(nil, sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
 		if err := bw.Error(); err != nil {
 			return err
 		}
@@ -287,7 +288,7 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 	format := r.FormValue("format")
 	maxRowsPerLine := int(fastfloat.ParseInt64BestEffort(r.FormValue("max_rows_per_line")))
 	reduceMemUsage := searchutils.GetBool(r, "reduce_mem_usage")
-	if err := exportHandler(w, ep, format, maxRowsPerLine, reduceMemUsage); err != nil {
+	if err := exportHandler(nil, w, ep, format, maxRowsPerLine, reduceMemUsage); err != nil {
 		return fmt.Errorf("error when exporting data on the time range (start=%d, end=%d): %w", ep.start, ep.end, err)
 	}
 	return nil
@@ -295,7 +296,7 @@

 var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)

-func exportHandler(w http.ResponseWriter, ep *exportParams, format string, maxRowsPerLine int, reduceMemUsage bool) error {
+func exportHandler(qt *querytracer.Tracer, w http.ResponseWriter, ep *exportParams, format string, maxRowsPerLine int, reduceMemUsage bool) error {
 	writeResponseFunc := WriteExportStdResponse
 	writeLineFunc := func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
 		bb := quicktemplate.AcquireByteBuffer()
@@ -356,12 +357,13 @@
 	resultsCh := make(chan *quicktemplate.ByteBuffer, cgroup.AvailableCPUs())
 	doneCh := make(chan error, 1)
 	if !reduceMemUsage {
-		rss, err := netstorage.ProcessSearchQuery(sq, true, ep.deadline)
+		rss, err := netstorage.ProcessSearchQuery(qt, sq, true, ep.deadline)
 		if err != nil {
 			return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 		}
+		qtChild := qt.NewChild()
 		go func() {
-			err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
+			err := rss.RunParallel(qtChild, func(rs *netstorage.Result, workerID uint) error {
 				if err := bw.Error(); err != nil {
 					return err
 				}
@@ -374,12 +376,14 @@
 				exportBlockPool.Put(xb)
 				return nil
 			})
+			qtChild.Donef("background export format=%s", format)
 			close(resultsCh)
 			doneCh <- err
 		}()
 	} else {
+		qtChild := qt.NewChild()
 		go func() {
-			err := netstorage.ExportBlocks(sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
+			err := netstorage.ExportBlocks(qtChild, sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
 				if err := bw.Error(); err != nil {
 					return err
 				}
@@ -396,13 +400,14 @@
 				exportBlockPool.Put(xb)
 				return nil
 			})
+			qtChild.Donef("background export format=%s", format)
 			close(resultsCh)
 			doneCh <- err
 		}()
 	}

 	// writeResponseFunc must consume all the data from resultsCh.
-	writeResponseFunc(bw, resultsCh)
+	writeResponseFunc(bw, resultsCh, qt)
 	if err := bw.Flush(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
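
The export hunks above establish the pattern this commit uses for work that finishes in a background goroutine: a child tracer is created with `qt.NewChild()` before the goroutine starts, and the goroutine closes it with `Donef` once all blocks are written, so the time spent in the background export shows up as a nested span of the request trace. A minimal sketch of that lifecycle (the `querytracer.New` constructor signature is an assumption; only `NewChild`, `Donef` and `Printf` are visible in this diff):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

func main() {
	// Assumption: querytracer.New takes an enabled flag; only NewChild,
	// Donef and Printf actually appear in this diff.
	qt := querytracer.New(true)
	qtChild := qt.NewChild()
	doneCh := make(chan error, 1)
	go func() {
		// ... export work happens here, annotated via qtChild.Printf ...
		qtChild.Printf("wrote all blocks")
		// Donef closes the child span once the goroutine is finished.
		qtChild.Donef("background export format=%s", "csv")
		doneCh <- nil
	}()
	if err := <-doneCh; err != nil {
		fmt.Println(err)
	}
	qt.Donef("export complete")
}
```
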
@@ -450,7 +455,7 @@ func DeleteHandler(startTime time.Time, r *http.Request) error {
 }
 ct := startTime.UnixNano() / 1e6
 sq := storage.NewSearchQuery(0, ct, tagFilterss, 0)
-deletedCount, err := netstorage.DeleteSeries(sq, deadline)
+deletedCount, err := netstorage.DeleteSeries(nil, sq, deadline)
 if err != nil {
 return fmt.Errorf("cannot delete time series: %w", err)
 }
@@ -465,7 +470,7 @@ var deleteDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/
 // LabelValuesHandler processes /api/v1/label/<labelName>/values request.
 //
 // See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
-func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWriter, r *http.Request) error {
+func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, labelName string, w http.ResponseWriter, r *http.Request) error {
 defer labelValuesDuration.UpdateDuration(startTime)

 deadline := searchutils.GetDeadlineForQuery(r, startTime)
@@ -481,7 +486,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
 if len(matches) == 0 && len(etfs) == 0 {
 if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
 var err error
-labelValues, err = netstorage.GetLabelValues(labelName, deadline)
+labelValues, err = netstorage.GetLabelValues(qt, labelName, deadline)
 if err != nil {
 return fmt.Errorf(`cannot obtain label values for %q: %w`, labelName, err)
 }
@@ -499,7 +504,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
 MinTimestamp: start,
 MaxTimestamp: end,
 }
-labelValues, err = netstorage.GetLabelValuesOnTimeRange(labelName, tr, deadline)
+labelValues, err = netstorage.GetLabelValuesOnTimeRange(qt, labelName, tr, deadline)
 if err != nil {
 return fmt.Errorf(`cannot obtain label values on time range for %q: %w`, labelName, err)
 }
@@ -521,7 +526,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
 if err != nil {
 return err
 }
-labelValues, err = labelValuesWithMatches(labelName, matches, etfs, start, end, deadline)
+labelValues, err = labelValuesWithMatches(qt, labelName, matches, etfs, start, end, deadline)
 if err != nil {
 return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
 }
@@ -530,14 +535,18 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
 w.Header().Set("Content-Type", "application/json")
 bw := bufferedwriter.Get(w)
 defer bufferedwriter.Put(bw)
-WriteLabelValuesResponse(bw, labelValues)
+qtDone := func() {
+qt.Donef("/api/v1/labels")
+}
+WriteLabelValuesResponse(bw, labelValues, qt, qtDone)
 if err := bw.Flush(); err != nil {
 return fmt.Errorf("canot flush label values to remote client: %w", err)
 }
 return nil
 }

-func labelValuesWithMatches(labelName string, matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
+func labelValuesWithMatches(qt *querytracer.Tracer, labelName string, matches []string, etfs [][]storage.TagFilter,
+start, end int64, deadline searchutils.Deadline) ([]string, error) {
 tagFilterss, err := getTagFilterssFromMatches(matches)
 if err != nil {
 return nil, err
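
The `WriteLabelValuesResponse` call above now receives the tracer plus a `qtDone` closure instead of the handler finalizing the trace itself. The point of the closure is ordering: the generated response writer invokes `qtDone()` only after the payload has been streamed, so response generation is included in the root span before the trace is serialized. A hedged sketch of that control flow (`writeResponse` is a hypothetical stand-in for the generated `Write*Response` functions; `querytracer.New` is an assumed constructor):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

// writeResponse stands in for the generated Write*Response functions
// (hypothetical helper): it streams the payload first, and only then lets
// the handler finalize the root trace, so response generation is included.
func writeResponse(payload string, qt *querytracer.Tracer, qtDone func()) {
	fmt.Print(payload)
	qt.Printf("generate response: bytes=%d", len(payload))
	qtDone() // the generated templates call this before dumping the trace
}

func main() {
	qt := querytracer.New(true) // assumption: enabled-flag constructor
	labelValues := []string{"instance", "job"}
	qtDone := func() {
		qt.Donef("/api/v1/labels")
	}
	writeResponse(fmt.Sprintf("%d label values\n", len(labelValues)), qt, qtDone)
}
```
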
@@ -566,7 +575,7 @@ func labelValuesWithMatches(labelName string, matches []string, etfs [][]storage
 m := make(map[string]struct{})
 if end-start > 24*3600*1000 {
 // It is cheaper to call SearchMetricNames on time ranges exceeding a day.
-mns, err := netstorage.SearchMetricNames(sq, deadline)
+mns, err := netstorage.SearchMetricNames(qt, sq, deadline)
 if err != nil {
 return nil, fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
 }
@@ -578,12 +587,12 @@ func labelValuesWithMatches(labelName string, matches []string, etfs [][]storage
 m[string(labelValue)] = struct{}{}
 }
 } else {
-rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
+rss, err := netstorage.ProcessSearchQuery(qt, sq, false, deadline)
 if err != nil {
 return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 }
 var mLock sync.Mutex
-err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
+err = rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
 labelValue := rs.MetricName.GetTagValue(labelName)
 if len(labelValue) == 0 {
 return nil
@@ -602,6 +611,7 @@ func labelValuesWithMatches(labelName string, matches []string, etfs [][]storage
 labelValues = append(labelValues, labelValue)
 }
 sort.Strings(labelValues)
+qt.Printf("sort %d label values", len(labelValues))
 return labelValues, nil
 }

@@ -612,7 +622,7 @@ func LabelsCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
 defer labelsCountDuration.UpdateDuration(startTime)

 deadline := searchutils.GetDeadlineForStatusRequest(r, startTime)
-labelEntries, err := netstorage.GetLabelEntries(deadline)
+labelEntries, err := netstorage.GetLabelEntries(nil, deadline)
 if err != nil {
 return fmt.Errorf(`cannot obtain label entries: %w`, err)
 }
@@ -674,7 +684,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
 }
 var status *storage.TSDBStatus
 if len(matches) == 0 && len(etfs) == 0 {
-status, err = netstorage.GetTSDBStatusForDate(deadline, date, topN, *maxTSDBStatusSeries)
+status, err = netstorage.GetTSDBStatusForDate(nil, deadline, date, topN, *maxTSDBStatusSeries)
 if err != nil {
 return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
 }
@@ -706,7 +716,7 @@ func tsdbStatusWithMatches(matches []string, etfs [][]storage.TagFilter, date ui
 start := int64(date*secsPerDay) * 1000
 end := int64(date*secsPerDay+secsPerDay) * 1000
 sq := storage.NewSearchQuery(start, end, tagFilterss, maxMetrics)
-status, err := netstorage.GetTSDBStatusWithFilters(deadline, sq, topN)
+status, err := netstorage.GetTSDBStatusWithFilters(nil, deadline, sq, topN)
 if err != nil {
 return nil, err
 }
@@ -718,7 +728,7 @@ var tsdbStatusDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/
 // LabelsHandler processes /api/v1/labels request.
 //
 // See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
-func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
+func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 defer labelsDuration.UpdateDuration(startTime)

 deadline := searchutils.GetDeadlineForQuery(r, startTime)
@@ -734,7 +744,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 if len(matches) == 0 && len(etfs) == 0 {
 if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
 var err error
-labels, err = netstorage.GetLabels(deadline)
+labels, err = netstorage.GetLabels(qt, deadline)
 if err != nil {
 return fmt.Errorf("cannot obtain labels: %w", err)
 }
@@ -752,7 +762,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 MinTimestamp: start,
 MaxTimestamp: end,
 }
-labels, err = netstorage.GetLabelsOnTimeRange(tr, deadline)
+labels, err = netstorage.GetLabelsOnTimeRange(qt, tr, deadline)
 if err != nil {
 return fmt.Errorf("cannot obtain labels on time range: %w", err)
 }
@@ -772,7 +782,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 if err != nil {
 return err
 }
-labels, err = labelsWithMatches(matches, etfs, start, end, deadline)
+labels, err = labelsWithMatches(qt, matches, etfs, start, end, deadline)
 if err != nil {
 return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
 }
@@ -781,14 +791,17 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 w.Header().Set("Content-Type", "application/json")
 bw := bufferedwriter.Get(w)
 defer bufferedwriter.Put(bw)
-WriteLabelsResponse(bw, labels)
+qtDone := func() {
+qt.Donef("/api/v1/labels")
+}
+WriteLabelsResponse(bw, labels, qt, qtDone)
 if err := bw.Flush(); err != nil {
 return fmt.Errorf("cannot send labels response to remote client: %w", err)
 }
 return nil
 }

-func labelsWithMatches(matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
+func labelsWithMatches(qt *querytracer.Tracer, matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
 tagFilterss, err := getTagFilterssFromMatches(matches)
 if err != nil {
 return nil, err
@@ -804,7 +817,7 @@ func labelsWithMatches(matches []string, etfs [][]storage.TagFilter, start, end
 m := make(map[string]struct{})
 if end-start > 24*3600*1000 {
 // It is cheaper to call SearchMetricNames on time ranges exceeding a day.
-mns, err := netstorage.SearchMetricNames(sq, deadline)
+mns, err := netstorage.SearchMetricNames(qt, sq, deadline)
 if err != nil {
 return nil, fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
 }
@@ -817,12 +830,12 @@ func labelsWithMatches(matches []string, etfs [][]storage.TagFilter, start, end
 m["__name__"] = struct{}{}
 }
 } else {
-rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
+rss, err := netstorage.ProcessSearchQuery(qt, sq, false, deadline)
 if err != nil {
 return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 }
 var mLock sync.Mutex
-err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
+err = rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
 mLock.Lock()
 for _, tag := range rs.MetricName.Tags {
 m[string(tag.Key)] = struct{}{}
@@ -840,6 +853,7 @@ func labelsWithMatches(matches []string, etfs [][]storage.TagFilter, start, end
 labels = append(labels, label)
 }
 sort.Strings(labels)
+qt.Printf("sort %d labels", len(labels))
 return labels, nil
 }

@@ -850,7 +864,7 @@ func SeriesCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
 defer seriesCountDuration.UpdateDuration(startTime)

 deadline := searchutils.GetDeadlineForStatusRequest(r, startTime)
-n, err := netstorage.GetSeriesCount(deadline)
+n, err := netstorage.GetSeriesCount(nil, deadline)
 if err != nil {
 return fmt.Errorf("cannot obtain series count: %w", err)
 }
@@ -869,7 +883,7 @@ var seriesCountDuration = metrics.NewSummary(`vm_request_duration_seconds{path="
 // SeriesHandler processes /api/v1/series request.
 //
 // See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
-func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
+func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 defer seriesDuration.UpdateDuration(startTime)

 ct := startTime.UnixNano() / 1e6
@@ -899,9 +913,12 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 end = start + defaultStep
 }
 sq := storage.NewSearchQuery(start, end, tagFilterss, *maxSeriesLimit)
+qtDone := func() {
+qt.Donef("/api/v1/series: start=%d, end=%d", start, end)
+}
 if end-start > 24*3600*1000 {
 // It is cheaper to call SearchMetricNames on time ranges exceeding a day.
-mns, err := netstorage.SearchMetricNames(sq, deadline)
+mns, err := netstorage.SearchMetricNames(qt, sq, deadline)
 if err != nil {
 return fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
 }
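
Several call sites above (`DeleteHandler`, `LabelsCountHandler`, `SeriesCountHandler`, the TSDB-status paths) pass `nil` where the new tracer argument is expected. That compiles because `*querytracer.Tracer` is a pointer type, and it can only be safe at runtime if the tracer's methods are no-ops on a nil receiver; the tracer implementation itself is not part of this section, so that nil-safety is an assumption here:

```go
package main

import "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"

func main() {
	// A nil *Tracer is what DeleteHandler, SeriesCountHandler and the
	// TSDB-status paths now pass down. This only works if every Tracer
	// method is a no-op on a nil receiver; that nil-safety is assumed,
	// since the tracer implementation is not shown in these hunks.
	var qt *querytracer.Tracer // nil: tracing disabled for this request
	qt.Printf("ignored on a nil tracer")
	child := qt.NewChild() // assumed to return a no-op child as well
	child.Donef("ignored")
}
```
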
@@ -918,14 +935,14 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 close(resultsCh)
 }()
 // WriteSeriesResponse must consume all the data from resultsCh.
-WriteSeriesResponse(bw, resultsCh)
+WriteSeriesResponse(bw, resultsCh, qt, qtDone)
 if err := bw.Flush(); err != nil {
 return err
 }
 seriesDuration.UpdateDuration(startTime)
 return nil
 }
-rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
+rss, err := netstorage.ProcessSearchQuery(qt, sq, false, deadline)
 if err != nil {
 return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 }
@@ -936,7 +953,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 resultsCh := make(chan *quicktemplate.ByteBuffer)
 doneCh := make(chan error)
 go func() {
-err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
+err := rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
 if err := bw.Error(); err != nil {
 return err
 }
@@ -949,7 +966,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 doneCh <- err
 }()
 // WriteSeriesResponse must consume all the data from resultsCh.
-WriteSeriesResponse(bw, resultsCh)
+WriteSeriesResponse(bw, resultsCh, qt, qtDone)
 if err := bw.Flush(); err != nil {
 return fmt.Errorf("cannot flush series response to remote client: %w", err)
 }
@@ -965,10 +982,11 @@ var seriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/
 // QueryHandler processes /api/v1/query request.
 //
 // See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
-func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
+func QueryHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 defer queryDuration.UpdateDuration(startTime)

 ct := startTime.UnixNano() / 1e6
+mayCache := !searchutils.GetBool(r, "nocache")
 query := r.FormValue("query")
 if len(query) == 0 {
 return fmt.Errorf("missing `query` arg")
@@ -1021,7 +1039,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
 end: end,
 filterss: filterss,
 }
-if err := exportHandler(w, ep, "promapi", 0, false); err != nil {
+if err := exportHandler(qt, w, ep, "promapi", 0, false); err != nil {
 return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
 }
 queryDuration.UpdateDuration(startTime)
@@ -1037,7 +1055,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
 start -= offset
 end := start
 start = end - window
-if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct, etfs); err != nil {
+if err := queryRangeHandler(qt, startTime, w, childQuery, start, end, step, r, ct, etfs); err != nil {
 return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
 }
 queryDuration.UpdateDuration(startTime)
@@ -1061,11 +1079,12 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
 MaxSeries: *maxUniqueTimeseries,
 QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
 Deadline: deadline,
+MayCache: mayCache,
 LookbackDelta: lookbackDelta,
 RoundDigits: getRoundDigits(r),
 EnforcedTagFilterss: etfs,
 }
-result, err := promql.Exec(&ec, query, true)
+result, err := promql.Exec(qt, &ec, query, true)
 if err != nil {
 return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %w", query, start, step, err)
 }
@@ -1081,7 +1100,10 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
 w.Header().Set("Content-Type", "application/json")
 bw := bufferedwriter.Get(w)
 defer bufferedwriter.Put(bw)
-WriteQueryResponse(bw, result)
+qtDone := func() {
+qt.Donef("/api/v1/query: query=%s, time=%d: series=%d", query, start, len(result))
+}
+WriteQueryResponse(bw, result, qt, qtDone)
 if err := bw.Flush(); err != nil {
 return fmt.Errorf("cannot flush query response to remote client: %w", err)
 }
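
Taken together, the `QueryHandler` hunks show the full wiring: the tracer arrives as the first parameter, is threaded into `promql.Exec`, and is finalized by a `qtDone` closure whose `Donef` message (endpoint, query, series count) becomes the root trace node. A sketch of that flow under stated assumptions (`trace=1` parsing happens in the router, outside this section; `evalQuery` and `writeQueryResponse` are hypothetical stand-ins for `promql.Exec` and the generated `WriteQueryResponse`):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

// handleQuery sketches how QueryHandler is now meant to be driven: the
// router decides whether tracing is on (via a trace=1 query arg per the
// docs; that parsing lives outside this hunk) and passes a tracer down.
func handleQuery(w http.ResponseWriter, r *http.Request) {
	qt := querytracer.New(r.FormValue("trace") == "1") // assumption: enabled-flag constructor
	query := r.FormValue("query")
	series := evalQuery(qt, query) // stand-in for promql.Exec(qt, &ec, query, true)
	qtDone := func() {
		qt.Donef("/api/v1/query: query=%s, time=%d: series=%d", query, 0, len(series))
	}
	writeQueryResponse(w, series, qt, qtDone) // stand-in for WriteQueryResponse
}

func evalQuery(qt *querytracer.Tracer, query string) []string {
	qt.Printf("eval: query=%s", query)
	return []string{"series#0"}
}

func writeQueryResponse(w http.ResponseWriter, series []string, qt *querytracer.Tracer, qtDone func()) {
	fmt.Fprintf(w, `{"status":"success","data":{"result":%q}`, series)
	qtDone()
	// the generated code appends a "trace" field here when tracing is enabled
	fmt.Fprint(w, `}`)
}

func main() {
	http.HandleFunc("/api/v1/query", handleQuery)
	_ = http.ListenAndServe(":8428", nil)
}
```
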
@@ -1093,7 +1115,7 @@ var queryDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v
 // QueryRangeHandler processes /api/v1/query_range request.
 //
 // See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
-func QueryRangeHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
+func QueryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 defer queryRangeDuration.UpdateDuration(startTime)

 ct := startTime.UnixNano() / 1e6
@@ -1117,13 +1139,14 @@ func QueryRangeHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
 if err != nil {
 return err
 }
-if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct, etfs); err != nil {
+if err := queryRangeHandler(qt, startTime, w, query, start, end, step, r, ct, etfs); err != nil {
 return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
 }
 return nil
 }

-func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etfs [][]storage.TagFilter) error {
+func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, query string,
+start, end, step int64, r *http.Request, ct int64, etfs [][]storage.TagFilter) error {
 deadline := searchutils.GetDeadlineForQuery(r, startTime)
 mayCache := !searchutils.GetBool(r, "nocache")
 lookbackDelta, err := getMaxLookback(r)
@@ -1157,7 +1180,7 @@ func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string,
 RoundDigits: getRoundDigits(r),
 EnforcedTagFilterss: etfs,
 }
-result, err := promql.Exec(&ec, query, false)
+result, err := promql.Exec(qt, &ec, query, false)
 if err != nil {
 return fmt.Errorf("cannot execute query: %w", err)
 }
@@ -1175,7 +1198,10 @@ func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string,
 w.Header().Set("Content-Type", "application/json")
 bw := bufferedwriter.Get(w)
 defer bufferedwriter.Put(bw)
-WriteQueryRangeResponse(bw, result)
+qtDone := func() {
+qt.Donef("/api/v1/query_range: start=%d, end=%d, step=%d, query=%q: series=%d", start, end, step, query, len(result))
+}
+WriteQueryRangeResponse(bw, result, qt, qtDone)
 if err := bw.Flush(); err != nil {
 return fmt.Errorf("cannot send query range response to remote client: %w", err)
 }

app/vmselect/prometheus/query_range_response.qtpl
@@ -1,25 +1,37 @@
 {% import (
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 ) %}

 {% stripspace %}
 QueryRangeResponse generates response for /api/v1/query_range.
 See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
-{% func QueryRangeResponse(rs []netstorage.Result) %}
+{% func QueryRangeResponse(rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) %}
 {
+{% code
+seriesCount := len(rs)
+pointsCount := 0
+%}
 "status":"success",
 "data":{
 "resultType":"matrix",
 "result":[
 {% if len(rs) > 0 %}
 {%= queryRangeLine(&rs[0]) %}
+{% code pointsCount += len(rs[0].Values) %}
 {% code rs = rs[1:] %}
 {% for i := range rs %}
 ,{%= queryRangeLine(&rs[i]) %}
+{% code pointsCount += len(rs[i].Values) %}
 {% endfor %}
 {% endif %}
 ]
 }
+{% code
+qt.Printf("generate /api/v1/query_range response for series=%d, points=%d", seriesCount, pointsCount)
+qtDone()
+%}
+{%= dumpQueryTrace(qt) %}
 }
 {% endfunc %}

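
The template above now accumulates `seriesCount` and `pointsCount` in `{% code %}` blocks while streaming, then records both via `qt.Printf` before `qtDone()` closes the root span; this is where the `series=N, points=N` annotations in a `query_range` trace come from. The same logic in plain Go, as a sketch (`querytracer.New` is an assumed constructor; the real rendering is done by the generated code below):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

type result struct{ Values []float64 }

// streamQueryRangeResponse mirrors what the regenerated template does:
// count series and points while streaming, then record both in the trace
// before the root span is closed.
func streamQueryRangeResponse(rs []result, qt *querytracer.Tracer, qtDone func()) {
	seriesCount := len(rs)
	pointsCount := 0
	fmt.Print(`{"status":"success","data":{"resultType":"matrix","result":[`)
	for i := range rs {
		if i > 0 {
			fmt.Print(",")
		}
		fmt.Printf(`{"values":%v}`, rs[i].Values) // simplified line rendering
		pointsCount += len(rs[i].Values)
	}
	fmt.Print(`]}`)
	qt.Printf("generate /api/v1/query_range response for series=%d, points=%d", seriesCount, pointsCount)
	qtDone()
	// the generated code emits the trace via streamdumpQueryTrace before "}"
	fmt.Println(`}`)
}

func main() {
	qt := querytracer.New(true) // assumption: enabled-flag constructor
	qtDone := func() { qt.Donef("/api/v1/query_range") }
	streamQueryRangeResponse([]result{{Values: []float64{1, 2, 3}}}, qt, qtDone)
}
```
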
app/vmselect/prometheus/query_range_response.qtpl.go
@@ -7,112 +7,133 @@ package prometheus
 //line app/vmselect/prometheus/query_range_response.qtpl:1
 import (
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 )

 // QueryRangeResponse generates response for /api/v1/query_range.See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries

-//line app/vmselect/prometheus/query_range_response.qtpl:8
+//line app/vmselect/prometheus/query_range_response.qtpl:9
 import (
 qtio422016 "io"

 qt422016 "github.com/valyala/quicktemplate"
 )

-//line app/vmselect/prometheus/query_range_response.qtpl:8
+//line app/vmselect/prometheus/query_range_response.qtpl:9
 var (
 _ = qtio422016.Copy
 _ = qt422016.AcquireByteBuffer
 )

-//line app/vmselect/prometheus/query_range_response.qtpl:8
+//line app/vmselect/prometheus/query_range_response.qtpl:9
-func StreamQueryRangeResponse(qw422016 *qt422016.Writer, rs []netstorage.Result) {
+func StreamQueryRangeResponse(qw422016 *qt422016.Writer, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) {
-//line app/vmselect/prometheus/query_range_response.qtpl:8
+//line app/vmselect/prometheus/query_range_response.qtpl:9
-qw422016.N().S(`{"status":"success","data":{"resultType":"matrix","result":[`)
+qw422016.N().S(`{`)
+//line app/vmselect/prometheus/query_range_response.qtpl:12
+seriesCount := len(rs)
+pointsCount := 0
+
 //line app/vmselect/prometheus/query_range_response.qtpl:14
+qw422016.N().S(`"status":"success","data":{"resultType":"matrix","result":[`)
+//line app/vmselect/prometheus/query_range_response.qtpl:19
 if len(rs) > 0 {
-//line app/vmselect/prometheus/query_range_response.qtpl:15
+//line app/vmselect/prometheus/query_range_response.qtpl:20
 streamqueryRangeLine(qw422016, &rs[0])
-//line app/vmselect/prometheus/query_range_response.qtpl:16
+//line app/vmselect/prometheus/query_range_response.qtpl:21
+pointsCount += len(rs[0].Values)
+
+//line app/vmselect/prometheus/query_range_response.qtpl:22
 rs = rs[1:]

-//line app/vmselect/prometheus/query_range_response.qtpl:17
+//line app/vmselect/prometheus/query_range_response.qtpl:23
 for i := range rs {
-//line app/vmselect/prometheus/query_range_response.qtpl:17
+//line app/vmselect/prometheus/query_range_response.qtpl:23
 qw422016.N().S(`,`)
-//line app/vmselect/prometheus/query_range_response.qtpl:18
+//line app/vmselect/prometheus/query_range_response.qtpl:24
 streamqueryRangeLine(qw422016, &rs[i])
-//line app/vmselect/prometheus/query_range_response.qtpl:19
+//line app/vmselect/prometheus/query_range_response.qtpl:25
+pointsCount += len(rs[i].Values)
+
+//line app/vmselect/prometheus/query_range_response.qtpl:26
 }
-//line app/vmselect/prometheus/query_range_response.qtpl:20
+//line app/vmselect/prometheus/query_range_response.qtpl:27
 }
-//line app/vmselect/prometheus/query_range_response.qtpl:20
+//line app/vmselect/prometheus/query_range_response.qtpl:27
-qw422016.N().S(`]}}`)
+qw422016.N().S(`]}`)
-//line app/vmselect/prometheus/query_range_response.qtpl:24
+//line app/vmselect/prometheus/query_range_response.qtpl:31
-}
+qt.Printf("generate /api/v1/query_range response for series=%d, points=%d", seriesCount, pointsCount)
+qtDone()
+
-//line app/vmselect/prometheus/query_range_response.qtpl:24
+//line app/vmselect/prometheus/query_range_response.qtpl:34
-func WriteQueryRangeResponse(qq422016 qtio422016.Writer, rs []netstorage.Result) {
+streamdumpQueryTrace(qw422016, qt)
-//line app/vmselect/prometheus/query_range_response.qtpl:24
+//line app/vmselect/prometheus/query_range_response.qtpl:34
-qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/query_range_response.qtpl:24
-StreamQueryRangeResponse(qw422016, rs)
-//line app/vmselect/prometheus/query_range_response.qtpl:24
-qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/query_range_response.qtpl:24
-}
-
-//line app/vmselect/prometheus/query_range_response.qtpl:24
-func QueryRangeResponse(rs []netstorage.Result) string {
-//line app/vmselect/prometheus/query_range_response.qtpl:24
-qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/query_range_response.qtpl:24
-WriteQueryRangeResponse(qb422016, rs)
-//line app/vmselect/prometheus/query_range_response.qtpl:24
-qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/query_range_response.qtpl:24
-qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/query_range_response.qtpl:24
-return qs422016
-//line app/vmselect/prometheus/query_range_response.qtpl:24
-}
-
-//line app/vmselect/prometheus/query_range_response.qtpl:26
-func streamqueryRangeLine(qw422016 *qt422016.Writer, r *netstorage.Result) {
-//line app/vmselect/prometheus/query_range_response.qtpl:26
-qw422016.N().S(`{"metric":`)
-//line app/vmselect/prometheus/query_range_response.qtpl:28
-streammetricNameObject(qw422016, &r.MetricName)
-//line app/vmselect/prometheus/query_range_response.qtpl:28
-qw422016.N().S(`,"values":`)
-//line app/vmselect/prometheus/query_range_response.qtpl:29
-streamvaluesWithTimestamps(qw422016, r.Values, r.Timestamps)
-//line app/vmselect/prometheus/query_range_response.qtpl:29
 qw422016.N().S(`}`)
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
 }

-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
-func writequeryRangeLine(qq422016 qtio422016.Writer, r *netstorage.Result) {
+func WriteQueryRangeResponse(qq422016 qtio422016.Writer, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) {
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
 qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
-streamqueryRangeLine(qw422016, r)
+StreamQueryRangeResponse(qw422016, rs, qt, qtDone)
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
 qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
 }

-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
-func queryRangeLine(r *netstorage.Result) string {
+func QueryRangeResponse(rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) string {
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
 qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
-writequeryRangeLine(qb422016, r)
+WriteQueryRangeResponse(qb422016, rs, qt, qtDone)
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
 qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
 qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
 return qs422016
-//line app/vmselect/prometheus/query_range_response.qtpl:31
+//line app/vmselect/prometheus/query_range_response.qtpl:36
+}
+
+//line app/vmselect/prometheus/query_range_response.qtpl:38
+func streamqueryRangeLine(qw422016 *qt422016.Writer, r *netstorage.Result) {
+//line app/vmselect/prometheus/query_range_response.qtpl:38
+qw422016.N().S(`{"metric":`)
+//line app/vmselect/prometheus/query_range_response.qtpl:40
+streammetricNameObject(qw422016, &r.MetricName)
+//line app/vmselect/prometheus/query_range_response.qtpl:40
+qw422016.N().S(`,"values":`)
+//line app/vmselect/prometheus/query_range_response.qtpl:41
+streamvaluesWithTimestamps(qw422016, r.Values, r.Timestamps)
+//line app/vmselect/prometheus/query_range_response.qtpl:41
+qw422016.N().S(`}`)
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+}
+
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+func writequeryRangeLine(qq422016 qtio422016.Writer, r *netstorage.Result) {
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+qw422016 := qt422016.AcquireWriter(qq422016)
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+streamqueryRangeLine(qw422016, r)
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+qt422016.ReleaseWriter(qw422016)
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+}
+
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+func queryRangeLine(r *netstorage.Result) string {
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+qb422016 := qt422016.AcquireByteBuffer()
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+writequeryRangeLine(qb422016, r)
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+qs422016 := string(qb422016.B)
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+qt422016.ReleaseByteBuffer(qb422016)
+//line app/vmselect/prometheus/query_range_response.qtpl:43
+return qs422016
+//line app/vmselect/prometheus/query_range_response.qtpl:43
 }

app/vmselect/prometheus/query_response.qtpl
@@ -1,12 +1,14 @@
 {% import (
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 ) %}

 {% stripspace %}
 QueryResponse generates response for /api/v1/query.
 See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
-{% func QueryResponse(rs []netstorage.Result) %}
+{% func QueryResponse(rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) %}
 {
+{% code seriesCount := len(rs) %}
 "status":"success",
 "data":{
 "resultType":"vector",
@@ -27,6 +29,11 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
 {% endif %}
 ]
 }
+{% code
+qt.Printf("generate /api/v1/query response for series=%d", seriesCount)
+qtDone()
+%}
+{%= dumpQueryTrace(qt) %}
 }
 {% endfunc %}
 {% endstripspace %}
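
A detail worth noting in both generated writers: the closing literal changes from `]}}` to `]}` so the top-level JSON object stays open, letting `dumpQueryTrace` append a sibling `trace` field after `data` when tracing is enabled. A small sketch of the two resulting shapes (the exact output format of `dumpQueryTrace` is assumed here):

```go
package main

import "fmt"

// render shows the JSON-shape change behind the `]}}` -> `]}` edits:
// the top-level object now stays open so a sibling "trace" field can be
// appended after "data" when tracing is enabled.
func render(trace string) string {
	s := `{"status":"success","data":{"resultType":"vector","result":[]}`
	if trace != "" {
		s += `,"trace":` + trace // assumed shape of the dumpQueryTrace output
	}
	return s + `}`
}

func main() {
	fmt.Println(render(""))                                       // tracing off
	fmt.Println(render(`{"duration_msec":0.1,"message":"eval"}`)) // tracing on
}
```
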
app/vmselect/prometheus/query_response.qtpl.go
@@ -7,88 +7,102 @@ package prometheus
 //line app/vmselect/prometheus/query_response.qtpl:1
 import (
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 )

 // QueryResponse generates response for /api/v1/query.See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries

-//line app/vmselect/prometheus/query_response.qtpl:8
+//line app/vmselect/prometheus/query_response.qtpl:9
 import (
 qtio422016 "io"

 qt422016 "github.com/valyala/quicktemplate"
 )

-//line app/vmselect/prometheus/query_response.qtpl:8
+//line app/vmselect/prometheus/query_response.qtpl:9
 var (
 _ = qtio422016.Copy
 _ = qt422016.AcquireByteBuffer
 )

-//line app/vmselect/prometheus/query_response.qtpl:8
+//line app/vmselect/prometheus/query_response.qtpl:9
-func StreamQueryResponse(qw422016 *qt422016.Writer, rs []netstorage.Result) {
+func StreamQueryResponse(qw422016 *qt422016.Writer, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) {
-//line app/vmselect/prometheus/query_response.qtpl:8
+//line app/vmselect/prometheus/query_response.qtpl:9
-qw422016.N().S(`{"status":"success","data":{"resultType":"vector","result":[`)
+qw422016.N().S(`{`)
-//line app/vmselect/prometheus/query_response.qtpl:14
+//line app/vmselect/prometheus/query_response.qtpl:11
+seriesCount := len(rs)
+
+//line app/vmselect/prometheus/query_response.qtpl:11
+qw422016.N().S(`"status":"success","data":{"resultType":"vector","result":[`)
+//line app/vmselect/prometheus/query_response.qtpl:16
 if len(rs) > 0 {
-//line app/vmselect/prometheus/query_response.qtpl:14
+//line app/vmselect/prometheus/query_response.qtpl:16
 qw422016.N().S(`{"metric":`)
-//line app/vmselect/prometheus/query_response.qtpl:16
+//line app/vmselect/prometheus/query_response.qtpl:18
 streammetricNameObject(qw422016, &rs[0].MetricName)
-//line app/vmselect/prometheus/query_response.qtpl:16
+//line app/vmselect/prometheus/query_response.qtpl:18
 qw422016.N().S(`,"value":`)
-//line app/vmselect/prometheus/query_response.qtpl:17
-streammetricRow(qw422016, rs[0].Timestamps[0], rs[0].Values[0])
-//line app/vmselect/prometheus/query_response.qtpl:17
-qw422016.N().S(`}`)
 //line app/vmselect/prometheus/query_response.qtpl:19
+streammetricRow(qw422016, rs[0].Timestamps[0], rs[0].Values[0])
+//line app/vmselect/prometheus/query_response.qtpl:19
+qw422016.N().S(`}`)
+//line app/vmselect/prometheus/query_response.qtpl:21
 rs = rs[1:]

-//line app/vmselect/prometheus/query_response.qtpl:20
+//line app/vmselect/prometheus/query_response.qtpl:22
 for i := range rs {
-//line app/vmselect/prometheus/query_response.qtpl:21
+//line app/vmselect/prometheus/query_response.qtpl:23
 r := &rs[i]

-//line app/vmselect/prometheus/query_response.qtpl:21
+//line app/vmselect/prometheus/query_response.qtpl:23
 qw422016.N().S(`,{"metric":`)
-//line app/vmselect/prometheus/query_response.qtpl:23
+//line app/vmselect/prometheus/query_response.qtpl:25
 streammetricNameObject(qw422016, &r.MetricName)
-//line app/vmselect/prometheus/query_response.qtpl:23
+//line app/vmselect/prometheus/query_response.qtpl:25
 qw422016.N().S(`,"value":`)
-//line app/vmselect/prometheus/query_response.qtpl:24
-streammetricRow(qw422016, r.Timestamps[0], r.Values[0])
-//line app/vmselect/prometheus/query_response.qtpl:24
-qw422016.N().S(`}`)
 //line app/vmselect/prometheus/query_response.qtpl:26
+streammetricRow(qw422016, r.Timestamps[0], r.Values[0])
+//line app/vmselect/prometheus/query_response.qtpl:26
+qw422016.N().S(`}`)
+//line app/vmselect/prometheus/query_response.qtpl:28
 }
-//line app/vmselect/prometheus/query_response.qtpl:27
+//line app/vmselect/prometheus/query_response.qtpl:29
 }
-//line app/vmselect/prometheus/query_response.qtpl:27
+//line app/vmselect/prometheus/query_response.qtpl:29
-qw422016.N().S(`]}}`)
+qw422016.N().S(`]}`)
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:33
+qt.Printf("generate /api/v1/query response for series=%d", seriesCount)
+qtDone()
+
+//line app/vmselect/prometheus/query_response.qtpl:36
+streamdumpQueryTrace(qw422016, qt)
+//line app/vmselect/prometheus/query_response.qtpl:36
+qw422016.N().S(`}`)
+//line app/vmselect/prometheus/query_response.qtpl:38
 }

-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
-func WriteQueryResponse(qq422016 qtio422016.Writer, rs []netstorage.Result) {
+func WriteQueryResponse(qq422016 qtio422016.Writer, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) {
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
 qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
-StreamQueryResponse(qw422016, rs)
+StreamQueryResponse(qw422016, rs, qt, qtDone)
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
 qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
 }

-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
-func QueryResponse(rs []netstorage.Result) string {
+func QueryResponse(rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) string {
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
 qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
-WriteQueryResponse(qb422016, rs)
+WriteQueryResponse(qb422016, rs, qt, qtDone)
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
 qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
 qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
 return qs422016
-//line app/vmselect/prometheus/query_response.qtpl:31
+//line app/vmselect/prometheus/query_response.qtpl:38
 }

app/vmselect/prometheus/series_response.qtpl
@@ -1,24 +1,37 @@
 {% import (
 "github.com/valyala/quicktemplate"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 ) %}

 {% stripspace %}
 SeriesResponse generates response for /api/v1/series.
 See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
-{% func SeriesResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
+{% func SeriesResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) %}
 {
+{% code seriesCount := 0 %}
 "status":"success",
 "data":[
 {% code bb, ok := <-resultsCh %}
 {% if ok %}
 {%z= bb.B %}
-{% code quicktemplate.ReleaseByteBuffer(bb) %}
+{% code
+quicktemplate.ReleaseByteBuffer(bb)
+seriesCount++
+%}
 {% for bb := range resultsCh %}
 ,{%z= bb.B %}
-{% code quicktemplate.ReleaseByteBuffer(bb) %}
+{% code
+quicktemplate.ReleaseByteBuffer(bb)
+seriesCount++
+%}
 {% endfor %}
 {% endif %}
 ]
+{% code
+qt.Printf("generate response: series=%d", seriesCount)
+qtDone()
+%}
+{%= dumpQueryTrace(qt) %}
 }
 {% endfunc %}
 {% endstripspace %}
|
|
|
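This template change is the whole of the tracing contract on the response side: the template counts every series it streams, reports the count through `qt.Printf`, invokes the `qtDone()` callback so the caller can close its span once response generation is finished, and finally lets `dumpQueryTrace` append the trace object. A minimal sketch of how a caller might drive the generated writer; the `querytracer.New` constructor and the handler wiring are assumptions for illustration, while `WriteSeriesResponse`, `Donef` and the `qtDone` contract come from this diff:

```go
package prometheus

import (
	"io"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
	"github.com/valyala/quicktemplate"
)

// serveSeriesSketch is a hypothetical caller of the generated writer.
func serveSeriesSketch(w io.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
	qt := querytracer.New(true) // assumption: constructor name and signature
	qtDone := func() {
		// Invoked by the template after the last series has been written,
		// so the span covers response generation, not just series lookup.
		qt.Donef("/api/v1/series")
	}
	WriteSeriesResponse(w, resultsCh, qt, qtDone)
}
```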
`app/vmselect/prometheus/series_response.qtpl.go` (generated):

```diff
@@ -6,78 +6,94 @@ package prometheus
 
 //line app/vmselect/prometheus/series_response.qtpl:1
 import (
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
     "github.com/valyala/quicktemplate"
 )
 
 // SeriesResponse generates response for /api/v1/series.See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
 
-//line app/vmselect/prometheus/series_response.qtpl:8
+//line app/vmselect/prometheus/series_response.qtpl:9
 import (
     qtio422016 "io"
 
     qt422016 "github.com/valyala/quicktemplate"
 )
 
-//line app/vmselect/prometheus/series_response.qtpl:8
+//line app/vmselect/prometheus/series_response.qtpl:9
 var (
     _ = qtio422016.Copy
     _ = qt422016.AcquireByteBuffer
 )
 
-//line app/vmselect/prometheus/series_response.qtpl:8
-func StreamSeriesResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
+//line app/vmselect/prometheus/series_response.qtpl:9
+func StreamSeriesResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) {
-//line app/vmselect/prometheus/series_response.qtpl:8
-    qw422016.N().S(`{"status":"success","data":[`)
-//line app/vmselect/prometheus/series_response.qtpl:12
+//line app/vmselect/prometheus/series_response.qtpl:9
+    qw422016.N().S(`{`)
+//line app/vmselect/prometheus/series_response.qtpl:11
+    seriesCount := 0
+
+//line app/vmselect/prometheus/series_response.qtpl:11
+    qw422016.N().S(`"status":"success","data":[`)
+//line app/vmselect/prometheus/series_response.qtpl:14
     bb, ok := <-resultsCh
 
-//line app/vmselect/prometheus/series_response.qtpl:13
-    if ok {
-//line app/vmselect/prometheus/series_response.qtpl:14
-    qw422016.N().Z(bb.B)
 //line app/vmselect/prometheus/series_response.qtpl:15
-    quicktemplate.ReleaseByteBuffer(bb)
+    if ok {
 
 //line app/vmselect/prometheus/series_response.qtpl:16
-    for bb := range resultsCh {
-//line app/vmselect/prometheus/series_response.qtpl:16
-    qw422016.N().S(`,`)
-//line app/vmselect/prometheus/series_response.qtpl:17
-    qw422016.N().Z(bb.B)
+    qw422016.N().Z(bb.B)
 //line app/vmselect/prometheus/series_response.qtpl:18
+    quicktemplate.ReleaseByteBuffer(bb)
+    seriesCount++
+
+//line app/vmselect/prometheus/series_response.qtpl:21
+    for bb := range resultsCh {
+//line app/vmselect/prometheus/series_response.qtpl:21
+    qw422016.N().S(`,`)
+//line app/vmselect/prometheus/series_response.qtpl:22
+    qw422016.N().Z(bb.B)
+//line app/vmselect/prometheus/series_response.qtpl:24
     quicktemplate.ReleaseByteBuffer(bb)
+    seriesCount++
+
-//line app/vmselect/prometheus/series_response.qtpl:19
+//line app/vmselect/prometheus/series_response.qtpl:27
     }
-//line app/vmselect/prometheus/series_response.qtpl:20
+//line app/vmselect/prometheus/series_response.qtpl:28
     }
-//line app/vmselect/prometheus/series_response.qtpl:20
-    qw422016.N().S(`]}`)
-//line app/vmselect/prometheus/series_response.qtpl:23
+//line app/vmselect/prometheus/series_response.qtpl:28
+    qw422016.N().S(`]`)
+//line app/vmselect/prometheus/series_response.qtpl:31
+    qt.Printf("generate response: series=%d", seriesCount)
+    qtDone()
+
+//line app/vmselect/prometheus/series_response.qtpl:34
+    streamdumpQueryTrace(qw422016, qt)
+//line app/vmselect/prometheus/series_response.qtpl:34
+    qw422016.N().S(`}`)
+//line app/vmselect/prometheus/series_response.qtpl:36
 }
 
-//line app/vmselect/prometheus/series_response.qtpl:23
-func WriteSeriesResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
+//line app/vmselect/prometheus/series_response.qtpl:36
+func WriteSeriesResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) {
-//line app/vmselect/prometheus/series_response.qtpl:23
+//line app/vmselect/prometheus/series_response.qtpl:36
     qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/series_response.qtpl:23
-    StreamSeriesResponse(qw422016, resultsCh)
+//line app/vmselect/prometheus/series_response.qtpl:36
+    StreamSeriesResponse(qw422016, resultsCh, qt, qtDone)
-//line app/vmselect/prometheus/series_response.qtpl:23
+//line app/vmselect/prometheus/series_response.qtpl:36
     qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/series_response.qtpl:23
+//line app/vmselect/prometheus/series_response.qtpl:36
 }
 
-//line app/vmselect/prometheus/series_response.qtpl:23
-func SeriesResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
+//line app/vmselect/prometheus/series_response.qtpl:36
+func SeriesResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) string {
-//line app/vmselect/prometheus/series_response.qtpl:23
+//line app/vmselect/prometheus/series_response.qtpl:36
     qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/series_response.qtpl:23
-    WriteSeriesResponse(qb422016, resultsCh)
+//line app/vmselect/prometheus/series_response.qtpl:36
+    WriteSeriesResponse(qb422016, resultsCh, qt, qtDone)
-//line app/vmselect/prometheus/series_response.qtpl:23
+//line app/vmselect/prometheus/series_response.qtpl:36
     qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/series_response.qtpl:23
+//line app/vmselect/prometheus/series_response.qtpl:36
     qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/series_response.qtpl:23
+//line app/vmselect/prometheus/series_response.qtpl:36
     return qs422016
-//line app/vmselect/prometheus/series_response.qtpl:23
+//line app/vmselect/prometheus/series_response.qtpl:36
 }
```
`app/vmselect/prometheus/util.qtpl`:

```diff
@@ -1,4 +1,5 @@
 {% import (
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 ) %}
 
@@ -45,4 +46,9 @@
     ]
 {% endfunc %}
 
+{% func dumpQueryTrace(qt *querytracer.Tracer) %}
+    {% code traceJSON := qt.ToJSON() %}
+    {% if traceJSON != "" %},"trace":{%s= traceJSON %}{% endif %}
+{% endfunc %}
+
 {% endstripspace %}
```
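`dumpQueryTrace` is the single point where a trace reaches the client: `qt.ToJSON()` returns an empty string when tracing is disabled, so the template emits nothing at all in the common case, and otherwise produces a `,"trace":...` fragment. Because every response template calls it immediately before its closing brace, the trace object becomes a sibling of `status` and `data`. A sketch of the resulting shape, with payloads abbreviated (the `message` text is taken from this diff's `qt.Printf` call; the full trace schema with durations and nested children is defined by `lib/querytracer` and not shown here):

```json
{
  "status": "success",
  "data": ["..."],
  "trace": {"message": "generate response: series=2"}
}
```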
`app/vmselect/prometheus/util.qtpl.go` (generated):

```diff
@@ -6,212 +6,255 @@ package prometheus
 
 //line app/vmselect/prometheus/util.qtpl:1
 import (
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 )
 
-//line app/vmselect/prometheus/util.qtpl:7
+//line app/vmselect/prometheus/util.qtpl:8
 import (
     qtio422016 "io"
 
     qt422016 "github.com/valyala/quicktemplate"
 )
 
-//line app/vmselect/prometheus/util.qtpl:7
+//line app/vmselect/prometheus/util.qtpl:8
 var (
     _ = qtio422016.Copy
     _ = qt422016.AcquireByteBuffer
 )
 
-//line app/vmselect/prometheus/util.qtpl:7
+//line app/vmselect/prometheus/util.qtpl:8
 func streammetricNameObject(qw422016 *qt422016.Writer, mn *storage.MetricName) {
-//line app/vmselect/prometheus/util.qtpl:7
+//line app/vmselect/prometheus/util.qtpl:8
     qw422016.N().S(`{`)
-//line app/vmselect/prometheus/util.qtpl:9
+//line app/vmselect/prometheus/util.qtpl:10
     if len(mn.MetricGroup) > 0 {
-//line app/vmselect/prometheus/util.qtpl:9
+//line app/vmselect/prometheus/util.qtpl:10
     qw422016.N().S(`"__name__":`)
-//line app/vmselect/prometheus/util.qtpl:10
-    qw422016.N().QZ(mn.MetricGroup)
-//line app/vmselect/prometheus/util.qtpl:10
-    if len(mn.Tags) > 0 {
-//line app/vmselect/prometheus/util.qtpl:10
-    qw422016.N().S(`,`)
-//line app/vmselect/prometheus/util.qtpl:10
-    }
 //line app/vmselect/prometheus/util.qtpl:11
-    }
+    qw422016.N().QZ(mn.MetricGroup)
+//line app/vmselect/prometheus/util.qtpl:11
+    if len(mn.Tags) > 0 {
+//line app/vmselect/prometheus/util.qtpl:11
+    qw422016.N().S(`,`)
+//line app/vmselect/prometheus/util.qtpl:11
+    }
 //line app/vmselect/prometheus/util.qtpl:12
-    for j := range mn.Tags {
+    }
 //line app/vmselect/prometheus/util.qtpl:13
+    for j := range mn.Tags {
+//line app/vmselect/prometheus/util.qtpl:14
     tag := &mn.Tags[j]
 
-//line app/vmselect/prometheus/util.qtpl:14
+//line app/vmselect/prometheus/util.qtpl:15
     qw422016.N().QZ(tag.Key)
-//line app/vmselect/prometheus/util.qtpl:14
+//line app/vmselect/prometheus/util.qtpl:15
     qw422016.N().S(`:`)
-//line app/vmselect/prometheus/util.qtpl:14
+//line app/vmselect/prometheus/util.qtpl:15
     qw422016.N().QZ(tag.Value)
-//line app/vmselect/prometheus/util.qtpl:14
+//line app/vmselect/prometheus/util.qtpl:15
     if j+1 < len(mn.Tags) {
-//line app/vmselect/prometheus/util.qtpl:14
+//line app/vmselect/prometheus/util.qtpl:15
     qw422016.N().S(`,`)
-//line app/vmselect/prometheus/util.qtpl:14
+//line app/vmselect/prometheus/util.qtpl:15
     }
-//line app/vmselect/prometheus/util.qtpl:15
+//line app/vmselect/prometheus/util.qtpl:16
     }
-//line app/vmselect/prometheus/util.qtpl:15
+//line app/vmselect/prometheus/util.qtpl:16
     qw422016.N().S(`}`)
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
 }
 
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
 func writemetricNameObject(qq422016 qtio422016.Writer, mn *storage.MetricName) {
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
     qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
     streammetricNameObject(qw422016, mn)
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
     qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
 }
 
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
 func metricNameObject(mn *storage.MetricName) string {
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
     qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
     writemetricNameObject(qb422016, mn)
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
     qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
     qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
     return qs422016
-//line app/vmselect/prometheus/util.qtpl:17
+//line app/vmselect/prometheus/util.qtpl:18
 }
 
-//line app/vmselect/prometheus/util.qtpl:19
+//line app/vmselect/prometheus/util.qtpl:20
 func streammetricRow(qw422016 *qt422016.Writer, timestamp int64, value float64) {
-//line app/vmselect/prometheus/util.qtpl:19
-    qw422016.N().S(`[`)
 //line app/vmselect/prometheus/util.qtpl:20
+    qw422016.N().S(`[`)
+//line app/vmselect/prometheus/util.qtpl:21
     qw422016.N().F(float64(timestamp) / 1e3)
-//line app/vmselect/prometheus/util.qtpl:20
+//line app/vmselect/prometheus/util.qtpl:21
     qw422016.N().S(`,"`)
-//line app/vmselect/prometheus/util.qtpl:20
+//line app/vmselect/prometheus/util.qtpl:21
     qw422016.N().F(value)
-//line app/vmselect/prometheus/util.qtpl:20
+//line app/vmselect/prometheus/util.qtpl:21
     qw422016.N().S(`"]`)
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
 }
 
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
 func writemetricRow(qq422016 qtio422016.Writer, timestamp int64, value float64) {
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
     qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
     streammetricRow(qw422016, timestamp, value)
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
     qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
 }
 
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
 func metricRow(timestamp int64, value float64) string {
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
     qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
     writemetricRow(qb422016, timestamp, value)
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
     qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
     qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
     return qs422016
-//line app/vmselect/prometheus/util.qtpl:21
+//line app/vmselect/prometheus/util.qtpl:22
 }
 
-//line app/vmselect/prometheus/util.qtpl:23
+//line app/vmselect/prometheus/util.qtpl:24
 func streamvaluesWithTimestamps(qw422016 *qt422016.Writer, values []float64, timestamps []int64) {
-//line app/vmselect/prometheus/util.qtpl:24
+//line app/vmselect/prometheus/util.qtpl:25
     if len(values) == 0 {
-//line app/vmselect/prometheus/util.qtpl:24
+//line app/vmselect/prometheus/util.qtpl:25
     qw422016.N().S(`[]`)
-//line app/vmselect/prometheus/util.qtpl:26
+//line app/vmselect/prometheus/util.qtpl:27
     return
-//line app/vmselect/prometheus/util.qtpl:27
+//line app/vmselect/prometheus/util.qtpl:28
     }
-//line app/vmselect/prometheus/util.qtpl:27
+//line app/vmselect/prometheus/util.qtpl:28
     qw422016.N().S(`[`)
-//line app/vmselect/prometheus/util.qtpl:29
+//line app/vmselect/prometheus/util.qtpl:30
     /* inline metricRow call here for the sake of performance optimization */
 
-//line app/vmselect/prometheus/util.qtpl:29
+//line app/vmselect/prometheus/util.qtpl:30
     qw422016.N().S(`[`)
-//line app/vmselect/prometheus/util.qtpl:30
+//line app/vmselect/prometheus/util.qtpl:31
     qw422016.N().F(float64(timestamps[0]) / 1e3)
-//line app/vmselect/prometheus/util.qtpl:30
+//line app/vmselect/prometheus/util.qtpl:31
     qw422016.N().S(`,"`)
-//line app/vmselect/prometheus/util.qtpl:30
+//line app/vmselect/prometheus/util.qtpl:31
     qw422016.N().F(values[0])
-//line app/vmselect/prometheus/util.qtpl:30
+//line app/vmselect/prometheus/util.qtpl:31
     qw422016.N().S(`"]`)
-//line app/vmselect/prometheus/util.qtpl:32
+//line app/vmselect/prometheus/util.qtpl:33
     timestamps = timestamps[1:]
     values = values[1:]
 
-//line app/vmselect/prometheus/util.qtpl:35
+//line app/vmselect/prometheus/util.qtpl:36
     if len(values) > 0 {
-//line app/vmselect/prometheus/util.qtpl:37
+//line app/vmselect/prometheus/util.qtpl:38
     // Remove bounds check inside the loop below
     _ = timestamps[len(values)-1]
 
-//line app/vmselect/prometheus/util.qtpl:40
-    for i, v := range values {
 //line app/vmselect/prometheus/util.qtpl:41
+    for i, v := range values {
+//line app/vmselect/prometheus/util.qtpl:42
     /* inline metricRow call here for the sake of performance optimization */
 
-//line app/vmselect/prometheus/util.qtpl:41
+//line app/vmselect/prometheus/util.qtpl:42
     qw422016.N().S(`,[`)
-//line app/vmselect/prometheus/util.qtpl:42
-    qw422016.N().F(float64(timestamps[i]) / 1e3)
-//line app/vmselect/prometheus/util.qtpl:42
-    qw422016.N().S(`,"`)
-//line app/vmselect/prometheus/util.qtpl:42
-    qw422016.N().F(v)
-//line app/vmselect/prometheus/util.qtpl:42
-    qw422016.N().S(`"]`)
 //line app/vmselect/prometheus/util.qtpl:43
+    qw422016.N().F(float64(timestamps[i]) / 1e3)
+//line app/vmselect/prometheus/util.qtpl:43
+    qw422016.N().S(`,"`)
+//line app/vmselect/prometheus/util.qtpl:43
+    qw422016.N().F(v)
+//line app/vmselect/prometheus/util.qtpl:43
+    qw422016.N().S(`"]`)
+//line app/vmselect/prometheus/util.qtpl:44
     }
-//line app/vmselect/prometheus/util.qtpl:44
+//line app/vmselect/prometheus/util.qtpl:45
     }
-//line app/vmselect/prometheus/util.qtpl:44
+//line app/vmselect/prometheus/util.qtpl:45
     qw422016.N().S(`]`)
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
 }
 
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
 func writevaluesWithTimestamps(qq422016 qtio422016.Writer, values []float64, timestamps []int64) {
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
     qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
     streamvaluesWithTimestamps(qw422016, values, timestamps)
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
     qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
 }
 
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
 func valuesWithTimestamps(values []float64, timestamps []int64) string {
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
     qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
     writevaluesWithTimestamps(qb422016, values, timestamps)
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
     qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
     qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
     return qs422016
-//line app/vmselect/prometheus/util.qtpl:46
+//line app/vmselect/prometheus/util.qtpl:47
+}
+
+//line app/vmselect/prometheus/util.qtpl:49
+func streamdumpQueryTrace(qw422016 *qt422016.Writer, qt *querytracer.Tracer) {
+//line app/vmselect/prometheus/util.qtpl:50
+    traceJSON := qt.ToJSON()
+
+//line app/vmselect/prometheus/util.qtpl:51
+    if traceJSON != "" {
+//line app/vmselect/prometheus/util.qtpl:51
+    qw422016.N().S(`,"trace":`)
+//line app/vmselect/prometheus/util.qtpl:51
+    qw422016.N().S(traceJSON)
+//line app/vmselect/prometheus/util.qtpl:51
+    }
+//line app/vmselect/prometheus/util.qtpl:52
+}
+
+//line app/vmselect/prometheus/util.qtpl:52
+func writedumpQueryTrace(qq422016 qtio422016.Writer, qt *querytracer.Tracer) {
+//line app/vmselect/prometheus/util.qtpl:52
+    qw422016 := qt422016.AcquireWriter(qq422016)
+//line app/vmselect/prometheus/util.qtpl:52
+    streamdumpQueryTrace(qw422016, qt)
+//line app/vmselect/prometheus/util.qtpl:52
+    qt422016.ReleaseWriter(qw422016)
+//line app/vmselect/prometheus/util.qtpl:52
+}
+
+//line app/vmselect/prometheus/util.qtpl:52
+func dumpQueryTrace(qt *querytracer.Tracer) string {
+//line app/vmselect/prometheus/util.qtpl:52
+    qb422016 := qt422016.AcquireByteBuffer()
+//line app/vmselect/prometheus/util.qtpl:52
+    writedumpQueryTrace(qb422016, qt)
+//line app/vmselect/prometheus/util.qtpl:52
+    qs422016 := string(qb422016.B)
+//line app/vmselect/prometheus/util.qtpl:52
+    qt422016.ReleaseByteBuffer(qb422016)
+//line app/vmselect/prometheus/util.qtpl:52
+    return qs422016
+//line app/vmselect/prometheus/util.qtpl:52
 }
```
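Both `*.qtpl.go` files above are machine-generated, which explains the bulk of this diff: every hand edit to a `.qtpl` template shifts the `//line` directives that map generated Go back to template line numbers, so dozens of comment-only lines change alongside the real edits. A plausible way to regenerate them, assuming quicktemplate's `qtc` compiler (command and flag per quicktemplate's documentation, not part of this commit):

```bash
go install github.com/valyala/quicktemplate/qtc@latest
qtc -dir=app/vmselect/prometheus
```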
`app/vmselect/promql/eval.go`:

```diff
@@ -16,6 +16,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 	"github.com/VictoriaMetrics/metrics"
 	"github.com/VictoriaMetrics/metricsql"
```
```diff
@@ -102,6 +103,7 @@ type EvalConfig struct {
 
 	Deadline searchutils.Deadline
 
+	// Whether the response can be cached.
 	MayCache bool
 
 	// LookbackDelta is analog to `-query.lookback-delta` from Prometheus.
```
```diff
@@ -190,19 +192,40 @@ func getTimestamps(start, end, step int64) []int64 {
 	return timestamps
 }
 
-func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
+func evalExpr(qt *querytracer.Tracer, ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
+	qt = qt.NewChild()
+	rv, err := evalExprInternal(qt, ec, e)
+	if err != nil {
+		return nil, err
+	}
+	if qt.Enabled() {
+		query := e.AppendString(nil)
+		seriesCount := len(rv)
+		pointsPerSeries := 0
+		if len(rv) > 0 {
+			pointsPerSeries = len(rv[0].Timestamps)
+		}
+		pointsCount := seriesCount * pointsPerSeries
+		mayCache := ec.mayCache()
+		qt.Donef("eval: query=%s, timeRange=[%d..%d], step=%d, mayCache=%v: series=%d, points=%d, pointsPerSeries=%d",
+			query, ec.Start, ec.End, ec.Step, mayCache, seriesCount, pointsCount, pointsPerSeries)
+	}
+	return rv, nil
+}
+
+func evalExprInternal(qt *querytracer.Tracer, ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
 	if me, ok := e.(*metricsql.MetricExpr); ok {
 		re := &metricsql.RollupExpr{
 			Expr: me,
 		}
-		rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
+		rv, err := evalRollupFunc(qt, ec, "default_rollup", rollupDefault, e, re, nil)
 		if err != nil {
 			return nil, fmt.Errorf(`cannot evaluate %q: %w`, me.AppendString(nil), err)
 		}
 		return rv, nil
 	}
 	if re, ok := e.(*metricsql.RollupExpr); ok {
-		rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
+		rv, err := evalRollupFunc(qt, ec, "default_rollup", rollupDefault, e, re, nil)
 		if err != nil {
 			return nil, fmt.Errorf(`cannot evaluate %q: %w`, re.AppendString(nil), err)
 		}
```
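The hunk above establishes the pattern used throughout the rest of this file: `evalExpr` becomes a thin wrapper that opens a child span, delegates to `evalExprInternal`, and annotates the span with the serialized query plus series/points counters — but only inside `if qt.Enabled()`, so untraced queries skip both `e.AppendString(nil)` and the counting entirely. A self-contained sketch of the same wrap-and-annotate shape; the `Tracer` below is a stand-in exposing only the method names used by this diff (`NewChild`, `Enabled`, `Donef`), not the real `lib/querytracer` implementation:

```go
package main

import "fmt"

// Tracer is a stand-in with the method set the diff relies on.
type Tracer struct{ enabled bool }

// NewChild returns a sub-span tracer; it is nil-safe, mirroring how
// Exec(nil, ...) is expected to work in the updated tests of this commit.
func (t *Tracer) NewChild() *Tracer {
	if t == nil {
		return nil
	}
	return &Tracer{enabled: t.enabled}
}

// Enabled reports whether trace messages should be recorded.
func (t *Tracer) Enabled() bool { return t != nil && t.enabled }

// Donef closes the span with a formatted message.
func (t *Tracer) Donef(format string, args ...interface{}) {
	if t.Enabled() {
		fmt.Printf(format+"\n", args...)
	}
}

// evalTraced shows the wrap-and-annotate shape of the new evalExpr:
// open a child span, evaluate, and annotate only if tracing is on.
func evalTraced(qt *Tracer, query string, eval func(*Tracer) ([]float64, error)) ([]float64, error) {
	qt = qt.NewChild()
	rv, err := eval(qt)
	if err != nil {
		return nil, err
	}
	if qt.Enabled() {
		qt.Donef("eval: query=%s: series=%d", query, len(rv))
	}
	return rv, nil
}

func main() {
	rv, err := evalTraced(&Tracer{enabled: true}, "2*rand()", func(qt *Tracer) ([]float64, error) {
		return []float64{1, 2, 3}, nil
	})
	fmt.Println(len(rv), err)
}
```

Making every method safe on a nil receiver is what allows callers that do not need tracing to simply pass nil, as the test changes at the end of this commit do.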
`eval.go`, continued:

```diff
@@ -211,26 +234,12 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
 	if fe, ok := e.(*metricsql.FuncExpr); ok {
 		nrf := getRollupFunc(fe.Name)
 		if nrf == nil {
-			args, err := evalExprs(ec, fe.Args)
-			if err != nil {
-				return nil, err
-			}
-			tf := getTransformFunc(fe.Name)
-			if tf == nil {
-				return nil, fmt.Errorf(`unknown func %q`, fe.Name)
-			}
-			tfa := &transformFuncArg{
-				ec:   ec,
-				fe:   fe,
-				args: args,
-			}
-			rv, err := tf(tfa)
-			if err != nil {
-				return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
-			}
-			return rv, nil
+			qtChild := qt.NewChild()
+			rv, err := evalTransformFunc(qtChild, ec, fe)
+			qtChild.Donef("transform %s(): series=%d", fe.Name, len(rv))
+			return rv, err
 		}
-		args, re, err := evalRollupFuncArgs(ec, fe)
+		args, re, err := evalRollupFuncArgs(qt, ec, fe)
 		if err != nil {
 			return nil, err
 		}
@@ -238,79 +247,23 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
 		if err != nil {
 			return nil, err
 		}
-		rv, err := evalRollupFunc(ec, fe.Name, rf, e, re, nil)
+		rv, err := evalRollupFunc(qt, ec, fe.Name, rf, e, re, nil)
 		if err != nil {
 			return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
 		}
 		return rv, nil
 	}
 	if ae, ok := e.(*metricsql.AggrFuncExpr); ok {
-		if callbacks := getIncrementalAggrFuncCallbacks(ae.Name); callbacks != nil {
-			fe, nrf := tryGetArgRollupFuncWithMetricExpr(ae)
-			if fe != nil {
-				// There is an optimized path for calculating metricsql.AggrFuncExpr over rollupFunc over metricsql.MetricExpr.
-				// The optimized path saves RAM for aggregates over big number of time series.
-				args, re, err := evalRollupFuncArgs(ec, fe)
-				if err != nil {
-					return nil, err
-				}
-				rf, err := nrf(args)
-				if err != nil {
-					return nil, err
-				}
-				iafc := newIncrementalAggrFuncContext(ae, callbacks)
-				return evalRollupFunc(ec, fe.Name, rf, e, re, iafc)
-			}
-		}
-		args, err := evalExprs(ec, ae.Args)
-		if err != nil {
-			return nil, err
-		}
-		af := getAggrFunc(ae.Name)
-		if af == nil {
-			return nil, fmt.Errorf(`unknown func %q`, ae.Name)
-		}
-		afa := &aggrFuncArg{
-			ae:   ae,
-			args: args,
-			ec:   ec,
-		}
-		rv, err := af(afa)
-		if err != nil {
-			return nil, fmt.Errorf(`cannot evaluate %q: %w`, ae.AppendString(nil), err)
-		}
-		return rv, nil
+		qtChild := qt.NewChild()
+		rv, err := evalAggrFunc(qtChild, ec, ae)
+		qtChild.Donef("aggregate %s(): series=%d", ae.Name, len(rv))
+		return rv, err
 	}
 	if be, ok := e.(*metricsql.BinaryOpExpr); ok {
-		bf := getBinaryOpFunc(be.Op)
-		if bf == nil {
-			return nil, fmt.Errorf(`unknown binary op %q`, be.Op)
-		}
-		var err error
-		var tssLeft, tssRight []*timeseries
-		switch strings.ToLower(be.Op) {
-		case "and", "if":
-			// Fetch right-side series at first, since it usually contains
-			// lower number of time series for `and` and `if` operator.
-			// This should produce more specific label filters for the left side of the query.
-			// This, in turn, should reduce the time to select series for the left side of the query.
-			tssRight, tssLeft, err = execBinaryOpArgs(ec, be.Right, be.Left, be)
-		default:
-			tssLeft, tssRight, err = execBinaryOpArgs(ec, be.Left, be.Right, be)
-		}
-		if err != nil {
-			return nil, fmt.Errorf("cannot execute %q: %w", be.AppendString(nil), err)
-		}
-		bfa := &binaryOpFuncArg{
-			be:    be,
-			left:  tssLeft,
-			right: tssRight,
-		}
-		rv, err := bf(bfa)
-		if err != nil {
-			return nil, fmt.Errorf(`cannot evaluate %q: %w`, be.AppendString(nil), err)
-		}
-		return rv, nil
+		qtChild := qt.NewChild()
+		rv, err := evalBinaryOp(qtChild, ec, be)
+		qtChild.Donef("binary op %q: series=%d", be.Op, len(rv))
+		return rv, err
 	}
 	if ne, ok := e.(*metricsql.NumberExpr); ok {
 		rv := evalNumber(ec, ne.N)
```
```diff
@@ -329,7 +282,98 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
 	return nil, fmt.Errorf("unexpected expression %q", e.AppendString(nil))
 }
 
-func execBinaryOpArgs(ec *EvalConfig, exprFirst, exprSecond metricsql.Expr, be *metricsql.BinaryOpExpr) ([]*timeseries, []*timeseries, error) {
+func evalTransformFunc(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.FuncExpr) ([]*timeseries, error) {
+	args, err := evalExprs(qt, ec, fe.Args)
+	if err != nil {
+		return nil, err
+	}
+	tf := getTransformFunc(fe.Name)
+	if tf == nil {
+		return nil, fmt.Errorf(`unknown func %q`, fe.Name)
+	}
+	tfa := &transformFuncArg{
+		ec:   ec,
+		fe:   fe,
+		args: args,
+	}
+	rv, err := tf(tfa)
+	if err != nil {
+		return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
+	}
+	return rv, nil
+}
+
+func evalAggrFunc(qt *querytracer.Tracer, ec *EvalConfig, ae *metricsql.AggrFuncExpr) ([]*timeseries, error) {
+	if callbacks := getIncrementalAggrFuncCallbacks(ae.Name); callbacks != nil {
+		fe, nrf := tryGetArgRollupFuncWithMetricExpr(ae)
+		if fe != nil {
+			// There is an optimized path for calculating metricsql.AggrFuncExpr over rollupFunc over metricsql.MetricExpr.
+			// The optimized path saves RAM for aggregates over big number of time series.
+			args, re, err := evalRollupFuncArgs(qt, ec, fe)
+			if err != nil {
+				return nil, err
+			}
+			rf, err := nrf(args)
+			if err != nil {
+				return nil, err
+			}
+			iafc := newIncrementalAggrFuncContext(ae, callbacks)
+			return evalRollupFunc(qt, ec, fe.Name, rf, ae, re, iafc)
+		}
+	}
+	args, err := evalExprs(qt, ec, ae.Args)
+	if err != nil {
+		return nil, err
+	}
+	af := getAggrFunc(ae.Name)
+	if af == nil {
+		return nil, fmt.Errorf(`unknown func %q`, ae.Name)
+	}
+	afa := &aggrFuncArg{
+		ae:   ae,
+		args: args,
+		ec:   ec,
+	}
+	rv, err := af(afa)
+	if err != nil {
+		return nil, fmt.Errorf(`cannot evaluate %q: %w`, ae.AppendString(nil), err)
+	}
+	return rv, nil
+}
+
+func evalBinaryOp(qt *querytracer.Tracer, ec *EvalConfig, be *metricsql.BinaryOpExpr) ([]*timeseries, error) {
+	bf := getBinaryOpFunc(be.Op)
+	if bf == nil {
+		return nil, fmt.Errorf(`unknown binary op %q`, be.Op)
+	}
+	var err error
+	var tssLeft, tssRight []*timeseries
+	switch strings.ToLower(be.Op) {
+	case "and", "if":
+		// Fetch right-side series at first, since it usually contains
+		// lower number of time series for `and` and `if` operator.
+		// This should produce more specific label filters for the left side of the query.
+		// This, in turn, should reduce the time to select series for the left side of the query.
+		tssRight, tssLeft, err = execBinaryOpArgs(qt, ec, be.Right, be.Left, be)
+	default:
+		tssLeft, tssRight, err = execBinaryOpArgs(qt, ec, be.Left, be.Right, be)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("cannot execute %q: %w", be.AppendString(nil), err)
+	}
+	bfa := &binaryOpFuncArg{
+		be:    be,
+		left:  tssLeft,
+		right: tssRight,
+	}
+	rv, err := bf(bfa)
+	if err != nil {
+		return nil, fmt.Errorf(`cannot evaluate %q: %w`, be.AppendString(nil), err)
+	}
+	return rv, nil
+}
+
+func execBinaryOpArgs(qt *querytracer.Tracer, ec *EvalConfig, exprFirst, exprSecond metricsql.Expr, be *metricsql.BinaryOpExpr) ([]*timeseries, []*timeseries, error) {
 	// Execute binary operation in the following way:
 	//
 	// 1) execute the exprFirst
```
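The transform, aggregate and binary-operator branches that used to live inline in `evalExpr` are moved verbatim into `evalTransformFunc`, `evalAggrFunc` and `evalBinaryOp`; beyond threading `qt`, the only substantive tweak is in the optimized aggregate path, which now passes `ae` rather than the enclosing `e` to `evalRollupFunc`, since the extracted function no longer sees the outer expression. The extraction exists so each branch can be bracketed by the three-line caller pattern from `evalExprInternal` above — and note that `qtChild.Donef(...)` runs even when `err != nil`, so a span is closed (reporting `series=0`) on the error path too:

```go
// Caller pattern, as it appears in evalExprInternal in this diff:
qtChild := qt.NewChild()
rv, err := evalBinaryOp(qtChild, ec, be)
qtChild.Donef("binary op %q: series=%d", be.Op, len(rv))
return rv, err
```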
`eval.go`, continued:

```diff
@@ -353,7 +397,7 @@ func execBinaryOpArgs(ec *EvalConfig, exprFirst, exprSecond metricsql.Expr, be *
 	//
 	// - Queries, which get additional labels from `info` metrics.
 	//   See https://www.robustperception.io/exposing-the-software-version-to-prometheus
-	tssFirst, err := evalExpr(ec, exprFirst)
+	tssFirst, err := evalExpr(qt, ec, exprFirst)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -366,7 +410,7 @@ func execBinaryOpArgs(ec *EvalConfig, exprFirst, exprSecond metricsql.Expr, be *
 		lfs = metricsql.TrimFiltersByGroupModifier(lfs, be)
 		exprSecond = metricsql.PushdownBinaryOpFilters(exprSecond, lfs)
 	}
-	tssSecond, err := evalExpr(ec, exprSecond)
+	tssSecond, err := evalExpr(qt, ec, exprSecond)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -503,10 +547,10 @@ func tryGetArgRollupFuncWithMetricExpr(ae *metricsql.AggrFuncExpr) (*metricsql.F
 	return nil, nil
 }
 
-func evalExprs(ec *EvalConfig, es []metricsql.Expr) ([][]*timeseries, error) {
+func evalExprs(qt *querytracer.Tracer, ec *EvalConfig, es []metricsql.Expr) ([][]*timeseries, error) {
 	var rvs [][]*timeseries
 	for _, e := range es {
-		rv, err := evalExpr(ec, e)
+		rv, err := evalExpr(qt, ec, e)
 		if err != nil {
 			return nil, err
 		}
@@ -515,7 +559,7 @@ func evalExprs(ec *EvalConfig, es []metricsql.Expr) ([][]*timeseries, error) {
 	return rvs, nil
 }
 
-func evalRollupFuncArgs(ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{}, *metricsql.RollupExpr, error) {
+func evalRollupFuncArgs(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{}, *metricsql.RollupExpr, error) {
 	var re *metricsql.RollupExpr
 	rollupArgIdx := metricsql.GetRollupArgIdx(fe)
 	if len(fe.Args) <= rollupArgIdx {
@@ -528,7 +572,7 @@ func evalRollupFuncArgs(ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{},
 			args[i] = re
 			continue
 		}
-		ts, err := evalExpr(ec, arg)
+		ts, err := evalExpr(qt, ec, arg)
 		if err != nil {
 			return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %w", i+1, fe.AppendString(nil), err)
 		}
@@ -568,11 +612,12 @@ func getRollupExprArg(arg metricsql.Expr) *metricsql.RollupExpr {
 // expr may contain:
 // - rollupFunc(m) if iafc is nil
 // - aggrFunc(rollupFunc(m)) if iafc isn't nil
-func evalRollupFunc(ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
+func evalRollupFunc(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr,
+	re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
 	if re.At == nil {
-		return evalRollupFuncWithoutAt(ec, funcName, rf, expr, re, iafc)
+		return evalRollupFuncWithoutAt(qt, ec, funcName, rf, expr, re, iafc)
 	}
-	tssAt, err := evalExpr(ec, re.At)
+	tssAt, err := evalExpr(qt, ec, re.At)
 	if err != nil {
 		return nil, fmt.Errorf("cannot evaluate `@` modifier: %w", err)
 	}
@@ -583,7 +628,7 @@ func evalRollupFunc(ec *EvalConfig, funcName string, rf rollupFunc, expr metrics
 	ecNew := copyEvalConfig(ec)
 	ecNew.Start = atTimestamp
 	ecNew.End = atTimestamp
-	tss, err := evalRollupFuncWithoutAt(ecNew, funcName, rf, expr, re, iafc)
+	tss, err := evalRollupFuncWithoutAt(qt, ecNew, funcName, rf, expr, re, iafc)
 	if err != nil {
 		return nil, err
 	}
@@ -601,7 +646,8 @@ func evalRollupFunc(ec *EvalConfig, funcName string, rf rollupFunc, expr metrics
 	return tss, nil
 }
 
-func evalRollupFuncWithoutAt(ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
+func evalRollupFuncWithoutAt(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
+	expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
 	funcName = strings.ToLower(funcName)
 	ecNew := ec
 	var offset int64
@@ -628,12 +674,12 @@ func evalRollupFuncWithoutAt(ec *EvalConfig, funcName string, rf rollupFunc, exp
 	var rvs []*timeseries
 	var err error
 	if me, ok := re.Expr.(*metricsql.MetricExpr); ok {
-		rvs, err = evalRollupFuncWithMetricExpr(ecNew, funcName, rf, expr, me, iafc, re.Window)
+		rvs, err = evalRollupFuncWithMetricExpr(qt, ecNew, funcName, rf, expr, me, iafc, re.Window)
 	} else {
 		if iafc != nil {
 			logger.Panicf("BUG: iafc must be nil for rollup %q over subquery %q", funcName, re.AppendString(nil))
 		}
-		rvs, err = evalRollupFuncWithSubquery(ecNew, funcName, rf, expr, re)
+		rvs, err = evalRollupFuncWithSubquery(qt, ecNew, funcName, rf, expr, re)
 	}
 	if err != nil {
 		return nil, err
@@ -676,8 +722,10 @@ func aggregateAbsentOverTime(ec *EvalConfig, expr metricsql.Expr, tss []*timeser
 	return rvs
 }
 
-func evalRollupFuncWithSubquery(ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr) ([]*timeseries, error) {
+func evalRollupFuncWithSubquery(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr) ([]*timeseries, error) {
 	// TODO: determine whether to use rollupResultCacheV here.
+	qt = qt.NewChild()
+	defer qt.Donef("subquery")
 	step := re.Step.Duration(ec.Step)
 	if step == 0 {
 		step = ec.Step
@@ -693,7 +741,7 @@ func evalRollupFuncWithSubquery(ec *EvalConfig, funcName string, rf rollupFunc,
 	}
 	// unconditionally align start and end args to step for subquery as Prometheus does.
 	ecSQ.Start, ecSQ.End = alignStartEnd(ecSQ.Start, ecSQ.End, ecSQ.Step)
-	tssSQ, err := evalExpr(ecSQ, re.Expr)
+	tssSQ, err := evalExpr(qt, ecSQ, re.Expr)
 	if err != nil {
 		return nil, err
 	}
@@ -727,6 +775,7 @@ func evalRollupFuncWithSubquery(ec *EvalConfig, funcName string, rf rollupFunc,
 		}
 		return values, timestamps
 	})
+	qt.Printf("rollup %s() over %d series returned by subquery: series=%d", funcName, len(tssSQ), len(tss))
 	return tss, nil
 }
 
@@ -802,15 +851,20 @@ var (
 	rollupResultCacheMiss = metrics.NewCounter(`vm_rollup_result_cache_miss_total`)
 )
 
-func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc,
+func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
 	expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowExpr *metricsql.DurationExpr) ([]*timeseries, error) {
+	var rollupMemorySize int64
+	window := windowExpr.Duration(ec.Step)
+	qt = qt.NewChild()
+	defer func() {
+		qt.Donef("rollup %s(): timeRange=[%d..%d], step=%d, window=%d, neededMemoryBytes=%d", funcName, ec.Start, ec.End, ec.Step, window, rollupMemorySize)
+	}()
 	if me.IsEmpty() {
 		return evalNumber(ec, nan), nil
 	}
-	window := windowExpr.Duration(ec.Step)
 
 	// Search for partial results in cache.
-	tssCached, start := rollupResultCacheV.Get(ec, expr, window)
+	tssCached, start := rollupResultCacheV.Get(qt, ec, expr, window)
 	if start > ec.End {
 		// The result is fully cached.
 		rollupResultCacheFullHits.Inc()
@@ -840,7 +894,7 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
 		minTimestamp -= ec.Step
 	}
 	sq := storage.NewSearchQuery(minTimestamp, ec.End, tfss, ec.MaxSeries)
-	rss, err := netstorage.ProcessSearchQuery(sq, true, ec.Deadline)
+	rss, err := netstorage.ProcessSearchQuery(qt, sq, true, ec.Deadline)
 	if err != nil {
 		return nil, err
 	}
@@ -874,7 +928,7 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
 		}
 	}
 	rollupPoints := mulNoOverflow(pointsPerTimeseries, int64(timeseriesLen*len(rcs)))
-	rollupMemorySize := mulNoOverflow(rollupPoints, 16)
+	rollupMemorySize = mulNoOverflow(rollupPoints, 16)
 	rml := getRollupMemoryLimiter()
 	if !rml.Get(uint64(rollupMemorySize)) {
 		rss.Cancel()
@@ -891,15 +945,15 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
 	keepMetricNames := getKeepMetricNames(expr)
 	var tss []*timeseries
 	if iafc != nil {
-		tss, err = evalRollupWithIncrementalAggregate(funcName, keepMetricNames, iafc, rss, rcs, preFunc, sharedTimestamps)
+		tss, err = evalRollupWithIncrementalAggregate(qt, funcName, keepMetricNames, iafc, rss, rcs, preFunc, sharedTimestamps)
 	} else {
-		tss, err = evalRollupNoIncrementalAggregate(funcName, keepMetricNames, rss, rcs, preFunc, sharedTimestamps)
+		tss, err = evalRollupNoIncrementalAggregate(qt, funcName, keepMetricNames, rss, rcs, preFunc, sharedTimestamps)
 	}
 	if err != nil {
 		return nil, err
 	}
 	tss = mergeTimeseries(tssCached, tss, start, ec)
-	rollupResultCacheV.Put(ec, expr, window, tss)
+	rollupResultCacheV.Put(qt, ec, expr, window, tss)
 	return tss, nil
 }
 
@@ -915,9 +969,12 @@ func getRollupMemoryLimiter() *memoryLimiter {
 	return &rollupMemoryLimiter
 }
 
-func evalRollupWithIncrementalAggregate(funcName string, keepMetricNames bool, iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
+func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool,
+	iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
 	preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
-	err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
+	qt = qt.NewChild()
+	defer qt.Donef("rollup %s() with incremental aggregation %s() over %d series", funcName, iafc.ae.Name, rss.Len())
+	err := rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
 		rs.Values, rs.Timestamps = dropStaleNaNs(funcName, rs.Values, rs.Timestamps)
 		preFunc(rs.Values, rs.Timestamps)
 		ts := getTimeseries()
@@ -944,14 +1001,17 @@ func evalRollupWithIncrementalAggregate(funcName string, keepMetricNames bool, i
 		return nil, err
 	}
 	tss := iafc.finalizeTimeseries()
+	qt.Printf("series after aggregation with %s(): %d", iafc.ae.Name, len(tss))
 	return tss, nil
 }
 
-func evalRollupNoIncrementalAggregate(funcName string, keepMetricNames bool, rss *netstorage.Results, rcs []*rollupConfig,
+func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool, rss *netstorage.Results, rcs []*rollupConfig,
 	preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
+	qt = qt.NewChild()
+	defer qt.Donef("rollup %s() over %d series", funcName, rss.Len())
 	tss := make([]*timeseries, 0, rss.Len()*len(rcs))
 	var tssLock sync.Mutex
-	err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
+	err := rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
 		rs.Values, rs.Timestamps = dropStaleNaNs(funcName, rs.Values, rs.Timestamps)
 		preFunc(rs.Values, rs.Timestamps)
 		for _, rc := range rcs {
```
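Two details in the rollup hunks are easy to miss. First, `rss.RunParallel` now takes the tracer as its first argument, so the netstorage layer can attach its own spans under the rollup span. Second, `rollupMemorySize` changes from a `:=` declaration deep in the function body to a declaration at the top plus a plain assignment: the deferred `qt.Donef(...)` closure is registered before the memory estimate is computed, and Go closures capture variables rather than values, so the span message reports the final figure instead of zero. A self-contained illustration of that capture semantics:

```go
package main

import "fmt"

func main() {
	var neededBytes int64
	defer func() {
		// Deferred closures capture variables, not values: this prints
		// neededMemoryBytes=4096 even though the defer was registered
		// while neededBytes was still zero.
		fmt.Printf("neededMemoryBytes=%d\n", neededBytes)
	}()
	neededBytes = 4096
}
```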
@ -13,6 +13,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/querystats"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/metrics"
	"github.com/VictoriaMetrics/metricsql"
@ -26,7 +27,7 @@ var (
)

// Exec executes q for the given ec.
func Exec(qt *querytracer.Tracer, ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result, error) {
	if querystats.Enabled() {
		startTime := time.Now()
		defer querystats.RegisterQuery(q, ec.End-ec.Start, startTime)

@ -40,25 +41,29 @@ func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result,
	}

	qid := activeQueriesV.Add(ec, q)
	rv, err := evalExpr(qt, ec, e)
	activeQueriesV.Remove(qid)
	if err != nil {
		return nil, err
	}

	if isFirstPointOnly {
		// Remove all the points except the first one from every time series.
		for _, ts := range rv {
			ts.Values = ts.Values[:1]
			ts.Timestamps = ts.Timestamps[:1]
		}
		qt.Printf("leave only the first point in every series")
	}

	maySort := maySortResults(e, rv)
	result, err := timeseriesToResult(rv, maySort)
	if err != nil {
		return nil, err
	}
	if maySort {
		qt.Printf("sort series by metric name and labels")
	} else {
		qt.Printf("do not sort series by metric name and labels")
	}
	if n := ec.RoundDigits; n < 100 {
		for i := range result {
			values := result[i].Values

@ -66,6 +71,7 @@ func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result,
				values[j] = decimal.RoundToDecimalDigits(v, n)
			}
		}
		qt.Printf("round series values to %d decimal digits after the point", n)
	}
	return result, err
}
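For context, a sketch of how an HTTP handler might drive the new Exec signature; the handler name, parameters and response wiring are assumptions for illustration, while querytracer.New, Donef and ToJSON are the APIs introduced by this commit:

```go
package vmselect // hypothetical

import (
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

// handleQueryRange is a hypothetical caller: it enables tracing when the
// request carries trace=1 and attaches the JSON trace to the response.
func handleQueryRange(r *http.Request, ec *promql.EvalConfig, q string) error {
	qt := querytracer.New(r.FormValue("trace") == "1") // nil tracer => all calls are no-ops
	result, err := promql.Exec(qt, ec, q, false)
	if err != nil {
		return err
	}
	qt.Donef("/api/v1/query_range: query=%q", q)
	traceJSON := qt.ToJSON()           // empty string when tracing is disabled
	_, _ = result, traceJSON           // ...merge into the response JSON's `trace` field...
	return nil
}
```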
@ -66,7 +66,7 @@ func TestExecSuccess(t *testing.T) {
		RoundDigits: 100,
	}
	for i := 0; i < 5; i++ {
		result, err := Exec(nil, ec, q, false)
		if err != nil {
			t.Fatalf(`unexpected error when executing %q: %s`, q, err)
		}

@ -7728,14 +7728,14 @@ func TestExecError(t *testing.T) {
		RoundDigits: 100,
	}
	for i := 0; i < 4; i++ {
		rv, err := Exec(nil, ec, q, false)
		if err == nil {
			t.Fatalf(`expecting non-nil error on %q`, q)
		}
		if rv != nil {
			t.Fatalf(`expecting nil rv`)
		}
		rv, err = Exec(nil, ec, q, true)
		if err == nil {
			t.Fatalf(`expecting non-nil error on %q`, q)
		}
@ -4,6 +4,7 @@ import (
	"crypto/rand"
	"flag"
	"fmt"
	"io/ioutil"
	"sync"
	"sync/atomic"
	"time"

@ -11,8 +12,10 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
	"github.com/VictoriaMetrics/fastcache"
@ -110,8 +113,10 @@ func InitRollupResultCache(cachePath string) {
	if len(rollupResultCachePath) > 0 {
		logger.Infof("loading rollupResult cache from %q...", rollupResultCachePath)
		c = workingsetcache.Load(rollupResultCachePath, cacheSize)
		mustLoadRollupResultCacheKeyPrefix(rollupResultCachePath)
	} else {
		c = workingsetcache.New(cacheSize)
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
	}
	if *disableCache {
		c.Reset()
@ -169,9 +174,10 @@ func StopRollupResultCache() {
	logger.Infof("saving rollupResult cache to %q...", rollupResultCachePath)
	startTime := time.Now()
	if err := rollupResultCacheV.c.Save(rollupResultCachePath); err != nil {
		logger.Errorf("cannot save rollupResult cache at %q: %s", rollupResultCachePath, err)
		return
	}
	mustSaveRollupResultCacheKeyPrefix(rollupResultCachePath)
	var fcs fastcache.Stats
	rollupResultCacheV.c.UpdateStats(&fcs)
	rollupResultCacheV.c.Stop()
@ -193,8 +199,14 @@ func ResetRollupResultCache() {
	logger.Infof("rollupResult cache has been cleared")
}

func (rrc *rollupResultCache) Get(qt *querytracer.Tracer, ec *EvalConfig, expr metricsql.Expr, window int64) (tss []*timeseries, newStart int64) {
	qt = qt.NewChild()
	if qt.Enabled() {
		query := expr.AppendString(nil)
		defer qt.Donef("rollup cache get: query=%s, timeRange=[%d..%d], step=%d, window=%d", query, ec.Start, ec.End, ec.Step, window)
	}
	if !ec.mayCache() {
		qt.Printf("do not fetch series from cache, since it is disabled in the current context")
		return nil, ec.Start
	}
@ -205,6 +217,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
	bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
	metainfoBuf := rrc.c.Get(nil, bb.B)
	if len(metainfoBuf) == 0 {
		qt.Printf("nothing found")
		return nil, ec.Start
	}
	var mi rollupResultCacheMetainfo
@ -213,6 +226,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
	}
	key := mi.GetBestKey(ec.Start, ec.End)
	if key.prefix == 0 && key.suffix == 0 {
		qt.Printf("nothing found on the timeRange")
		return nil, ec.Start
	}
	bb.B = key.Marshal(bb.B[:0])
@ -224,18 +238,22 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
		metainfoBuf = mi.Marshal(metainfoBuf[:0])
		bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
		rrc.c.Set(bb.B, metainfoBuf)
		qt.Printf("missing cache entry")
		return nil, ec.Start
	}
	// Decompress into newly allocated byte slice, since tss returned from unmarshalTimeseriesFast
	// refers to the byte slice, so it cannot be returned to the resultBufPool.
	qt.Printf("load compressed entry from cache with size %d bytes", len(compressedResultBuf.B))
	resultBuf, err := encoding.DecompressZSTD(nil, compressedResultBuf.B)
	if err != nil {
		logger.Panicf("BUG: cannot decompress resultBuf from rollupResultCache: %s; it looks like it was improperly saved", err)
	}
	qt.Printf("unpack the entry into %d bytes", len(resultBuf))
	tss, err = unmarshalTimeseriesFast(resultBuf)
	if err != nil {
		logger.Panicf("BUG: cannot unmarshal timeseries from rollupResultCache: %s; it looks like it was improperly saved", err)
	}
	qt.Printf("unmarshal %d series", len(tss))

	// Extract values for the matching timestamps
	timestamps := tss[0].Timestamps
@ -245,10 +263,12 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
	}
	if i == len(timestamps) {
		// no matches.
		qt.Printf("no datapoints found in the cached series on the given timeRange")
		return nil, ec.Start
	}
	if timestamps[i] != ec.Start {
		// The cached range doesn't cover the requested range.
		qt.Printf("cached series don't cover the given timeRange")
		return nil, ec.Start
	}
@ -269,13 +289,20 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in

	timestamps = tss[0].Timestamps
	newStart = timestamps[len(timestamps)-1] + ec.Step
	qt.Printf("return %d series on a timeRange=[%d..%d]", len(tss), ec.Start, newStart-ec.Step)
	return tss, newStart
}

var resultBufPool bytesutil.ByteBufferPool

func (rrc *rollupResultCache) Put(qt *querytracer.Tracer, ec *EvalConfig, expr metricsql.Expr, window int64, tss []*timeseries) {
	qt = qt.NewChild()
	if qt.Enabled() {
		query := expr.AppendString(nil)
		defer qt.Donef("rollup cache put: query=%s, timeRange=[%d..%d], step=%d, window=%d, series=%d", query, ec.Start, ec.End, ec.Step, window, len(tss))
	}
	if len(tss) == 0 || !ec.mayCache() {
		qt.Printf("do not store series to cache, since it is disabled in the current context")
		return
	}
@ -290,6 +317,7 @@ func (rrc *rollupResultCache) Put(ec *EvalConfig, expr metricsql.Expr, window in
	i++
	if i == 0 {
		// Nothing to store in the cache.
		qt.Printf("nothing to store in the cache, since all the points have timestamps bigger than %d", deadline)
		return
	}
	if i < len(timestamps) {
@ -304,52 +332,96 @@ func (rrc *rollupResultCache) Put(ec *EvalConfig, expr metricsql.Expr, window in
	}

	// Store tss in the cache.
	metainfoKey := bbPool.Get()
	defer bbPool.Put(metainfoKey)
	metainfoBuf := bbPool.Get()
	defer bbPool.Put(metainfoBuf)

	metainfoKey.B = marshalRollupResultCacheKey(metainfoKey.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
	metainfoBuf.B = rrc.c.Get(metainfoBuf.B[:0], metainfoKey.B)
	var mi rollupResultCacheMetainfo
	if len(metainfoBuf.B) > 0 {
		if err := mi.Unmarshal(metainfoBuf.B); err != nil {
			logger.Panicf("BUG: cannot unmarshal rollupResultCacheMetainfo: %s; it looks like it was improperly saved", err)
		}
	}
	start := timestamps[0]
	end := timestamps[len(timestamps)-1]
	if mi.CoversTimeRange(start, end) {
		qt.Printf("series on the given timeRange=[%d..%d] already exist in the cache", start, end)
		return
	}

	maxMarshaledSize := getRollupResultCacheSize() / 4
	resultBuf := resultBufPool.Get()
	defer resultBufPool.Put(resultBuf)
	resultBuf.B = marshalTimeseriesFast(resultBuf.B[:0], tss, maxMarshaledSize, ec.Step)
	if len(resultBuf.B) == 0 {
		tooBigRollupResults.Inc()
		qt.Printf("cannot store series in the cache, since they would occupy more than %d bytes", maxMarshaledSize)
		return
	}
	qt.Printf("marshal %d series on a timeRange=[%d..%d] into %d bytes", len(tss), start, end, len(resultBuf.B))
	compressedResultBuf := resultBufPool.Get()
	defer resultBufPool.Put(compressedResultBuf)
	compressedResultBuf.B = encoding.CompressZSTDLevel(compressedResultBuf.B[:0], resultBuf.B, 1)
	qt.Printf("compress %d bytes into %d bytes", len(resultBuf.B), len(compressedResultBuf.B))

	var key rollupResultCacheKey
	key.prefix = rollupResultCacheKeyPrefix
	key.suffix = atomic.AddUint64(&rollupResultCacheKeySuffix, 1)
	rollupResultKey := key.Marshal(nil)
	rrc.c.SetBig(rollupResultKey, compressedResultBuf.B)
	qt.Printf("store %d bytes in the cache", len(compressedResultBuf.B))

	mi.AddKey(key, timestamps[0], timestamps[len(timestamps)-1])
	metainfoBuf.B = mi.Marshal(metainfoBuf.B[:0])
	rrc.c.Set(metainfoKey.B, metainfoBuf.B)
}
var (
	rollupResultCacheKeyPrefix uint64
	rollupResultCacheKeySuffix = uint64(time.Now().UnixNano())
)

func newRollupResultCacheKeyPrefix() uint64 {
	var buf [8]byte
	if _, err := rand.Read(buf[:]); err != nil {
		// do not use logger.Panicf, since it isn't initialized yet.
		panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %w", err))
	}
	return encoding.UnmarshalUint64(buf[:])
}

func mustLoadRollupResultCacheKeyPrefix(path string) {
	path = path + ".key.prefix"
	if !fs.IsPathExist(path) {
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
		return
	}
	data, err := ioutil.ReadFile(path)
	if err != nil {
		logger.Errorf("cannot load %s: %s; reset rollupResult cache", path, err)
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
		return
	}
	if len(data) != 8 {
		logger.Errorf("unexpected size of %s; want 8 bytes; got %d bytes; reset rollupResult cache", path, len(data))
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
		return
	}
	rollupResultCacheKeyPrefix = encoding.UnmarshalUint64(data)
}

func mustSaveRollupResultCacheKeyPrefix(path string) {
	path = path + ".key.prefix"
	data := encoding.MarshalUint64(nil, rollupResultCacheKeyPrefix)
	fs.MustRemoveAll(path)
	if err := fs.WriteFileAtomically(path, data); err != nil {
		logger.Fatalf("cannot store rollupResult cache key prefix to %q: %s", path, err)
	}
}

var tooBigRollupResults = metrics.NewCounter("vm_too_big_rollup_results_total")

// Increment this value every time the format of the cache changes.
@ -490,20 +562,36 @@ func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error {
	return nil
}

func (mi *rollupResultCacheMetainfo) CoversTimeRange(start, end int64) bool {
	if start > end {
		logger.Panicf("BUG: start cannot exceed end; got %d vs %d", start, end)
	}
	for i := range mi.entries {
		e := &mi.entries[i]
		if start >= e.start && end <= e.end {
			return true
		}
	}
	return false
}

func (mi *rollupResultCacheMetainfo) GetBestKey(start, end int64) rollupResultCacheKey {
	if start > end {
		logger.Panicf("BUG: start cannot exceed end; got %d vs %d", start, end)
	}
	var bestKey rollupResultCacheKey
	dMax := int64(0)
	for i := range mi.entries {
		e := &mi.entries[i]
		if start < e.start {
			continue
		}
		d := e.end - start
		if end <= e.end {
			d = end - start
		}
		if d >= dMax {
			dMax = d
			bestKey = e.key
		}
	}
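The rewritten GetBestKey above switches the selection rule from "closest start" to "largest overlap": among cached entries whose start is not after the requested start, it now prefers the entry covering the biggest slice of [start..end]. A self-contained sketch with simplified types (the entry struct and values here are assumptions for illustration):

```go
package main

import "fmt"

type entry struct {
	start, end int64
	key        int
}

// bestKey mirrors the GetBestKey heuristic from the diff: among entries
// covering the requested start, pick the one with the largest overlap
// with [start..end].
func bestKey(entries []entry, start, end int64) int {
	best, dMax := 0, int64(0)
	for _, e := range entries {
		if start < e.start {
			continue // entry begins after the requested start
		}
		d := e.end - start // overlap when the entry ends before `end`
		if end <= e.end {
			d = end - start // entry fully covers the requested range
		}
		if d >= dMax {
			dMax, best = d, e.key
		}
	}
	return best
}

func main() {
	entries := []entry{{0, 1000, 1}, {0, 1500, 2}, {500, 3000, 3}}
	// Entry 3 has the largest overall overlap but starts after 400, so it is
	// skipped; entry 2 wins with overlap 1100 against entry 1's 600.
	fmt.Println(bestKey(entries, 400, 2000)) // prints 2
}
```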
@ -22,6 +22,7 @@ func TestRollupResultCacheInitStop(t *testing.T) {
			StopRollupResultCache()
		}
		fs.MustRemoveAll(cacheFilePath)
		fs.MustRemoveAll(cacheFilePath + ".key.prefix")
	})
}
@ -55,7 +56,7 @@ func TestRollupResultCache(t *testing.T) {

	// Try obtaining an empty value.
	t.Run("empty", func(t *testing.T) {
		tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
		if newStart != ec.Start {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, ec.Start)
		}

@ -73,8 +74,8 @@ func TestRollupResultCache(t *testing.T) {
				Values: []float64{0, 1, 2},
			},
		}
		rollupResultCacheV.Put(nil, ec, fe, window, tss)
		tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
		if newStart != 1400 {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1400)
		}

@ -94,8 +95,8 @@ func TestRollupResultCache(t *testing.T) {
				Values: []float64{0, 1, 2},
			},
		}
		rollupResultCacheV.Put(nil, ec, ae, window, tss)
		tss, newStart := rollupResultCacheV.Get(nil, ec, ae, window)
		if newStart != 1400 {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1400)
		}

@ -117,8 +118,8 @@ func TestRollupResultCache(t *testing.T) {
				Values: []float64{333, 0, 1, 2},
			},
		}
		rollupResultCacheV.Put(nil, ec, fe, window, tss)
		tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
		if newStart != 1000 {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
		}

@ -136,8 +137,8 @@ func TestRollupResultCache(t *testing.T) {
				Values: []float64{0, 1, 2},
			},
		}
		rollupResultCacheV.Put(nil, ec, fe, window, tss)
		tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
		if newStart != 1000 {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
		}

@ -155,8 +156,8 @@ func TestRollupResultCache(t *testing.T) {
				Values: []float64{0, 1, 2},
			},
		}
		rollupResultCacheV.Put(nil, ec, fe, window, tss)
		tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
		if newStart != 1000 {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
		}

@ -174,8 +175,8 @@ func TestRollupResultCache(t *testing.T) {
				Values: []float64{0, 1, 2},
			},
		}
		rollupResultCacheV.Put(nil, ec, fe, window, tss)
		tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
		if newStart != 1000 {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
		}

@ -193,8 +194,8 @@ func TestRollupResultCache(t *testing.T) {
				Values: []float64{0, 1, 2, 3, 4, 5, 6, 7},
			},
		}
		rollupResultCacheV.Put(nil, ec, fe, window, tss)
		tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
		if newStart != 2200 {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, 2200)
		}

@ -216,8 +217,8 @@ func TestRollupResultCache(t *testing.T) {
				Values: []float64{1, 2, 3, 4, 5, 6},
			},
		}
		rollupResultCacheV.Put(nil, ec, fe, window, tss)
		tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
		if newStart != 2200 {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, 2200)
		}

@ -241,8 +242,8 @@ func TestRollupResultCache(t *testing.T) {
			}
			tss = append(tss, ts)
		}
		rollupResultCacheV.Put(nil, ec, fe, window, tss)
		tssResult, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
		if newStart != 2200 {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, 2200)
		}

@ -270,10 +271,10 @@ func TestRollupResultCache(t *testing.T) {
				Values: []float64{0, 1, 2},
			},
		}
		rollupResultCacheV.Put(nil, ec, fe, window, tss1)
		rollupResultCacheV.Put(nil, ec, fe, window, tss2)
		rollupResultCacheV.Put(nil, ec, fe, window, tss3)
		tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
		if newStart != 1400 {
			t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1400)
		}
@ -16,6 +16,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/syncwg"
	"github.com/VictoriaMetrics/metrics"

@ -170,9 +171,9 @@ func DeleteMetrics(tfss []*storage.TagFilters) (int, error) {
}

// SearchMetricNames returns metric names for the given tfss on the given tr.
func SearchMetricNames(qt *querytracer.Tracer, tfss []*storage.TagFilters, tr storage.TimeRange, maxMetrics int, deadline uint64) ([]storage.MetricName, error) {
	WG.Add(1)
	mns, err := Storage.SearchMetricNames(qt, tfss, tr, maxMetrics, deadline)
	WG.Done()
	return mns, err
}
@ -15,6 +15,7 @@ The following tip changes can be tested by building VictoriaMetrics components f

## tip

* FEATURE: support query tracing, which allows determining bottlenecks during query processing. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#query-tracing) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1403).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): remove the dependency on Internet access in the `http://vmagent:8429/targets` page. Previously the page layout was broken without Internet access. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2594).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): remove the dependency on Internet access in [web API pages](https://docs.victoriametrics.com/vmalert.html#web). Previously the functionality and the layout of these pages were broken without Internet access. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2594).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): expose the `/api/v1/status/config` endpoint in the same way as Prometheus does. See [these docs](https://prometheus.io/docs/prometheus/latest/querying/api/#config).
@ -1365,6 +1365,69 @@ VictoriaMetrics returns TSDB stats at `/api/v1/status/tsdb` page in the way simi

## Query tracing

Query tracing is allowed by default. It can be denied by passing the `-denyQueryTracing` command-line flag to VictoriaMetrics.

## Cardinality limiter

By default VictoriaMetrics doesn't limit the number of stored time series. The limit can be enforced by setting the following command-line flags:
@ -1369,6 +1369,69 @@ VictoriaMetrics returns TSDB stats at `/api/v1/status/tsdb` page in the way simi

## Query tracing

Query tracing is allowed by default. It can be denied by passing the `-denyQueryTracing` command-line flag to VictoriaMetrics.

## Cardinality limiter

By default VictoriaMetrics doesn't limit the number of stored time series. The limit can be enforced by setting the following command-line flags:
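To make the flag's effect concrete: with `-denyQueryTracing` set, the constructor in the new querytracer package returns a nil (no-op) tracer even when the caller asks for tracing, so per-query `trace=1` args silently do nothing. A small sketch (the main wrapper is illustrative only):

```go
package main

import (
	"flag"
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

func main() {
	flag.Parse()                // pass -denyQueryTracing on the command line
	qt := querytracer.New(true) // request tracing explicitly
	fmt.Println(qt.Enabled())   // prints false when -denyQueryTracing is set
}
```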
lib/querytracer/tracer.go (new file, 245 lines)

@ -0,0 +1,245 @@
package querytracer

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"strings"
	"time"
)

var denyQueryTracing = flag.Bool("denyQueryTracing", false, "Whether to disable the ability to trace queries. See https://docs.victoriametrics.com/#query-tracing")

// Tracer represents query tracer.
//
// It must be created via New call.
// Each created tracer must be finalized via Donef call.
//
// Tracer may contain sub-tracers (branches) in order to build tree-like execution order.
// Call Tracer.NewChild func for adding sub-tracer.
type Tracer struct {
	// startTime is the time when Tracer was created
	startTime time.Time
	// doneTime is the time when Donef was called
	doneTime time.Time
	// message is the message generated by Printf or Donef call.
	message string
	// children is a list of children Tracer objects
	children []*Tracer
	// span contains span for the given Tracer. It is added via Tracer.AddJSON().
	// If span is non-nil, then the remaining fields aren't used.
	span *span
}

// New creates a new instance of the tracer.
//
// If enabled isn't set, then all function calls to the returned object will be no-op.
//
// Donef must be called when the tracer should be finished.
func New(enabled bool) *Tracer {
	if *denyQueryTracing || !enabled {
		return nil
	}
	return &Tracer{
		startTime: time.Now(),
	}
}

// Enabled returns true if the t is enabled.
func (t *Tracer) Enabled() bool {
	return t != nil
}

// NewChild adds a new child Tracer to t.
//
// NewChild cannot be called from concurrent goroutines.
// Create children tracers from a single goroutine and then pass them
// to concurrent goroutines.
func (t *Tracer) NewChild() *Tracer {
	if t == nil {
		return nil
	}
	if t.message != "" {
		panic(fmt.Errorf("BUG: NewChild() cannot be called after Donef(%q) call", t.message))
	}
	child := &Tracer{
		startTime: time.Now(),
	}
	t.children = append(t.children, child)
	return child
}

// Donef finishes t.
//
// Donef cannot be called multiple times.
// Other Tracer functions cannot be called after Donef call.
func (t *Tracer) Donef(format string, args ...interface{}) {
	if t == nil {
		return
	}
	if t.message != "" {
		panic(fmt.Errorf("BUG: Donef() already called with message %q", t.message))
	}
	t.message = fmt.Sprintf(format, args...)
	t.doneTime = time.Now()
}

// Printf adds new message to t.
//
// Printf cannot be called from concurrent goroutines.
func (t *Tracer) Printf(format string, args ...interface{}) {
	if t == nil {
		return
	}
	if t.message != "" {
		panic(fmt.Errorf("BUG: Printf() cannot be called after Done(%q) call", t.message))
	}
	now := time.Now()
	child := &Tracer{
		startTime: now,
		doneTime:  now,
		message:   fmt.Sprintf(format, args...),
	}
	t.children = append(t.children, child)
}

// AddJSON adds a sub-trace to t.
//
// The jsonTrace must be encoded with ToJSON.
//
// AddJSON cannot be called from concurrent goroutines.
func (t *Tracer) AddJSON(jsonTrace []byte) error {
	if t == nil {
		return nil
	}
	if len(jsonTrace) == 0 {
		return nil
	}
	var s *span
	if err := json.Unmarshal(jsonTrace, &s); err != nil {
		return fmt.Errorf("cannot unmarshal json trace: %s", err)
	}
	child := &Tracer{
		span: s,
	}
	t.children = append(t.children, child)
	return nil
}

// String returns string representation of t.
//
// String must be called when t methods aren't called by other goroutines.
func (t *Tracer) String() string {
	if t == nil {
		return ""
	}
	s := t.toSpan()
	var bb bytes.Buffer
	s.writePlaintextWithIndent(&bb, 0)
	return bb.String()
}

// ToJSON returns JSON representation of t.
//
// ToJSON must be called when t methods aren't called by other goroutines.
func (t *Tracer) ToJSON() string {
	if t == nil {
		return ""
	}
	s := t.toSpan()
	data, err := json.Marshal(s)
	if err != nil {
		panic(fmt.Errorf("BUG: unexpected error from json.Marshal: %w", err))
	}
	return string(data)
}

func (t *Tracer) toSpan() *span {
	s, _ := t.toSpanInternal(time.Now())
	return s
}

func (t *Tracer) toSpanInternal(prevTime time.Time) (*span, time.Time) {
	if t.span != nil {
		return t.span, prevTime
	}
	if t.doneTime == t.startTime {
		// a single-line trace
		d := t.startTime.Sub(prevTime)
		s := &span{
			DurationMsec: float64(d.Microseconds()) / 1000,
			Message:      t.message,
		}
		return s, t.doneTime
	}
	// tracer with children
	msg := t.message
	doneTime := t.doneTime
	if msg == "" {
		msg = "missing Tracer.Donef() call"
		doneTime = t.getLastChildDoneTime(t.startTime)
	}
	d := doneTime.Sub(t.startTime)
	var children []*span
	var sChild *span
	prevChildTime := t.startTime
	for _, child := range t.children {
		sChild, prevChildTime = child.toSpanInternal(prevChildTime)
		children = append(children, sChild)
	}
	s := &span{
		DurationMsec: float64(d.Microseconds()) / 1000,
		Message:      msg,
		Children:     children,
	}
	return s, doneTime
}

func (t *Tracer) getLastChildDoneTime(defaultTime time.Time) time.Time {
	if len(t.children) == 0 {
		return defaultTime
	}
	lastChild := t.children[len(t.children)-1]
	return lastChild.getLastChildDoneTime(lastChild.startTime)
}

// span represents a single trace span
type span struct {
	// DurationMsec is the duration for the current trace span in milliseconds.
	DurationMsec float64 `json:"duration_msec"`
	// Message is a trace message
	Message string `json:"message"`
	// Children contains children spans
	Children []*span `json:"children,omitempty"`
}

func (s *span) writePlaintextWithIndent(w io.Writer, indent int) {
	prefix := ""
	for i := 0; i < indent; i++ {
		prefix += "| "
	}
	prefix += "- "
	msg := s.messageWithPrefix(prefix)
	fmt.Fprintf(w, "%s%.03fms: %s\n", prefix, s.DurationMsec, msg)
	childIndent := indent + 1
	for _, sChild := range s.Children {
		sChild.writePlaintextWithIndent(w, childIndent)
	}
}

func (s *span) messageWithPrefix(prefix string) string {
	prefix = strings.Replace(prefix, "-", "|", 1)
	lines := strings.Split(s.Message, "\n")
	result := lines[:1]
	for i := range lines[1:] {
		ln := lines[i+1]
		if ln == "" {
			continue
		}
		ln = prefix + ln
		result = append(result, ln)
	}
	return strings.Join(result, "\n")
}
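Before the tests below, a quick usage sketch of the API defined above; the messages are invented and the printed durations vary per run:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

func main() {
	qt := querytracer.New(true)
	child := qt.NewChild()
	child.Printf("loaded %d blocks", 3)
	child.Donef("fetch stage")
	qt.Printf("merge results")
	qt.Donef("query %q", "up")
	// String renders the tree in the plaintext format exercised by the tests:
	// - 0.042ms: query "up"
	// | - 0.011ms: fetch stage
	// | | - 0.007ms: loaded 3 blocks
	// | - 0.001ms: merge results
	fmt.Print(qt.String())
}
```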
lib/querytracer/tracer_test.go (new file, 209 lines)

@ -0,0 +1,209 @@
package querytracer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTracerDisabled(t *testing.T) {
|
||||||
|
qt := New(false)
|
||||||
|
if qt.Enabled() {
|
||||||
|
t.Fatalf("query tracer must be disabled")
|
||||||
|
}
|
||||||
|
qtChild := qt.NewChild()
|
||||||
|
if qtChild.Enabled() {
|
||||||
|
t.Fatalf("query tracer must be disabled")
|
||||||
|
}
|
||||||
|
qtChild.Printf("foo %d", 123)
|
||||||
|
qtChild.Donef("child done %d", 456)
|
||||||
|
qt.Printf("parent %d", 789)
|
||||||
|
if err := qt.AddJSON([]byte("foobar")); err != nil {
|
||||||
|
t.Fatalf("unexpected error in AddJSON: %s", err)
|
||||||
|
}
|
||||||
|
qt.Donef("test")
|
||||||
|
s := qt.String()
|
||||||
|
if s != "" {
|
||||||
|
t.Fatalf("unexpected trace; got %s; want empty", s)
|
||||||
|
}
|
||||||
|
s = qt.ToJSON()
|
||||||
|
if s != "" {
|
||||||
|
t.Fatalf("unexpected json trace; got %s; want empty", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTracerEnabled(t *testing.T) {
|
||||||
|
qt := New(true)
|
||||||
|
if !qt.Enabled() {
|
||||||
|
t.Fatalf("query tracer must be enabled")
|
||||||
|
}
|
||||||
|
qtChild := qt.NewChild()
|
||||||
|
if !qtChild.Enabled() {
|
||||||
|
t.Fatalf("child query tracer must be enabled")
|
||||||
|
}
|
||||||
|
qtChild.Printf("foo %d", 123)
|
||||||
|
qtChild.Donef("child done %d", 456)
|
||||||
|
qt.Printf("parent %d", 789)
|
||||||
|
qt.Donef("test")
|
||||||
|
s := qt.String()
|
||||||
|
sExpected := `- 0ms: test
|
||||||
|
| - 0ms: child done 456
|
||||||
|
| | - 0ms: foo 123
|
||||||
|
| - 0ms: parent 789
|
||||||
|
`
|
||||||
|
if !areEqualTracesSkipDuration(s, sExpected) {
|
||||||
|
t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", s, sExpected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTracerMultiline(t *testing.T) {
|
||||||
|
qt := New(true)
|
||||||
|
qt.Printf("line3\nline4\n")
|
||||||
|
qt.Donef("line1\nline2")
|
||||||
|
s := qt.String()
|
||||||
|
sExpected := `- 0ms: line1
|
||||||
|
| line2
|
||||||
|
| - 0ms: line3
|
||||||
|
| | line4
|
||||||
|
`
|
||||||
|
if !areEqualTracesSkipDuration(s, sExpected) {
|
||||||
|
t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", s, sExpected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTracerToJSON(t *testing.T) {
|
||||||
|
qt := New(true)
|
||||||
|
if !qt.Enabled() {
|
||||||
|
t.Fatalf("query tracer must be enabled")
|
||||||
|
}
|
||||||
|
qtChild := qt.NewChild()
|
||||||
|
if !qtChild.Enabled() {
|
||||||
|
t.Fatalf("child query tracer must be enabled")
|
||||||
|
}
|
||||||
|
qtChild.Printf("foo %d", 123)
|
||||||
|
qtChild.Donef("child done %d", 456)
|
||||||
|
qt.Printf("parent %d", 789)
|
||||||
|
qt.Donef("test")
|
||||||
|
s := qt.ToJSON()
|
||||||
|
sExpected := `{"duration_msec":0,"message":"test","children":[` +
|
||||||
|
`{"duration_msec":0,"message":"child done 456","children":[` +
|
||||||
|
`{"duration_msec":0,"message":"foo 123"}]},` +
|
||||||
|
`{"duration_msec":0,"message":"parent 789"}]}`
|
||||||
|
if !areEqualJSONTracesSkipDuration(s, sExpected) {
|
||||||
|
t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", s, sExpected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTraceAddJSON(t *testing.T) {
|
||||||
|
qtChild := New(true)
|
||||||
|
qtChild.Printf("foo")
|
||||||
|
qtChild.Donef("child")
|
||||||
|
jsonTrace := qtChild.ToJSON()
|
||||||
|
qt := New(true)
|
||||||
|
qt.Printf("first_line")
|
||||||
|
if err := qt.AddJSON([]byte(jsonTrace)); err != nil {
|
||||||
|
t.Fatalf("unexpected error in AddJSON: %s", err)
|
||||||
|
}
|
||||||
|
qt.Printf("last_line")
|
||||||
|
if err := qt.AddJSON(nil); err != nil {
|
||||||
|
t.Fatalf("unexpected error in AddJSON(nil): %s", err)
|
||||||
|
}
|
||||||
|
qt.Donef("parent")
|
||||||
|
s := qt.String()
|
||||||
|
sExpected := `- 0ms: parent
|
||||||
|
| - 0ms: first_line
|
||||||
|
| - 0ms: child
|
||||||
|
| | - 0ms: foo
|
||||||
|
| - 0ms: last_line
|
||||||
|
`
|
||||||
|
if !areEqualTracesSkipDuration(s, sExpected) {
|
||||||
|
t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", s, sExpected)
|
||||||
|
}
|
||||||
|
|
||||||
|
jsonS := qt.ToJSON()
|
||||||
|
jsonSExpected := `{"duration_msec":0,"message":"parent","children":[` +
|
||||||
|
`{"duration_msec":0,"message":"first_line"},` +
|
||||||
|
`{"duration_msec":0,"message":"child","children":[` +
|
||||||
|
`{"duration_msec":0,"message":"foo"}]},` +
|
||||||
|
`{"duration_msec":0,"message":"last_line"}]}`
|
||||||
|
if !areEqualJSONTracesSkipDuration(jsonS, jsonSExpected) {
|
||||||
|
t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", jsonS, jsonSExpected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTraceMissingDonef(t *testing.T) {
|
||||||
|
qt := New(true)
|
||||||
|
qt.Printf("parent printf")
|
||||||
|
qtChild := qt.NewChild()
|
||||||
|
qtChild.Printf("child printf")
|
||||||
|
qt.Printf("another parent printf")
|
||||||
|
s := qt.String()
|
||||||
|
sExpected := `- 0ms: missing Tracer.Donef() call
|
||||||
|
| - 0ms: parent printf
|
||||||
|
| - 0ms: missing Tracer.Donef() call
|
||||||
|
| | - 0ms: child printf
|
||||||
|
| - 0ms: another parent printf
|
||||||
|
`
|
||||||
|
if !areEqualTracesSkipDuration(s, sExpected) {
|
||||||
|
t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", s, sExpected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestZeroDurationInTrace(t *testing.T) {
|
||||||
|
s := `- 123ms: missing Tracer.Donef() call
|
||||||
|
| - 0ms: parent printf
|
||||||
|
| - 54ms: missing Tracer.Donef() call
|
||||||
|
| | - 45ms: child printf
|
||||||
|
| - 0ms: another parent printf
|
||||||
|
`
|
||||||
|
result := zeroDurationsInTrace(s)
|
||||||
|
resultExpected := `- 0ms: missing Tracer.Donef() call
|
||||||
|
| - 0ms: parent printf
|
||||||
|
| - 0ms: missing Tracer.Donef() call
|
||||||
|
| | - 0ms: child printf
|
||||||
|
| - 0ms: another parent printf
|
||||||
|
`
|
||||||
|
if result != resultExpected {
|
||||||
|
t.Fatalf("unexpected result\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestZeroJSONDurationInTrace(t *testing.T) {
|
||||||
|
s := `{"duration_msec":123,"message":"parent","children":[` +
|
||||||
|
`{"duration_msec":0,"message":"first_line"},` +
|
||||||
|
`{"duration_msec":434,"message":"child","children":[` +
|
||||||
|
`{"duration_msec":343,"message":"foo"}]},` +
|
||||||
|
`{"duration_msec":0,"message":"last_line"}]}`
|
||||||
|
result := zeroJSONDurationsInTrace(s)
|
||||||
|
resultExpected := `{"duration_msec":0,"message":"parent","children":[` +
|
||||||
|
`{"duration_msec":0,"message":"first_line"},` +
|
||||||
|
`{"duration_msec":0,"message":"child","children":[` +
|
||||||
|
`{"duration_msec":0,"message":"foo"}]},` +
|
||||||
|
`{"duration_msec":0,"message":"last_line"}]}`
|
||||||
|
if result != resultExpected {
|
||||||
|
t.Fatalf("unexpected result\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func areEqualTracesSkipDuration(s1, s2 string) bool {
|
||||||
|
s1 = zeroDurationsInTrace(s1)
|
||||||
|
s2 = zeroDurationsInTrace(s2)
|
||||||
|
return s1 == s2
|
||||||
|
}
|
||||||
|
|
||||||
|
func zeroDurationsInTrace(s string) string {
|
||||||
|
return skipDurationRe.ReplaceAllString(s, " 0ms: ")
|
||||||
|
}
|
||||||
|
|
||||||
|
var skipDurationRe = regexp.MustCompile(" [0-9.]+ms: ")
|
||||||
|
|
||||||
|
func areEqualJSONTracesSkipDuration(s1, s2 string) bool {
|
||||||
|
s1 = zeroJSONDurationsInTrace(s1)
|
||||||
|
s2 = zeroJSONDurationsInTrace(s2)
|
||||||
|
return s1 == s2
|
||||||
|
}
|
||||||
|
|
||||||
|
func zeroJSONDurationsInTrace(s string) string {
|
||||||
|
return skipJSONDurationRe.ReplaceAllString(s, `"duration_msec":0`)
|
||||||
|
}
|
||||||
|
|
||||||
|
var skipJSONDurationRe = regexp.MustCompile(`"duration_msec":[0-9.]+`)
|
|
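These tests pin down the two trace renderings: the indented plain-text tree returned by `String` and the nested JSON returned by `ToJSON`. A minimal sketch of the same API as seen from an external package (the `main` wrapper and the messages are illustrative, not part of this commit):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)

func main() {
	qt := querytracer.New(true) // enabled tracer, as in the tests above

	qt.Printf("start request")
	qtChild := qt.NewChild() // nested span
	qtChild.Printf("process sub-task")
	qtChild.Donef("sub-task") // closes the child span with its summary message

	qt.Donef("request") // closes the root span

	fmt.Println(qt.String()) // the `- 0ms: ...` tree asserted in the tests above
	fmt.Println(qt.ToJSON()) // {"duration_msec":...,"message":...,"children":[...]}
}
```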
@@ -21,6 +21,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
 	"github.com/VictoriaMetrics/fastcache"
@@ -1622,7 +1623,7 @@ func (db *indexDB) DeleteTSIDs(tfss []*TagFilters) (int, error) {
 		MaxTimestamp: (1 << 63) - 1,
 	}
 	is := db.getIndexSearch(noDeadline)
-	metricIDs, err := is.searchMetricIDs(tfss, tr, 2e9)
+	metricIDs, err := is.searchMetricIDs(nil, tfss, tr, 2e9)
 	db.putIndexSearch(is)
 	if err != nil {
 		return 0, err
@@ -1713,7 +1714,7 @@ func (is *indexSearch) loadDeletedMetricIDs() (*uint64set.Set, error) {
 }

 // searchTSIDs returns sorted tsids matching the given tfss over the given tr.
-func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) {
+func (db *indexDB) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) {
 	if len(tfss) == 0 {
 		return nil, nil
 	}
@@ -1727,13 +1728,14 @@ func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
 	tfKeyBuf.B = marshalTagFiltersKey(tfKeyBuf.B[:0], tfss, tr, true)
 	tsids, ok := db.getFromTagFiltersCache(tfKeyBuf.B)
 	if ok {
-		// Fast path - tsids found in the cache.
+		// Fast path - tsids found in the cache
+		qt.Printf("found %d matching series ids in the cache; they occupy %d bytes of memory", len(tsids), memorySizeForTSIDs(tsids))
 		return tsids, nil
 	}

 	// Slow path - search for tsids in the db and extDB.
 	is := db.getIndexSearch(deadline)
-	localTSIDs, err := is.searchTSIDs(tfss, tr, maxMetrics)
+	localTSIDs, err := is.searchTSIDs(qt, tfss, tr, maxMetrics)
 	db.putIndexSearch(is)
 	if err != nil {
 		return nil, err
@@ -1752,7 +1754,7 @@ func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
 			return
 		}
 		is := extDB.getIndexSearch(deadline)
-		extTSIDs, err = is.searchTSIDs(tfss, tr, maxMetrics)
+		extTSIDs, err = is.searchTSIDs(qt, tfss, tr, maxMetrics)
 		extDB.putIndexSearch(is)

 		sort.Slice(extTSIDs, func(i, j int) bool { return extTSIDs[i].Less(&extTSIDs[j]) })
@@ -1769,13 +1771,19 @@ func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
 	// Sort the found tsids, since they must be passed to TSID search
 	// in the sorted order.
 	sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) })
+	qt.Printf("sort the found %d series ids", len(tsids))

 	// Store TSIDs in the cache.
 	db.putToTagFiltersCache(tsids, tfKeyBuf.B)
+	qt.Printf("store the found %d series ids in cache; they occupy %d bytes of memory", len(tsids), memorySizeForTSIDs(tsids))

 	return tsids, err
 }

+func memorySizeForTSIDs(tsids []TSID) int {
+	return len(tsids) * int(unsafe.Sizeof(TSID{}))
+}
+
 var tagFiltersKeyBufPool bytesutil.ByteBufferPool

 func (is *indexSearch) getTSIDByMetricName(dst *TSID, metricName []byte) error {
@@ -1900,7 +1908,7 @@ func (is *indexSearch) containsTimeRange(tr TimeRange) (bool, error) {
 	return true, nil
 }

-func (is *indexSearch) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]TSID, error) {
+func (is *indexSearch) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]TSID, error) {
 	ok, err := is.containsTimeRange(tr)
 	if err != nil {
 		return nil, err
@@ -1909,7 +1917,7 @@ func (is *indexSearch) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics
 		// Fast path - the index doesn't contain data for the given tr.
 		return nil, nil
 	}
-	metricIDs, err := is.searchMetricIDs(tfss, tr, maxMetrics)
+	metricIDs, err := is.searchMetricIDs(qt, tfss, tr, maxMetrics)
 	if err != nil {
 		return nil, err
 	}
@@ -1954,6 +1962,7 @@ func (is *indexSearch) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics
 		i++
 	}
 	tsids = tsids[:i]
+	qt.Printf("load %d series ids from %d metric ids", len(tsids), len(metricIDs))

 	// Do not sort the found tsids, since they will be sorted later.
 	return tsids, nil
@@ -2186,17 +2195,19 @@ func matchTagFilters(mn *MetricName, tfs []*tagFilter, kb *bytesutil.ByteBuffer)
 	return true, nil
 }

-func (is *indexSearch) searchMetricIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]uint64, error) {
+func (is *indexSearch) searchMetricIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]uint64, error) {
 	metricIDs, err := is.searchMetricIDsInternal(tfss, tr, maxMetrics)
 	if err != nil {
 		return nil, err
 	}
+	qt.Printf("found %d matching metric ids", metricIDs.Len())
 	if metricIDs.Len() == 0 {
 		// Nothing found
 		return nil, nil
 	}

 	sortedMetricIDs := metricIDs.AppendTo(nil)
+	qt.Printf("sort %d matching metric ids", len(sortedMetricIDs))

 	// Filter out deleted metricIDs.
 	dmis := is.db.s.getDeletedMetricIDs()
@@ -2207,6 +2218,7 @@ func (is *indexSearch) searchMetricIDs(tfss []*TagFilters, tr TimeRange, maxMetr
 				metricIDsFiltered = append(metricIDsFiltered, metricID)
 			}
 		}
+		qt.Printf("%d metric ids after removing deleted metric ids", len(metricIDsFiltered))
 		sortedMetricIDs = metricIDsFiltered
 	}

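Note how the tracer is threaded through as the first argument while internal callers and the tests below simply pass `nil`. That only works if every `Tracer` method is a no-op on a nil receiver; a sketch of that presumed pattern, using a simplified stand-in type rather than the actual `lib/querytracer` source:

```go
package querytracer

import "fmt"

// Tracer is a simplified stand-in for the real type in lib/querytracer.
type Tracer struct {
	messages []string
}

// Printf records a message in the current span. It is a no-op on a nil
// receiver, so call sites never need nil checks (presumed pattern; see
// lib/querytracer for the real implementation).
func (qt *Tracer) Printf(format string, args ...interface{}) {
	if qt == nil {
		return
	}
	qt.messages = append(qt.messages, fmt.Sprintf(format, args...))
}
```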
@@ -829,7 +829,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	if err := tfs.Add(nil, nil, true, false); err != nil {
 		return fmt.Errorf("cannot add no-op negative filter: %w", err)
 	}
-	tsidsFound, err := db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsFound, err := db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search by exact tag filter: %w", err)
 	}
@@ -838,7 +838,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	}

 	// Verify tag cache.
-	tsidsCached, err := db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsCached, err := db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search by exact tag filter: %w", err)
 	}
@@ -850,7 +850,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	if err := tfs.Add(nil, mn.MetricGroup, true, false); err != nil {
 		return fmt.Errorf("cannot add negative filter for zeroing search results: %w", err)
 	}
-	tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search by exact tag filter with full negative: %w", err)
 	}
@@ -868,7 +868,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	if err := tfs.Add(nil, []byte(re), false, true); err != nil {
 		return fmt.Errorf("cannot create regexp tag filter for Graphite wildcard")
 	}
-	tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search by regexp tag filter for Graphite wildcard: %w", err)
 	}
@@ -885,7 +885,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	if err := tfs.Add([]byte("non-existent-tag"), []byte("foo|"), false, true); err != nil {
 		return fmt.Errorf("cannot create regexp tag filter for non-existing tag: %w", err)
 	}
-	tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search with a filter matching empty tag: %w", err)
 	}
@@ -905,7 +905,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	if err := tfs.Add([]byte("non-existent-tag2"), []byte("bar|"), false, true); err != nil {
 		return fmt.Errorf("cannot create regexp tag filter for non-existing tag2: %w", err)
 	}
-	tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search with multiple filters matching empty tags: %w", err)
 	}
@@ -933,7 +933,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	if err := tfs.Add(nil, nil, true, true); err != nil {
 		return fmt.Errorf("cannot add no-op negative filter with regexp: %w", err)
 	}
-	tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search by regexp tag filter: %w", err)
 	}
@@ -943,7 +943,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	if err := tfs.Add(nil, mn.MetricGroup, true, true); err != nil {
 		return fmt.Errorf("cannot add negative filter for zeroing search results: %w", err)
 	}
-	tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search by regexp tag filter with full negative: %w", err)
 	}
@@ -959,7 +959,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	if err := tfs.Add(nil, mn.MetricGroup, false, true); err != nil {
 		return fmt.Errorf("cannot create tag filter for MetricGroup matching zero results: %w", err)
 	}
-	tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search by non-existing tag filter: %w", err)
 	}
@@ -975,7 +975,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC

 	// Search with empty filter. It should match all the results.
 	tfs.Reset()
-	tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search for common prefix: %w", err)
 	}
@@ -988,7 +988,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	if err := tfs.Add(nil, nil, false, false); err != nil {
 		return fmt.Errorf("cannot create tag filter for empty metricGroup: %w", err)
 	}
-	tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search for empty metricGroup: %w", err)
 	}
@@ -1005,7 +1005,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	if err := tfs2.Add(nil, mn.MetricGroup, false, false); err != nil {
 		return fmt.Errorf("cannot create tag filter for MetricGroup: %w", err)
 	}
-	tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs1, tfs2}, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs1, tfs2}, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search for empty metricGroup: %w", err)
 	}
@@ -1014,7 +1014,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
 	}

 	// Verify empty tfss
-	tsidsFound, err = db.searchTSIDs(nil, tr, 1e5, noDeadline)
+	tsidsFound, err = db.searchTSIDs(nil, nil, tr, 1e5, noDeadline)
 	if err != nil {
 		return fmt.Errorf("cannot search for nil tfss: %w", err)
 	}
@@ -1743,7 +1743,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
 		MinTimestamp: int64(now - 2*msecPerHour - 1),
 		MaxTimestamp: int64(now),
 	}
-	matchedTSIDs, err := db.searchTSIDs([]*TagFilters{tfs}, tr, 10000, noDeadline)
+	matchedTSIDs, err := db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 10000, noDeadline)
 	if err != nil {
 		t.Fatalf("error searching tsids: %v", err)
 	}
@@ -1757,7 +1757,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
 		MaxTimestamp: int64(now),
 	}

-	matchedTSIDs, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 10000, noDeadline)
+	matchedTSIDs, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 10000, noDeadline)
 	if err != nil {
 		t.Fatalf("error searching tsids: %v", err)
 	}
@@ -157,7 +157,7 @@ func BenchmarkHeadPostingForMatchers(b *testing.B) {
 		MaxTimestamp: timestampFromTime(time.Now()),
 	}
 	for i := 0; i < b.N; i++ {
-		metricIDs, err := is.searchMetricIDs(tfss, tr, 2e9)
+		metricIDs, err := is.searchMetricIDs(nil, tfss, tr, 2e9)
 		if err != nil {
 			b.Fatalf("unexpected error in searchMetricIDs: %s", err)
 		}
@@ -3,11 +3,13 @@ package storage
 import (
 	"fmt"
 	"io"
+	"strings"

 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter"
 )
@@ -138,7 +140,9 @@ func (s *Search) reset() {
 // MustClose must be called when the search is done.
 //
 // Init returns the upper bound on the number of found time series.
-func (s *Search) Init(storage *Storage, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) int {
+func (s *Search) Init(qt *querytracer.Tracer, storage *Storage, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) int {
+	qt = qt.NewChild()
+	defer qt.Donef("init series search: filters=%s, timeRange=%s", tfss, &tr)
 	if s.needClosing {
 		logger.Panicf("BUG: missing MustClose call before the next call to Init")
 	}
@@ -149,15 +153,15 @@ func (s *Search) Init(storage *Storage, tfss []*TagFilters, tr TimeRange, maxMet
 	s.deadline = deadline
 	s.needClosing = true

-	tsids, err := storage.searchTSIDs(tfss, tr, maxMetrics, deadline)
+	tsids, err := storage.searchTSIDs(qt, tfss, tr, maxMetrics, deadline)
 	if err == nil {
-		err = storage.prefetchMetricNames(tsids, deadline)
+		err = storage.prefetchMetricNames(qt, tsids, deadline)
 	}
 	// It is ok to call Init on error from storage.searchTSIDs.
 	// Init must be called before returning because it will fail
 	// on Search.MustClose otherwise.
 	s.ts.Init(storage.tb, tsids, tr)
+	qt.Printf("search for parts with data for %d series", len(tsids))
 	if err != nil {
 		s.err = err
 		return 0
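The `NewChild`/`Donef` pair at the top of `Search.Init` is the instrumentation idiom repeated across this commit: each traced function opens a child span on entry and closes it with a summary on return, so the spans nest into the tree shown in the tests above. A sketch of the idiom applied to a hypothetical function:

```go
package example

import "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"

// processQuery is hypothetical; only the tracing pattern mirrors the commit.
func processQuery(qt *querytracer.Tracer, query string) error {
	// Open a child span for this function and close it on return.
	qt = qt.NewChild()
	defer qt.Donef("process query=%q", query)

	qt.Printf("validate query")
	// ... real work would happen here ...
	return nil
}
```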
@@ -260,9 +264,24 @@ type TagFilter struct {

 // String returns string representation of tf.
 func (tf *TagFilter) String() string {
-	var bb bytesutil.ByteBuffer
-	fmt.Fprintf(&bb, "{Key=%q, Value=%q, IsNegative: %v, IsRegexp: %v}", tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp)
-	return string(bb.B)
+	op := tf.getOp()
+	if len(tf.Key) == 0 {
+		return fmt.Sprintf("__name__%s%q", op, tf.Value)
+	}
+	return fmt.Sprintf("%s%s%q", tf.Key, op, tf.Value)
+}
+
+func (tf *TagFilter) getOp() string {
+	if tf.IsNegative {
+		if tf.IsRegexp {
+			return "!~"
+		}
+		return "!="
+	}
+	if tf.IsRegexp {
+		return "=~"
+	}
+	return "="
 }

 // Marshal appends marshaled tf to dst and returns the result.
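With `getOp` in place, `TagFilter.String()` renders filters in compact, PromQL-like selector syntax instead of the verbose `{Key=..., Value=...}` form, and an empty key is printed as `__name__`. Expected behavior as a Go example sketch (values are illustrative, and the example is assumed to sit in the `storage` package):

```go
package storage

import "fmt"

func ExampleTagFilter_String() {
	tf := TagFilter{Key: []byte("job"), Value: []byte("node.*"), IsRegexp: true}
	fmt.Println(tf.String()) // job=~"node.*"

	tfName := TagFilter{Value: []byte("up")} // empty Key denotes the metric name
	fmt.Println(tfName.String())             // __name__="up"
}
```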
@@ -325,17 +344,19 @@ func (tf *TagFilter) Unmarshal(src []byte) ([]byte, error) {

 // String returns string representation of the search query.
 func (sq *SearchQuery) String() string {
-	var bb bytesutil.ByteBuffer
-	fmt.Fprintf(&bb, "MinTimestamp=%s, MaxTimestamp=%s, TagFilters=[\n",
-		timestampToTime(sq.MinTimestamp), timestampToTime(sq.MaxTimestamp))
-	for _, tagFilters := range sq.TagFilterss {
-		for _, tf := range tagFilters {
-			fmt.Fprintf(&bb, "%s", tf.String())
-		}
-		fmt.Fprintf(&bb, "\n")
+	a := make([]string, len(sq.TagFilterss))
+	for i, tfs := range sq.TagFilterss {
+		a[i] = tagFiltersToString(tfs)
 	}
-	fmt.Fprintf(&bb, "]")
-	return string(bb.B)
+	return fmt.Sprintf("filters=%s, timeRange=[%d..%d]", a, sq.MinTimestamp, sq.MaxTimestamp)
+}
+
+func tagFiltersToString(tfs []TagFilter) string {
+	a := make([]string, len(tfs))
+	for i, tf := range tfs {
+		a[i] = tf.String()
+	}
+	return "{" + strings.Join(a, ",") + "}"
 }

 // Marshal appends marshaled sq to dst and returns the result.
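`SearchQuery.String()` now composes the same selector syntax, one `{...}` group per filter set, so a query renders as a single line matching the `filters=..., timeRange=[start..end]` wording used by the new trace messages. A sketch with illustrative values (again assumed to live in the `storage` package):

```go
package storage

import "fmt"

func ExampleSearchQuery_String() {
	sq := SearchQuery{
		MinTimestamp: 1654034340000,
		MaxTimestamp: 1654037880000,
		TagFilterss: [][]TagFilter{
			{{Key: []byte("job"), Value: []byte("node.*"), IsRegexp: true}},
		},
	}
	fmt.Println(sq.String())
	// filters=[{job=~"node.*"}], timeRange=[1654034340000..1654037880000]
}
```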
@@ -202,7 +202,7 @@ func testSearchInternal(st *Storage, tr TimeRange, mrs []MetricRow, accountsCoun
 	}

 	// Search
-	s.Init(st, []*TagFilters{tfs}, tr, 1e5, noDeadline)
+	s.Init(nil, st, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 	var mbs []metricBlock
 	for s.NextMetricBlock() {
 		var b Block
@@ -24,6 +24,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/snapshot"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
@@ -1059,12 +1060,17 @@ func nextRetentionDuration(retentionMsecs int64) time.Duration {
 }

 // SearchMetricNames returns metric names matching the given tfss on the given tr.
-func (s *Storage) SearchMetricNames(tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]MetricName, error) {
-	tsids, err := s.searchTSIDs(tfss, tr, maxMetrics, deadline)
+func (s *Storage) SearchMetricNames(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]MetricName, error) {
+	qt = qt.NewChild()
+	defer qt.Donef("search for matching metric names")
+	tsids, err := s.searchTSIDs(qt, tfss, tr, maxMetrics, deadline)
 	if err != nil {
 		return nil, err
 	}
-	if err = s.prefetchMetricNames(tsids, deadline); err != nil {
+	if len(tsids) == 0 {
+		return nil, nil
+	}
+	if err = s.prefetchMetricNames(qt, tsids, deadline); err != nil {
 		return nil, err
 	}
 	idb := s.idb()
@@ -1097,7 +1103,9 @@ func (s *Storage) SearchMetricNames(tfss []*TagFilters, tr TimeRange, maxMetrics
 }

 // searchTSIDs returns sorted TSIDs for the given tfss and the given tr.
-func (s *Storage) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) {
+func (s *Storage) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) {
+	qt = qt.NewChild()
+	defer qt.Donef("search for matching series ids")
 	// Do not cache tfss -> tsids here, since the caching is performed
 	// on idb level.

@@ -1118,6 +1126,7 @@ func (s *Storage) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
 	t := timerpool.Get(timeout)
 	select {
 	case searchTSIDsConcurrencyCh <- struct{}{}:
+		qt.Printf("wait in the queue because %d concurrent search requests are already performed", cap(searchTSIDsConcurrencyCh))
 		timerpool.Put(t)
 	case <-t.C:
 		timerpool.Put(t)
@@ -1126,7 +1135,7 @@ func (s *Storage) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
 			cap(searchTSIDsConcurrencyCh), timeout.Seconds())
 		}
 	}
-	tsids, err := s.idb().searchTSIDs(tfss, tr, maxMetrics, deadline)
+	tsids, err := s.idb().searchTSIDs(qt, tfss, tr, maxMetrics, deadline)
 	<-searchTSIDsConcurrencyCh
 	if err != nil {
 		return nil, fmt.Errorf("error when searching tsids: %w", err)
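The select over `searchTSIDsConcurrencyCh` that gains a `qt.Printf` above is a bounded-concurrency gate: acquire one of `cap(ch)` slots or give up after a timeout. A simplified sketch of the pattern (the real code reuses timers via `timerpool` and reports the wait through the tracer):

```go
package example

import (
	"fmt"
	"time"
)

// acquireSlot blocks until a slot in sem frees up or timeout elapses.
// An acquired slot is released later with <-sem.
func acquireSlot(sem chan struct{}, timeout time.Duration) error {
	select {
	case sem <- struct{}{}:
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("more than %d concurrent searches are running for %s", cap(sem), timeout)
	}
}
```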
@@ -1144,8 +1153,11 @@ var (
 // prefetchMetricNames pre-fetches metric names for the given tsids into metricID->metricName cache.
 //
 // This should speed-up further searchMetricNameWithCache calls for metricIDs from tsids.
-func (s *Storage) prefetchMetricNames(tsids []TSID, deadline uint64) error {
+func (s *Storage) prefetchMetricNames(qt *querytracer.Tracer, tsids []TSID, deadline uint64) error {
+	qt = qt.NewChild()
+	defer qt.Donef("prefetch metric names for %d series ids", len(tsids))
 	if len(tsids) == 0 {
+		qt.Printf("nothing to prefetch")
 		return nil
 	}
 	var metricIDs uint64Sorter
@@ -1158,8 +1170,10 @@ func (s *Storage) prefetchMetricNames(tsids []TSID, deadline uint64) error {
 		}
 		metricIDs = append(metricIDs, metricID)
 	}
+	qt.Printf("%d out of %d metric names must be pre-fetched", len(metricIDs), len(tsids))
 	if len(metricIDs) < 500 {
 		// It is cheaper to skip pre-fetching and obtain metricNames inline.
+		qt.Printf("skip pre-fetching metric names for low number of metric ids=%d", len(metricIDs))
 		return nil
 	}
 	atomic.AddUint64(&s.slowMetricNameLoads, uint64(len(metricIDs)))
@@ -1206,6 +1220,7 @@ func (s *Storage) prefetchMetricNames(tsids []TSID, deadline uint64) error {
 	if err != nil {
 		return err
 	}
+	qt.Printf("pre-fetch metric names for %d metric ids", len(metricIDs))

 	// Store the pre-fetched metricIDs, so they aren't pre-fetched next time.
 	s.prefetchedMetricIDsLock.Lock()
@@ -1224,6 +1239,7 @@ func (s *Storage) prefetchMetricNames(tsids []TSID, deadline uint64) error {
 	}
 	s.prefetchedMetricIDs.Store(prefetchedMetricIDsNew)
 	s.prefetchedMetricIDsLock.Unlock()
+	qt.Printf("cache metric ids for pre-fetched metric names")
 	return nil
 }

@@ -635,7 +635,7 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error {
 	metricBlocksCount := func(tfs *TagFilters) int {
 		// Verify the number of blocks
 		n := 0
-		sr.Init(s, []*TagFilters{tfs}, tr, 1e5, noDeadline)
+		sr.Init(nil, s, []*TagFilters{tfs}, tr, 1e5, noDeadline)
 		for sr.NextMetricBlock() {
 			n++
 		}
@@ -854,7 +854,7 @@ func testStorageRegisterMetricNames(s *Storage) error {
 	if err := tfs.Add([]byte("add_id"), []byte("0"), false, false); err != nil {
 		return fmt.Errorf("unexpected error in TagFilters.Add: %w", err)
 	}
-	mns, err := s.SearchMetricNames([]*TagFilters{tfs}, tr, metricsPerAdd*addsCount*100+100, noDeadline)
+	mns, err := s.SearchMetricNames(nil, []*TagFilters{tfs}, tr, metricsPerAdd*addsCount*100+100, noDeadline)
 	if err != nil {
 		return fmt.Errorf("error in SearchMetricNames: %w", err)
 	}
@@ -26,9 +26,7 @@ type TimeRange struct {
 }

 func (tr *TimeRange) String() string {
-	minTime := timestampToTime(tr.MinTimestamp)
-	maxTime := timestampToTime(tr.MaxTimestamp)
-	return fmt.Sprintf("[%s - %s]", minTime, maxTime)
+	return fmt.Sprintf("[%d..%d]", tr.MinTimestamp, tr.MaxTimestamp)
 }

 // timestampToPartitionName returns partition name for the given timestamp.
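`TimeRange.String()` switches from human-readable times to raw millisecond timestamps, so it matches the `timeRange=[start..end]` notation used by `SearchQuery.String()` and the trace messages above. For example (a Go example sketch, assumed to sit in the `storage` package):

```go
package storage

import "fmt"

func ExampleTimeRange_String() {
	tr := TimeRange{MinTimestamp: 1654034340000, MaxTimestamp: 1654037880000}
	fmt.Println(tr.String())
	// [1654034340000..1654037880000]
}
```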