Aliaksandr Valialkin 2022-06-01 02:31:40 +03:00
parent 386f6110ec
commit afced37c0b
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
43 changed files with 2242 additions and 1065 deletions

View file

@ -525,7 +525,7 @@ There are following non-required `replay` flags:
(rules which depend on each other) rules. It is expected, that remote storage will be able to persist
previously accepted data during the delay, so data will be available for the subsequent queries.
Keep it equal or bigger than `-remoteWrite.flushInterval`.
* `replay.disableProgressBar` - whether to disable progress bar which shows progress work.
* `-replay.disableProgressBar` - whether to disable the progress bar which shows the progress of the work.
Progress bar may generate a lot of log records, which are not formatted by the standard VictoriaMetrics logger.
It could break log parsing by external systems and generate additional load on them.

View file

@ -214,7 +214,7 @@ func MetricsIndexHandler(startTime time.Time, at *auth.Token, w http.ResponseWri
}
jsonp := r.FormValue("jsonp")
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
metricNames, isPartial, err := netstorage.GetLabelValues(at, denyPartialResponse, "__name__", deadline)
metricNames, isPartial, err := netstorage.GetLabelValues(nil, at, denyPartialResponse, "__name__", deadline)
if err != nil {
return fmt.Errorf(`cannot obtain metric names: %w`, err)
}
@ -236,7 +236,7 @@ func metricsFind(at *auth.Token, denyPartialResponse bool, tr storage.TimeRange,
n := strings.IndexAny(qTail, "*{[")
if n < 0 {
query := qHead + qTail
suffixes, isPartial, err := netstorage.GetTagValueSuffixes(at, denyPartialResponse, tr, label, query, delimiter, deadline)
suffixes, isPartial, err := netstorage.GetTagValueSuffixes(nil, at, denyPartialResponse, tr, label, query, delimiter, deadline)
if err != nil {
return nil, false, err
}
@ -256,7 +256,7 @@ func metricsFind(at *auth.Token, denyPartialResponse bool, tr storage.TimeRange,
}
if n == len(qTail)-1 && strings.HasSuffix(qTail, "*") {
query := qHead + qTail[:len(qTail)-1]
suffixes, isPartial, err := netstorage.GetTagValueSuffixes(at, denyPartialResponse, tr, label, query, delimiter, deadline)
suffixes, isPartial, err := netstorage.GetTagValueSuffixes(nil, at, denyPartialResponse, tr, label, query, delimiter, deadline)
if err != nil {
return nil, false, err
}

View file

@ -55,7 +55,7 @@ func TagsDelSeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWr
}
tfss := joinTagFilterss(tfs, etfs)
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, 0, ct, tfss, 0)
n, err := netstorage.DeleteSeries(at, sq, deadline)
n, err := netstorage.DeleteSeries(nil, at, sq, deadline)
if err != nil {
return fmt.Errorf("cannot delete series for %q: %w", sq, err)
}
@ -135,7 +135,7 @@ func registerMetrics(startTime time.Time, at *auth.Token, w http.ResponseWriter,
mr.MetricNameRaw = storage.MarshalMetricNameRaw(mr.MetricNameRaw[:0], at.AccountID, at.ProjectID, labels)
mr.Timestamp = ct
}
if err := netstorage.RegisterMetricNames(at, mrs, deadline); err != nil {
if err := netstorage.RegisterMetricNames(nil, at, mrs, deadline); err != nil {
return fmt.Errorf("cannot register paths: %w", err)
}
@ -193,7 +193,7 @@ func TagsAutoCompleteValuesHandler(startTime time.Time, at *auth.Token, w http.R
// Escape special chars in tagPrefix as Graphite does.
// See https://github.com/graphite-project/graphite-web/blob/3ad279df5cb90b211953e39161df416e54a84948/webapp/graphite/tags/base.py#L228
filter := regexp.QuoteMeta(valuePrefix)
tagValues, isPartial, err = netstorage.GetGraphiteTagValues(at, denyPartialResponse, tag, filter, limit, deadline)
tagValues, isPartial, err = netstorage.GetGraphiteTagValues(nil, at, denyPartialResponse, tag, filter, limit, deadline)
if err != nil {
return err
}
@ -203,7 +203,7 @@ func TagsAutoCompleteValuesHandler(startTime time.Time, at *auth.Token, w http.R
if err != nil {
return err
}
mns, isPartialResponse, err := netstorage.SearchMetricNames(at, denyPartialResponse, sq, deadline)
mns, isPartialResponse, err := netstorage.SearchMetricNames(nil, at, denyPartialResponse, sq, deadline)
if err != nil {
return fmt.Errorf("cannot fetch metric names for %q: %w", sq, err)
}
@ -282,7 +282,7 @@ func TagsAutoCompleteTagsHandler(startTime time.Time, at *auth.Token, w http.Res
// Escape special chars in tagPrefix as Graphite does.
// See https://github.com/graphite-project/graphite-web/blob/3ad279df5cb90b211953e39161df416e54a84948/webapp/graphite/tags/base.py#L181
filter := regexp.QuoteMeta(tagPrefix)
labels, isPartial, err = netstorage.GetGraphiteTags(at, denyPartialResponse, filter, limit, deadline)
labels, isPartial, err = netstorage.GetGraphiteTags(nil, at, denyPartialResponse, filter, limit, deadline)
if err != nil {
return err
}
@ -292,7 +292,7 @@ func TagsAutoCompleteTagsHandler(startTime time.Time, at *auth.Token, w http.Res
if err != nil {
return err
}
mns, isPartialResponse, err := netstorage.SearchMetricNames(at, denyPartialResponse, sq, deadline)
mns, isPartialResponse, err := netstorage.SearchMetricNames(nil, at, denyPartialResponse, sq, deadline)
if err != nil {
return fmt.Errorf("cannot fetch metric names for %q: %w", sq, err)
}
@ -361,7 +361,7 @@ func TagsFindSeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseW
return err
}
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
mns, isPartial, err := netstorage.SearchMetricNames(at, denyPartialResponse, sq, deadline)
mns, isPartial, err := netstorage.SearchMetricNames(nil, at, denyPartialResponse, sq, deadline)
if err != nil {
return fmt.Errorf("cannot fetch metric names for %q: %w", sq, err)
}
@ -422,7 +422,7 @@ func TagValuesHandler(startTime time.Time, at *auth.Token, tagName string, w htt
}
filter := r.FormValue("filter")
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
tagValues, isPartial, err := netstorage.GetGraphiteTagValues(at, denyPartialResponse, tagName, filter, limit, deadline)
tagValues, isPartial, err := netstorage.GetGraphiteTagValues(nil, at, denyPartialResponse, tagName, filter, limit, deadline)
if err != nil {
return err
}
@ -454,7 +454,7 @@ func TagsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *
}
filter := r.FormValue("filter")
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
labels, isPartial, err := netstorage.GetGraphiteTags(at, denyPartialResponse, filter, limit, deadline)
labels, isPartial, err := netstorage.GetGraphiteTags(nil, at, denyPartialResponse, filter, limit, deadline)
if err != nil {
return err
}

View file

@ -26,6 +26,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
@ -146,6 +147,8 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
startTime := time.Now()
defer requestDuration.UpdateDuration(startTime)
tracerEnabled := searchutils.GetBool(r, "trace")
qt := querytracer.New(tracerEnabled)
// Limit the number of concurrent queries.
select {
@ -161,6 +164,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
t := timerpool.Get(d)
select {
case concurrencyCh <- struct{}{}:
qt.Printf("wait in queue because -search.maxConcurrentRequests=%d concurrent requests are executed", *maxConcurrentRequests)
timerpool.Put(t)
defer func() { <-concurrencyCh }()
case <-t.C:
@ -223,7 +227,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
}
switch p.Prefix {
case "select":
return selectHandler(startTime, w, r, p, at)
return selectHandler(qt, startTime, w, r, p, at)
case "delete":
return deleteHandler(startTime, w, r, p, at)
default:
@ -237,7 +241,7 @@ var vmuiFiles embed.FS
var vmuiFileServer = http.FileServer(http.FS(vmuiFiles))
func selectHandler(startTime time.Time, w http.ResponseWriter, r *http.Request, p *httpserver.Path, at *auth.Token) bool {
func selectHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request, p *httpserver.Path, at *auth.Token) bool {
defer func() {
// Count per-tenant cumulative durations and total requests
httpRequests.Get(at).Inc()
@ -290,7 +294,7 @@ func selectHandler(startTime time.Time, w http.ResponseWriter, r *http.Request,
labelValuesRequests.Inc()
labelName := s[:len(s)-len("/values")]
httpserver.EnableCORS(w, r)
if err := prometheus.LabelValuesHandler(startTime, at, labelName, w, r); err != nil {
if err := prometheus.LabelValuesHandler(qt, startTime, at, labelName, w, r); err != nil {
labelValuesErrors.Inc()
sendPrometheusError(w, r, err)
return true
@ -319,7 +323,7 @@ func selectHandler(startTime time.Time, w http.ResponseWriter, r *http.Request,
case "prometheus/api/v1/query":
queryRequests.Inc()
httpserver.EnableCORS(w, r)
if err := prometheus.QueryHandler(startTime, at, w, r); err != nil {
if err := prometheus.QueryHandler(qt, startTime, at, w, r); err != nil {
queryErrors.Inc()
sendPrometheusError(w, r, err)
return true
@ -328,7 +332,7 @@ func selectHandler(startTime time.Time, w http.ResponseWriter, r *http.Request,
case "prometheus/api/v1/query_range":
queryRangeRequests.Inc()
httpserver.EnableCORS(w, r)
if err := prometheus.QueryRangeHandler(startTime, at, w, r); err != nil {
if err := prometheus.QueryRangeHandler(qt, startTime, at, w, r); err != nil {
queryRangeErrors.Inc()
sendPrometheusError(w, r, err)
return true
@ -337,7 +341,7 @@ func selectHandler(startTime time.Time, w http.ResponseWriter, r *http.Request,
case "prometheus/api/v1/series":
seriesRequests.Inc()
httpserver.EnableCORS(w, r)
if err := prometheus.SeriesHandler(startTime, at, w, r); err != nil {
if err := prometheus.SeriesHandler(qt, startTime, at, w, r); err != nil {
seriesErrors.Inc()
sendPrometheusError(w, r, err)
return true
@ -355,7 +359,7 @@ func selectHandler(startTime time.Time, w http.ResponseWriter, r *http.Request,
case "prometheus/api/v1/labels":
labelsRequests.Inc()
httpserver.EnableCORS(w, r)
if err := prometheus.LabelsHandler(startTime, at, w, r); err != nil {
if err := prometheus.LabelsHandler(qt, startTime, at, w, r); err != nil {
labelsErrors.Inc()
sendPrometheusError(w, r, err)
return true

View file

@ -25,6 +25,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/syncwg"
"github.com/VictoriaMetrics/metrics"
@ -196,7 +197,8 @@ var resultPool sync.Pool
// Data processing is immediately stopped if f returns non-nil error.
//
// rss becomes unusable after the call to RunParallel.
func (rss *Results) RunParallel(f func(rs *Result, workerID uint) error) error {
func (rss *Results) RunParallel(qt *querytracer.Tracer, f func(rs *Result, workerID uint) error) error {
qt = qt.NewChild()
defer func() {
putTmpBlocksFile(rss.tbf)
rss.tbf = nil
@ -261,6 +263,7 @@ func (rss *Results) RunParallel(f func(rs *Result, workerID uint) error) error {
close(workCh)
}
workChsWG.Wait()
qt.Donef("parallel process of fetched data: series=%d, samples=%d", seriesProcessedTotal, rowsProcessedTotal)
return firstErr
}
@ -604,7 +607,9 @@ func (sbh *sortBlocksHeap) Pop() interface{} {
}
// RegisterMetricNames registers metric names from mrs in the storage.
func RegisterMetricNames(at *auth.Token, mrs []storage.MetricRow, deadline searchutils.Deadline) error {
func RegisterMetricNames(qt *querytracer.Tracer, at *auth.Token, mrs []storage.MetricRow, deadline searchutils.Deadline) error {
qt = qt.NewChild()
defer qt.Donef("register metric names")
// Split mrs among available vmstorage nodes.
mrsPerNode := make([][]storage.MetricRow, len(storageNodes))
for _, mr := range mrs {
@ -619,9 +624,9 @@ func RegisterMetricNames(at *auth.Token, mrs []storage.MetricRow, deadline searc
}
// Push mrs to storage nodes in parallel.
snr := startStorageNodesRequest(true, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, true, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.registerMetricNamesRequests.Inc()
err := sn.registerMetricNames(mrsPerNode[idx], deadline)
err := sn.registerMetricNames(qt, mrsPerNode[idx], deadline)
if err != nil {
sn.registerMetricNamesErrors.Inc()
}
@ -640,7 +645,9 @@ func RegisterMetricNames(at *auth.Token, mrs []storage.MetricRow, deadline searc
}
// DeleteSeries deletes time series matching the given sq.
func DeleteSeries(at *auth.Token, sq *storage.SearchQuery, deadline searchutils.Deadline) (int, error) {
func DeleteSeries(qt *querytracer.Tracer, at *auth.Token, sq *storage.SearchQuery, deadline searchutils.Deadline) (int, error) {
qt = qt.NewChild()
defer qt.Donef("delete series: %s", sq)
requestData := sq.Marshal(nil)
// Send the query to all the storage nodes in parallel.
@ -648,9 +655,9 @@ func DeleteSeries(at *auth.Token, sq *storage.SearchQuery, deadline searchutils.
deletedCount int
err error
}
snr := startStorageNodesRequest(true, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, true, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.deleteSeriesRequests.Inc()
deletedCount, err := sn.deleteMetrics(requestData, deadline)
deletedCount, err := sn.deleteMetrics(qt, requestData, deadline)
if err != nil {
sn.deleteSeriesErrors.Inc()
}
@ -677,7 +684,9 @@ func DeleteSeries(at *auth.Token, sq *storage.SearchQuery, deadline searchutils.
}
// GetLabelsOnTimeRange returns labels for the given tr until the given deadline.
func GetLabelsOnTimeRange(at *auth.Token, denyPartialResponse bool, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, bool, error) {
func GetLabelsOnTimeRange(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get labels on timeRange=%s", &tr)
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
@ -686,9 +695,9 @@ func GetLabelsOnTimeRange(at *auth.Token, denyPartialResponse bool, tr storage.T
labels []string
err error
}
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.labelsOnTimeRangeRequests.Inc()
labels, err := sn.getLabelsOnTimeRange(at.AccountID, at.ProjectID, tr, deadline)
labels, err := sn.getLabelsOnTimeRange(qt, at.AccountID, at.ProjectID, tr, deadline)
if err != nil {
sn.labelsOnTimeRangeErrors.Inc()
err = fmt.Errorf("cannot get labels on time range from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -709,12 +718,14 @@ func GetLabelsOnTimeRange(at *auth.Token, denyPartialResponse bool, tr storage.T
labels = append(labels, nr.labels...)
return nil
})
qt.Printf("get %d non-duplicated labels", len(labels))
if err != nil {
return nil, isPartial, fmt.Errorf("cannot fetch labels on time range from vmstorage nodes: %w", err)
}
// Deduplicate labels
labels = deduplicateStrings(labels)
qt.Printf("get %d unique labels after de-duplication", len(labels))
// Substitute "" with "__name__"
for i := range labels {
if labels[i] == "" {
@ -723,15 +734,18 @@ func GetLabelsOnTimeRange(at *auth.Token, denyPartialResponse bool, tr storage.T
}
// Sort labels like Prometheus does
sort.Strings(labels)
qt.Printf("sort %d labels", len(labels))
return labels, isPartial, nil
}
// GetGraphiteTags returns Graphite tags until the given deadline.
func GetGraphiteTags(at *auth.Token, denyPartialResponse bool, filter string, limit int, deadline searchutils.Deadline) ([]string, bool, error) {
func GetGraphiteTags(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, filter string, limit int, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get graphite tags: filter=%s, limit=%d", filter, limit)
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
labels, isPartial, err := GetLabels(at, denyPartialResponse, deadline)
labels, isPartial, err := GetLabels(qt, at, denyPartialResponse, deadline)
if err != nil {
return nil, false, err
}
@ -772,7 +786,9 @@ func hasString(a []string, s string) bool {
}
// GetLabels returns labels until the given deadline.
func GetLabels(at *auth.Token, denyPartialResponse bool, deadline searchutils.Deadline) ([]string, bool, error) {
func GetLabels(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get labels")
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
@ -781,9 +797,9 @@ func GetLabels(at *auth.Token, denyPartialResponse bool, deadline searchutils.De
labels []string
err error
}
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.labelsRequests.Inc()
labels, err := sn.getLabels(at.AccountID, at.ProjectID, deadline)
labels, err := sn.getLabels(qt, at.AccountID, at.ProjectID, deadline)
if err != nil {
sn.labelsErrors.Inc()
err = fmt.Errorf("cannot get labels from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -804,12 +820,14 @@ func GetLabels(at *auth.Token, denyPartialResponse bool, deadline searchutils.De
labels = append(labels, nr.labels...)
return nil
})
qt.Printf("get %d non-duplicated labels from global index", len(labels))
if err != nil {
return nil, isPartial, fmt.Errorf("cannot fetch labels from vmstorage nodes: %w", err)
}
// Deduplicate labels
labels = deduplicateStrings(labels)
qt.Printf("get %d unique labels after de-duplication", len(labels))
// Substitute "" with "__name__"
for i := range labels {
if labels[i] == "" {
@ -818,12 +836,16 @@ func GetLabels(at *auth.Token, denyPartialResponse bool, deadline searchutils.De
}
// Sort labels like Prometheus does
sort.Strings(labels)
qt.Printf("sort %d labels", len(labels))
return labels, isPartial, nil
}
// GetLabelValuesOnTimeRange returns label values for the given labelName on the given tr
// until the given deadline.
func GetLabelValuesOnTimeRange(at *auth.Token, denyPartialResponse bool, labelName string, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, bool, error) {
func GetLabelValuesOnTimeRange(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, labelName string,
tr storage.TimeRange, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get values for label %s on a timeRange %s", labelName, &tr)
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
@ -836,9 +858,9 @@ func GetLabelValuesOnTimeRange(at *auth.Token, denyPartialResponse bool, labelNa
labelValues []string
err error
}
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.labelValuesOnTimeRangeRequests.Inc()
labelValues, err := sn.getLabelValuesOnTimeRange(at.AccountID, at.ProjectID, labelName, tr, deadline)
labelValues, err := sn.getLabelValuesOnTimeRange(qt, at.AccountID, at.ProjectID, labelName, tr, deadline)
if err != nil {
sn.labelValuesOnTimeRangeErrors.Inc()
err = fmt.Errorf("cannot get label values on time range from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -859,26 +881,31 @@ func GetLabelValuesOnTimeRange(at *auth.Token, denyPartialResponse bool, labelNa
labelValues = append(labelValues, nr.labelValues...)
return nil
})
qt.Printf("get %d non-duplicated label values", len(labelValues))
if err != nil {
return nil, isPartial, fmt.Errorf("cannot fetch label values on time range from vmstorage nodes: %w", err)
}
// Deduplicate label values
labelValues = deduplicateStrings(labelValues)
qt.Printf("get %d unique label values after de-duplication", len(labelValues))
// Sort labelValues like Prometheus does
sort.Strings(labelValues)
qt.Printf("sort %d label values", len(labelValues))
return labelValues, isPartial, nil
}
// GetGraphiteTagValues returns tag values for the given tagName until the given deadline.
func GetGraphiteTagValues(at *auth.Token, denyPartialResponse bool, tagName, filter string, limit int, deadline searchutils.Deadline) ([]string, bool, error) {
func GetGraphiteTagValues(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, tagName, filter string, limit int, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get graphite tag values for tagName=%s, filter=%s, limit=%d", tagName, filter, limit)
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
if tagName == "name" {
tagName = ""
}
tagValues, isPartial, err := GetLabelValues(at, denyPartialResponse, tagName, deadline)
tagValues, isPartial, err := GetLabelValues(qt, at, denyPartialResponse, tagName, deadline)
if err != nil {
return nil, false, err
}
@ -896,7 +923,9 @@ func GetGraphiteTagValues(at *auth.Token, denyPartialResponse bool, tagName, fil
// GetLabelValues returns label values for the given labelName
// until the given deadline.
func GetLabelValues(at *auth.Token, denyPartialResponse bool, labelName string, deadline searchutils.Deadline) ([]string, bool, error) {
func GetLabelValues(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, labelName string, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get values for label %s", labelName)
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
@ -909,9 +938,9 @@ func GetLabelValues(at *auth.Token, denyPartialResponse bool, labelName string,
labelValues []string
err error
}
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.labelValuesRequests.Inc()
labelValues, err := sn.getLabelValues(at.AccountID, at.ProjectID, labelName, deadline)
labelValues, err := sn.getLabelValues(qt, at.AccountID, at.ProjectID, labelName, deadline)
if err != nil {
sn.labelValuesErrors.Inc()
err = fmt.Errorf("cannot get label values from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -932,22 +961,27 @@ func GetLabelValues(at *auth.Token, denyPartialResponse bool, labelName string,
labelValues = append(labelValues, nr.labelValues...)
return nil
})
qt.Printf("get %d non-duplicated label values", len(labelValues))
if err != nil {
return nil, isPartial, fmt.Errorf("cannot fetch label values from vmstorage nodes: %w", err)
}
// Deduplicate label values
labelValues = deduplicateStrings(labelValues)
qt.Printf("get %d unique label values after de-duplication", len(labelValues))
// Sort labelValues like Prometheus does
sort.Strings(labelValues)
qt.Printf("sort %d label values", len(labelValues))
return labelValues, isPartial, nil
}
// GetTagValueSuffixes returns tag value suffixes for the given tagKey and the given tagValuePrefix.
//
// It can be used for implementing https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find
func GetTagValueSuffixes(at *auth.Token, denyPartialResponse bool, tr storage.TimeRange, tagKey, tagValuePrefix string,
func GetTagValueSuffixes(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, tr storage.TimeRange, tagKey, tagValuePrefix string,
delimiter byte, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get tag value suffixes for tagKey=%s, tagValuePrefix=%s, timeRange=%s", tagKey, tagValuePrefix, &tr)
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
@ -956,9 +990,9 @@ func GetTagValueSuffixes(at *auth.Token, denyPartialResponse bool, tr storage.Ti
suffixes []string
err error
}
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.tagValueSuffixesRequests.Inc()
suffixes, err := sn.getTagValueSuffixes(at.AccountID, at.ProjectID, tr, tagKey, tagValuePrefix, delimiter, deadline)
suffixes, err := sn.getTagValueSuffixes(qt, at.AccountID, at.ProjectID, tr, tagKey, tagValuePrefix, delimiter, deadline)
if err != nil {
sn.tagValueSuffixesErrors.Inc()
err = fmt.Errorf("cannot get tag value suffixes for tr=%s, tagKey=%q, tagValuePrefix=%q, delimiter=%c from vmstorage %s: %w",
@ -994,7 +1028,9 @@ func GetTagValueSuffixes(at *auth.Token, denyPartialResponse bool, tr storage.Ti
}
// GetLabelEntries returns all the label entries for at until the given deadline.
func GetLabelEntries(at *auth.Token, denyPartialResponse bool, deadline searchutils.Deadline) ([]storage.TagEntry, bool, error) {
func GetLabelEntries(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, deadline searchutils.Deadline) ([]storage.TagEntry, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get label entries")
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
@ -1003,9 +1039,9 @@ func GetLabelEntries(at *auth.Token, denyPartialResponse bool, deadline searchut
labelEntries []storage.TagEntry
err error
}
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.labelEntriesRequests.Inc()
labelEntries, err := sn.getLabelEntries(at.AccountID, at.ProjectID, deadline)
labelEntries, err := sn.getLabelEntries(qt, at.AccountID, at.ProjectID, deadline)
if err != nil {
sn.labelEntriesErrors.Inc()
err = fmt.Errorf("cannot get label entries from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -1029,6 +1065,7 @@ func GetLabelEntries(at *auth.Token, denyPartialResponse bool, deadline searchut
if err != nil {
return nil, isPartial, fmt.Errorf("cannot featch label etnries from vmstorage nodes: %w", err)
}
qt.Printf("get %d label entries before de-duplication", len(labelEntries))
// Substitute "" with "__name__"
for i := range labelEntries {
@ -1040,6 +1077,7 @@ func GetLabelEntries(at *auth.Token, denyPartialResponse bool, deadline searchut
// Deduplicate label entries
labelEntries = deduplicateLabelEntries(labelEntries)
qt.Printf("left %d label entries after de-duplication", len(labelEntries))
// Sort labelEntries by the number of label values in each entry.
sort.Slice(labelEntries, func(i, j int) bool {
@ -1049,6 +1087,7 @@ func GetLabelEntries(at *auth.Token, denyPartialResponse bool, deadline searchut
}
return labelEntries[i].Key > labelEntries[j].Key
})
qt.Printf("sort %d label entries", len(labelEntries))
return labelEntries, isPartial, nil
}
@ -1084,7 +1123,10 @@ func deduplicateStrings(a []string) []string {
}
// GetTSDBStatusForDate returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
func GetTSDBStatusForDate(at *auth.Token, denyPartialResponse bool, deadline searchutils.Deadline, date uint64, topN, maxMetrics int) (*storage.TSDBStatus, bool, error) {
func GetTSDBStatusForDate(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool,
deadline searchutils.Deadline, date uint64, topN, maxMetrics int) (*storage.TSDBStatus, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get tsdb stats for date=%d, topN=%d", date, topN)
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
@ -1093,9 +1135,9 @@ func GetTSDBStatusForDate(at *auth.Token, denyPartialResponse bool, deadline sea
status *storage.TSDBStatus
err error
}
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.tsdbStatusRequests.Inc()
status, err := sn.getTSDBStatusForDate(at.AccountID, at.ProjectID, date, topN, maxMetrics, deadline)
status, err := sn.getTSDBStatusForDate(qt, at.AccountID, at.ProjectID, date, topN, maxMetrics, deadline)
if err != nil {
sn.tsdbStatusErrors.Inc()
err = fmt.Errorf("cannot obtain tsdb status from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -1173,7 +1215,10 @@ func toTopHeapEntries(m map[string]uint64, topN int) []storage.TopHeapEntry {
// GetTSDBStatusWithFilters returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
//
// It accepts aribtrary filters on time series in sq.
func GetTSDBStatusWithFilters(at *auth.Token, denyPartialResponse bool, deadline searchutils.Deadline, sq *storage.SearchQuery, topN int) (*storage.TSDBStatus, bool, error) {
func GetTSDBStatusWithFilters(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool,
deadline searchutils.Deadline, sq *storage.SearchQuery, topN int) (*storage.TSDBStatus, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get tsdb stats: %s, topN=%d", sq, topN)
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
@ -1183,9 +1228,9 @@ func GetTSDBStatusWithFilters(at *auth.Token, denyPartialResponse bool, deadline
status *storage.TSDBStatus
err error
}
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.tsdbStatusWithFiltersRequests.Inc()
status, err := sn.getTSDBStatusWithFilters(requestData, topN, deadline)
status, err := sn.getTSDBStatusWithFilters(qt, requestData, topN, deadline)
if err != nil {
sn.tsdbStatusWithFiltersErrors.Inc()
err = fmt.Errorf("cannot obtain tsdb status with filters from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -1215,7 +1260,9 @@ func GetTSDBStatusWithFilters(at *auth.Token, denyPartialResponse bool, deadline
}
// GetSeriesCount returns the number of unique series for the given at.
func GetSeriesCount(at *auth.Token, denyPartialResponse bool, deadline searchutils.Deadline) (uint64, bool, error) {
func GetSeriesCount(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, deadline searchutils.Deadline) (uint64, bool, error) {
qt = qt.NewChild()
defer qt.Donef("get series count")
if deadline.Exceeded() {
return 0, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
@ -1224,9 +1271,9 @@ func GetSeriesCount(at *auth.Token, denyPartialResponse bool, deadline searchuti
n uint64
err error
}
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.seriesCountRequests.Inc()
n, err := sn.getSeriesCount(at.AccountID, at.ProjectID, deadline)
n, err := sn.getSeriesCount(qt, at.AccountID, at.ProjectID, deadline)
if err != nil {
sn.seriesCountErrors.Inc()
err = fmt.Errorf("cannot get series count from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -1306,7 +1353,10 @@ var metricNamePool = &sync.Pool{
// f is called in parallel from multiple goroutines.
// It is the responsibility of f to call b.UnmarshalData before reading timestamps and values from the block.
// It is the responsibility of f to filter blocks according to the given tr.
func ExportBlocks(at *auth.Token, sq *storage.SearchQuery, deadline searchutils.Deadline, f func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error) error {
func ExportBlocks(qt *querytracer.Tracer, at *auth.Token, sq *storage.SearchQuery, deadline searchutils.Deadline,
f func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error) error {
qt = qt.NewChild()
defer qt.Donef("export blocks: %s", sq)
if deadline.Exceeded() {
return fmt.Errorf("timeout exceeded before starting data export: %s", deadline.String())
}
@ -1316,6 +1366,8 @@ func ExportBlocks(at *auth.Token, sq *storage.SearchQuery, deadline searchutils.
}
var wg syncwg.WaitGroup
var stopped uint32
var blocksRead uint64
var samples uint64
processBlock := func(mb *storage.MetricBlock) error {
wg.Add(1)
defer wg.Done()
@ -1331,13 +1383,16 @@ func ExportBlocks(at *auth.Token, sq *storage.SearchQuery, deadline searchutils.
}
mn.Reset()
metricNamePool.Put(mn)
atomic.AddUint64(&blocksRead, 1)
atomic.AddUint64(&samples, uint64(mb.Block.RowsCount()))
return nil
}
_, err := processSearchQuery(at, true, sq, true, processBlock, deadline)
_, err := processSearchQuery(qt, at, true, sq, true, processBlock, deadline)
// Make sure processBlock isn't called anymore in order to prevent from data races.
atomic.StoreUint32(&stopped, 1)
wg.Wait()
qt.Printf("export blocks=%d, samples=%d", blocksRead, samples)
if err != nil {
return fmt.Errorf("error occured during export: %w", err)
@ -1346,7 +1401,9 @@ func ExportBlocks(at *auth.Token, sq *storage.SearchQuery, deadline searchutils.
}
// SearchMetricNames returns all the metric names matching sq until the given deadline.
func SearchMetricNames(at *auth.Token, denyPartialResponse bool, sq *storage.SearchQuery, deadline searchutils.Deadline) ([]storage.MetricName, bool, error) {
func SearchMetricNames(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, sq *storage.SearchQuery, deadline searchutils.Deadline) ([]storage.MetricName, bool, error) {
qt = qt.NewChild()
defer qt.Donef("fetch metric names: %s", sq)
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting to search metric names: %s", deadline.String())
}
@ -1357,9 +1414,9 @@ func SearchMetricNames(at *auth.Token, denyPartialResponse bool, sq *storage.Sea
metricNames [][]byte
err error
}
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.searchMetricNamesRequests.Inc()
metricNames, err := sn.processSearchMetricNames(requestData, deadline)
metricNames, err := sn.processSearchMetricNames(qt, requestData, deadline)
if err != nil {
sn.searchMetricNamesErrors.Inc()
err = fmt.Errorf("cannot search metric names on vmstorage %s: %w", sn.connPool.Addr(), err)
@ -1402,10 +1459,15 @@ func SearchMetricNames(at *auth.Token, denyPartialResponse bool, sq *storage.Sea
// ProcessSearchQuery performs sq until the given deadline.
//
// Results.RunParallel or Results.Cancel must be called on the returned Results.
func ProcessSearchQuery(at *auth.Token, denyPartialResponse bool, sq *storage.SearchQuery, fetchData bool, deadline searchutils.Deadline) (*Results, bool, error) {
func ProcessSearchQuery(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, sq *storage.SearchQuery,
fetchData bool, deadline searchutils.Deadline) (*Results, bool, error) {
qt = qt.NewChild()
defer qt.Donef("fetch matching series: %s, fetchData=%v", sq, fetchData)
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
// Setup search.
tr := storage.TimeRange{
MinTimestamp: sq.MinTimestamp,
MaxTimestamp: sq.MaxTimestamp,
@ -1416,6 +1478,7 @@ func ProcessSearchQuery(at *auth.Token, denyPartialResponse bool, sq *storage.Se
}
var wg syncwg.WaitGroup
var stopped uint32
var blocksRead uint64
var samples uint64
processBlock := func(mb *storage.MetricBlock) error {
wg.Add(1)
@ -1427,6 +1490,7 @@ func ProcessSearchQuery(at *auth.Token, denyPartialResponse bool, sq *storage.Se
tbfw.RegisterEmptyBlock(mb)
return nil
}
atomic.AddUint64(&blocksRead, 1)
n := atomic.AddUint64(&samples, uint64(mb.Block.RowsCount()))
if *maxSamplesPerQuery > 0 && n > uint64(*maxSamplesPerQuery) {
return fmt.Errorf("cannot select more than -search.maxSamplesPerQuery=%d samples; possible solutions: to increase the -search.maxSamplesPerQuery; to reduce time range for the query; to use more specific label filters in order to select lower number of series", *maxSamplesPerQuery)
@ -1436,7 +1500,7 @@ func ProcessSearchQuery(at *auth.Token, denyPartialResponse bool, sq *storage.Se
}
return nil
}
isPartial, err := processSearchQuery(at, denyPartialResponse, sq, fetchData, processBlock, deadline)
isPartial, err := processSearchQuery(qt, at, denyPartialResponse, sq, fetchData, processBlock, deadline)
// Make sure processBlock isn't called anymore in order to protect from data races.
atomic.StoreUint32(&stopped, 1)
@ -1450,6 +1514,7 @@ func ProcessSearchQuery(at *auth.Token, denyPartialResponse bool, sq *storage.Se
putTmpBlocksFile(tbfw.tbf)
return nil, false, fmt.Errorf("cannot finalize temporary blocks file with %d time series: %w", len(tbfw.m), err)
}
qt.Printf("fetch unique series=%d, blocks=%d, samples=%d, bytes=%d", len(tbfw.m), blocksRead, samples, tbfw.tbf.Len())
var rss Results
rss.at = at
@ -1468,14 +1533,14 @@ func ProcessSearchQuery(at *auth.Token, denyPartialResponse bool, sq *storage.Se
return &rss, isPartial, nil
}
func processSearchQuery(at *auth.Token, denyPartialResponse bool, sq *storage.SearchQuery, fetchData bool,
func processSearchQuery(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, sq *storage.SearchQuery, fetchData bool,
processBlock func(mb *storage.MetricBlock) error, deadline searchutils.Deadline) (bool, error) {
requestData := sq.Marshal(nil)
// Send the query to all the storage nodes in parallel.
snr := startStorageNodesRequest(denyPartialResponse, func(idx int, sn *storageNode) interface{} {
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.searchRequests.Inc()
err := sn.processSearchQuery(requestData, fetchData, processBlock, deadline)
err := sn.processSearchQuery(qt, requestData, fetchData, processBlock, deadline)
if err != nil {
sn.searchErrors.Inc()
err = fmt.Errorf("cannot perform search on vmstorage %s: %w", sn.connPool.Addr(), err)
@ -1499,12 +1564,14 @@ type storageNodesRequest struct {
resultsCh chan interface{}
}
func startStorageNodesRequest(denyPartialResponse bool, f func(idx int, sn *storageNode) interface{}) *storageNodesRequest {
func startStorageNodesRequest(qt *querytracer.Tracer, denyPartialResponse bool, f func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{}) *storageNodesRequest {
resultsCh := make(chan interface{}, len(storageNodes))
for idx, sn := range storageNodes {
qtChild := qt.NewChild()
go func(idx int, sn *storageNode) {
result := f(idx, sn)
result := f(qtChild, idx, sn)
resultsCh <- result
qtChild.Donef("rpc at vmstorage %s", sn.connPool.Addr())
}(idx, sn)
}
return &storageNodesRequest{
@ -1667,17 +1734,17 @@ type storageNode struct {
metricRowsRead *metrics.Counter
}
func (sn *storageNode) registerMetricNames(mrs []storage.MetricRow, deadline searchutils.Deadline) error {
func (sn *storageNode) registerMetricNames(qt *querytracer.Tracer, mrs []storage.MetricRow, deadline searchutils.Deadline) error {
if len(mrs) == 0 {
return nil
}
f := func(bc *handshake.BufferedConn) error {
return sn.registerMetricNamesOnConn(bc, mrs)
}
return sn.execOnConnWithPossibleRetry("registerMetricNames_v2", f, deadline)
return sn.execOnConnWithPossibleRetry(qt, "registerMetricNames_v3", f, deadline)
}
func (sn *storageNode) deleteMetrics(requestData []byte, deadline searchutils.Deadline) (int, error) {
func (sn *storageNode) deleteMetrics(qt *querytracer.Tracer, requestData []byte, deadline searchutils.Deadline) (int, error) {
var deletedCount int
f := func(bc *handshake.BufferedConn) error {
n, err := sn.deleteMetricsOnConn(bc, requestData)
@ -1687,13 +1754,13 @@ func (sn *storageNode) deleteMetrics(requestData []byte, deadline searchutils.De
deletedCount = n
return nil
}
if err := sn.execOnConnWithPossibleRetry("deleteMetrics_v4", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "deleteMetrics_v5", f, deadline); err != nil {
return 0, err
}
return deletedCount, nil
}
func (sn *storageNode) getLabelsOnTimeRange(accountID, projectID uint32, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
func (sn *storageNode) getLabelsOnTimeRange(qt *querytracer.Tracer, accountID, projectID uint32, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
var labels []string
f := func(bc *handshake.BufferedConn) error {
ls, err := sn.getLabelsOnTimeRangeOnConn(bc, accountID, projectID, tr)
@ -1703,13 +1770,13 @@ func (sn *storageNode) getLabelsOnTimeRange(accountID, projectID uint32, tr stor
labels = ls
return nil
}
if err := sn.execOnConnWithPossibleRetry("labelsOnTimeRange_v2", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "labelsOnTimeRange_v3", f, deadline); err != nil {
return nil, err
}
return labels, nil
}
func (sn *storageNode) getLabels(accountID, projectID uint32, deadline searchutils.Deadline) ([]string, error) {
func (sn *storageNode) getLabels(qt *querytracer.Tracer, accountID, projectID uint32, deadline searchutils.Deadline) ([]string, error) {
var labels []string
f := func(bc *handshake.BufferedConn) error {
ls, err := sn.getLabelsOnConn(bc, accountID, projectID)
@ -1719,13 +1786,14 @@ func (sn *storageNode) getLabels(accountID, projectID uint32, deadline searchuti
labels = ls
return nil
}
if err := sn.execOnConnWithPossibleRetry("labels_v3", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "labels_v4", f, deadline); err != nil {
return nil, err
}
return labels, nil
}
func (sn *storageNode) getLabelValuesOnTimeRange(accountID, projectID uint32, labelName string, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
func (sn *storageNode) getLabelValuesOnTimeRange(qt *querytracer.Tracer, accountID, projectID uint32, labelName string,
tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
var labelValues []string
f := func(bc *handshake.BufferedConn) error {
lvs, err := sn.getLabelValuesOnTimeRangeOnConn(bc, accountID, projectID, labelName, tr)
@ -1735,13 +1803,13 @@ func (sn *storageNode) getLabelValuesOnTimeRange(accountID, projectID uint32, la
labelValues = lvs
return nil
}
if err := sn.execOnConnWithPossibleRetry("labelValuesOnTimeRange_v2", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "labelValuesOnTimeRange_v3", f, deadline); err != nil {
return nil, err
}
return labelValues, nil
}
func (sn *storageNode) getLabelValues(accountID, projectID uint32, labelName string, deadline searchutils.Deadline) ([]string, error) {
func (sn *storageNode) getLabelValues(qt *querytracer.Tracer, accountID, projectID uint32, labelName string, deadline searchutils.Deadline) ([]string, error) {
var labelValues []string
f := func(bc *handshake.BufferedConn) error {
lvs, err := sn.getLabelValuesOnConn(bc, accountID, projectID, labelName)
@ -1751,13 +1819,13 @@ func (sn *storageNode) getLabelValues(accountID, projectID uint32, labelName str
labelValues = lvs
return nil
}
if err := sn.execOnConnWithPossibleRetry("labelValues_v3", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "labelValues_v4", f, deadline); err != nil {
return nil, err
}
return labelValues, nil
}
func (sn *storageNode) getTagValueSuffixes(accountID, projectID uint32, tr storage.TimeRange, tagKey, tagValuePrefix string,
func (sn *storageNode) getTagValueSuffixes(qt *querytracer.Tracer, accountID, projectID uint32, tr storage.TimeRange, tagKey, tagValuePrefix string,
delimiter byte, deadline searchutils.Deadline) ([]string, error) {
var suffixes []string
f := func(bc *handshake.BufferedConn) error {
@ -1768,13 +1836,13 @@ func (sn *storageNode) getTagValueSuffixes(accountID, projectID uint32, tr stora
suffixes = ss
return nil
}
if err := sn.execOnConnWithPossibleRetry("tagValueSuffixes_v2", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "tagValueSuffixes_v3", f, deadline); err != nil {
return nil, err
}
return suffixes, nil
}
func (sn *storageNode) getLabelEntries(accountID, projectID uint32, deadline searchutils.Deadline) ([]storage.TagEntry, error) {
func (sn *storageNode) getLabelEntries(qt *querytracer.Tracer, accountID, projectID uint32, deadline searchutils.Deadline) ([]storage.TagEntry, error) {
var tagEntries []storage.TagEntry
f := func(bc *handshake.BufferedConn) error {
tes, err := sn.getLabelEntriesOnConn(bc, accountID, projectID)
@ -1784,13 +1852,14 @@ func (sn *storageNode) getLabelEntries(accountID, projectID uint32, deadline sea
tagEntries = tes
return nil
}
if err := sn.execOnConnWithPossibleRetry("labelEntries_v3", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "labelEntries_v4", f, deadline); err != nil {
return nil, err
}
return tagEntries, nil
}
func (sn *storageNode) getTSDBStatusForDate(accountID, projectID uint32, date uint64, topN, maxMetrics int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
func (sn *storageNode) getTSDBStatusForDate(qt *querytracer.Tracer, accountID, projectID uint32,
date uint64, topN, maxMetrics int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
var status *storage.TSDBStatus
f := func(bc *handshake.BufferedConn) error {
st, err := sn.getTSDBStatusForDateOnConn(bc, accountID, projectID, date, topN, maxMetrics)
@ -1800,13 +1869,13 @@ func (sn *storageNode) getTSDBStatusForDate(accountID, projectID uint32, date ui
status = st
return nil
}
if err := sn.execOnConnWithPossibleRetry("tsdbStatus_v3", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "tsdbStatus_v4", f, deadline); err != nil {
return nil, err
}
return status, nil
}
func (sn *storageNode) getTSDBStatusWithFilters(requestData []byte, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
func (sn *storageNode) getTSDBStatusWithFilters(qt *querytracer.Tracer, requestData []byte, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
var status *storage.TSDBStatus
f := func(bc *handshake.BufferedConn) error {
st, err := sn.getTSDBStatusWithFiltersOnConn(bc, requestData, topN)
@ -1816,13 +1885,13 @@ func (sn *storageNode) getTSDBStatusWithFilters(requestData []byte, topN int, de
status = st
return nil
}
if err := sn.execOnConnWithPossibleRetry("tsdbStatusWithFilters_v2", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "tsdbStatusWithFilters_v3", f, deadline); err != nil {
return nil, err
}
return status, nil
}
func (sn *storageNode) getSeriesCount(accountID, projectID uint32, deadline searchutils.Deadline) (uint64, error) {
func (sn *storageNode) getSeriesCount(qt *querytracer.Tracer, accountID, projectID uint32, deadline searchutils.Deadline) (uint64, error) {
var n uint64
f := func(bc *handshake.BufferedConn) error {
nn, err := sn.getSeriesCountOnConn(bc, accountID, projectID)
@ -1832,13 +1901,13 @@ func (sn *storageNode) getSeriesCount(accountID, projectID uint32, deadline sear
n = nn
return nil
}
if err := sn.execOnConnWithPossibleRetry("seriesCount_v3", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "seriesCount_v4", f, deadline); err != nil {
return 0, err
}
return n, nil
}
func (sn *storageNode) processSearchMetricNames(requestData []byte, deadline searchutils.Deadline) ([][]byte, error) {
func (sn *storageNode) processSearchMetricNames(qt *querytracer.Tracer, requestData []byte, deadline searchutils.Deadline) ([][]byte, error) {
var metricNames [][]byte
f := func(bc *handshake.BufferedConn) error {
mns, err := sn.processSearchMetricNamesOnConn(bc, requestData)
@ -1848,24 +1917,27 @@ func (sn *storageNode) processSearchMetricNames(requestData []byte, deadline sea
metricNames = mns
return nil
}
if err := sn.execOnConnWithPossibleRetry("searchMetricNames_v2", f, deadline); err != nil {
if err := sn.execOnConnWithPossibleRetry(qt, "searchMetricNames_v3", f, deadline); err != nil {
return nil, err
}
return metricNames, nil
}
func (sn *storageNode) processSearchQuery(requestData []byte, fetchData bool, processBlock func(mb *storage.MetricBlock) error, deadline searchutils.Deadline) error {
func (sn *storageNode) processSearchQuery(qt *querytracer.Tracer, requestData []byte, fetchData bool,
processBlock func(mb *storage.MetricBlock) error, deadline searchutils.Deadline) error {
f := func(bc *handshake.BufferedConn) error {
if err := sn.processSearchQueryOnConn(bc, requestData, fetchData, processBlock); err != nil {
return err
}
return nil
}
return sn.execOnConnWithPossibleRetry("search_v5", f, deadline)
return sn.execOnConnWithPossibleRetry(qt, "search_v6", f, deadline)
}
func (sn *storageNode) execOnConnWithPossibleRetry(funcName string, f func(bc *handshake.BufferedConn) error, deadline searchutils.Deadline) error {
err := sn.execOnConn(funcName, f, deadline)
func (sn *storageNode) execOnConnWithPossibleRetry(qt *querytracer.Tracer, funcName string, f func(bc *handshake.BufferedConn) error, deadline searchutils.Deadline) error {
qtChild := qt.NewChild()
err := sn.execOnConn(qtChild, funcName, f, deadline)
qtChild.Donef("rpc call %s()", funcName)
if err == nil {
return nil
}
@ -1876,10 +1948,13 @@ func (sn *storageNode) execOnConnWithPossibleRetry(funcName string, f func(bc *h
return err
}
// Repeat the query in the hope the error was temporary.
return sn.execOnConn(funcName, f, deadline)
qtChild = qt.NewChild()
err = sn.execOnConn(qtChild, funcName, f, deadline)
qtChild.Donef("retry rpc call %s() after error", funcName)
return err
}
func (sn *storageNode) execOnConn(rpcName string, f func(bc *handshake.BufferedConn) error, deadline searchutils.Deadline) error {
func (sn *storageNode) execOnConn(qt *querytracer.Tracer, funcName string, f func(bc *handshake.BufferedConn) error, deadline searchutils.Deadline) error {
sn.concurrentQueries.Inc()
defer sn.concurrentQueries.Dec()
@ -1901,22 +1976,30 @@ func (sn *storageNode) execOnConn(rpcName string, f func(bc *handshake.BufferedC
_ = bc.Close()
logger.Panicf("FATAL: cannot set connection deadline: %s", err)
}
if err := writeBytes(bc, []byte(rpcName)); err != nil {
if err := writeBytes(bc, []byte(funcName)); err != nil {
// Close the connection instead of returning it to the pool,
// since it may be broken.
_ = bc.Close()
return fmt.Errorf("cannot send rpcName=%q to the server: %w", rpcName, err)
return fmt.Errorf("cannot send funcName=%q to the server: %w", funcName, err)
}
// Send query trace flag
traceEnabled := qt.Enabled()
if err := writeBool(bc, traceEnabled); err != nil {
// Close the connection instead of returning it to the pool,
// since it may be broken.
_ = bc.Close()
return fmt.Errorf("cannot send traceEnabled=%v for funcName=%q to the server: %w", traceEnabled, funcName, err)
}
// Send the remaining timeout instead of deadline to remote server, since it may have different time.
timeoutSecs := uint32(timeout.Seconds() + 1)
if err := writeUint32(bc, timeoutSecs); err != nil {
// Close the connection instead of returning it to the pool,
// since it may be broken.
_ = bc.Close()
return fmt.Errorf("cannot send timeout=%d for rpcName=%q to the server: %w", timeout, rpcName, err)
return fmt.Errorf("cannot send timeout=%d for funcName=%q to the server: %w", timeout, funcName, err)
}
// Execute the rpc function.
if err := f(bc); err != nil {
remoteAddr := bc.RemoteAddr()
var er *errRemote
@ -1930,15 +2013,36 @@ func (sn *storageNode) execOnConn(rpcName string, f func(bc *handshake.BufferedC
_ = bc.Close()
}
if deadline.Exceeded() {
return fmt.Errorf("cannot execute rpcName=%q on vmstorage %q with timeout %s: %w", rpcName, remoteAddr, deadline.String(), err)
return fmt.Errorf("cannot execute funcName=%q on vmstorage %q with timeout %s: %w", funcName, remoteAddr, deadline.String(), err)
}
return fmt.Errorf("cannot execute rpcName=%q on vmstorage %q: %w", rpcName, remoteAddr, err)
return fmt.Errorf("cannot execute funcName=%q on vmstorage %q: %w", funcName, remoteAddr, err)
}
// Read trace from the response
bb := traceJSONBufPool.Get()
bb.B, err = readBytes(bb.B[:0], bc, maxTraceJSONSize)
if err != nil {
// Close the connection instead of returning it to the pool,
// since it may be broken.
_ = bc.Close()
return fmt.Errorf("cannot read trace for funcName=%q from the server: %w", funcName, err)
}
if err := qt.AddJSON(bb.B); err != nil {
// Close the connection instead of returning it to the pool,
// since it may be broken.
_ = bc.Close()
return fmt.Errorf("cannot read trace for funcName=%q from the server: %w", funcName, err)
}
traceJSONBufPool.Put(bb)
// Return the connection back to the pool, assuming it is healthy.
sn.connPool.Put(bc)
return nil
}
var traceJSONBufPool bytesutil.ByteBufferPool
const maxTraceJSONSize = 1024 * 1024
type errRemote struct {
msg string
}

View file

@ -124,6 +124,11 @@ func (tbf *tmpBlocksFile) WriteBlockData(b []byte) (tmpBlockAddr, error) {
return addr, nil
}
// Len returns tbf size in bytes.
func (tbf *tmpBlocksFile) Len() uint64 {
return tbf.offset
}
func (tbf *tmpBlocksFile) Finalize() error {
if tbf.f == nil {
return nil

View file

@ -5,6 +5,7 @@
"time"
"github.com/valyala/quicktemplate"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
) %}
@ -125,8 +126,12 @@
}
{% endfunc %}
{% func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
{% func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) %}
{
{% code
lines := 0
bytesTotal := 0
%}
"status":"success",
"data":{
"resultType":"matrix",
@ -134,18 +139,30 @@
{% code bb, ok := <-resultsCh %}
{% if ok %}
{%z= bb.B %}
{% code quicktemplate.ReleaseByteBuffer(bb) %}
{% code
lines++
bytesTotal += len(bb.B)
quicktemplate.ReleaseByteBuffer(bb)
%}
{% for bb := range resultsCh %}
,{%z= bb.B %}
{% code quicktemplate.ReleaseByteBuffer(bb) %}
{% code
lines++
bytesTotal += len(bb.B)
quicktemplate.ReleaseByteBuffer(bb)
%}
{% endfor %}
{% endif %}
]
}
{% code
qt.Donef("export format=promapi: lines=%d, bytes=%d", lines, bytesTotal)
%}
{%= dumpQueryTrace(qt) %}
}
{% endfunc %}
{% func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
{% func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) %}
{% for bb := range resultsCh %}
{%z= bb.B %}
{% code quicktemplate.ReleaseByteBuffer(bb) %}

View file

@ -11,570 +11,588 @@ import (
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/valyala/quicktemplate"
)
//line app/vmselect/prometheus/export.qtpl:13
//line app/vmselect/prometheus/export.qtpl:14
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line app/vmselect/prometheus/export.qtpl:13
//line app/vmselect/prometheus/export.qtpl:14
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line app/vmselect/prometheus/export.qtpl:13
//line app/vmselect/prometheus/export.qtpl:14
func StreamExportCSVLine(qw422016 *qt422016.Writer, xb *exportBlock, fieldNames []string) {
//line app/vmselect/prometheus/export.qtpl:14
if len(xb.timestamps) == 0 || len(fieldNames) == 0 {
//line app/vmselect/prometheus/export.qtpl:14
return
//line app/vmselect/prometheus/export.qtpl:14
}
//line app/vmselect/prometheus/export.qtpl:15
for i, timestamp := range xb.timestamps {
if len(xb.timestamps) == 0 || len(fieldNames) == 0 {
//line app/vmselect/prometheus/export.qtpl:15
return
//line app/vmselect/prometheus/export.qtpl:15
}
//line app/vmselect/prometheus/export.qtpl:16
for i, timestamp := range xb.timestamps {
//line app/vmselect/prometheus/export.qtpl:17
value := xb.values[i]
//line app/vmselect/prometheus/export.qtpl:17
//line app/vmselect/prometheus/export.qtpl:18
streamexportCSVField(qw422016, xb.mn, fieldNames[0], timestamp, value)
//line app/vmselect/prometheus/export.qtpl:18
//line app/vmselect/prometheus/export.qtpl:19
for _, fieldName := range fieldNames[1:] {
//line app/vmselect/prometheus/export.qtpl:18
//line app/vmselect/prometheus/export.qtpl:19
qw422016.N().S(`,`)
//line app/vmselect/prometheus/export.qtpl:20
streamexportCSVField(qw422016, xb.mn, fieldName, timestamp, value)
//line app/vmselect/prometheus/export.qtpl:21
}
streamexportCSVField(qw422016, xb.mn, fieldName, timestamp, value)
//line app/vmselect/prometheus/export.qtpl:22
}
//line app/vmselect/prometheus/export.qtpl:23
qw422016.N().S(`
`)
//line app/vmselect/prometheus/export.qtpl:23
}
//line app/vmselect/prometheus/export.qtpl:24
}
//line app/vmselect/prometheus/export.qtpl:25
}
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
func WriteExportCSVLine(qq422016 qtio422016.Writer, xb *exportBlock, fieldNames []string) {
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
StreamExportCSVLine(qw422016, xb, fieldNames)
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
}
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
func ExportCSVLine(xb *exportBlock, fieldNames []string) string {
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
WriteExportCSVLine(qb422016, xb, fieldNames)
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
return qs422016
//line app/vmselect/prometheus/export.qtpl:24
//line app/vmselect/prometheus/export.qtpl:25
}
//line app/vmselect/prometheus/export.qtpl:26
func streamexportCSVField(qw422016 *qt422016.Writer, mn *storage.MetricName, fieldName string, timestamp int64, value float64) {
//line app/vmselect/prometheus/export.qtpl:27
if fieldName == "__value__" {
func streamexportCSVField(qw422016 *qt422016.Writer, mn *storage.MetricName, fieldName string, timestamp int64, value float64) {
//line app/vmselect/prometheus/export.qtpl:28
qw422016.N().F(value)
if fieldName == "__value__" {
//line app/vmselect/prometheus/export.qtpl:29
return
qw422016.N().F(value)
//line app/vmselect/prometheus/export.qtpl:30
}
//line app/vmselect/prometheus/export.qtpl:31
if fieldName == "__timestamp__" {
//line app/vmselect/prometheus/export.qtpl:32
qw422016.N().DL(timestamp)
//line app/vmselect/prometheus/export.qtpl:33
return
//line app/vmselect/prometheus/export.qtpl:34
//line app/vmselect/prometheus/export.qtpl:31
}
//line app/vmselect/prometheus/export.qtpl:32
if fieldName == "__timestamp__" {
//line app/vmselect/prometheus/export.qtpl:33
qw422016.N().DL(timestamp)
//line app/vmselect/prometheus/export.qtpl:34
return
//line app/vmselect/prometheus/export.qtpl:35
if strings.HasPrefix(fieldName, "__timestamp__:") {
}
//line app/vmselect/prometheus/export.qtpl:36
if strings.HasPrefix(fieldName, "__timestamp__:") {
//line app/vmselect/prometheus/export.qtpl:37
timeFormat := fieldName[len("__timestamp__:"):]
//line app/vmselect/prometheus/export.qtpl:37
switch timeFormat {
//line app/vmselect/prometheus/export.qtpl:38
case "unix_s":
switch timeFormat {
//line app/vmselect/prometheus/export.qtpl:39
qw422016.N().DL(timestamp / 1000)
case "unix_s":
//line app/vmselect/prometheus/export.qtpl:40
case "unix_ms":
qw422016.N().DL(timestamp / 1000)
//line app/vmselect/prometheus/export.qtpl:41
qw422016.N().DL(timestamp)
case "unix_ms":
//line app/vmselect/prometheus/export.qtpl:42
case "unix_ns":
qw422016.N().DL(timestamp)
//line app/vmselect/prometheus/export.qtpl:43
qw422016.N().DL(timestamp * 1e6)
case "unix_ns":
//line app/vmselect/prometheus/export.qtpl:44
qw422016.N().DL(timestamp * 1e6)
//line app/vmselect/prometheus/export.qtpl:45
case "rfc3339":
//line app/vmselect/prometheus/export.qtpl:46
//line app/vmselect/prometheus/export.qtpl:47
bb := quicktemplate.AcquireByteBuffer()
bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], time.RFC3339)
//line app/vmselect/prometheus/export.qtpl:49
//line app/vmselect/prometheus/export.qtpl:50
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/export.qtpl:51
//line app/vmselect/prometheus/export.qtpl:52
quicktemplate.ReleaseByteBuffer(bb)
//line app/vmselect/prometheus/export.qtpl:53
default:
//line app/vmselect/prometheus/export.qtpl:54
default:
//line app/vmselect/prometheus/export.qtpl:55
if strings.HasPrefix(timeFormat, "custom:") {
//line app/vmselect/prometheus/export.qtpl:56
//line app/vmselect/prometheus/export.qtpl:57
layout := timeFormat[len("custom:"):]
bb := quicktemplate.AcquireByteBuffer()
bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], layout)
//line app/vmselect/prometheus/export.qtpl:60
if bytes.ContainsAny(bb.B, `"`+",\n") {
//line app/vmselect/prometheus/export.qtpl:61
qw422016.E().QZ(bb.B)
if bytes.ContainsAny(bb.B, `"`+",\n") {
//line app/vmselect/prometheus/export.qtpl:62
} else {
qw422016.E().QZ(bb.B)
//line app/vmselect/prometheus/export.qtpl:63
qw422016.N().Z(bb.B)
} else {
//line app/vmselect/prometheus/export.qtpl:64
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/export.qtpl:65
}
//line app/vmselect/prometheus/export.qtpl:66
//line app/vmselect/prometheus/export.qtpl:67
quicktemplate.ReleaseByteBuffer(bb)
//line app/vmselect/prometheus/export.qtpl:68
} else {
//line app/vmselect/prometheus/export.qtpl:68
qw422016.N().S(`Unsupported timeFormat=`)
//line app/vmselect/prometheus/export.qtpl:69
qw422016.N().S(timeFormat)
} else {
//line app/vmselect/prometheus/export.qtpl:69
qw422016.N().S(`Unsupported timeFormat=`)
//line app/vmselect/prometheus/export.qtpl:70
}
qw422016.N().S(timeFormat)
//line app/vmselect/prometheus/export.qtpl:71
}
}
//line app/vmselect/prometheus/export.qtpl:72
return
}
//line app/vmselect/prometheus/export.qtpl:73
}
return
//line app/vmselect/prometheus/export.qtpl:74
}
//line app/vmselect/prometheus/export.qtpl:75
v := mn.GetTagValue(fieldName)
//line app/vmselect/prometheus/export.qtpl:75
if bytes.ContainsAny(v, `"`+",\n") {
//line app/vmselect/prometheus/export.qtpl:76
qw422016.N().QZ(v)
if bytes.ContainsAny(v, `"`+",\n") {
//line app/vmselect/prometheus/export.qtpl:77
} else {
qw422016.N().QZ(v)
//line app/vmselect/prometheus/export.qtpl:78
qw422016.N().Z(v)
} else {
//line app/vmselect/prometheus/export.qtpl:79
}
qw422016.N().Z(v)
//line app/vmselect/prometheus/export.qtpl:80
}
//line app/vmselect/prometheus/export.qtpl:81
}
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
func writeexportCSVField(qq422016 qtio422016.Writer, mn *storage.MetricName, fieldName string, timestamp int64, value float64) {
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
streamexportCSVField(qw422016, mn, fieldName, timestamp, value)
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
}
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
func exportCSVField(mn *storage.MetricName, fieldName string, timestamp int64, value float64) string {
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
writeexportCSVField(qb422016, mn, fieldName, timestamp, value)
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
return qs422016
//line app/vmselect/prometheus/export.qtpl:80
//line app/vmselect/prometheus/export.qtpl:81
}
//line app/vmselect/prometheus/export.qtpl:82
//line app/vmselect/prometheus/export.qtpl:83
func StreamExportPrometheusLine(qw422016 *qt422016.Writer, xb *exportBlock) {
//line app/vmselect/prometheus/export.qtpl:83
if len(xb.timestamps) == 0 {
//line app/vmselect/prometheus/export.qtpl:83
return
//line app/vmselect/prometheus/export.qtpl:83
}
//line app/vmselect/prometheus/export.qtpl:84
if len(xb.timestamps) == 0 {
//line app/vmselect/prometheus/export.qtpl:84
return
//line app/vmselect/prometheus/export.qtpl:84
}
//line app/vmselect/prometheus/export.qtpl:85
bb := quicktemplate.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:85
//line app/vmselect/prometheus/export.qtpl:86
writeprometheusMetricName(bb, xb.mn)
//line app/vmselect/prometheus/export.qtpl:86
//line app/vmselect/prometheus/export.qtpl:87
for i, ts := range xb.timestamps {
//line app/vmselect/prometheus/export.qtpl:87
//line app/vmselect/prometheus/export.qtpl:88
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/export.qtpl:87
qw422016.N().S(` `)
//line app/vmselect/prometheus/export.qtpl:88
qw422016.N().S(` `)
//line app/vmselect/prometheus/export.qtpl:89
qw422016.N().F(xb.values[i])
//line app/vmselect/prometheus/export.qtpl:88
//line app/vmselect/prometheus/export.qtpl:89
qw422016.N().S(` `)
//line app/vmselect/prometheus/export.qtpl:89
//line app/vmselect/prometheus/export.qtpl:90
qw422016.N().DL(ts)
//line app/vmselect/prometheus/export.qtpl:89
//line app/vmselect/prometheus/export.qtpl:90
qw422016.N().S(`
`)
//line app/vmselect/prometheus/export.qtpl:90
}
//line app/vmselect/prometheus/export.qtpl:91
}
//line app/vmselect/prometheus/export.qtpl:92
quicktemplate.ReleaseByteBuffer(bb)
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
}
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
func WriteExportPrometheusLine(qq422016 qtio422016.Writer, xb *exportBlock) {
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
StreamExportPrometheusLine(qw422016, xb)
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
}
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
func ExportPrometheusLine(xb *exportBlock) string {
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
WriteExportPrometheusLine(qb422016, xb)
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
return qs422016
//line app/vmselect/prometheus/export.qtpl:92
//line app/vmselect/prometheus/export.qtpl:93
}
//line app/vmselect/prometheus/export.qtpl:94
//line app/vmselect/prometheus/export.qtpl:95
func StreamExportJSONLine(qw422016 *qt422016.Writer, xb *exportBlock) {
//line app/vmselect/prometheus/export.qtpl:95
//line app/vmselect/prometheus/export.qtpl:96
if len(xb.timestamps) == 0 {
//line app/vmselect/prometheus/export.qtpl:95
//line app/vmselect/prometheus/export.qtpl:96
return
//line app/vmselect/prometheus/export.qtpl:95
//line app/vmselect/prometheus/export.qtpl:96
}
//line app/vmselect/prometheus/export.qtpl:95
//line app/vmselect/prometheus/export.qtpl:96
qw422016.N().S(`{"metric":`)
//line app/vmselect/prometheus/export.qtpl:97
//line app/vmselect/prometheus/export.qtpl:98
streammetricNameObject(qw422016, xb.mn)
//line app/vmselect/prometheus/export.qtpl:97
//line app/vmselect/prometheus/export.qtpl:98
qw422016.N().S(`,"values":[`)
//line app/vmselect/prometheus/export.qtpl:99
if len(xb.values) > 0 {
//line app/vmselect/prometheus/export.qtpl:100
if len(xb.values) > 0 {
//line app/vmselect/prometheus/export.qtpl:101
values := xb.values
//line app/vmselect/prometheus/export.qtpl:101
qw422016.N().F(values[0])
//line app/vmselect/prometheus/export.qtpl:102
qw422016.N().F(values[0])
//line app/vmselect/prometheus/export.qtpl:103
values = values[1:]
//line app/vmselect/prometheus/export.qtpl:103
//line app/vmselect/prometheus/export.qtpl:104
for _, v := range values {
//line app/vmselect/prometheus/export.qtpl:103
//line app/vmselect/prometheus/export.qtpl:104
qw422016.N().S(`,`)
//line app/vmselect/prometheus/export.qtpl:104
if math.IsNaN(v) {
//line app/vmselect/prometheus/export.qtpl:104
qw422016.N().S(`null`)
//line app/vmselect/prometheus/export.qtpl:104
} else {
//line app/vmselect/prometheus/export.qtpl:104
qw422016.N().F(v)
//line app/vmselect/prometheus/export.qtpl:104
}
//line app/vmselect/prometheus/export.qtpl:105
if math.IsNaN(v) {
//line app/vmselect/prometheus/export.qtpl:105
qw422016.N().S(`null`)
//line app/vmselect/prometheus/export.qtpl:105
} else {
//line app/vmselect/prometheus/export.qtpl:105
qw422016.N().F(v)
//line app/vmselect/prometheus/export.qtpl:105
}
//line app/vmselect/prometheus/export.qtpl:106
}
//line app/vmselect/prometheus/export.qtpl:106
//line app/vmselect/prometheus/export.qtpl:107
}
//line app/vmselect/prometheus/export.qtpl:106
//line app/vmselect/prometheus/export.qtpl:107
qw422016.N().S(`],"timestamps":[`)
//line app/vmselect/prometheus/export.qtpl:109
if len(xb.timestamps) > 0 {
//line app/vmselect/prometheus/export.qtpl:110
if len(xb.timestamps) > 0 {
//line app/vmselect/prometheus/export.qtpl:111
timestamps := xb.timestamps
//line app/vmselect/prometheus/export.qtpl:111
qw422016.N().DL(timestamps[0])
//line app/vmselect/prometheus/export.qtpl:112
qw422016.N().DL(timestamps[0])
//line app/vmselect/prometheus/export.qtpl:113
timestamps = timestamps[1:]
//line app/vmselect/prometheus/export.qtpl:113
for _, ts := range timestamps {
//line app/vmselect/prometheus/export.qtpl:113
qw422016.N().S(`,`)
//line app/vmselect/prometheus/export.qtpl:114
qw422016.N().DL(ts)
for _, ts := range timestamps {
//line app/vmselect/prometheus/export.qtpl:114
qw422016.N().S(`,`)
//line app/vmselect/prometheus/export.qtpl:115
qw422016.N().DL(ts)
//line app/vmselect/prometheus/export.qtpl:116
}
//line app/vmselect/prometheus/export.qtpl:116
//line app/vmselect/prometheus/export.qtpl:117
}
//line app/vmselect/prometheus/export.qtpl:116
//line app/vmselect/prometheus/export.qtpl:117
qw422016.N().S(`]}`)
//line app/vmselect/prometheus/export.qtpl:118
//line app/vmselect/prometheus/export.qtpl:119
qw422016.N().S(`
`)
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
}
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
func WriteExportJSONLine(qq422016 qtio422016.Writer, xb *exportBlock) {
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
StreamExportJSONLine(qw422016, xb)
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
}
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
func ExportJSONLine(xb *exportBlock) string {
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
WriteExportJSONLine(qb422016, xb)
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
return qs422016
//line app/vmselect/prometheus/export.qtpl:119
//line app/vmselect/prometheus/export.qtpl:120
}
//line app/vmselect/prometheus/export.qtpl:121
//line app/vmselect/prometheus/export.qtpl:122
func StreamExportPromAPILine(qw422016 *qt422016.Writer, xb *exportBlock) {
//line app/vmselect/prometheus/export.qtpl:121
//line app/vmselect/prometheus/export.qtpl:122
qw422016.N().S(`{"metric":`)
//line app/vmselect/prometheus/export.qtpl:123
//line app/vmselect/prometheus/export.qtpl:124
streammetricNameObject(qw422016, xb.mn)
//line app/vmselect/prometheus/export.qtpl:123
//line app/vmselect/prometheus/export.qtpl:124
qw422016.N().S(`,"values":`)
//line app/vmselect/prometheus/export.qtpl:124
//line app/vmselect/prometheus/export.qtpl:125
streamvaluesWithTimestamps(qw422016, xb.values, xb.timestamps)
//line app/vmselect/prometheus/export.qtpl:124
//line app/vmselect/prometheus/export.qtpl:125
qw422016.N().S(`}`)
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
}
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
func WriteExportPromAPILine(qq422016 qtio422016.Writer, xb *exportBlock) {
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
StreamExportPromAPILine(qw422016, xb)
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
}
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
func ExportPromAPILine(xb *exportBlock) string {
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
WriteExportPromAPILine(qb422016, xb)
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
return qs422016
//line app/vmselect/prometheus/export.qtpl:126
//line app/vmselect/prometheus/export.qtpl:127
}
//line app/vmselect/prometheus/export.qtpl:128
func StreamExportPromAPIResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
//line app/vmselect/prometheus/export.qtpl:128
qw422016.N().S(`{"status":"success","data":{"resultType":"matrix","result":[`)
//line app/vmselect/prometheus/export.qtpl:129
func StreamExportPromAPIResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/export.qtpl:129
qw422016.N().S(`{`)
//line app/vmselect/prometheus/export.qtpl:132
lines := 0
bytesTotal := 0
//line app/vmselect/prometheus/export.qtpl:134
qw422016.N().S(`"status":"success","data":{"resultType":"matrix","result":[`)
//line app/vmselect/prometheus/export.qtpl:139
bb, ok := <-resultsCh
//line app/vmselect/prometheus/export.qtpl:135
//line app/vmselect/prometheus/export.qtpl:140
if ok {
//line app/vmselect/prometheus/export.qtpl:136
//line app/vmselect/prometheus/export.qtpl:141
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/export.qtpl:137
//line app/vmselect/prometheus/export.qtpl:143
lines++
bytesTotal += len(bb.B)
quicktemplate.ReleaseByteBuffer(bb)
//line app/vmselect/prometheus/export.qtpl:138
//line app/vmselect/prometheus/export.qtpl:147
for bb := range resultsCh {
//line app/vmselect/prometheus/export.qtpl:138
//line app/vmselect/prometheus/export.qtpl:147
qw422016.N().S(`,`)
//line app/vmselect/prometheus/export.qtpl:139
//line app/vmselect/prometheus/export.qtpl:148
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/export.qtpl:140
//line app/vmselect/prometheus/export.qtpl:150
lines++
bytesTotal += len(bb.B)
quicktemplate.ReleaseByteBuffer(bb)
//line app/vmselect/prometheus/export.qtpl:141
//line app/vmselect/prometheus/export.qtpl:154
}
//line app/vmselect/prometheus/export.qtpl:142
//line app/vmselect/prometheus/export.qtpl:155
}
//line app/vmselect/prometheus/export.qtpl:142
qw422016.N().S(`]}}`)
//line app/vmselect/prometheus/export.qtpl:146
//line app/vmselect/prometheus/export.qtpl:155
qw422016.N().S(`]}`)
//line app/vmselect/prometheus/export.qtpl:159
qt.Donef("export format=promapi: lines=%d, bytes=%d", lines, bytesTotal)
//line app/vmselect/prometheus/export.qtpl:161
streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/export.qtpl:161
qw422016.N().S(`}`)
//line app/vmselect/prometheus/export.qtpl:163
}
//line app/vmselect/prometheus/export.qtpl:146
func WriteExportPromAPIResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
//line app/vmselect/prometheus/export.qtpl:146
//line app/vmselect/prometheus/export.qtpl:163
func WriteExportPromAPIResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/export.qtpl:163
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:146
StreamExportPromAPIResponse(qw422016, resultsCh)
//line app/vmselect/prometheus/export.qtpl:146
//line app/vmselect/prometheus/export.qtpl:163
StreamExportPromAPIResponse(qw422016, resultsCh, qt)
//line app/vmselect/prometheus/export.qtpl:163
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:146
//line app/vmselect/prometheus/export.qtpl:163
}
//line app/vmselect/prometheus/export.qtpl:146
func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
//line app/vmselect/prometheus/export.qtpl:146
//line app/vmselect/prometheus/export.qtpl:163
func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) string {
//line app/vmselect/prometheus/export.qtpl:163
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:146
WriteExportPromAPIResponse(qb422016, resultsCh)
//line app/vmselect/prometheus/export.qtpl:146
//line app/vmselect/prometheus/export.qtpl:163
WriteExportPromAPIResponse(qb422016, resultsCh, qt)
//line app/vmselect/prometheus/export.qtpl:163
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:146
//line app/vmselect/prometheus/export.qtpl:163
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:146
//line app/vmselect/prometheus/export.qtpl:163
return qs422016
//line app/vmselect/prometheus/export.qtpl:146
//line app/vmselect/prometheus/export.qtpl:163
}
//line app/vmselect/prometheus/export.qtpl:148
func StreamExportStdResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
//line app/vmselect/prometheus/export.qtpl:149
//line app/vmselect/prometheus/export.qtpl:165
func StreamExportStdResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/export.qtpl:166
for bb := range resultsCh {
//line app/vmselect/prometheus/export.qtpl:150
//line app/vmselect/prometheus/export.qtpl:167
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/export.qtpl:151
//line app/vmselect/prometheus/export.qtpl:168
quicktemplate.ReleaseByteBuffer(bb)
//line app/vmselect/prometheus/export.qtpl:152
//line app/vmselect/prometheus/export.qtpl:169
}
//line app/vmselect/prometheus/export.qtpl:153
//line app/vmselect/prometheus/export.qtpl:170
}
//line app/vmselect/prometheus/export.qtpl:153
func WriteExportStdResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
//line app/vmselect/prometheus/export.qtpl:153
//line app/vmselect/prometheus/export.qtpl:170
func WriteExportStdResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/export.qtpl:170
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:153
StreamExportStdResponse(qw422016, resultsCh)
//line app/vmselect/prometheus/export.qtpl:153
//line app/vmselect/prometheus/export.qtpl:170
StreamExportStdResponse(qw422016, resultsCh, qt)
//line app/vmselect/prometheus/export.qtpl:170
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:153
//line app/vmselect/prometheus/export.qtpl:170
}
//line app/vmselect/prometheus/export.qtpl:153
func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
//line app/vmselect/prometheus/export.qtpl:153
//line app/vmselect/prometheus/export.qtpl:170
func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer) string {
//line app/vmselect/prometheus/export.qtpl:170
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:153
WriteExportStdResponse(qb422016, resultsCh)
//line app/vmselect/prometheus/export.qtpl:153
//line app/vmselect/prometheus/export.qtpl:170
WriteExportStdResponse(qb422016, resultsCh, qt)
//line app/vmselect/prometheus/export.qtpl:170
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:153
//line app/vmselect/prometheus/export.qtpl:170
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:153
//line app/vmselect/prometheus/export.qtpl:170
return qs422016
//line app/vmselect/prometheus/export.qtpl:153
//line app/vmselect/prometheus/export.qtpl:170
}
//line app/vmselect/prometheus/export.qtpl:155
//line app/vmselect/prometheus/export.qtpl:172
func streamprometheusMetricName(qw422016 *qt422016.Writer, mn *storage.MetricName) {
//line app/vmselect/prometheus/export.qtpl:156
//line app/vmselect/prometheus/export.qtpl:173
qw422016.N().Z(mn.MetricGroup)
//line app/vmselect/prometheus/export.qtpl:157
//line app/vmselect/prometheus/export.qtpl:174
if len(mn.Tags) > 0 {
//line app/vmselect/prometheus/export.qtpl:157
//line app/vmselect/prometheus/export.qtpl:174
qw422016.N().S(`{`)
//line app/vmselect/prometheus/export.qtpl:159
//line app/vmselect/prometheus/export.qtpl:176
tags := mn.Tags
//line app/vmselect/prometheus/export.qtpl:160
//line app/vmselect/prometheus/export.qtpl:177
qw422016.N().Z(tags[0].Key)
//line app/vmselect/prometheus/export.qtpl:160
//line app/vmselect/prometheus/export.qtpl:177
qw422016.N().S(`=`)
//line app/vmselect/prometheus/export.qtpl:160
//line app/vmselect/prometheus/export.qtpl:177
qw422016.N().QZ(tags[0].Value)
//line app/vmselect/prometheus/export.qtpl:161
//line app/vmselect/prometheus/export.qtpl:178
tags = tags[1:]
//line app/vmselect/prometheus/export.qtpl:162
//line app/vmselect/prometheus/export.qtpl:179
for i := range tags {
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:180
tag := &tags[i]
//line app/vmselect/prometheus/export.qtpl:163
//line app/vmselect/prometheus/export.qtpl:180
qw422016.N().S(`,`)
//line app/vmselect/prometheus/export.qtpl:164
//line app/vmselect/prometheus/export.qtpl:181
qw422016.N().Z(tag.Key)
//line app/vmselect/prometheus/export.qtpl:164
//line app/vmselect/prometheus/export.qtpl:181
qw422016.N().S(`=`)
//line app/vmselect/prometheus/export.qtpl:164
//line app/vmselect/prometheus/export.qtpl:181
qw422016.N().QZ(tag.Value)
//line app/vmselect/prometheus/export.qtpl:165
//line app/vmselect/prometheus/export.qtpl:182
}
//line app/vmselect/prometheus/export.qtpl:165
//line app/vmselect/prometheus/export.qtpl:182
qw422016.N().S(`}`)
//line app/vmselect/prometheus/export.qtpl:167
//line app/vmselect/prometheus/export.qtpl:184
}
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
}
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
func writeprometheusMetricName(qq422016 qtio422016.Writer, mn *storage.MetricName) {
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
streamprometheusMetricName(qw422016, mn)
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
}
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
func prometheusMetricName(mn *storage.MetricName) string {
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
writeprometheusMetricName(qb422016, mn)
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
return qs422016
//line app/vmselect/prometheus/export.qtpl:168
//line app/vmselect/prometheus/export.qtpl:185
}

View file

@ -1,7 +1,12 @@
{% stripspace %}
{% import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
) %}
LabelValuesResponse generates response for /api/v1/label/<labelName>/values .
See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
{% func LabelValuesResponse(isPartial bool, labelValues []string) %}
{% func LabelValuesResponse(isPartial bool, labelValues []string, qt *querytracer.Tracer, qtDone func()) %}
{
"status":"success",
"isPartial":{% if isPartial %}true{% else %}false{% endif %},
@ -11,6 +16,11 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-va
{% if i+1 < len(labelValues) %},{% endif %}
{% endfor %}
]
{% code
qt.Printf("generate response for %d label values", len(labelValues))
qtDone()
%}
{%= dumpQueryTrace(qt) %}
}
{% endfunc %}
{% endstripspace %}

View file

@ -1,79 +1,92 @@
// Code generated by qtc from "label_values_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.
// LabelValuesResponse generates response for /api/v1/label/<labelName>/values .See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
//line app/vmselect/prometheus/label_values_response.qtpl:4
//line app/vmselect/prometheus/label_values_response.qtpl:3
package prometheus
//line app/vmselect/prometheus/label_values_response.qtpl:4
//line app/vmselect/prometheus/label_values_response.qtpl:3
import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)
// LabelValuesResponse generates response for /api/v1/label/<labelName>/values .See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
//line app/vmselect/prometheus/label_values_response.qtpl:9
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line app/vmselect/prometheus/label_values_response.qtpl:4
//line app/vmselect/prometheus/label_values_response.qtpl:9
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line app/vmselect/prometheus/label_values_response.qtpl:4
func StreamLabelValuesResponse(qw422016 *qt422016.Writer, isPartial bool, labelValues []string) {
//line app/vmselect/prometheus/label_values_response.qtpl:4
qw422016.N().S(`{"status":"success","isPartial":`)
//line app/vmselect/prometheus/label_values_response.qtpl:7
if isPartial {
//line app/vmselect/prometheus/label_values_response.qtpl:7
qw422016.N().S(`true`)
//line app/vmselect/prometheus/label_values_response.qtpl:7
} else {
//line app/vmselect/prometheus/label_values_response.qtpl:7
qw422016.N().S(`false`)
//line app/vmselect/prometheus/label_values_response.qtpl:7
}
//line app/vmselect/prometheus/label_values_response.qtpl:7
qw422016.N().S(`,"data":[`)
//line app/vmselect/prometheus/label_values_response.qtpl:9
for i, labelValue := range labelValues {
//line app/vmselect/prometheus/label_values_response.qtpl:10
qw422016.N().Q(labelValue)
//line app/vmselect/prometheus/label_values_response.qtpl:11
if i+1 < len(labelValues) {
//line app/vmselect/prometheus/label_values_response.qtpl:11
qw422016.N().S(`,`)
//line app/vmselect/prometheus/label_values_response.qtpl:11
}
func StreamLabelValuesResponse(qw422016 *qt422016.Writer, isPartial bool, labelValues []string, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/label_values_response.qtpl:9
qw422016.N().S(`{"status":"success","isPartial":`)
//line app/vmselect/prometheus/label_values_response.qtpl:12
if isPartial {
//line app/vmselect/prometheus/label_values_response.qtpl:12
qw422016.N().S(`true`)
//line app/vmselect/prometheus/label_values_response.qtpl:12
} else {
//line app/vmselect/prometheus/label_values_response.qtpl:12
qw422016.N().S(`false`)
//line app/vmselect/prometheus/label_values_response.qtpl:12
}
//line app/vmselect/prometheus/label_values_response.qtpl:12
qw422016.N().S(`]}`)
qw422016.N().S(`,"data":[`)
//line app/vmselect/prometheus/label_values_response.qtpl:14
for i, labelValue := range labelValues {
//line app/vmselect/prometheus/label_values_response.qtpl:15
qw422016.N().Q(labelValue)
//line app/vmselect/prometheus/label_values_response.qtpl:16
if i+1 < len(labelValues) {
//line app/vmselect/prometheus/label_values_response.qtpl:16
qw422016.N().S(`,`)
//line app/vmselect/prometheus/label_values_response.qtpl:16
}
//line app/vmselect/prometheus/label_values_response.qtpl:17
}
//line app/vmselect/prometheus/label_values_response.qtpl:17
qw422016.N().S(`]`)
//line app/vmselect/prometheus/label_values_response.qtpl:20
qt.Printf("generate response for %d label values", len(labelValues))
qtDone()
//line app/vmselect/prometheus/label_values_response.qtpl:23
streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/label_values_response.qtpl:23
qw422016.N().S(`}`)
//line app/vmselect/prometheus/label_values_response.qtpl:25
}
//line app/vmselect/prometheus/label_values_response.qtpl:15
func WriteLabelValuesResponse(qq422016 qtio422016.Writer, isPartial bool, labelValues []string) {
//line app/vmselect/prometheus/label_values_response.qtpl:15
//line app/vmselect/prometheus/label_values_response.qtpl:25
func WriteLabelValuesResponse(qq422016 qtio422016.Writer, isPartial bool, labelValues []string, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/label_values_response.qtpl:25
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/label_values_response.qtpl:15
StreamLabelValuesResponse(qw422016, isPartial, labelValues)
//line app/vmselect/prometheus/label_values_response.qtpl:15
//line app/vmselect/prometheus/label_values_response.qtpl:25
StreamLabelValuesResponse(qw422016, isPartial, labelValues, qt, qtDone)
//line app/vmselect/prometheus/label_values_response.qtpl:25
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/label_values_response.qtpl:15
//line app/vmselect/prometheus/label_values_response.qtpl:25
}
//line app/vmselect/prometheus/label_values_response.qtpl:15
func LabelValuesResponse(isPartial bool, labelValues []string) string {
//line app/vmselect/prometheus/label_values_response.qtpl:15
//line app/vmselect/prometheus/label_values_response.qtpl:25
func LabelValuesResponse(isPartial bool, labelValues []string, qt *querytracer.Tracer, qtDone func()) string {
//line app/vmselect/prometheus/label_values_response.qtpl:25
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/label_values_response.qtpl:15
WriteLabelValuesResponse(qb422016, isPartial, labelValues)
//line app/vmselect/prometheus/label_values_response.qtpl:15
//line app/vmselect/prometheus/label_values_response.qtpl:25
WriteLabelValuesResponse(qb422016, isPartial, labelValues, qt, qtDone)
//line app/vmselect/prometheus/label_values_response.qtpl:25
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/label_values_response.qtpl:15
//line app/vmselect/prometheus/label_values_response.qtpl:25
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/label_values_response.qtpl:15
//line app/vmselect/prometheus/label_values_response.qtpl:25
return qs422016
//line app/vmselect/prometheus/label_values_response.qtpl:15
//line app/vmselect/prometheus/label_values_response.qtpl:25
}

View file

@ -1,7 +1,12 @@
{% stripspace %}
{% import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
) %}
LabelsResponse generates response for /api/v1/labels .
See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
{% func LabelsResponse(isPartial bool, labels []string) %}
{% func LabelsResponse(isPartial bool, labels []string, qt *querytracer.Tracer, qtDone func()) %}
{
"status":"success",
"isPartial":{% if isPartial %}true{% else %}false{% endif %},
@ -11,6 +16,11 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-nam
{% if i+1 < len(labels) %},{% endif %}
{% endfor %}
]
{% code
qt.Printf("generate response for %d labels", len(labels))
qtDone()
%}
{%= dumpQueryTrace(qt) %}
}
{% endfunc %}
{% endstripspace %}

View file

@ -1,79 +1,92 @@
// Code generated by qtc from "labels_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.
// LabelsResponse generates response for /api/v1/labels .See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
//line app/vmselect/prometheus/labels_response.qtpl:4
//line app/vmselect/prometheus/labels_response.qtpl:3
package prometheus
//line app/vmselect/prometheus/labels_response.qtpl:4
//line app/vmselect/prometheus/labels_response.qtpl:3
import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)
// LabelsResponse generates response for /api/v1/labels .See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
//line app/vmselect/prometheus/labels_response.qtpl:9
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line app/vmselect/prometheus/labels_response.qtpl:4
//line app/vmselect/prometheus/labels_response.qtpl:9
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line app/vmselect/prometheus/labels_response.qtpl:4
func StreamLabelsResponse(qw422016 *qt422016.Writer, isPartial bool, labels []string) {
//line app/vmselect/prometheus/labels_response.qtpl:4
qw422016.N().S(`{"status":"success","isPartial":`)
//line app/vmselect/prometheus/labels_response.qtpl:7
if isPartial {
//line app/vmselect/prometheus/labels_response.qtpl:7
qw422016.N().S(`true`)
//line app/vmselect/prometheus/labels_response.qtpl:7
} else {
//line app/vmselect/prometheus/labels_response.qtpl:7
qw422016.N().S(`false`)
//line app/vmselect/prometheus/labels_response.qtpl:7
}
//line app/vmselect/prometheus/labels_response.qtpl:7
qw422016.N().S(`,"data":[`)
//line app/vmselect/prometheus/labels_response.qtpl:9
for i, label := range labels {
//line app/vmselect/prometheus/labels_response.qtpl:10
qw422016.N().Q(label)
//line app/vmselect/prometheus/labels_response.qtpl:11
if i+1 < len(labels) {
//line app/vmselect/prometheus/labels_response.qtpl:11
qw422016.N().S(`,`)
//line app/vmselect/prometheus/labels_response.qtpl:11
}
func StreamLabelsResponse(qw422016 *qt422016.Writer, isPartial bool, labels []string, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/labels_response.qtpl:9
qw422016.N().S(`{"status":"success","isPartial":`)
//line app/vmselect/prometheus/labels_response.qtpl:12
if isPartial {
//line app/vmselect/prometheus/labels_response.qtpl:12
qw422016.N().S(`true`)
//line app/vmselect/prometheus/labels_response.qtpl:12
} else {
//line app/vmselect/prometheus/labels_response.qtpl:12
qw422016.N().S(`false`)
//line app/vmselect/prometheus/labels_response.qtpl:12
}
//line app/vmselect/prometheus/labels_response.qtpl:12
qw422016.N().S(`]}`)
qw422016.N().S(`,"data":[`)
//line app/vmselect/prometheus/labels_response.qtpl:14
for i, label := range labels {
//line app/vmselect/prometheus/labels_response.qtpl:15
qw422016.N().Q(label)
//line app/vmselect/prometheus/labels_response.qtpl:16
if i+1 < len(labels) {
//line app/vmselect/prometheus/labels_response.qtpl:16
qw422016.N().S(`,`)
//line app/vmselect/prometheus/labels_response.qtpl:16
}
//line app/vmselect/prometheus/labels_response.qtpl:17
}
//line app/vmselect/prometheus/labels_response.qtpl:17
qw422016.N().S(`]`)
//line app/vmselect/prometheus/labels_response.qtpl:20
qt.Printf("generate response for %d labels", len(labels))
qtDone()
//line app/vmselect/prometheus/labels_response.qtpl:23
streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/labels_response.qtpl:23
qw422016.N().S(`}`)
//line app/vmselect/prometheus/labels_response.qtpl:25
}
//line app/vmselect/prometheus/labels_response.qtpl:15
func WriteLabelsResponse(qq422016 qtio422016.Writer, isPartial bool, labels []string) {
//line app/vmselect/prometheus/labels_response.qtpl:15
//line app/vmselect/prometheus/labels_response.qtpl:25
func WriteLabelsResponse(qq422016 qtio422016.Writer, isPartial bool, labels []string, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/labels_response.qtpl:25
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/labels_response.qtpl:15
StreamLabelsResponse(qw422016, isPartial, labels)
//line app/vmselect/prometheus/labels_response.qtpl:15
//line app/vmselect/prometheus/labels_response.qtpl:25
StreamLabelsResponse(qw422016, isPartial, labels, qt, qtDone)
//line app/vmselect/prometheus/labels_response.qtpl:25
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/labels_response.qtpl:15
//line app/vmselect/prometheus/labels_response.qtpl:25
}
//line app/vmselect/prometheus/labels_response.qtpl:15
func LabelsResponse(isPartial bool, labels []string) string {
//line app/vmselect/prometheus/labels_response.qtpl:15
//line app/vmselect/prometheus/labels_response.qtpl:25
func LabelsResponse(isPartial bool, labels []string, qt *querytracer.Tracer, qtDone func()) string {
//line app/vmselect/prometheus/labels_response.qtpl:25
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/labels_response.qtpl:15
WriteLabelsResponse(qb422016, isPartial, labels)
//line app/vmselect/prometheus/labels_response.qtpl:15
//line app/vmselect/prometheus/labels_response.qtpl:25
WriteLabelsResponse(qb422016, isPartial, labels, qt, qtDone)
//line app/vmselect/prometheus/labels_response.qtpl:25
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/labels_response.qtpl:15
//line app/vmselect/prometheus/labels_response.qtpl:25
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/labels_response.qtpl:15
//line app/vmselect/prometheus/labels_response.qtpl:25
return qs422016
//line app/vmselect/prometheus/labels_response.qtpl:15
//line app/vmselect/prometheus/labels_response.qtpl:25
}

View file

@ -25,6 +25,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metrics"
"github.com/valyala/fastjson/fastfloat"
@ -89,7 +90,7 @@ func FederateHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter,
}
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, start, end, tagFilterss, *maxFederateSeries)
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
rss, isPartial, err := netstorage.ProcessSearchQuery(at, denyPartialResponse, sq, true, deadline)
rss, isPartial, err := netstorage.ProcessSearchQuery(nil, at, denyPartialResponse, sq, true, deadline)
if err != nil {
return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
@ -100,7 +101,7 @@ func FederateHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter,
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
bw := bufferedwriter.Get(w)
defer bufferedwriter.Put(bw)
err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
err = rss.RunParallel(nil, func(rs *netstorage.Result, workerID uint) error {
if err := bw.Error(); err != nil {
return err
}
@ -158,12 +159,12 @@ func ExportCSVHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter
// Unconditionally deny partial response for the exported data,
// since users usually expect that the exported data is full.
denyPartialResponse := true
rss, _, err := netstorage.ProcessSearchQuery(at, denyPartialResponse, sq, true, ep.deadline)
rss, _, err := netstorage.ProcessSearchQuery(nil, at, denyPartialResponse, sq, true, ep.deadline)
if err != nil {
return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
go func() {
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
err := rss.RunParallel(nil, func(rs *netstorage.Result, workerID uint) error {
if err := bw.Error(); err != nil {
return err
}
@ -181,7 +182,7 @@ func ExportCSVHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter
}()
} else {
go func() {
err := netstorage.ExportBlocks(at, sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
err := netstorage.ExportBlocks(nil, at, sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
if err := bw.Error(); err != nil {
return err
}
@ -242,7 +243,7 @@ func ExportNativeHandler(startTime time.Time, at *auth.Token, w http.ResponseWri
_, _ = bw.Write(trBuf)
// Marshal native blocks.
err = netstorage.ExportBlocks(at, sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
err = netstorage.ExportBlocks(nil, at, sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
if err := bw.Error(); err != nil {
return err
}
@ -297,7 +298,7 @@ func ExportHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
format := r.FormValue("format")
maxRowsPerLine := int(fastfloat.ParseInt64BestEffort(r.FormValue("max_rows_per_line")))
reduceMemUsage := searchutils.GetBool(r, "reduce_mem_usage")
if err := exportHandler(at, w, ep, format, maxRowsPerLine, reduceMemUsage); err != nil {
if err := exportHandler(nil, at, w, ep, format, maxRowsPerLine, reduceMemUsage); err != nil {
return fmt.Errorf("error when exporting data on the time range (start=%d, end=%d): %w", ep.start, ep.end, err)
}
return nil
@ -305,7 +306,7 @@ func ExportHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)
func exportHandler(at *auth.Token, w http.ResponseWriter, ep *exportParams, format string, maxRowsPerLine int, reduceMemUsage bool) error {
func exportHandler(qt *querytracer.Tracer, at *auth.Token, w http.ResponseWriter, ep *exportParams, format string, maxRowsPerLine int, reduceMemUsage bool) error {
writeResponseFunc := WriteExportStdResponse
writeLineFunc := func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
bb := quicktemplate.AcquireByteBuffer()
@ -370,12 +371,13 @@ func exportHandler(at *auth.Token, w http.ResponseWriter, ep *exportParams, form
// Unconditionally deny partial response for the exported data,
// since users usually expect that the exported data is full.
denyPartialResponse := true
rss, _, err := netstorage.ProcessSearchQuery(at, denyPartialResponse, sq, true, ep.deadline)
rss, _, err := netstorage.ProcessSearchQuery(qt, at, denyPartialResponse, sq, true, ep.deadline)
if err != nil {
return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
qtChild := qt.NewChild()
go func() {
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
err := rss.RunParallel(qtChild, func(rs *netstorage.Result, workerID uint) error {
if err := bw.Error(); err != nil {
return err
}
@ -388,12 +390,14 @@ func exportHandler(at *auth.Token, w http.ResponseWriter, ep *exportParams, form
exportBlockPool.Put(xb)
return nil
})
qtChild.Donef("background export format=%s", format)
close(resultsCh)
doneCh <- err
}()
} else {
qtChild := qt.NewChild()
go func() {
err := netstorage.ExportBlocks(at, sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
err := netstorage.ExportBlocks(qtChild, at, sq, ep.deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
if err := bw.Error(); err != nil {
return err
}
@ -410,13 +414,14 @@ func exportHandler(at *auth.Token, w http.ResponseWriter, ep *exportParams, form
exportBlockPool.Put(xb)
return nil
})
qtChild.Donef("background export format=%s", format)
close(resultsCh)
doneCh <- err
}()
}
// writeResponseFunc must consume all the data from resultsCh.
writeResponseFunc(bw, resultsCh)
writeResponseFunc(bw, resultsCh, qt)
if err := bw.Flush(); err != nil {
return err
}
@ -464,7 +469,7 @@ func DeleteHandler(startTime time.Time, at *auth.Token, r *http.Request) error {
}
ct := startTime.UnixNano() / 1e6
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, 0, ct, tagFilterss, 0)
deletedCount, err := netstorage.DeleteSeries(at, sq, deadline)
deletedCount, err := netstorage.DeleteSeries(nil, at, sq, deadline)
if err != nil {
return fmt.Errorf("cannot delete time series: %w", err)
}
@ -524,7 +529,7 @@ var httpClient = &http.Client{
// LabelValuesHandler processes /api/v1/label/<labelName>/values request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w http.ResponseWriter, r *http.Request) error {
func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, labelName string, w http.ResponseWriter, r *http.Request) error {
defer labelValuesDuration.UpdateDuration(startTime)
deadline := searchutils.GetDeadlineForQuery(r, startTime)
@ -542,7 +547,7 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
if len(matches) == 0 && len(etfs) == 0 {
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
var err error
labelValues, isPartial, err = netstorage.GetLabelValues(at, denyPartialResponse, labelName, deadline)
labelValues, isPartial, err = netstorage.GetLabelValues(qt, at, denyPartialResponse, labelName, deadline)
if err != nil {
return fmt.Errorf(`cannot obtain label values for %q: %w`, labelName, err)
}
@ -560,7 +565,7 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
MinTimestamp: start,
MaxTimestamp: end,
}
labelValues, isPartial, err = netstorage.GetLabelValuesOnTimeRange(at, denyPartialResponse, labelName, tr, deadline)
labelValues, isPartial, err = netstorage.GetLabelValuesOnTimeRange(qt, at, denyPartialResponse, labelName, tr, deadline)
if err != nil {
return fmt.Errorf(`cannot obtain label values on time range for %q: %w`, labelName, err)
}
@ -582,7 +587,7 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
if err != nil {
return err
}
labelValues, isPartial, err = labelValuesWithMatches(at, denyPartialResponse, labelName, matches, etfs, start, end, deadline)
labelValues, isPartial, err = labelValuesWithMatches(qt, at, denyPartialResponse, labelName, matches, etfs, start, end, deadline)
if err != nil {
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
}
@ -591,14 +596,17 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
w.Header().Set("Content-Type", "application/json")
bw := bufferedwriter.Get(w)
defer bufferedwriter.Put(bw)
WriteLabelValuesResponse(bw, isPartial, labelValues)
qtDone := func() {
qt.Donef("/api/v1/labels")
}
WriteLabelValuesResponse(bw, isPartial, labelValues, qt, qtDone)
if err := bw.Flush(); err != nil {
return fmt.Errorf("canot flush label values to remote client: %w", err)
}
return nil
}
func labelValuesWithMatches(at *auth.Token, denyPartialResponse bool, labelName string, matches []string, etfs [][]storage.TagFilter,
func labelValuesWithMatches(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, labelName string, matches []string, etfs [][]storage.TagFilter,
start, end int64, deadline searchutils.Deadline) ([]string, bool, error) {
tagFilterss, err := getTagFilterssFromMatches(matches)
if err != nil {
@ -629,7 +637,7 @@ func labelValuesWithMatches(at *auth.Token, denyPartialResponse bool, labelName
isPartial := false
if end-start > 24*3600*1000 {
// It is cheaper to call SearchMetricNames on time ranges exceeding a day.
mns, isPartialResponse, err := netstorage.SearchMetricNames(at, denyPartialResponse, sq, deadline)
mns, isPartialResponse, err := netstorage.SearchMetricNames(qt, at, denyPartialResponse, sq, deadline)
if err != nil {
return nil, false, fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
}
@ -642,13 +650,13 @@ func labelValuesWithMatches(at *auth.Token, denyPartialResponse bool, labelName
m[string(labelValue)] = struct{}{}
}
} else {
rss, isPartialResponse, err := netstorage.ProcessSearchQuery(at, denyPartialResponse, sq, false, deadline)
rss, isPartialResponse, err := netstorage.ProcessSearchQuery(qt, at, denyPartialResponse, sq, false, deadline)
if err != nil {
return nil, false, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
isPartial = isPartialResponse
var mLock sync.Mutex
err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
err = rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
labelValue := rs.MetricName.GetTagValue(labelName)
if len(labelValue) == 0 {
return nil
@ -667,6 +675,7 @@ func labelValuesWithMatches(at *auth.Token, denyPartialResponse bool, labelName
labelValues = append(labelValues, labelValue)
}
sort.Strings(labelValues)
qt.Printf("sort %d label values", len(labelValues))
return labelValues, isPartial, nil
}
@ -678,7 +687,7 @@ func LabelsCountHandler(startTime time.Time, at *auth.Token, w http.ResponseWrit
deadline := searchutils.GetDeadlineForStatusRequest(r, startTime)
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
labelEntries, isPartial, err := netstorage.GetLabelEntries(at, denyPartialResponse, deadline)
labelEntries, isPartial, err := netstorage.GetLabelEntries(nil, at, denyPartialResponse, deadline)
if err != nil {
return fmt.Errorf(`cannot obtain label entries: %w`, err)
}
@ -743,7 +752,7 @@ func TSDBStatusHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
var status *storage.TSDBStatus
var isPartial bool
if len(matches) == 0 && len(etfs) == 0 {
status, isPartial, err = netstorage.GetTSDBStatusForDate(at, denyPartialResponse, deadline, date, topN, *maxTSDBStatusSeries)
status, isPartial, err = netstorage.GetTSDBStatusForDate(nil, at, denyPartialResponse, deadline, date, topN, *maxTSDBStatusSeries)
if err != nil {
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
}
@ -776,7 +785,7 @@ func tsdbStatusWithMatches(at *auth.Token, denyPartialResponse bool, matches []s
start := int64(date*secsPerDay) * 1000
end := int64(date*secsPerDay+secsPerDay) * 1000
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, start, end, tagFilterss, maxMetrics)
status, isPartial, err := netstorage.GetTSDBStatusWithFilters(at, denyPartialResponse, deadline, sq, topN)
status, isPartial, err := netstorage.GetTSDBStatusWithFilters(nil, at, denyPartialResponse, deadline, sq, topN)
if err != nil {
return nil, false, err
}
@ -788,7 +797,7 @@ var tsdbStatusDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/
// LabelsHandler processes /api/v1/labels request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
defer labelsDuration.UpdateDuration(startTime)
deadline := searchutils.GetDeadlineForQuery(r, startTime)
@ -806,7 +815,7 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
if len(matches) == 0 && len(etfs) == 0 {
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
var err error
labels, isPartial, err = netstorage.GetLabels(at, denyPartialResponse, deadline)
labels, isPartial, err = netstorage.GetLabels(qt, at, denyPartialResponse, deadline)
if err != nil {
return fmt.Errorf("cannot obtain labels: %w", err)
}
@ -824,7 +833,7 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
MinTimestamp: start,
MaxTimestamp: end,
}
labels, isPartial, err = netstorage.GetLabelsOnTimeRange(at, denyPartialResponse, tr, deadline)
labels, isPartial, err = netstorage.GetLabelsOnTimeRange(qt, at, denyPartialResponse, tr, deadline)
if err != nil {
return fmt.Errorf("cannot obtain labels on time range: %w", err)
}
@ -844,7 +853,7 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
if err != nil {
return err
}
labels, isPartial, err = labelsWithMatches(at, denyPartialResponse, matches, etfs, start, end, deadline)
labels, isPartial, err = labelsWithMatches(qt, at, denyPartialResponse, matches, etfs, start, end, deadline)
if err != nil {
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
}
@ -853,14 +862,18 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
w.Header().Set("Content-Type", "application/json")
bw := bufferedwriter.Get(w)
defer bufferedwriter.Put(bw)
WriteLabelsResponse(bw, isPartial, labels)
qtDone := func() {
qt.Donef("/api/v1/labels")
}
WriteLabelsResponse(bw, isPartial, labels, qt, qtDone)
if err := bw.Flush(); err != nil {
return fmt.Errorf("cannot send labels response to remote client: %w", err)
}
return nil
}
func labelsWithMatches(at *auth.Token, denyPartialResponse bool, matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, bool, error) {
func labelsWithMatches(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, matches []string, etfs [][]storage.TagFilter,
start, end int64, deadline searchutils.Deadline) ([]string, bool, error) {
tagFilterss, err := getTagFilterssFromMatches(matches)
if err != nil {
return nil, false, err
@ -877,7 +890,7 @@ func labelsWithMatches(at *auth.Token, denyPartialResponse bool, matches []strin
isPartial := false
if end-start > 24*3600*1000 {
// It is cheaper to call SearchMetricNames on time ranges exceeding a day.
mns, isPartialResponse, err := netstorage.SearchMetricNames(at, denyPartialResponse, sq, deadline)
mns, isPartialResponse, err := netstorage.SearchMetricNames(qt, at, denyPartialResponse, sq, deadline)
if err != nil {
return nil, false, fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
}
@ -891,13 +904,13 @@ func labelsWithMatches(at *auth.Token, denyPartialResponse bool, matches []strin
m["__name__"] = struct{}{}
}
} else {
rss, isPartialResponse, err := netstorage.ProcessSearchQuery(at, denyPartialResponse, sq, false, deadline)
rss, isPartialResponse, err := netstorage.ProcessSearchQuery(qt, at, denyPartialResponse, sq, false, deadline)
if err != nil {
return nil, false, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
isPartial = isPartialResponse
var mLock sync.Mutex
err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
err = rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
mLock.Lock()
for _, tag := range rs.MetricName.Tags {
m[string(tag.Key)] = struct{}{}
@ -915,6 +928,7 @@ func labelsWithMatches(at *auth.Token, denyPartialResponse bool, matches []strin
labels = append(labels, label)
}
sort.Strings(labels)
qt.Printf("sort %d labels", len(labels))
return labels, isPartial, nil
}
@ -926,7 +940,7 @@ func SeriesCountHandler(startTime time.Time, at *auth.Token, w http.ResponseWrit
deadline := searchutils.GetDeadlineForStatusRequest(r, startTime)
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
n, isPartial, err := netstorage.GetSeriesCount(at, denyPartialResponse, deadline)
n, isPartial, err := netstorage.GetSeriesCount(nil, at, denyPartialResponse, deadline)
if err != nil {
return fmt.Errorf("cannot obtain series count: %w", err)
}
@ -946,7 +960,7 @@ var seriesCountDuration = metrics.NewSummary(`vm_request_duration_seconds{path="
// SeriesHandler processes /api/v1/series request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
func SeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
defer seriesDuration.UpdateDuration(startTime)
ct := startTime.UnixNano() / 1e6
@ -976,10 +990,13 @@ func SeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
end = start + defaultStep
}
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, start, end, tagFilterss, *maxSeriesLimit)
qtDone := func() {
qt.Donef("/api/v1/series: start=%d, end=%d", start, end)
}
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
if end-start > 24*3600*1000 {
// It is cheaper to call SearchMetricNames on time ranges exceeding a day.
mns, isPartial, err := netstorage.SearchMetricNames(at, denyPartialResponse, sq, deadline)
mns, isPartial, err := netstorage.SearchMetricNames(qt, at, denyPartialResponse, sq, deadline)
if err != nil {
return fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
}
@ -996,14 +1013,14 @@ func SeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
close(resultsCh)
}()
// WriteSeriesResponse must consume all the data from resultsCh.
WriteSeriesResponse(bw, isPartial, resultsCh)
WriteSeriesResponse(bw, isPartial, resultsCh, qt, qtDone)
if err := bw.Flush(); err != nil {
return err
}
seriesDuration.UpdateDuration(startTime)
return nil
}
rss, isPartial, err := netstorage.ProcessSearchQuery(at, denyPartialResponse, sq, false, deadline)
rss, isPartial, err := netstorage.ProcessSearchQuery(qt, at, denyPartialResponse, sq, false, deadline)
if err != nil {
return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
@ -1014,7 +1031,7 @@ func SeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
resultsCh := make(chan *quicktemplate.ByteBuffer)
doneCh := make(chan error)
go func() {
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
err := rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
if err := bw.Error(); err != nil {
return err
}
@ -1027,7 +1044,7 @@ func SeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
doneCh <- err
}()
// WriteSeriesResponse must consume all the data from resultsCh.
WriteSeriesResponse(bw, isPartial, resultsCh)
WriteSeriesResponse(bw, isPartial, resultsCh, qt, qtDone)
if err := bw.Flush(); err != nil {
return fmt.Errorf("cannot flush series response to remote client: %w", err)
}
@ -1043,10 +1060,11 @@ var seriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/
// QueryHandler processes /api/v1/query request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
func QueryHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
defer queryDuration.UpdateDuration(startTime)
ct := startTime.UnixNano() / 1e6
mayCache := !searchutils.GetBool(r, "nocache")
query := r.FormValue("query")
if len(query) == 0 {
return fmt.Errorf("missing `query` arg")
@ -1099,7 +1117,7 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
end: end,
filterss: filterss,
}
if err := exportHandler(at, w, ep, "promapi", 0, false); err != nil {
if err := exportHandler(qt, at, w, ep, "promapi", 0, false); err != nil {
return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
}
queryDuration.UpdateDuration(startTime)
@ -1115,7 +1133,7 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
start -= offset
end := start
start = end - window
if err := queryRangeHandler(startTime, at, w, childQuery, start, end, step, r, ct, etfs); err != nil {
if err := queryRangeHandler(qt, startTime, at, w, childQuery, start, end, step, r, ct, etfs); err != nil {
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
}
queryDuration.UpdateDuration(startTime)
@ -1140,13 +1158,14 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
MaxSeries: *maxUniqueTimeseries,
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
Deadline: deadline,
MayCache: mayCache,
LookbackDelta: lookbackDelta,
RoundDigits: getRoundDigits(r),
EnforcedTagFilterss: etfs,
DenyPartialResponse: searchutils.GetDenyPartialResponse(r),
}
result, err := promql.Exec(&ec, query, true)
result, err := promql.Exec(qt, &ec, query, true)
if err != nil {
return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %w", query, start, step, err)
}
@ -1162,7 +1181,10 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
w.Header().Set("Content-Type", "application/json")
bw := bufferedwriter.Get(w)
defer bufferedwriter.Put(bw)
WriteQueryResponse(bw, ec.IsPartialResponse, result)
qtDone := func() {
qt.Donef("/api/v1/query: query=%s, time=%d: series=%d", query, start, len(result))
}
WriteQueryResponse(bw, ec.IsPartialResponse, result, qt, qtDone)
if err := bw.Flush(); err != nil {
return fmt.Errorf("cannot flush query response to remote client: %w", err)
}
@ -1174,7 +1196,7 @@ var queryDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v
// QueryRangeHandler processes /api/v1/query_range request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
func QueryRangeHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
func QueryRangeHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
defer queryRangeDuration.UpdateDuration(startTime)
ct := startTime.UnixNano() / 1e6
@ -1198,13 +1220,14 @@ func QueryRangeHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
if err != nil {
return err
}
if err := queryRangeHandler(startTime, at, w, query, start, end, step, r, ct, etfs); err != nil {
if err := queryRangeHandler(qt, startTime, at, w, query, start, end, step, r, ct, etfs); err != nil {
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
}
return nil
}
func queryRangeHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etfs [][]storage.TagFilter) error {
func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w http.ResponseWriter, query string,
start, end, step int64, r *http.Request, ct int64, etfs [][]storage.TagFilter) error {
deadline := searchutils.GetDeadlineForQuery(r, startTime)
mayCache := !searchutils.GetBool(r, "nocache")
lookbackDelta, err := getMaxLookback(r)
@ -1241,7 +1264,7 @@ func queryRangeHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
DenyPartialResponse: searchutils.GetDenyPartialResponse(r),
}
result, err := promql.Exec(&ec, query, false)
result, err := promql.Exec(qt, &ec, query, false)
if err != nil {
return fmt.Errorf("cannot execute query: %w", err)
}
@ -1259,7 +1282,10 @@ func queryRangeHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
w.Header().Set("Content-Type", "application/json")
bw := bufferedwriter.Get(w)
defer bufferedwriter.Put(bw)
WriteQueryRangeResponse(bw, ec.IsPartialResponse, result)
qtDone := func() {
qt.Donef("/api/v1/query_range: start=%d, end=%d, step=%d, query=%q: series=%d", start, end, step, query, len(result))
}
WriteQueryRangeResponse(bw, ec.IsPartialResponse, result, qt, qtDone)
if err := bw.Flush(); err != nil {
return fmt.Errorf("cannot send query range response to remote client: %w", err)
}

View file

@ -1,12 +1,17 @@
{% import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
) %}
{% stripspace %}
QueryRangeResponse generates response for /api/v1/query_range.
See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
{% func QueryRangeResponse(isPartial bool, rs []netstorage.Result) %}
{% func QueryRangeResponse(isPartial bool, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) %}
{
{% code
seriesCount := len(rs)
pointsCount := 0
%}
"status":"success",
"isPartial":{% if isPartial %}true{% else %}false{% endif %},
"data":{
@ -14,13 +19,20 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
"result":[
{% if len(rs) > 0 %}
{%= queryRangeLine(&rs[0]) %}
{% code pointsCount += len(rs[0].Values) %}
{% code rs = rs[1:] %}
{% for i := range rs %}
,{%= queryRangeLine(&rs[i]) %}
{% code pointsCount += len(rs[i].Values) %}
{% endfor %}
{% endif %}
]
}
{% code
qt.Printf("generate /api/v1/query_range response for series=%d, points=%d", seriesCount, pointsCount)
qtDone()
%}
{%= dumpQueryTrace(qt) %}
}
{% endfunc %}

View file

@ -7,124 +7,145 @@ package prometheus
//line app/vmselect/prometheus/query_range_response.qtpl:1
import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)
// QueryRangeResponse generates response for /api/v1/query_range.See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
//line app/vmselect/prometheus/query_range_response.qtpl:8
//line app/vmselect/prometheus/query_range_response.qtpl:9
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line app/vmselect/prometheus/query_range_response.qtpl:8
//line app/vmselect/prometheus/query_range_response.qtpl:9
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line app/vmselect/prometheus/query_range_response.qtpl:8
func StreamQueryRangeResponse(qw422016 *qt422016.Writer, isPartial bool, rs []netstorage.Result) {
//line app/vmselect/prometheus/query_range_response.qtpl:8
qw422016.N().S(`{"status":"success","isPartial":`)
//line app/vmselect/prometheus/query_range_response.qtpl:11
if isPartial {
//line app/vmselect/prometheus/query_range_response.qtpl:11
qw422016.N().S(`true`)
//line app/vmselect/prometheus/query_range_response.qtpl:11
} else {
//line app/vmselect/prometheus/query_range_response.qtpl:11
qw422016.N().S(`false`)
//line app/vmselect/prometheus/query_range_response.qtpl:11
}
//line app/vmselect/prometheus/query_range_response.qtpl:11
qw422016.N().S(`,"data":{"resultType":"matrix","result":[`)
//line app/vmselect/prometheus/query_range_response.qtpl:15
if len(rs) > 0 {
//line app/vmselect/prometheus/query_range_response.qtpl:9
func StreamQueryRangeResponse(qw422016 *qt422016.Writer, isPartial bool, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/query_range_response.qtpl:9
qw422016.N().S(`{`)
//line app/vmselect/prometheus/query_range_response.qtpl:12
seriesCount := len(rs)
pointsCount := 0
//line app/vmselect/prometheus/query_range_response.qtpl:14
qw422016.N().S(`"status":"success","isPartial":`)
//line app/vmselect/prometheus/query_range_response.qtpl:16
if isPartial {
//line app/vmselect/prometheus/query_range_response.qtpl:16
qw422016.N().S(`true`)
//line app/vmselect/prometheus/query_range_response.qtpl:16
} else {
//line app/vmselect/prometheus/query_range_response.qtpl:16
qw422016.N().S(`false`)
//line app/vmselect/prometheus/query_range_response.qtpl:16
}
//line app/vmselect/prometheus/query_range_response.qtpl:16
qw422016.N().S(`,"data":{"resultType":"matrix","result":[`)
//line app/vmselect/prometheus/query_range_response.qtpl:20
if len(rs) > 0 {
//line app/vmselect/prometheus/query_range_response.qtpl:21
streamqueryRangeLine(qw422016, &rs[0])
//line app/vmselect/prometheus/query_range_response.qtpl:17
//line app/vmselect/prometheus/query_range_response.qtpl:22
pointsCount += len(rs[0].Values)
//line app/vmselect/prometheus/query_range_response.qtpl:23
rs = rs[1:]
//line app/vmselect/prometheus/query_range_response.qtpl:18
//line app/vmselect/prometheus/query_range_response.qtpl:24
for i := range rs {
//line app/vmselect/prometheus/query_range_response.qtpl:18
//line app/vmselect/prometheus/query_range_response.qtpl:24
qw422016.N().S(`,`)
//line app/vmselect/prometheus/query_range_response.qtpl:19
//line app/vmselect/prometheus/query_range_response.qtpl:25
streamqueryRangeLine(qw422016, &rs[i])
//line app/vmselect/prometheus/query_range_response.qtpl:20
//line app/vmselect/prometheus/query_range_response.qtpl:26
pointsCount += len(rs[i].Values)
//line app/vmselect/prometheus/query_range_response.qtpl:27
}
//line app/vmselect/prometheus/query_range_response.qtpl:21
//line app/vmselect/prometheus/query_range_response.qtpl:28
}
//line app/vmselect/prometheus/query_range_response.qtpl:21
qw422016.N().S(`]}}`)
//line app/vmselect/prometheus/query_range_response.qtpl:25
}
//line app/vmselect/prometheus/query_range_response.qtpl:28
qw422016.N().S(`]}`)
//line app/vmselect/prometheus/query_range_response.qtpl:32
qt.Printf("generate /api/v1/query_range response for series=%d, points=%d", seriesCount, pointsCount)
qtDone()
//line app/vmselect/prometheus/query_range_response.qtpl:25
func WriteQueryRangeResponse(qq422016 qtio422016.Writer, isPartial bool, rs []netstorage.Result) {
//line app/vmselect/prometheus/query_range_response.qtpl:25
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/query_range_response.qtpl:25
StreamQueryRangeResponse(qw422016, isPartial, rs)
//line app/vmselect/prometheus/query_range_response.qtpl:25
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/query_range_response.qtpl:25
}
//line app/vmselect/prometheus/query_range_response.qtpl:25
func QueryRangeResponse(isPartial bool, rs []netstorage.Result) string {
//line app/vmselect/prometheus/query_range_response.qtpl:25
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/query_range_response.qtpl:25
WriteQueryRangeResponse(qb422016, isPartial, rs)
//line app/vmselect/prometheus/query_range_response.qtpl:25
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/query_range_response.qtpl:25
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/query_range_response.qtpl:25
return qs422016
//line app/vmselect/prometheus/query_range_response.qtpl:25
}
//line app/vmselect/prometheus/query_range_response.qtpl:27
func streamqueryRangeLine(qw422016 *qt422016.Writer, r *netstorage.Result) {
//line app/vmselect/prometheus/query_range_response.qtpl:27
qw422016.N().S(`{"metric":`)
//line app/vmselect/prometheus/query_range_response.qtpl:29
streammetricNameObject(qw422016, &r.MetricName)
//line app/vmselect/prometheus/query_range_response.qtpl:29
qw422016.N().S(`,"values":`)
//line app/vmselect/prometheus/query_range_response.qtpl:30
streamvaluesWithTimestamps(qw422016, r.Values, r.Timestamps)
//line app/vmselect/prometheus/query_range_response.qtpl:30
//line app/vmselect/prometheus/query_range_response.qtpl:35
streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/query_range_response.qtpl:35
qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_range_response.qtpl:32
//line app/vmselect/prometheus/query_range_response.qtpl:37
}
//line app/vmselect/prometheus/query_range_response.qtpl:32
func writequeryRangeLine(qq422016 qtio422016.Writer, r *netstorage.Result) {
//line app/vmselect/prometheus/query_range_response.qtpl:32
//line app/vmselect/prometheus/query_range_response.qtpl:37
func WriteQueryRangeResponse(qq422016 qtio422016.Writer, isPartial bool, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/query_range_response.qtpl:37
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/query_range_response.qtpl:32
streamqueryRangeLine(qw422016, r)
//line app/vmselect/prometheus/query_range_response.qtpl:32
//line app/vmselect/prometheus/query_range_response.qtpl:37
StreamQueryRangeResponse(qw422016, isPartial, rs, qt, qtDone)
//line app/vmselect/prometheus/query_range_response.qtpl:37
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/query_range_response.qtpl:32
//line app/vmselect/prometheus/query_range_response.qtpl:37
}
//line app/vmselect/prometheus/query_range_response.qtpl:32
func queryRangeLine(r *netstorage.Result) string {
//line app/vmselect/prometheus/query_range_response.qtpl:32
//line app/vmselect/prometheus/query_range_response.qtpl:37
func QueryRangeResponse(isPartial bool, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) string {
//line app/vmselect/prometheus/query_range_response.qtpl:37
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/query_range_response.qtpl:32
writequeryRangeLine(qb422016, r)
//line app/vmselect/prometheus/query_range_response.qtpl:32
//line app/vmselect/prometheus/query_range_response.qtpl:37
WriteQueryRangeResponse(qb422016, isPartial, rs, qt, qtDone)
//line app/vmselect/prometheus/query_range_response.qtpl:37
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/query_range_response.qtpl:32
//line app/vmselect/prometheus/query_range_response.qtpl:37
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/query_range_response.qtpl:32
//line app/vmselect/prometheus/query_range_response.qtpl:37
return qs422016
//line app/vmselect/prometheus/query_range_response.qtpl:32
//line app/vmselect/prometheus/query_range_response.qtpl:37
}
//line app/vmselect/prometheus/query_range_response.qtpl:39
func streamqueryRangeLine(qw422016 *qt422016.Writer, r *netstorage.Result) {
//line app/vmselect/prometheus/query_range_response.qtpl:39
qw422016.N().S(`{"metric":`)
//line app/vmselect/prometheus/query_range_response.qtpl:41
streammetricNameObject(qw422016, &r.MetricName)
//line app/vmselect/prometheus/query_range_response.qtpl:41
qw422016.N().S(`,"values":`)
//line app/vmselect/prometheus/query_range_response.qtpl:42
streamvaluesWithTimestamps(qw422016, r.Values, r.Timestamps)
//line app/vmselect/prometheus/query_range_response.qtpl:42
qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_range_response.qtpl:44
}
//line app/vmselect/prometheus/query_range_response.qtpl:44
func writequeryRangeLine(qq422016 qtio422016.Writer, r *netstorage.Result) {
//line app/vmselect/prometheus/query_range_response.qtpl:44
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/query_range_response.qtpl:44
streamqueryRangeLine(qw422016, r)
//line app/vmselect/prometheus/query_range_response.qtpl:44
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/query_range_response.qtpl:44
}
//line app/vmselect/prometheus/query_range_response.qtpl:44
func queryRangeLine(r *netstorage.Result) string {
//line app/vmselect/prometheus/query_range_response.qtpl:44
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/query_range_response.qtpl:44
writequeryRangeLine(qb422016, r)
//line app/vmselect/prometheus/query_range_response.qtpl:44
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/query_range_response.qtpl:44
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/query_range_response.qtpl:44
return qs422016
//line app/vmselect/prometheus/query_range_response.qtpl:44
}

View file

@ -1,12 +1,14 @@
{% import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
) %}
{% stripspace %}
QueryResponse generates response for /api/v1/query.
See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
{% func QueryResponse(isPartial bool, rs []netstorage.Result) %}
{% func QueryResponse(isPartial bool, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) %}
{
{% code seriesCount := len(rs) %}
"status":"success",
"isPartial":{% if isPartial %}true{% else %}false{% endif %},
"data":{
@ -28,6 +30,11 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
{% endif %}
]
}
{% code
qt.Printf("generate /api/v1/query response for series=%d", seriesCount)
qtDone()
%}
{%= dumpQueryTrace(qt) %}
}
{% endfunc %}
{% endstripspace %}

View file

@ -7,100 +7,114 @@ package prometheus
//line app/vmselect/prometheus/query_response.qtpl:1
import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
)
// QueryResponse generates response for /api/v1/query.See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
//line app/vmselect/prometheus/query_response.qtpl:8
//line app/vmselect/prometheus/query_response.qtpl:9
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line app/vmselect/prometheus/query_response.qtpl:8
//line app/vmselect/prometheus/query_response.qtpl:9
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line app/vmselect/prometheus/query_response.qtpl:8
func StreamQueryResponse(qw422016 *qt422016.Writer, isPartial bool, rs []netstorage.Result) {
//line app/vmselect/prometheus/query_response.qtpl:8
qw422016.N().S(`{"status":"success","isPartial":`)
//line app/vmselect/prometheus/query_response.qtpl:9
func StreamQueryResponse(qw422016 *qt422016.Writer, isPartial bool, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/query_response.qtpl:9
qw422016.N().S(`{`)
//line app/vmselect/prometheus/query_response.qtpl:11
seriesCount := len(rs)
//line app/vmselect/prometheus/query_response.qtpl:11
qw422016.N().S(`"status":"success","isPartial":`)
//line app/vmselect/prometheus/query_response.qtpl:13
if isPartial {
//line app/vmselect/prometheus/query_response.qtpl:11
//line app/vmselect/prometheus/query_response.qtpl:13
qw422016.N().S(`true`)
//line app/vmselect/prometheus/query_response.qtpl:11
//line app/vmselect/prometheus/query_response.qtpl:13
} else {
//line app/vmselect/prometheus/query_response.qtpl:11
//line app/vmselect/prometheus/query_response.qtpl:13
qw422016.N().S(`false`)
//line app/vmselect/prometheus/query_response.qtpl:11
//line app/vmselect/prometheus/query_response.qtpl:13
}
//line app/vmselect/prometheus/query_response.qtpl:11
//line app/vmselect/prometheus/query_response.qtpl:13
qw422016.N().S(`,"data":{"resultType":"vector","result":[`)
//line app/vmselect/prometheus/query_response.qtpl:15
//line app/vmselect/prometheus/query_response.qtpl:17
if len(rs) > 0 {
//line app/vmselect/prometheus/query_response.qtpl:15
//line app/vmselect/prometheus/query_response.qtpl:17
qw422016.N().S(`{"metric":`)
//line app/vmselect/prometheus/query_response.qtpl:17
//line app/vmselect/prometheus/query_response.qtpl:19
streammetricNameObject(qw422016, &rs[0].MetricName)
//line app/vmselect/prometheus/query_response.qtpl:17
//line app/vmselect/prometheus/query_response.qtpl:19
qw422016.N().S(`,"value":`)
//line app/vmselect/prometheus/query_response.qtpl:18
streammetricRow(qw422016, rs[0].Timestamps[0], rs[0].Values[0])
//line app/vmselect/prometheus/query_response.qtpl:18
qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_response.qtpl:20
streammetricRow(qw422016, rs[0].Timestamps[0], rs[0].Values[0])
//line app/vmselect/prometheus/query_response.qtpl:20
qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_response.qtpl:22
rs = rs[1:]
//line app/vmselect/prometheus/query_response.qtpl:21
//line app/vmselect/prometheus/query_response.qtpl:23
for i := range rs {
//line app/vmselect/prometheus/query_response.qtpl:22
//line app/vmselect/prometheus/query_response.qtpl:24
r := &rs[i]
//line app/vmselect/prometheus/query_response.qtpl:22
//line app/vmselect/prometheus/query_response.qtpl:24
qw422016.N().S(`,{"metric":`)
//line app/vmselect/prometheus/query_response.qtpl:24
//line app/vmselect/prometheus/query_response.qtpl:26
streammetricNameObject(qw422016, &r.MetricName)
//line app/vmselect/prometheus/query_response.qtpl:24
//line app/vmselect/prometheus/query_response.qtpl:26
qw422016.N().S(`,"value":`)
//line app/vmselect/prometheus/query_response.qtpl:25
streammetricRow(qw422016, r.Timestamps[0], r.Values[0])
//line app/vmselect/prometheus/query_response.qtpl:25
qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_response.qtpl:27
streammetricRow(qw422016, r.Timestamps[0], r.Values[0])
//line app/vmselect/prometheus/query_response.qtpl:27
qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_response.qtpl:29
}
//line app/vmselect/prometheus/query_response.qtpl:28
//line app/vmselect/prometheus/query_response.qtpl:30
}
//line app/vmselect/prometheus/query_response.qtpl:28
qw422016.N().S(`]}}`)
//line app/vmselect/prometheus/query_response.qtpl:32
//line app/vmselect/prometheus/query_response.qtpl:30
qw422016.N().S(`]}`)
//line app/vmselect/prometheus/query_response.qtpl:34
qt.Printf("generate /api/v1/query response for series=%d", seriesCount)
qtDone()
//line app/vmselect/prometheus/query_response.qtpl:37
streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/query_response.qtpl:37
qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_response.qtpl:39
}
//line app/vmselect/prometheus/query_response.qtpl:32
func WriteQueryResponse(qq422016 qtio422016.Writer, isPartial bool, rs []netstorage.Result) {
//line app/vmselect/prometheus/query_response.qtpl:32
//line app/vmselect/prometheus/query_response.qtpl:39
func WriteQueryResponse(qq422016 qtio422016.Writer, isPartial bool, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/query_response.qtpl:39
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/query_response.qtpl:32
StreamQueryResponse(qw422016, isPartial, rs)
//line app/vmselect/prometheus/query_response.qtpl:32
//line app/vmselect/prometheus/query_response.qtpl:39
StreamQueryResponse(qw422016, isPartial, rs, qt, qtDone)
//line app/vmselect/prometheus/query_response.qtpl:39
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/query_response.qtpl:32
//line app/vmselect/prometheus/query_response.qtpl:39
}
//line app/vmselect/prometheus/query_response.qtpl:32
func QueryResponse(isPartial bool, rs []netstorage.Result) string {
//line app/vmselect/prometheus/query_response.qtpl:32
//line app/vmselect/prometheus/query_response.qtpl:39
func QueryResponse(isPartial bool, rs []netstorage.Result, qt *querytracer.Tracer, qtDone func()) string {
//line app/vmselect/prometheus/query_response.qtpl:39
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/query_response.qtpl:32
WriteQueryResponse(qb422016, isPartial, rs)
//line app/vmselect/prometheus/query_response.qtpl:32
//line app/vmselect/prometheus/query_response.qtpl:39
WriteQueryResponse(qb422016, isPartial, rs, qt, qtDone)
//line app/vmselect/prometheus/query_response.qtpl:39
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/query_response.qtpl:32
//line app/vmselect/prometheus/query_response.qtpl:39
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/query_response.qtpl:32
//line app/vmselect/prometheus/query_response.qtpl:39
return qs422016
//line app/vmselect/prometheus/query_response.qtpl:32
//line app/vmselect/prometheus/query_response.qtpl:39
}

View file

@ -1,25 +1,38 @@
{% import (
"github.com/valyala/quicktemplate"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
) %}
{% stripspace %}
SeriesResponse generates response for /api/v1/series.
See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
{% func SeriesResponse(isPartial bool, resultsCh <-chan *quicktemplate.ByteBuffer) %}
{% func SeriesResponse(isPartial bool, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) %}
{
{% code seriesCount := 0 %}
"status":"success",
"isPartial":{% if isPartial %}true{% else %}false{% endif %},
"data":[
{% code bb, ok := <-resultsCh %}
{% if ok %}
{%z= bb.B %}
{% code quicktemplate.ReleaseByteBuffer(bb) %}
{% code
quicktemplate.ReleaseByteBuffer(bb)
seriesCount++
%}
{% for bb := range resultsCh %}
,{%z= bb.B %}
{% code quicktemplate.ReleaseByteBuffer(bb) %}
{% code
quicktemplate.ReleaseByteBuffer(bb)
seriesCount++
%}
{% endfor %}
{% endif %}
]
{% code
qt.Printf("generate response: series=%d", seriesCount)
qtDone()
%}
{%= dumpQueryTrace(qt) %}
}
{% endfunc %}
{% endstripspace %}

View file

@ -6,90 +6,106 @@ package prometheus
//line app/vmselect/prometheus/series_response.qtpl:1
import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/valyala/quicktemplate"
)
// SeriesResponse generates response for /api/v1/series.See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
//line app/vmselect/prometheus/series_response.qtpl:8
//line app/vmselect/prometheus/series_response.qtpl:9
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line app/vmselect/prometheus/series_response.qtpl:8
//line app/vmselect/prometheus/series_response.qtpl:9
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line app/vmselect/prometheus/series_response.qtpl:8
func StreamSeriesResponse(qw422016 *qt422016.Writer, isPartial bool, resultsCh <-chan *quicktemplate.ByteBuffer) {
//line app/vmselect/prometheus/series_response.qtpl:8
qw422016.N().S(`{"status":"success","isPartial":`)
//line app/vmselect/prometheus/series_response.qtpl:9
func StreamSeriesResponse(qw422016 *qt422016.Writer, isPartial bool, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/series_response.qtpl:9
qw422016.N().S(`{`)
//line app/vmselect/prometheus/series_response.qtpl:11
if isPartial {
seriesCount := 0
//line app/vmselect/prometheus/series_response.qtpl:11
qw422016.N().S(`true`)
//line app/vmselect/prometheus/series_response.qtpl:11
} else {
//line app/vmselect/prometheus/series_response.qtpl:11
qw422016.N().S(`false`)
//line app/vmselect/prometheus/series_response.qtpl:11
}
//line app/vmselect/prometheus/series_response.qtpl:11
qw422016.N().S(`,"data":[`)
qw422016.N().S(`"status":"success","isPartial":`)
//line app/vmselect/prometheus/series_response.qtpl:13
if isPartial {
//line app/vmselect/prometheus/series_response.qtpl:13
qw422016.N().S(`true`)
//line app/vmselect/prometheus/series_response.qtpl:13
} else {
//line app/vmselect/prometheus/series_response.qtpl:13
qw422016.N().S(`false`)
//line app/vmselect/prometheus/series_response.qtpl:13
}
//line app/vmselect/prometheus/series_response.qtpl:13
qw422016.N().S(`,"data":[`)
//line app/vmselect/prometheus/series_response.qtpl:15
bb, ok := <-resultsCh
//line app/vmselect/prometheus/series_response.qtpl:14
if ok {
//line app/vmselect/prometheus/series_response.qtpl:15
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/series_response.qtpl:16
quicktemplate.ReleaseByteBuffer(bb)
if ok {
//line app/vmselect/prometheus/series_response.qtpl:17
for bb := range resultsCh {
//line app/vmselect/prometheus/series_response.qtpl:17
qw422016.N().S(`,`)
//line app/vmselect/prometheus/series_response.qtpl:18
qw422016.N().Z(bb.B)
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/series_response.qtpl:19
quicktemplate.ReleaseByteBuffer(bb)
seriesCount++
//line app/vmselect/prometheus/series_response.qtpl:22
for bb := range resultsCh {
//line app/vmselect/prometheus/series_response.qtpl:22
qw422016.N().S(`,`)
//line app/vmselect/prometheus/series_response.qtpl:23
qw422016.N().Z(bb.B)
//line app/vmselect/prometheus/series_response.qtpl:25
quicktemplate.ReleaseByteBuffer(bb)
seriesCount++
//line app/vmselect/prometheus/series_response.qtpl:20
//line app/vmselect/prometheus/series_response.qtpl:28
}
//line app/vmselect/prometheus/series_response.qtpl:21
//line app/vmselect/prometheus/series_response.qtpl:29
}
//line app/vmselect/prometheus/series_response.qtpl:21
qw422016.N().S(`]}`)
//line app/vmselect/prometheus/series_response.qtpl:24
//line app/vmselect/prometheus/series_response.qtpl:29
qw422016.N().S(`]`)
//line app/vmselect/prometheus/series_response.qtpl:32
qt.Printf("generate response: series=%d", seriesCount)
qtDone()
//line app/vmselect/prometheus/series_response.qtpl:35
streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/series_response.qtpl:35
qw422016.N().S(`}`)
//line app/vmselect/prometheus/series_response.qtpl:37
}
//line app/vmselect/prometheus/series_response.qtpl:24
func WriteSeriesResponse(qq422016 qtio422016.Writer, isPartial bool, resultsCh <-chan *quicktemplate.ByteBuffer) {
//line app/vmselect/prometheus/series_response.qtpl:24
//line app/vmselect/prometheus/series_response.qtpl:37
func WriteSeriesResponse(qq422016 qtio422016.Writer, isPartial bool, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) {
//line app/vmselect/prometheus/series_response.qtpl:37
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/series_response.qtpl:24
StreamSeriesResponse(qw422016, isPartial, resultsCh)
//line app/vmselect/prometheus/series_response.qtpl:24
//line app/vmselect/prometheus/series_response.qtpl:37
StreamSeriesResponse(qw422016, isPartial, resultsCh, qt, qtDone)
//line app/vmselect/prometheus/series_response.qtpl:37
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/series_response.qtpl:24
//line app/vmselect/prometheus/series_response.qtpl:37
}
//line app/vmselect/prometheus/series_response.qtpl:24
func SeriesResponse(isPartial bool, resultsCh <-chan *quicktemplate.ByteBuffer) string {
//line app/vmselect/prometheus/series_response.qtpl:24
//line app/vmselect/prometheus/series_response.qtpl:37
func SeriesResponse(isPartial bool, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) string {
//line app/vmselect/prometheus/series_response.qtpl:37
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/series_response.qtpl:24
WriteSeriesResponse(qb422016, isPartial, resultsCh)
//line app/vmselect/prometheus/series_response.qtpl:24
//line app/vmselect/prometheus/series_response.qtpl:37
WriteSeriesResponse(qb422016, isPartial, resultsCh, qt, qtDone)
//line app/vmselect/prometheus/series_response.qtpl:37
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/series_response.qtpl:24
//line app/vmselect/prometheus/series_response.qtpl:37
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/series_response.qtpl:24
//line app/vmselect/prometheus/series_response.qtpl:37
return qs422016
//line app/vmselect/prometheus/series_response.qtpl:24
//line app/vmselect/prometheus/series_response.qtpl:37
}

View file

@ -1,4 +1,5 @@
{% import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
) %}
@ -45,4 +46,9 @@
]
{% endfunc %}
{% func dumpQueryTrace(qt *querytracer.Tracer) %}
{% code traceJSON := qt.ToJSON() %}
{% if traceJSON != "" %},"trace":{%s= traceJSON %}{% endif %}
{% endfunc %}
{% endstripspace %}

View file

@ -6,212 +6,255 @@ package prometheus
//line app/vmselect/prometheus/util.qtpl:1
import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)
//line app/vmselect/prometheus/util.qtpl:7
//line app/vmselect/prometheus/util.qtpl:8
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line app/vmselect/prometheus/util.qtpl:7
//line app/vmselect/prometheus/util.qtpl:8
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line app/vmselect/prometheus/util.qtpl:7
//line app/vmselect/prometheus/util.qtpl:8
func streammetricNameObject(qw422016 *qt422016.Writer, mn *storage.MetricName) {
//line app/vmselect/prometheus/util.qtpl:7
//line app/vmselect/prometheus/util.qtpl:8
qw422016.N().S(`{`)
//line app/vmselect/prometheus/util.qtpl:9
//line app/vmselect/prometheus/util.qtpl:10
if len(mn.MetricGroup) > 0 {
//line app/vmselect/prometheus/util.qtpl:9
//line app/vmselect/prometheus/util.qtpl:10
qw422016.N().S(`"__name__":`)
//line app/vmselect/prometheus/util.qtpl:10
qw422016.N().QZ(mn.MetricGroup)
//line app/vmselect/prometheus/util.qtpl:10
if len(mn.Tags) > 0 {
//line app/vmselect/prometheus/util.qtpl:10
qw422016.N().S(`,`)
//line app/vmselect/prometheus/util.qtpl:10
}
//line app/vmselect/prometheus/util.qtpl:11
}
qw422016.N().QZ(mn.MetricGroup)
//line app/vmselect/prometheus/util.qtpl:11
if len(mn.Tags) > 0 {
//line app/vmselect/prometheus/util.qtpl:11
qw422016.N().S(`,`)
//line app/vmselect/prometheus/util.qtpl:11
}
//line app/vmselect/prometheus/util.qtpl:12
for j := range mn.Tags {
}
//line app/vmselect/prometheus/util.qtpl:13
for j := range mn.Tags {
//line app/vmselect/prometheus/util.qtpl:14
tag := &mn.Tags[j]
//line app/vmselect/prometheus/util.qtpl:14
//line app/vmselect/prometheus/util.qtpl:15
qw422016.N().QZ(tag.Key)
//line app/vmselect/prometheus/util.qtpl:14
//line app/vmselect/prometheus/util.qtpl:15
qw422016.N().S(`:`)
//line app/vmselect/prometheus/util.qtpl:14
//line app/vmselect/prometheus/util.qtpl:15
qw422016.N().QZ(tag.Value)
//line app/vmselect/prometheus/util.qtpl:14
//line app/vmselect/prometheus/util.qtpl:15
if j+1 < len(mn.Tags) {
//line app/vmselect/prometheus/util.qtpl:14
//line app/vmselect/prometheus/util.qtpl:15
qw422016.N().S(`,`)
//line app/vmselect/prometheus/util.qtpl:14
//line app/vmselect/prometheus/util.qtpl:15
}
//line app/vmselect/prometheus/util.qtpl:15
//line app/vmselect/prometheus/util.qtpl:16
}
//line app/vmselect/prometheus/util.qtpl:15
//line app/vmselect/prometheus/util.qtpl:16
qw422016.N().S(`}`)
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
}
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
func writemetricNameObject(qq422016 qtio422016.Writer, mn *storage.MetricName) {
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
streammetricNameObject(qw422016, mn)
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
}
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
func metricNameObject(mn *storage.MetricName) string {
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
writemetricNameObject(qb422016, mn)
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
return qs422016
//line app/vmselect/prometheus/util.qtpl:17
//line app/vmselect/prometheus/util.qtpl:18
}
//line app/vmselect/prometheus/util.qtpl:19
//line app/vmselect/prometheus/util.qtpl:20
func streammetricRow(qw422016 *qt422016.Writer, timestamp int64, value float64) {
//line app/vmselect/prometheus/util.qtpl:19
qw422016.N().S(`[`)
//line app/vmselect/prometheus/util.qtpl:20
qw422016.N().S(`[`)
//line app/vmselect/prometheus/util.qtpl:21
qw422016.N().F(float64(timestamp) / 1e3)
//line app/vmselect/prometheus/util.qtpl:20
//line app/vmselect/prometheus/util.qtpl:21
qw422016.N().S(`,"`)
//line app/vmselect/prometheus/util.qtpl:20
//line app/vmselect/prometheus/util.qtpl:21
qw422016.N().F(value)
//line app/vmselect/prometheus/util.qtpl:20
//line app/vmselect/prometheus/util.qtpl:21
qw422016.N().S(`"]`)
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
}
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
func writemetricRow(qq422016 qtio422016.Writer, timestamp int64, value float64) {
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
streammetricRow(qw422016, timestamp, value)
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
}
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
func metricRow(timestamp int64, value float64) string {
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
writemetricRow(qb422016, timestamp, value)
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
return qs422016
//line app/vmselect/prometheus/util.qtpl:21
//line app/vmselect/prometheus/util.qtpl:22
}
//line app/vmselect/prometheus/util.qtpl:23
//line app/vmselect/prometheus/util.qtpl:24
func streamvaluesWithTimestamps(qw422016 *qt422016.Writer, values []float64, timestamps []int64) {
//line app/vmselect/prometheus/util.qtpl:24
//line app/vmselect/prometheus/util.qtpl:25
if len(values) == 0 {
//line app/vmselect/prometheus/util.qtpl:24
//line app/vmselect/prometheus/util.qtpl:25
qw422016.N().S(`[]`)
//line app/vmselect/prometheus/util.qtpl:26
//line app/vmselect/prometheus/util.qtpl:27
return
//line app/vmselect/prometheus/util.qtpl:27
//line app/vmselect/prometheus/util.qtpl:28
}
//line app/vmselect/prometheus/util.qtpl:27
//line app/vmselect/prometheus/util.qtpl:28
qw422016.N().S(`[`)
//line app/vmselect/prometheus/util.qtpl:29
//line app/vmselect/prometheus/util.qtpl:30
/* inline metricRow call here for the sake of performance optimization */
//line app/vmselect/prometheus/util.qtpl:29
//line app/vmselect/prometheus/util.qtpl:30
qw422016.N().S(`[`)
//line app/vmselect/prometheus/util.qtpl:30
//line app/vmselect/prometheus/util.qtpl:31
qw422016.N().F(float64(timestamps[0]) / 1e3)
//line app/vmselect/prometheus/util.qtpl:30
//line app/vmselect/prometheus/util.qtpl:31
qw422016.N().S(`,"`)
//line app/vmselect/prometheus/util.qtpl:30
//line app/vmselect/prometheus/util.qtpl:31
qw422016.N().F(values[0])
//line app/vmselect/prometheus/util.qtpl:30
//line app/vmselect/prometheus/util.qtpl:31
qw422016.N().S(`"]`)
//line app/vmselect/prometheus/util.qtpl:32
//line app/vmselect/prometheus/util.qtpl:33
timestamps = timestamps[1:]
values = values[1:]
//line app/vmselect/prometheus/util.qtpl:35
//line app/vmselect/prometheus/util.qtpl:36
if len(values) > 0 {
//line app/vmselect/prometheus/util.qtpl:37
//line app/vmselect/prometheus/util.qtpl:38
// Remove bounds check inside the loop below
_ = timestamps[len(values)-1]
//line app/vmselect/prometheus/util.qtpl:40
for i, v := range values {
//line app/vmselect/prometheus/util.qtpl:41
for i, v := range values {
//line app/vmselect/prometheus/util.qtpl:42
/* inline metricRow call here for the sake of performance optimization */
//line app/vmselect/prometheus/util.qtpl:41
//line app/vmselect/prometheus/util.qtpl:42
qw422016.N().S(`,[`)
//line app/vmselect/prometheus/util.qtpl:42
qw422016.N().F(float64(timestamps[i]) / 1e3)
//line app/vmselect/prometheus/util.qtpl:42
qw422016.N().S(`,"`)
//line app/vmselect/prometheus/util.qtpl:42
qw422016.N().F(v)
//line app/vmselect/prometheus/util.qtpl:42
qw422016.N().S(`"]`)
//line app/vmselect/prometheus/util.qtpl:43
qw422016.N().F(float64(timestamps[i]) / 1e3)
//line app/vmselect/prometheus/util.qtpl:43
qw422016.N().S(`,"`)
//line app/vmselect/prometheus/util.qtpl:43
qw422016.N().F(v)
//line app/vmselect/prometheus/util.qtpl:43
qw422016.N().S(`"]`)
//line app/vmselect/prometheus/util.qtpl:44
}
//line app/vmselect/prometheus/util.qtpl:44
//line app/vmselect/prometheus/util.qtpl:45
}
//line app/vmselect/prometheus/util.qtpl:44
//line app/vmselect/prometheus/util.qtpl:45
qw422016.N().S(`]`)
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
}
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
// writevaluesWithTimestamps writes the JSON array of `[timestamp,"value"]`
// pairs to qq422016 by wrapping streamvaluesWithTimestamps with a pooled
// quicktemplate writer.
// NOTE: generated code (see util.qtpl); edit the template, not this file.
func writevaluesWithTimestamps(qq422016 qtio422016.Writer, values []float64, timestamps []int64) {
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
streamvaluesWithTimestamps(qw422016, values, timestamps)
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
}
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
// valuesWithTimestamps renders the JSON array of `[timestamp,"value"]` pairs
// for the given values and timestamps and returns it as a string, using a
// pooled byte buffer for intermediate output.
// NOTE: generated code (see util.qtpl); edit the template, not this file.
func valuesWithTimestamps(values []float64, timestamps []int64) string {
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
writevaluesWithTimestamps(qb422016, values, timestamps)
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
return qs422016
//line app/vmselect/prometheus/util.qtpl:46
//line app/vmselect/prometheus/util.qtpl:47
}
//line app/vmselect/prometheus/util.qtpl:49
// streamdumpQueryTrace appends `,"trace":<json>` to qw422016 when the given
// tracer produced a non-empty JSON trace; it writes nothing otherwise, so the
// surrounding JSON object stays valid when tracing is disabled.
// NOTE: generated code (see util.qtpl); edit the template, not this file.
func streamdumpQueryTrace(qw422016 *qt422016.Writer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/util.qtpl:50
traceJSON := qt.ToJSON()
//line app/vmselect/prometheus/util.qtpl:51
if traceJSON != "" {
//line app/vmselect/prometheus/util.qtpl:51
qw422016.N().S(`,"trace":`)
//line app/vmselect/prometheus/util.qtpl:51
qw422016.N().S(traceJSON)
//line app/vmselect/prometheus/util.qtpl:51
}
//line app/vmselect/prometheus/util.qtpl:52
}
//line app/vmselect/prometheus/util.qtpl:52
// writedumpQueryTrace writes the optional `,"trace":<json>` fragment to
// qq422016 by wrapping streamdumpQueryTrace with a pooled quicktemplate writer.
// NOTE: generated code (see util.qtpl); edit the template, not this file.
func writedumpQueryTrace(qq422016 qtio422016.Writer, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/util.qtpl:52
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/util.qtpl:52
streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/util.qtpl:52
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/util.qtpl:52
}
//line app/vmselect/prometheus/util.qtpl:52
// dumpQueryTrace renders the optional `,"trace":<json>` fragment for the given
// tracer and returns it as a string (empty when tracing produced no output),
// using a pooled byte buffer for intermediate storage.
// NOTE: generated code (see util.qtpl); edit the template, not this file.
func dumpQueryTrace(qt *querytracer.Tracer) string {
//line app/vmselect/prometheus/util.qtpl:52
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/util.qtpl:52
writedumpQueryTrace(qb422016, qt)
//line app/vmselect/prometheus/util.qtpl:52
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/util.qtpl:52
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/util.qtpl:52
return qs422016
//line app/vmselect/prometheus/util.qtpl:52
}

View file

@ -17,6 +17,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/metricsql"
@ -104,6 +105,7 @@ type EvalConfig struct {
Deadline searchutils.Deadline
// Whether the response can be cached.
MayCache bool
// LookbackDelta is analog to `-query.lookback-delta` from Prometheus.
@ -207,19 +209,40 @@ func getTimestamps(start, end, step int64) []int64 {
return timestamps
}
func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
func evalExpr(qt *querytracer.Tracer, ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
qt = qt.NewChild()
rv, err := evalExprInternal(qt, ec, e)
if err != nil {
return nil, err
}
if qt.Enabled() {
query := e.AppendString(nil)
seriesCount := len(rv)
pointsPerSeries := 0
if len(rv) > 0 {
pointsPerSeries = len(rv[0].Timestamps)
}
pointsCount := seriesCount * pointsPerSeries
mayCache := ec.mayCache()
qt.Donef("eval: query=%s, timeRange=[%d..%d], step=%d, mayCache=%v: series=%d, points=%d, pointsPerSeries=%d",
query, ec.Start, ec.End, ec.Step, mayCache, seriesCount, pointsCount, pointsPerSeries)
}
return rv, nil
}
func evalExprInternal(qt *querytracer.Tracer, ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
if me, ok := e.(*metricsql.MetricExpr); ok {
re := &metricsql.RollupExpr{
Expr: me,
}
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
rv, err := evalRollupFunc(qt, ec, "default_rollup", rollupDefault, e, re, nil)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %w`, me.AppendString(nil), err)
}
return rv, nil
}
if re, ok := e.(*metricsql.RollupExpr); ok {
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
rv, err := evalRollupFunc(qt, ec, "default_rollup", rollupDefault, e, re, nil)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %w`, re.AppendString(nil), err)
}
@ -228,26 +251,12 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
if fe, ok := e.(*metricsql.FuncExpr); ok {
nrf := getRollupFunc(fe.Name)
if nrf == nil {
args, err := evalExprs(ec, fe.Args)
if err != nil {
return nil, err
}
tf := getTransformFunc(fe.Name)
if tf == nil {
return nil, fmt.Errorf(`unknown func %q`, fe.Name)
}
tfa := &transformFuncArg{
ec: ec,
fe: fe,
args: args,
}
rv, err := tf(tfa)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
}
return rv, nil
qtChild := qt.NewChild()
rv, err := evalTransformFunc(qtChild, ec, fe)
qtChild.Donef("transform %s(): series=%d", fe.Name, len(rv))
return rv, err
}
args, re, err := evalRollupFuncArgs(ec, fe)
args, re, err := evalRollupFuncArgs(qt, ec, fe)
if err != nil {
return nil, err
}
@ -255,79 +264,23 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
if err != nil {
return nil, err
}
rv, err := evalRollupFunc(ec, fe.Name, rf, e, re, nil)
rv, err := evalRollupFunc(qt, ec, fe.Name, rf, e, re, nil)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
}
return rv, nil
}
if ae, ok := e.(*metricsql.AggrFuncExpr); ok {
if callbacks := getIncrementalAggrFuncCallbacks(ae.Name); callbacks != nil {
fe, nrf := tryGetArgRollupFuncWithMetricExpr(ae)
if fe != nil {
// There is an optimized path for calculating metricsql.AggrFuncExpr over rollupFunc over metricsql.MetricExpr.
// The optimized path saves RAM for aggregates over big number of time series.
args, re, err := evalRollupFuncArgs(ec, fe)
if err != nil {
return nil, err
}
rf, err := nrf(args)
if err != nil {
return nil, err
}
iafc := newIncrementalAggrFuncContext(ae, callbacks)
return evalRollupFunc(ec, fe.Name, rf, e, re, iafc)
}
}
args, err := evalExprs(ec, ae.Args)
if err != nil {
return nil, err
}
af := getAggrFunc(ae.Name)
if af == nil {
return nil, fmt.Errorf(`unknown func %q`, ae.Name)
}
afa := &aggrFuncArg{
ae: ae,
args: args,
ec: ec,
}
rv, err := af(afa)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %w`, ae.AppendString(nil), err)
}
return rv, nil
qtChild := qt.NewChild()
rv, err := evalAggrFunc(qtChild, ec, ae)
qtChild.Donef("aggregate %s(): series=%d", ae.Name, len(rv))
return rv, err
}
if be, ok := e.(*metricsql.BinaryOpExpr); ok {
bf := getBinaryOpFunc(be.Op)
if bf == nil {
return nil, fmt.Errorf(`unknown binary op %q`, be.Op)
}
var err error
var tssLeft, tssRight []*timeseries
switch strings.ToLower(be.Op) {
case "and", "if":
// Fetch right-side series at first, since it usually contains
// lower number of time series for `and` and `if` operator.
// This should produce more specific label filters for the left side of the query.
// This, in turn, should reduce the time to select series for the left side of the query.
tssRight, tssLeft, err = execBinaryOpArgs(ec, be.Right, be.Left, be)
default:
tssLeft, tssRight, err = execBinaryOpArgs(ec, be.Left, be.Right, be)
}
if err != nil {
return nil, fmt.Errorf("cannot execute %q: %w", be.AppendString(nil), err)
}
bfa := &binaryOpFuncArg{
be: be,
left: tssLeft,
right: tssRight,
}
rv, err := bf(bfa)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %w`, be.AppendString(nil), err)
}
return rv, nil
qtChild := qt.NewChild()
rv, err := evalBinaryOp(qtChild, ec, be)
qtChild.Donef("binary op %q: series=%d", be.Op, len(rv))
return rv, err
}
if ne, ok := e.(*metricsql.NumberExpr); ok {
rv := evalNumber(ec, ne.N)
@ -346,7 +299,98 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
return nil, fmt.Errorf("unexpected expression %q", e.AppendString(nil))
}
func execBinaryOpArgs(ec *EvalConfig, exprFirst, exprSecond metricsql.Expr, be *metricsql.BinaryOpExpr) ([]*timeseries, []*timeseries, error) {
// evalTransformFunc evaluates a transform function expression fe: it first
// evaluates all the argument expressions (propagating the query tracer qt),
// then applies the transform function registered for fe.Name to them.
// Returns an error if fe.Name is not a known transform function or if the
// evaluation of fe or any of its arguments fails.
func evalTransformFunc(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.FuncExpr) ([]*timeseries, error) {
// Evaluate arguments before looking up the function, mirroring the order
// used by the rollup-function path.
args, err := evalExprs(qt, ec, fe.Args)
if err != nil {
return nil, err
}
tf := getTransformFunc(fe.Name)
if tf == nil {
return nil, fmt.Errorf(`unknown func %q`, fe.Name)
}
tfa := &transformFuncArg{
ec: ec,
fe: fe,
args: args,
}
rv, err := tf(tfa)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
}
return rv, nil
}
// evalAggrFunc evaluates an aggregate function expression ae.
// When ae supports incremental aggregation and its argument is a rollup
// function over a metric expression, it takes an optimized path that
// aggregates incrementally while series are fetched, saving RAM on big
// series counts. Otherwise it evaluates all arguments and applies the
// regular aggregate function registered for ae.Name.
func evalAggrFunc(qt *querytracer.Tracer, ec *EvalConfig, ae *metricsql.AggrFuncExpr) ([]*timeseries, error) {
if callbacks := getIncrementalAggrFuncCallbacks(ae.Name); callbacks != nil {
fe, nrf := tryGetArgRollupFuncWithMetricExpr(ae)
if fe != nil {
// There is an optimized path for calculating metricsql.AggrFuncExpr over rollupFunc over metricsql.MetricExpr.
// The optimized path saves RAM for aggregates over big number of time series.
args, re, err := evalRollupFuncArgs(qt, ec, fe)
if err != nil {
return nil, err
}
rf, err := nrf(args)
if err != nil {
return nil, err
}
iafc := newIncrementalAggrFuncContext(ae, callbacks)
return evalRollupFunc(qt, ec, fe.Name, rf, ae, re, iafc)
}
}
// Fallback: evaluate all the args, then apply the aggregate function to them.
args, err := evalExprs(qt, ec, ae.Args)
if err != nil {
return nil, err
}
af := getAggrFunc(ae.Name)
if af == nil {
return nil, fmt.Errorf(`unknown func %q`, ae.Name)
}
afa := &aggrFuncArg{
ae: ae,
args: args,
ec: ec,
}
rv, err := af(afa)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %w`, ae.AppendString(nil), err)
}
return rv, nil
}
// evalBinaryOp evaluates a binary operator expression be by evaluating both
// operand subexpressions and applying the operator's function to the results.
// For `and` and `if` the right side is evaluated first (see the inline comment
// below); for all other operators the left side goes first.
func evalBinaryOp(qt *querytracer.Tracer, ec *EvalConfig, be *metricsql.BinaryOpExpr) ([]*timeseries, error) {
bf := getBinaryOpFunc(be.Op)
if bf == nil {
return nil, fmt.Errorf(`unknown binary op %q`, be.Op)
}
var err error
var tssLeft, tssRight []*timeseries
switch strings.ToLower(be.Op) {
case "and", "if":
// Fetch right-side series at first, since it usually contains
// lower number of time series for `and` and `if` operator.
// This should produce more specific label filters for the left side of the query.
// This, in turn, should reduce the time to select series for the left side of the query.
tssRight, tssLeft, err = execBinaryOpArgs(qt, ec, be.Right, be.Left, be)
default:
tssLeft, tssRight, err = execBinaryOpArgs(qt, ec, be.Left, be.Right, be)
}
if err != nil {
return nil, fmt.Errorf("cannot execute %q: %w", be.AppendString(nil), err)
}
bfa := &binaryOpFuncArg{
be: be,
left: tssLeft,
right: tssRight,
}
rv, err := bf(bfa)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %w`, be.AppendString(nil), err)
}
return rv, nil
}
func execBinaryOpArgs(qt *querytracer.Tracer, ec *EvalConfig, exprFirst, exprSecond metricsql.Expr, be *metricsql.BinaryOpExpr) ([]*timeseries, []*timeseries, error) {
// Execute binary operation in the following way:
//
// 1) execute the exprFirst
@ -370,7 +414,7 @@ func execBinaryOpArgs(ec *EvalConfig, exprFirst, exprSecond metricsql.Expr, be *
//
// - Queries, which get additional labels from `info` metrics.
// See https://www.robustperception.io/exposing-the-software-version-to-prometheus
tssFirst, err := evalExpr(ec, exprFirst)
tssFirst, err := evalExpr(qt, ec, exprFirst)
if err != nil {
return nil, nil, err
}
@ -383,7 +427,7 @@ func execBinaryOpArgs(ec *EvalConfig, exprFirst, exprSecond metricsql.Expr, be *
lfs = metricsql.TrimFiltersByGroupModifier(lfs, be)
exprSecond = metricsql.PushdownBinaryOpFilters(exprSecond, lfs)
}
tssSecond, err := evalExpr(ec, exprSecond)
tssSecond, err := evalExpr(qt, ec, exprSecond)
if err != nil {
return nil, nil, err
}
@ -520,10 +564,10 @@ func tryGetArgRollupFuncWithMetricExpr(ae *metricsql.AggrFuncExpr) (*metricsql.F
return nil, nil
}
func evalExprs(ec *EvalConfig, es []metricsql.Expr) ([][]*timeseries, error) {
func evalExprs(qt *querytracer.Tracer, ec *EvalConfig, es []metricsql.Expr) ([][]*timeseries, error) {
var rvs [][]*timeseries
for _, e := range es {
rv, err := evalExpr(ec, e)
rv, err := evalExpr(qt, ec, e)
if err != nil {
return nil, err
}
@ -532,7 +576,7 @@ func evalExprs(ec *EvalConfig, es []metricsql.Expr) ([][]*timeseries, error) {
return rvs, nil
}
func evalRollupFuncArgs(ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{}, *metricsql.RollupExpr, error) {
func evalRollupFuncArgs(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{}, *metricsql.RollupExpr, error) {
var re *metricsql.RollupExpr
rollupArgIdx := metricsql.GetRollupArgIdx(fe)
if len(fe.Args) <= rollupArgIdx {
@ -545,7 +589,7 @@ func evalRollupFuncArgs(ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{},
args[i] = re
continue
}
ts, err := evalExpr(ec, arg)
ts, err := evalExpr(qt, ec, arg)
if err != nil {
return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %w", i+1, fe.AppendString(nil), err)
}
@ -585,11 +629,12 @@ func getRollupExprArg(arg metricsql.Expr) *metricsql.RollupExpr {
// expr may contain:
// - rollupFunc(m) if iafc is nil
// - aggrFunc(rollupFunc(m)) if iafc isn't nil
func evalRollupFunc(ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
func evalRollupFunc(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr,
re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
if re.At == nil {
return evalRollupFuncWithoutAt(ec, funcName, rf, expr, re, iafc)
return evalRollupFuncWithoutAt(qt, ec, funcName, rf, expr, re, iafc)
}
tssAt, err := evalExpr(ec, re.At)
tssAt, err := evalExpr(qt, ec, re.At)
if err != nil {
return nil, fmt.Errorf("cannot evaluate `@` modifier: %w", err)
}
@ -600,7 +645,7 @@ func evalRollupFunc(ec *EvalConfig, funcName string, rf rollupFunc, expr metrics
ecNew := copyEvalConfig(ec)
ecNew.Start = atTimestamp
ecNew.End = atTimestamp
tss, err := evalRollupFuncWithoutAt(ecNew, funcName, rf, expr, re, iafc)
tss, err := evalRollupFuncWithoutAt(qt, ecNew, funcName, rf, expr, re, iafc)
if err != nil {
return nil, err
}
@ -618,7 +663,8 @@ func evalRollupFunc(ec *EvalConfig, funcName string, rf rollupFunc, expr metrics
return tss, nil
}
func evalRollupFuncWithoutAt(ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
func evalRollupFuncWithoutAt(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
funcName = strings.ToLower(funcName)
ecNew := ec
var offset int64
@ -645,12 +691,12 @@ func evalRollupFuncWithoutAt(ec *EvalConfig, funcName string, rf rollupFunc, exp
var rvs []*timeseries
var err error
if me, ok := re.Expr.(*metricsql.MetricExpr); ok {
rvs, err = evalRollupFuncWithMetricExpr(ecNew, funcName, rf, expr, me, iafc, re.Window)
rvs, err = evalRollupFuncWithMetricExpr(qt, ecNew, funcName, rf, expr, me, iafc, re.Window)
} else {
if iafc != nil {
logger.Panicf("BUG: iafc must be nil for rollup %q over subquery %q", funcName, re.AppendString(nil))
}
rvs, err = evalRollupFuncWithSubquery(ecNew, funcName, rf, expr, re)
rvs, err = evalRollupFuncWithSubquery(qt, ecNew, funcName, rf, expr, re)
}
if err != nil {
return nil, err
@ -694,8 +740,10 @@ func aggregateAbsentOverTime(ec *EvalConfig, expr metricsql.Expr, tss []*timeser
return rvs
}
func evalRollupFuncWithSubquery(ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr) ([]*timeseries, error) {
func evalRollupFuncWithSubquery(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr) ([]*timeseries, error) {
// TODO: determine whether to use rollupResultCacheV here.
qt = qt.NewChild()
defer qt.Donef("subquery")
step := re.Step.Duration(ec.Step)
if step == 0 {
step = ec.Step
@ -711,7 +759,7 @@ func evalRollupFuncWithSubquery(ec *EvalConfig, funcName string, rf rollupFunc,
}
// unconditionally align start and end args to step for subquery as Prometheus does.
ecSQ.Start, ecSQ.End = alignStartEnd(ecSQ.Start, ecSQ.End, ecSQ.Step)
tssSQ, err := evalExpr(ecSQ, re.Expr)
tssSQ, err := evalExpr(qt, ecSQ, re.Expr)
if err != nil {
return nil, err
}
@ -746,6 +794,7 @@ func evalRollupFuncWithSubquery(ec *EvalConfig, funcName string, rf rollupFunc,
}
return values, timestamps
})
qt.Printf("rollup %s() over %d series returned by subquery: series=%d", funcName, len(tssSQ), len(tss))
return tss, nil
}
@ -821,15 +870,20 @@ var (
rollupResultCacheMiss = metrics.NewCounter(`vm_rollup_result_cache_miss_total`)
)
func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc,
func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowExpr *metricsql.DurationExpr) ([]*timeseries, error) {
var rollupMemorySize int64
window := windowExpr.Duration(ec.Step)
qt = qt.NewChild()
defer func() {
qt.Donef("rollup %s(): timeRange=[%d..%d], step=%d, window=%d, neededMemoryBytes=%d", funcName, ec.Start, ec.End, ec.Step, window, rollupMemorySize)
}()
if me.IsEmpty() {
return evalNumber(ec, nan), nil
}
window := windowExpr.Duration(ec.Step)
// Search for partial results in cache.
tssCached, start := rollupResultCacheV.Get(ec, expr, window)
tssCached, start := rollupResultCacheV.Get(qt, ec, expr, window)
if start > ec.End {
// The result is fully cached.
rollupResultCacheFullHits.Inc()
@ -859,7 +913,7 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
minTimestamp -= ec.Step
}
sq := storage.NewSearchQuery(ec.AuthToken.AccountID, ec.AuthToken.ProjectID, minTimestamp, ec.End, tfss, ec.MaxSeries)
rss, isPartial, err := netstorage.ProcessSearchQuery(ec.AuthToken, ec.DenyPartialResponse, sq, true, ec.Deadline)
rss, isPartial, err := netstorage.ProcessSearchQuery(qt, ec.AuthToken, ec.DenyPartialResponse, sq, true, ec.Deadline)
if err != nil {
return nil, err
}
@ -894,7 +948,7 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
}
}
rollupPoints := mulNoOverflow(pointsPerTimeseries, int64(timeseriesLen*len(rcs)))
rollupMemorySize := mulNoOverflow(rollupPoints, 16)
rollupMemorySize = mulNoOverflow(rollupPoints, 16)
rml := getRollupMemoryLimiter()
if !rml.Get(uint64(rollupMemorySize)) {
rss.Cancel()
@ -911,16 +965,16 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
keepMetricNames := getKeepMetricNames(expr)
var tss []*timeseries
if iafc != nil {
tss, err = evalRollupWithIncrementalAggregate(funcName, keepMetricNames, iafc, rss, rcs, preFunc, sharedTimestamps)
tss, err = evalRollupWithIncrementalAggregate(qt, funcName, keepMetricNames, iafc, rss, rcs, preFunc, sharedTimestamps)
} else {
tss, err = evalRollupNoIncrementalAggregate(funcName, keepMetricNames, rss, rcs, preFunc, sharedTimestamps)
tss, err = evalRollupNoIncrementalAggregate(qt, funcName, keepMetricNames, rss, rcs, preFunc, sharedTimestamps)
}
if err != nil {
return nil, err
}
tss = mergeTimeseries(tssCached, tss, start, ec)
if !isPartial {
rollupResultCacheV.Put(ec, expr, window, tss)
rollupResultCacheV.Put(qt, ec, expr, window, tss)
}
return tss, nil
}
@ -937,9 +991,12 @@ func getRollupMemoryLimiter() *memoryLimiter {
return &rollupMemoryLimiter
}
func evalRollupWithIncrementalAggregate(funcName string, keepMetricNames bool, iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool,
iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
qt = qt.NewChild()
defer qt.Donef("rollup %s() with incremental aggregation %s() over %d series", funcName, iafc.ae.Name, rss.Len())
err := rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
rs.Values, rs.Timestamps = dropStaleNaNs(funcName, rs.Values, rs.Timestamps)
preFunc(rs.Values, rs.Timestamps)
ts := getTimeseries()
@ -966,14 +1023,17 @@ func evalRollupWithIncrementalAggregate(funcName string, keepMetricNames bool, i
return nil, err
}
tss := iafc.finalizeTimeseries()
qt.Printf("series after aggregation with %s(): %d", iafc.ae.Name, len(tss))
return tss, nil
}
func evalRollupNoIncrementalAggregate(funcName string, keepMetricNames bool, rss *netstorage.Results, rcs []*rollupConfig,
func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool, rss *netstorage.Results, rcs []*rollupConfig,
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
qt = qt.NewChild()
defer qt.Donef("rollup %s() over %d series", funcName, rss.Len())
tss := make([]*timeseries, 0, rss.Len()*len(rcs))
var tssLock sync.Mutex
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
err := rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
rs.Values, rs.Timestamps = dropStaleNaNs(funcName, rs.Values, rs.Timestamps)
preFunc(rs.Values, rs.Timestamps)
for _, rc := range rcs {

View file

@ -13,6 +13,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/querystats"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/metricsql"
@ -26,7 +27,7 @@ var (
)
// Exec executes q for the given ec.
func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result, error) {
func Exec(qt *querytracer.Tracer, ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result, error) {
if querystats.Enabled() {
startTime := time.Now()
ac := ec.AuthToken
@ -41,25 +42,29 @@ func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result,
}
qid := activeQueriesV.Add(ec, q)
rv, err := evalExpr(ec, e)
rv, err := evalExpr(qt, ec, e)
activeQueriesV.Remove(qid)
if err != nil {
return nil, err
}
if isFirstPointOnly {
// Remove all the points except the first one from every time series.
for _, ts := range rv {
ts.Values = ts.Values[:1]
ts.Timestamps = ts.Timestamps[:1]
}
qt.Printf("leave only the first point in every series")
}
maySort := maySortResults(e, rv)
result, err := timeseriesToResult(rv, maySort)
if err != nil {
return nil, err
}
if maySort {
qt.Printf("sort series by metric name and labels")
} else {
qt.Printf("do not sort series by metric name and labels")
}
if n := ec.RoundDigits; n < 100 {
for i := range result {
values := result[i].Values
@ -67,6 +72,7 @@ func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result,
values[j] = decimal.RoundToDecimalDigits(v, n)
}
}
qt.Printf("round series values to %d decimal digits after the point", n)
}
return result, err
}

View file

@ -76,7 +76,7 @@ func TestExecSuccess(t *testing.T) {
RoundDigits: 100,
}
for i := 0; i < 5; i++ {
result, err := Exec(ec, q, false)
result, err := Exec(nil, ec, q, false)
if err != nil {
t.Fatalf(`unexpected error when executing %q: %s`, q, err)
}
@ -7742,14 +7742,14 @@ func TestExecError(t *testing.T) {
RoundDigits: 100,
}
for i := 0; i < 4; i++ {
rv, err := Exec(ec, q, false)
rv, err := Exec(nil, ec, q, false)
if err == nil {
t.Fatalf(`expecting non-nil error on %q`, q)
}
if rv != nil {
t.Fatalf(`expecting nil rv`)
}
rv, err = Exec(ec, q, true)
rv, err = Exec(nil, ec, q, true)
if err == nil {
t.Fatalf(`expecting non-nil error on %q`, q)
}

View file

@ -4,6 +4,7 @@ import (
"crypto/rand"
"flag"
"fmt"
"io/ioutil"
"sync"
"sync/atomic"
"time"
@ -12,8 +13,10 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
"github.com/VictoriaMetrics/fastcache"
@ -62,8 +65,10 @@ func InitRollupResultCache(cachePath string) {
if len(rollupResultCachePath) > 0 {
logger.Infof("loading rollupResult cache from %q...", rollupResultCachePath)
c = workingsetcache.Load(rollupResultCachePath, cacheSize)
mustLoadRollupResultCacheKeyPrefix(rollupResultCachePath)
} else {
c = workingsetcache.New(cacheSize)
rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
}
if *disableCache {
c.Reset()
@ -121,9 +126,10 @@ func StopRollupResultCache() {
logger.Infof("saving rollupResult cache to %q...", rollupResultCachePath)
startTime := time.Now()
if err := rollupResultCacheV.c.Save(rollupResultCachePath); err != nil {
logger.Errorf("cannot close rollupResult cache at %q: %s", rollupResultCachePath, err)
logger.Errorf("cannot save rollupResult cache at %q: %s", rollupResultCachePath, err)
return
}
mustSaveRollupResultCacheKeyPrefix(rollupResultCachePath)
var fcs fastcache.Stats
rollupResultCacheV.c.UpdateStats(&fcs)
rollupResultCacheV.c.Stop()
@ -147,8 +153,14 @@ func ResetRollupResultCache() {
logger.Infof("rollupResult cache has been cleared")
}
func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window int64) (tss []*timeseries, newStart int64) {
func (rrc *rollupResultCache) Get(qt *querytracer.Tracer, ec *EvalConfig, expr metricsql.Expr, window int64) (tss []*timeseries, newStart int64) {
qt = qt.NewChild()
if qt.Enabled() {
query := expr.AppendString(nil)
defer qt.Donef("rollup cache get: query=%s, timeRange=[%d..%d], step=%d, window=%d", query, ec.Start, ec.End, ec.Step, window)
}
if !ec.mayCache() {
qt.Printf("do not fetch series from cache, since it is disabled in the current context")
return nil, ec.Start
}
@ -159,6 +171,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
bb.B = marshalRollupResultCacheKey(bb.B[:0], ec.AuthToken, expr, window, ec.Step, ec.EnforcedTagFilterss)
metainfoBuf := rrc.c.Get(nil, bb.B)
if len(metainfoBuf) == 0 {
qt.Printf("nothing found")
return nil, ec.Start
}
var mi rollupResultCacheMetainfo
@ -167,6 +180,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
}
key := mi.GetBestKey(ec.Start, ec.End)
if key.prefix == 0 && key.suffix == 0 {
qt.Printf("nothing found on the timeRange")
return nil, ec.Start
}
bb.B = key.Marshal(bb.B[:0])
@ -178,18 +192,22 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
metainfoBuf = mi.Marshal(metainfoBuf[:0])
bb.B = marshalRollupResultCacheKey(bb.B[:0], ec.AuthToken, expr, window, ec.Step, ec.EnforcedTagFilterss)
rrc.c.Set(bb.B, metainfoBuf)
qt.Printf("missing cache entry")
return nil, ec.Start
}
// Decompress into newly allocated byte slice, since tss returned from unmarshalTimeseriesFast
// refers to the byte slice, so it cannot be returned to the resultBufPool.
qt.Printf("load compressed entry from cache with size %d bytes", len(compressedResultBuf.B))
resultBuf, err := encoding.DecompressZSTD(nil, compressedResultBuf.B)
if err != nil {
logger.Panicf("BUG: cannot decompress resultBuf from rollupResultCache: %s; it looks like it was improperly saved", err)
}
qt.Printf("unpack the entry into %d bytes", len(resultBuf))
tss, err = unmarshalTimeseriesFast(resultBuf)
if err != nil {
logger.Panicf("BUG: cannot unmarshal timeseries from rollupResultCache: %s; it looks like it was improperly saved", err)
}
qt.Printf("unmarshal %d series", len(tss))
// Extract values for the matching timestamps
timestamps := tss[0].Timestamps
@ -199,10 +217,12 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
}
if i == len(timestamps) {
// no matches.
qt.Printf("no datapoints found in the cached series on the given timeRange")
return nil, ec.Start
}
if timestamps[i] != ec.Start {
// The cached range doesn't cover the requested range.
qt.Printf("cached series don't cover the given timeRange")
return nil, ec.Start
}
@ -223,13 +243,20 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
timestamps = tss[0].Timestamps
newStart = timestamps[len(timestamps)-1] + ec.Step
qt.Printf("return %d series on a timeRange=[%d..%d]", len(tss), ec.Start, newStart-ec.Step)
return tss, newStart
}
var resultBufPool bytesutil.ByteBufferPool
func (rrc *rollupResultCache) Put(ec *EvalConfig, expr metricsql.Expr, window int64, tss []*timeseries) {
func (rrc *rollupResultCache) Put(qt *querytracer.Tracer, ec *EvalConfig, expr metricsql.Expr, window int64, tss []*timeseries) {
qt = qt.NewChild()
if qt.Enabled() {
query := expr.AppendString(nil)
defer qt.Donef("rollup cache put: query=%s, timeRange=[%d..%d], step=%d, window=%d, series=%d", query, ec.Start, ec.End, ec.Step, window, len(tss))
}
if len(tss) == 0 || !ec.mayCache() {
qt.Printf("do not store series to cache, since it is disabled in the current context")
return
}
@ -244,6 +271,7 @@ func (rrc *rollupResultCache) Put(ec *EvalConfig, expr metricsql.Expr, window in
i++
if i == 0 {
// Nothing to store in the cache.
qt.Printf("nothing to store in the cache, since all the points have timestamps bigger than %d", deadline)
return
}
if i < len(timestamps) {
@ -258,52 +286,96 @@ func (rrc *rollupResultCache) Put(ec *EvalConfig, expr metricsql.Expr, window in
}
// Store tss in the cache.
metainfoKey := bbPool.Get()
defer bbPool.Put(metainfoKey)
metainfoBuf := bbPool.Get()
defer bbPool.Put(metainfoBuf)
metainfoKey.B = marshalRollupResultCacheKey(metainfoKey.B[:0], ec.AuthToken, expr, window, ec.Step, ec.EnforcedTagFilterss)
metainfoBuf.B = rrc.c.Get(metainfoBuf.B[:0], metainfoKey.B)
var mi rollupResultCacheMetainfo
if len(metainfoBuf.B) > 0 {
if err := mi.Unmarshal(metainfoBuf.B); err != nil {
logger.Panicf("BUG: cannot unmarshal rollupResultCacheMetainfo: %s; it looks like it was improperly saved", err)
}
}
start := timestamps[0]
end := timestamps[len(timestamps)-1]
if mi.CoversTimeRange(start, end) {
qt.Printf("series on the given timeRange=[%d..%d] already exist in the cache", start, end)
return
}
maxMarshaledSize := getRollupResultCacheSize() / 4
resultBuf := resultBufPool.Get()
defer resultBufPool.Put(resultBuf)
resultBuf.B = marshalTimeseriesFast(resultBuf.B[:0], tss, maxMarshaledSize, ec.Step)
if len(resultBuf.B) == 0 {
tooBigRollupResults.Inc()
qt.Printf("cannot store series in the cache, since they would occupy more than %d bytes", maxMarshaledSize)
return
}
qt.Printf("marshal %d series on a timeRange=[%d..%d] into %d bytes", len(tss), start, end, len(resultBuf.B))
compressedResultBuf := resultBufPool.Get()
defer resultBufPool.Put(compressedResultBuf)
compressedResultBuf.B = encoding.CompressZSTDLevel(compressedResultBuf.B[:0], resultBuf.B, 1)
bb := bbPool.Get()
defer bbPool.Put(bb)
qt.Printf("compress %d bytes into %d bytes", len(resultBuf.B), len(compressedResultBuf.B))
var key rollupResultCacheKey
key.prefix = rollupResultCacheKeyPrefix
key.suffix = atomic.AddUint64(&rollupResultCacheKeySuffix, 1)
bb.B = key.Marshal(bb.B[:0])
rrc.c.SetBig(bb.B, compressedResultBuf.B)
rollupResultKey := key.Marshal(nil)
rrc.c.SetBig(rollupResultKey, compressedResultBuf.B)
qt.Printf("store %d bytes in the cache", len(compressedResultBuf.B))
bb.B = marshalRollupResultCacheKey(bb.B[:0], ec.AuthToken, expr, window, ec.Step, ec.EnforcedTagFilterss)
metainfoBuf := rrc.c.Get(nil, bb.B)
var mi rollupResultCacheMetainfo
if len(metainfoBuf) > 0 {
if err := mi.Unmarshal(metainfoBuf); err != nil {
logger.Panicf("BUG: cannot unmarshal rollupResultCacheMetainfo: %s; it looks like it was improperly saved", err)
}
}
mi.AddKey(key, timestamps[0], timestamps[len(timestamps)-1])
metainfoBuf = mi.Marshal(metainfoBuf[:0])
rrc.c.Set(bb.B, metainfoBuf)
metainfoBuf.B = mi.Marshal(metainfoBuf.B[:0])
rrc.c.Set(metainfoKey.B, metainfoBuf.B)
}
var (
rollupResultCacheKeyPrefix = func() uint64 {
var buf [8]byte
if _, err := rand.Read(buf[:]); err != nil {
// do not use logger.Panicf, since it isn't initialized yet.
panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %w", err))
}
return encoding.UnmarshalUint64(buf[:])
}()
rollupResultCacheKeyPrefix uint64
rollupResultCacheKeySuffix = uint64(time.Now().UnixNano())
)
// newRollupResultCacheKeyPrefix returns a fresh random 64-bit prefix
// for rollup result cache keys. Generating a new prefix effectively
// invalidates all previously cached rollup results.
func newRollupResultCacheKeyPrefix() uint64 {
	var b [8]byte
	if _, err := rand.Read(b[:]); err != nil {
		// do not use logger.Panicf, since it isn't initialized yet.
		panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %w", err))
	}
	return encoding.UnmarshalUint64(b[:])
}
// mustLoadRollupResultCacheKeyPrefix loads rollupResultCacheKeyPrefix from the file at path+".key.prefix".
//
// On any failure — missing file, read error or unexpected file size — a new random
// prefix is generated instead, which resets (invalidates) the rollupResult cache.
func mustLoadRollupResultCacheKeyPrefix(path string) {
	path = path + ".key.prefix"
	if !fs.IsPathExist(path) {
		// First start or the prefix file was removed; start with a fresh prefix.
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
		return
	}
	data, err := ioutil.ReadFile(path)
	if err != nil {
		logger.Errorf("cannot load %s: %s; reset rollupResult cache", path, err)
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
		return
	}
	if len(data) != 8 {
		// The prefix is a single marshaled uint64; anything else is corruption.
		logger.Errorf("unexpected size of %s; want 8 bytes; got %d bytes; reset rollupResult cache", path, len(data))
		rollupResultCacheKeyPrefix = newRollupResultCacheKeyPrefix()
		return
	}
	rollupResultCacheKeyPrefix = encoding.UnmarshalUint64(data)
}
// mustSaveRollupResultCacheKeyPrefix persists rollupResultCacheKeyPrefix
// in the file at path+".key.prefix", so it can be restored on the next start.
func mustSaveRollupResultCacheKeyPrefix(path string) {
	path += ".key.prefix"
	data := encoding.MarshalUint64(nil, rollupResultCacheKeyPrefix)
	fs.MustRemoveAll(path)
	err := fs.WriteFileAtomically(path, data)
	if err != nil {
		logger.Fatalf("cannot store rollupResult cache key prefix to %q: %s", path, err)
	}
}
var tooBigRollupResults = metrics.NewCounter("vm_too_big_rollup_results_total")
// Increment this value every time the format of the cache changes.
@ -446,20 +518,36 @@ func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error {
return nil
}
// CoversTimeRange returns true when at least one cached entry fully
// covers the time range [start..end].
func (mi *rollupResultCacheMetainfo) CoversTimeRange(start, end int64) bool {
	if start > end {
		logger.Panicf("BUG: start cannot exceed end; got %d vs %d", start, end)
	}
	for _, e := range mi.entries {
		if e.start <= start && end <= e.end {
			return true
		}
	}
	return false
}
func (mi *rollupResultCacheMetainfo) GetBestKey(start, end int64) rollupResultCacheKey {
if start > end {
logger.Panicf("BUG: start cannot exceed end; got %d vs %d", start, end)
}
var bestKey rollupResultCacheKey
bestD := int64(1<<63 - 1)
dMax := int64(0)
for i := range mi.entries {
e := &mi.entries[i]
if start < e.start || end <= e.start {
if start < e.start {
continue
}
d := start - e.start
if d < bestD {
bestD = d
d := e.end - start
if end <= e.end {
d = end - start
}
if d >= dMax {
dMax = d
bestKey = e.key
}
}

View file

@ -23,6 +23,7 @@ func TestRollupResultCacheInitStop(t *testing.T) {
StopRollupResultCache()
}
fs.MustRemoveAll(cacheFilePath)
fs.MustRemoveAll(cacheFilePath + ".key.prefix")
})
}
@ -61,7 +62,7 @@ func TestRollupResultCache(t *testing.T) {
// Try obtaining an empty value.
t.Run("empty", func(t *testing.T) {
tss, newStart := rollupResultCacheV.Get(ec, fe, window)
tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
if newStart != ec.Start {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, ec.Start)
}
@ -79,8 +80,8 @@ func TestRollupResultCache(t *testing.T) {
Values: []float64{0, 1, 2},
},
}
rollupResultCacheV.Put(ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(ec, fe, window)
rollupResultCacheV.Put(nil, ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
if newStart != 1400 {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1400)
}
@ -100,8 +101,8 @@ func TestRollupResultCache(t *testing.T) {
Values: []float64{0, 1, 2},
},
}
rollupResultCacheV.Put(ec, ae, window, tss)
tss, newStart := rollupResultCacheV.Get(ec, ae, window)
rollupResultCacheV.Put(nil, ec, ae, window, tss)
tss, newStart := rollupResultCacheV.Get(nil, ec, ae, window)
if newStart != 1400 {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1400)
}
@ -123,8 +124,8 @@ func TestRollupResultCache(t *testing.T) {
Values: []float64{333, 0, 1, 2},
},
}
rollupResultCacheV.Put(ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(ec, fe, window)
rollupResultCacheV.Put(nil, ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
if newStart != 1000 {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
}
@ -142,8 +143,8 @@ func TestRollupResultCache(t *testing.T) {
Values: []float64{0, 1, 2},
},
}
rollupResultCacheV.Put(ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(ec, fe, window)
rollupResultCacheV.Put(nil, ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
if newStart != 1000 {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
}
@ -161,8 +162,8 @@ func TestRollupResultCache(t *testing.T) {
Values: []float64{0, 1, 2},
},
}
rollupResultCacheV.Put(ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(ec, fe, window)
rollupResultCacheV.Put(nil, ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
if newStart != 1000 {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
}
@ -180,8 +181,8 @@ func TestRollupResultCache(t *testing.T) {
Values: []float64{0, 1, 2},
},
}
rollupResultCacheV.Put(ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(ec, fe, window)
rollupResultCacheV.Put(nil, ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
if newStart != 1000 {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
}
@ -199,8 +200,8 @@ func TestRollupResultCache(t *testing.T) {
Values: []float64{0, 1, 2, 3, 4, 5, 6, 7},
},
}
rollupResultCacheV.Put(ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(ec, fe, window)
rollupResultCacheV.Put(nil, ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
if newStart != 2200 {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 2200)
}
@ -222,8 +223,8 @@ func TestRollupResultCache(t *testing.T) {
Values: []float64{1, 2, 3, 4, 5, 6},
},
}
rollupResultCacheV.Put(ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(ec, fe, window)
rollupResultCacheV.Put(nil, ec, fe, window, tss)
tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
if newStart != 2200 {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 2200)
}
@ -247,8 +248,8 @@ func TestRollupResultCache(t *testing.T) {
}
tss = append(tss, ts)
}
rollupResultCacheV.Put(ec, fe, window, tss)
tssResult, newStart := rollupResultCacheV.Get(ec, fe, window)
rollupResultCacheV.Put(nil, ec, fe, window, tss)
tssResult, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
if newStart != 2200 {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 2200)
}
@ -276,10 +277,10 @@ func TestRollupResultCache(t *testing.T) {
Values: []float64{0, 1, 2},
},
}
rollupResultCacheV.Put(ec, fe, window, tss1)
rollupResultCacheV.Put(ec, fe, window, tss2)
rollupResultCacheV.Put(ec, fe, window, tss3)
tss, newStart := rollupResultCacheV.Get(ec, fe, window)
rollupResultCacheV.Put(nil, ec, fe, window, tss1)
rollupResultCacheV.Put(nil, ec, fe, window, tss2)
rollupResultCacheV.Put(nil, ec, fe, window, tss3)
tss, newStart := rollupResultCacheV.Get(nil, ec, fe, window)
if newStart != 1400 {
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1400)
}

View file

@ -20,6 +20,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/clusternative"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metrics"
)
@ -293,6 +294,7 @@ type vmselectRequestCtx struct {
sizeBuf []byte
dataBuf []byte
qt *querytracer.Tracer
sq storage.SearchQuery
tfss []*storage.TagFilters
sr storage.Search
@ -475,6 +477,13 @@ func (s *Server) processVMSelectRequest(ctx *vmselectRequestCtx) error {
}
rpcName := string(ctx.dataBuf)
// Initialize query tracing.
traceEnabled, err := ctx.readBool()
if err != nil {
return fmt.Errorf("cannot read traceEnabled: %w", err)
}
ctx.qt = querytracer.New(traceEnabled)
// Limit the time required for reading request args.
if err := ctx.bc.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil {
return fmt.Errorf("cannot set read deadline for reading request args: %w", err)
@ -491,32 +500,47 @@ func (s *Server) processVMSelectRequest(ctx *vmselectRequestCtx) error {
ctx.timeout = uint64(timeout)
ctx.deadline = fasttime.UnixTimestamp() + uint64(timeout)
// Process the rpcName call.
if err := s.processVMSelectRPC(ctx, rpcName); err != nil {
return err
}
// Finish query trace.
ctx.qt.Donef("%s() at vmstorage", rpcName)
traceJSON := ctx.qt.ToJSON()
if err := ctx.writeString(traceJSON); err != nil {
return fmt.Errorf("cannot send trace with length %d bytes to vmselect: %w", len(traceJSON), err)
}
return nil
}
func (s *Server) processVMSelectRPC(ctx *vmselectRequestCtx, rpcName string) error {
switch rpcName {
case "search_v5":
case "search_v6":
return s.processVMSelectSearch(ctx)
case "searchMetricNames_v2":
case "searchMetricNames_v3":
return s.processVMSelectSearchMetricNames(ctx)
case "labelValuesOnTimeRange_v2":
case "labelValuesOnTimeRange_v3":
return s.processVMSelectLabelValuesOnTimeRange(ctx)
case "labelValues_v3":
case "labelValues_v4":
return s.processVMSelectLabelValues(ctx)
case "tagValueSuffixes_v2":
case "tagValueSuffixes_v3":
return s.processVMSelectTagValueSuffixes(ctx)
case "labelEntries_v3":
case "labelEntries_v4":
return s.processVMSelectLabelEntries(ctx)
case "labelsOnTimeRange_v2":
case "labelsOnTimeRange_v3":
return s.processVMSelectLabelsOnTimeRange(ctx)
case "labels_v3":
case "labels_v4":
return s.processVMSelectLabels(ctx)
case "seriesCount_v3":
case "seriesCount_v4":
return s.processVMSelectSeriesCount(ctx)
case "tsdbStatus_v3":
case "tsdbStatus_v4":
return s.processVMSelectTSDBStatus(ctx)
case "tsdbStatusWithFilters_v2":
case "tsdbStatusWithFilters_v3":
return s.processVMSelectTSDBStatusWithFilters(ctx)
case "deleteMetrics_v4":
case "deleteMetrics_v5":
return s.processVMSelectDeleteMetrics(ctx)
case "registerMetricNames_v2":
case "registerMetricNames_v3":
return s.processVMSelectRegisterMetricNames(ctx)
default:
return fmt.Errorf("unsupported rpcName: %q", ctx.dataBuf)
@ -1018,7 +1042,7 @@ func (s *Server) processVMSelectSearchMetricNames(ctx *vmselectRequestCtx) error
return ctx.writeErrorMessage(err)
}
maxMetrics := ctx.getMaxMetrics()
mns, err := s.storage.SearchMetricNames(ctx.tfss, tr, maxMetrics, ctx.deadline)
mns, err := s.storage.SearchMetricNames(ctx.qt, ctx.tfss, tr, maxMetrics, ctx.deadline)
if err != nil {
return ctx.writeErrorMessage(err)
}
@ -1039,6 +1063,7 @@ func (s *Server) processVMSelectSearchMetricNames(ctx *vmselectRequestCtx) error
return fmt.Errorf("cannot send metricName #%d: %w", i+1, err)
}
}
ctx.qt.Printf("sent %d series to vmselect", len(mns))
return nil
}
@ -1067,7 +1092,7 @@ func (s *Server) processVMSelectSearch(ctx *vmselectRequestCtx) error {
}
startTime := time.Now()
maxMetrics := ctx.getMaxMetrics()
ctx.sr.Init(s.storage, ctx.tfss, tr, maxMetrics, ctx.deadline)
ctx.sr.Init(ctx.qt, s.storage, ctx.tfss, tr, maxMetrics, ctx.deadline)
indexSearchDuration.UpdateDuration(startTime)
defer ctx.sr.MustClose()
if err := ctx.sr.Error(); err != nil {
@ -1080,7 +1105,9 @@ func (s *Server) processVMSelectSearch(ctx *vmselectRequestCtx) error {
}
// Send found blocks to vmselect.
blocksRead := 0
for ctx.sr.NextMetricBlock() {
blocksRead++
ctx.mb.MetricName = ctx.sr.MetricBlockRef.MetricName
ctx.sr.MetricBlockRef.BlockRef.MustReadBlock(&ctx.mb.Block, fetchData)
@ -1095,6 +1122,7 @@ func (s *Server) processVMSelectSearch(ctx *vmselectRequestCtx) error {
if err := ctx.sr.Error(); err != nil {
return fmt.Errorf("search error: %w", err)
}
ctx.qt.Printf("sent %d blocks to vmselect", blocksRead)
// Send 'end of response' marker
if err := ctx.writeString(""); err != nil {

View file

@ -17808,6 +17808,16 @@
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/svgo/node_modules/nth-check": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz",
"integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==",
"dev": true,
"peer": true,
"dependencies": {
"boolbase": "~1.0.0"
}
},
"node_modules/symbol-tree": {
"version": "3.2.4",
"resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz",
@ -32692,7 +32702,7 @@
"boolbase": "^1.0.0",
"css-what": "^3.2.1",
"domutils": "^1.7.0",
"nth-check": "^2.0.1"
"nth-check": "^1.0.2"
}
},
"css-what": {
@ -32743,6 +32753,16 @@
"argparse": "^1.0.7",
"esprima": "^4.0.0"
}
},
"nth-check": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz",
"integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==",
"dev": true,
"peer": true,
"requires": {
"boolbase": "~1.0.0"
}
}
}
},

View file

@ -69,9 +69,6 @@
"overrides": {
"react-app-rewired": {
"nth-check": "^2.0.1"
},
"css-select": {
"nth-check": "^2.0.1"
}
}
}

View file

@ -15,6 +15,7 @@ The following tip changes can be tested by building VictoriaMetrics components f
## tip
* FEATURE: support query tracing, which allows determining bottlenecks during query processing. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#query-tracing) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1403).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): remove dependency on Internet access in `http://vmagent:8429/targets` page. Previously the page layout was broken without Internet access. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2594).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): remove dependency on Internet access in [web API pages](https://docs.victoriametrics.com/vmalert.html#web). Previously the functionality and the layout of these pages were broken without Internet access. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2594).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): expose `/api/v1/status/config` endpoint in the same way as Prometheus does. See [these docs](https://prometheus.io/docs/prometheus/latest/querying/api/#config).

View file

@ -1365,6 +1365,69 @@ VictoriaMetrics returns TSDB stats at `/api/v1/status/tsdb` page in the way simi
* `match[]=SELECTOR` where `SELECTOR` is an arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) for series to take into account during stats calculation. By default all the series are taken into account.
* `extra_label=LABEL=VALUE`. See [these docs](#prometheus-querying-api-enhancements) for more details.
## Query tracing
VictoriaMetrics supports query tracing, which can be used for determining bottlenecks during query processing.
Query tracing can be enabled for a specific query by passing `trace=1` query arg.
In this case VictoriaMetrics puts query trace into `trace` field in the output JSON.
For example, the following command:
```bash
curl http://localhost:8428/api/v1/query_range -d 'query=2*rand()' -d 'start=-1h' -d 'step=1m' -d 'trace=1' | jq -r '.trace'
```
would return the following trace:
```json
{
"duration_msec": 0.099,
"message": "/api/v1/query_range: start=1654034340000, end=1654037880000, step=60000, query=\"2*rand()\": series=1",
"children": [
{
"duration_msec": 0.034,
"message": "eval: query=2 * rand(), timeRange=[1654034340000..1654037880000], step=60000, mayCache=true: series=1, points=60, pointsPerSeries=60",
"children": [
{
"duration_msec": 0.032,
"message": "binary op \"*\": series=1",
"children": [
{
"duration_msec": 0.009,
"message": "eval: query=2, timeRange=[1654034340000..1654037880000], step=60000, mayCache=true: series=1, points=60, pointsPerSeries=60"
},
{
"duration_msec": 0.017,
"message": "eval: query=rand(), timeRange=[1654034340000..1654037880000], step=60000, mayCache=true: series=1, points=60, pointsPerSeries=60",
"children": [
{
"duration_msec": 0.015,
"message": "transform rand(): series=1"
}
]
}
]
}
]
},
{
"duration_msec": 0.004,
"message": "sort series by metric name and labels"
},
{
"duration_msec": 0.044,
"message": "generate /api/v1/query_range response for series=1, points=60"
}
]
}
```
All the durations and timestamps in traces are in milliseconds.
Query tracing is allowed by default. It can be denied by passing `-denyQueryTracing` command-line flag to VictoriaMetrics.
## Cardinality limiter
By default VictoriaMetrics doesn't limit the number of stored time series. The limit can be enforced by setting the following command-line flags:

View file

@ -1369,6 +1369,69 @@ VictoriaMetrics returns TSDB stats at `/api/v1/status/tsdb` page in the way simi
* `match[]=SELECTOR` where `SELECTOR` is an arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) for series to take into account during stats calculation. By default all the series are taken into account.
* `extra_label=LABEL=VALUE`. See [these docs](#prometheus-querying-api-enhancements) for more details.
## Query tracing
VictoriaMetrics supports query tracing, which can be used for determining bottlenecks during query processing.
Query tracing can be enabled for a specific query by passing `trace=1` query arg.
In this case VictoriaMetrics puts query trace into `trace` field in the output JSON.
For example, the following command:
```bash
curl http://localhost:8428/api/v1/query_range -d 'query=2*rand()' -d 'start=-1h' -d 'step=1m' -d 'trace=1' | jq -r '.trace'
```
would return the following trace:
```json
{
"duration_msec": 0.099,
"message": "/api/v1/query_range: start=1654034340000, end=1654037880000, step=60000, query=\"2*rand()\": series=1",
"children": [
{
"duration_msec": 0.034,
"message": "eval: query=2 * rand(), timeRange=[1654034340000..1654037880000], step=60000, mayCache=true: series=1, points=60, pointsPerSeries=60",
"children": [
{
"duration_msec": 0.032,
"message": "binary op \"*\": series=1",
"children": [
{
"duration_msec": 0.009,
"message": "eval: query=2, timeRange=[1654034340000..1654037880000], step=60000, mayCache=true: series=1, points=60, pointsPerSeries=60"
},
{
"duration_msec": 0.017,
"message": "eval: query=rand(), timeRange=[1654034340000..1654037880000], step=60000, mayCache=true: series=1, points=60, pointsPerSeries=60",
"children": [
{
"duration_msec": 0.015,
"message": "transform rand(): series=1"
}
]
}
]
}
]
},
{
"duration_msec": 0.004,
"message": "sort series by metric name and labels"
},
{
"duration_msec": 0.044,
"message": "generate /api/v1/query_range response for series=1, points=60"
}
]
}
```
All the durations and timestamps in traces are in milliseconds.
Query tracing is allowed by default. It can be denied by passing `-denyQueryTracing` command-line flag to VictoriaMetrics.
## Cardinality limiter
By default VictoriaMetrics doesn't limit the number of stored time series. The limit can be enforced by setting the following command-line flags:

View file

@ -68,11 +68,11 @@ func mustInitClusterMemberID() {
if idx := strings.LastIndexByte(s, '-'); idx >= 0 {
s = s[idx+1:]
}
n, err := strconv.Atoi(s)
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
logger.Fatalf("cannot parse -promscrape.cluster.memberNum=%q: %s", *clusterMemberNum, err)
}
clusterMemberID = n
clusterMemberID = int(n)
}
// Config represents essential parts from Prometheus config defined at https://prometheus.io/docs/prometheus/latest/configuration/configuration/

245
lib/querytracer/tracer.go Normal file
View file

@ -0,0 +1,245 @@
package querytracer
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"strings"
"time"
)
var denyQueryTracing = flag.Bool("denyQueryTracing", false, "Whether to disable the ability to trace queries. See https://docs.victoriametrics.com/#query-tracing")

// Tracer represents query tracer.
//
// It must be created via New call.
// Each created tracer must be finalized via Donef call.
//
// Tracer may contain sub-tracers (branches) in order to build tree-like execution order.
// Call Tracer.NewChild func for adding sub-tracer.
//
// A nil *Tracer is valid and represents a disabled tracer; all its methods are no-ops.
type Tracer struct {
	// startTime is the time when Tracer was created
	startTime time.Time
	// doneTime is the time when Donef was called
	doneTime time.Time
	// message is the message generated by Printf or Donef call.
	message string
	// children is a list of children Tracer objects.
	// Children are appended by NewChild, Printf and AddJSON calls.
	children []*Tracer
	// span contains span for the given Tracer. It is set in Tracer.AddJSON().
	// If span is non-nil, then the remaining fields aren't used.
	span *span
}
// New creates a new instance of the tracer.
//
// The returned tracer is nil (i.e. disabled) when enabled isn't set or when
// query tracing is denied via the -denyQueryTracing command-line flag;
// all method calls on a nil tracer are no-ops.
//
// Donef must be called when the tracer should be finished.
func New(enabled bool) *Tracer {
	if !enabled || *denyQueryTracing {
		return nil
	}
	t := &Tracer{
		startTime: time.Now(),
	}
	return t
}
// Enabled returns true if the t is enabled.
//
// A nil tracer (as returned by New(false)) is disabled.
func (t *Tracer) Enabled() bool {
	return t != nil
}
// NewChild appends a new child Tracer to t and returns it.
//
// NewChild returns nil when t is nil, i.e. tracing is disabled.
//
// NewChild cannot be called from concurrent goroutines.
// Create children tracers from a single goroutine and then pass them
// to concurrent goroutines.
func (t *Tracer) NewChild() *Tracer {
	if t == nil {
		return nil
	}
	if t.message != "" {
		panic(fmt.Errorf("BUG: NewChild() cannot be called after Donef(%q) call", t.message))
	}
	c := &Tracer{
		startTime: time.Now(),
	}
	t.children = append(t.children, c)
	return c
}
// Donef finishes t with the given formatted message.
//
// Donef cannot be called multiple times.
// Other Tracer functions cannot be called after Donef call.
func (t *Tracer) Donef(format string, args ...interface{}) {
	if t == nil {
		return
	}
	if t.message != "" {
		panic(fmt.Errorf("BUG: Donef() already called with message %q", t.message))
	}
	msg := fmt.Sprintf(format, args...)
	t.message = msg
	t.doneTime = time.Now()
}
// Printf adds a new already-finished child message to t.
//
// The created child has equal start and done times, so it renders
// as a single-line event in the resulting trace.
//
// Printf cannot be called from concurrent goroutines.
func (t *Tracer) Printf(format string, args ...interface{}) {
	if t == nil {
		return
	}
	if t.message != "" {
		// Use the real method name (Donef) in the message for consistency
		// with the equivalent check in NewChild.
		panic(fmt.Errorf("BUG: Printf() cannot be called after Donef(%q) call", t.message))
	}
	now := time.Now()
	child := &Tracer{
		startTime: now,
		doneTime:  now,
		message:   fmt.Sprintf(format, args...),
	}
	t.children = append(t.children, child)
}
// AddJSON adds a sub-trace to t.
//
// The jsonTrace must be encoded with ToJSON. An empty jsonTrace is a no-op.
//
// AddJSON cannot be called from concurrent goroutines.
func (t *Tracer) AddJSON(jsonTrace []byte) error {
	if t == nil {
		return nil
	}
	if len(jsonTrace) == 0 {
		// Nothing to add.
		return nil
	}
	var s *span
	if err := json.Unmarshal(jsonTrace, &s); err != nil {
		// Wrap with %w so callers can inspect the underlying JSON error
		// via errors.Is / errors.As.
		return fmt.Errorf("cannot unmarshal json trace: %w", err)
	}
	child := &Tracer{
		span: s,
	}
	t.children = append(t.children, child)
	return nil
}
// String returns the plaintext representation of the trace rooted at t.
//
// String must be called when t methods aren't called by other goroutines.
func (t *Tracer) String() string {
	if t == nil {
		return ""
	}
	var buf bytes.Buffer
	t.toSpan().writePlaintextWithIndent(&buf, 0)
	return buf.String()
}
// ToJSON returns the JSON representation of the trace rooted at t.
//
// ToJSON must be called when t methods aren't called by other goroutines.
func (t *Tracer) ToJSON() string {
	if t == nil {
		return ""
	}
	data, err := json.Marshal(t.toSpan())
	if err != nil {
		panic(fmt.Errorf("BUG: unexpected error from json.Marshal: %w", err))
	}
	return string(data)
}
// toSpan converts the tracer tree rooted at t into a span tree.
// The current time is used as the base time for duration calculations.
func (t *Tracer) toSpan() *span {
	s, _ := t.toSpanInternal(time.Now())
	return s
}
// toSpanInternal converts t into a span.
//
// prevTime is the done time of the preceding sibling span; it is used as the
// base for computing the duration of single-line (Printf-created) spans.
// The second return value is the done time of the returned span, which becomes
// prevTime for the next sibling.
func (t *Tracer) toSpanInternal(prevTime time.Time) (*span, time.Time) {
	if t.span != nil {
		// The span was attached via AddJSON; return it as is.
		return t.span, prevTime
	}
	if t.doneTime == t.startTime {
		// a single-line trace created by Printf; its duration is measured
		// from the previous sibling's done time to its own start time.
		d := t.startTime.Sub(prevTime)
		s := &span{
			DurationMsec: float64(d.Microseconds()) / 1000,
			Message:      t.message,
		}
		return s, t.doneTime
	}
	// tracer with children
	msg := t.message
	doneTime := t.doneTime
	if msg == "" {
		// Donef() wasn't called on t; make this visible in the trace
		// and estimate the done time from the last descendant.
		msg = "missing Tracer.Donef() call"
		doneTime = t.getLastChildDoneTime(t.startTime)
	}
	d := doneTime.Sub(t.startTime)
	var children []*span
	var sChild *span
	// Convert children sequentially, threading each child's done time
	// into the next child as its prevTime.
	prevChildTime := t.startTime
	for _, child := range t.children {
		sChild, prevChildTime = child.toSpanInternal(prevChildTime)
		children = append(children, sChild)
	}
	s := &span{
		DurationMsec: float64(d.Microseconds()) / 1000,
		Message:      msg,
		Children:     children,
	}
	return s, doneTime
}
// getLastChildDoneTime estimates the done time of t from its last descendant,
// falling back to defaultTime when t has no children.
//
// NOTE(review): the walk uses the last child's startTime, not its doneTime —
// presumably because leaf children are Printf entries where both are equal;
// confirm this is intended for children finished via Donef.
func (t *Tracer) getLastChildDoneTime(defaultTime time.Time) time.Time {
	if len(t.children) == 0 {
		return defaultTime
	}
	lastChild := t.children[len(t.children)-1]
	return lastChild.getLastChildDoneTime(lastChild.startTime)
}
// span represents a single trace span
type span struct {
	// DurationMsec is the duration for the current trace span in milliseconds.
	DurationMsec float64 `json:"duration_msec"`
	// Message is a trace message
	Message string `json:"message"`
	// Children contains children spans
	Children []*span `json:"children,omitempty"`
}
// writePlaintextWithIndent writes s and, recursively, its children to w
// as indented plaintext — one line per span, prefixed with "| " per level.
func (s *span) writePlaintextWithIndent(w io.Writer, indent int) {
	prefix := strings.Repeat("| ", indent) + "- "
	msg := s.messageWithPrefix(prefix)
	fmt.Fprintf(w, "%s%.03fms: %s\n", prefix, s.DurationMsec, msg)
	for _, child := range s.Children {
		child.writePlaintextWithIndent(w, indent+1)
	}
}
// messageWithPrefix returns s.Message with every non-empty continuation line
// prefixed so multi-line messages align under the tree indentation.
// The "- " bullet in prefix is turned into "| " for continuation lines.
func (s *span) messageWithPrefix(prefix string) string {
	prefix = strings.Replace(prefix, "-", "|", 1)
	lines := strings.Split(s.Message, "\n")
	result := make([]string, 0, len(lines))
	result = append(result, lines[0])
	for _, ln := range lines[1:] {
		if ln == "" {
			continue
		}
		result = append(result, prefix+ln)
	}
	return strings.Join(result, "\n")
}

View file

@ -0,0 +1,209 @@
package querytracer
import (
"regexp"
"testing"
)
// TestTracerDisabled verifies that a disabled (nil) tracer accepts all method
// calls as no-ops and produces empty plaintext and JSON output.
func TestTracerDisabled(t *testing.T) {
	qt := New(false)
	if qt.Enabled() {
		t.Fatalf("query tracer must be disabled")
	}
	qtChild := qt.NewChild()
	if qtChild.Enabled() {
		t.Fatalf("query tracer must be disabled")
	}
	qtChild.Printf("foo %d", 123)
	qtChild.Donef("child done %d", 456)
	qt.Printf("parent %d", 789)
	if err := qt.AddJSON([]byte("foobar")); err != nil {
		t.Fatalf("unexpected error in AddJSON: %s", err)
	}
	qt.Donef("test")
	s := qt.String()
	if s != "" {
		t.Fatalf("unexpected trace; got %s; want empty", s)
	}
	s = qt.ToJSON()
	if s != "" {
		t.Fatalf("unexpected json trace; got %s; want empty", s)
	}
}
// TestTracerEnabled verifies the plaintext rendering of an enabled tracer
// with a finished child; durations are ignored by the comparison helper.
func TestTracerEnabled(t *testing.T) {
	qt := New(true)
	if !qt.Enabled() {
		t.Fatalf("query tracer must be enabled")
	}
	qtChild := qt.NewChild()
	if !qtChild.Enabled() {
		t.Fatalf("child query tracer must be enabled")
	}
	qtChild.Printf("foo %d", 123)
	qtChild.Donef("child done %d", 456)
	qt.Printf("parent %d", 789)
	qt.Donef("test")
	s := qt.String()
	sExpected := `- 0ms: test
| - 0ms: child done 456
| | - 0ms: foo 123
| - 0ms: parent 789
`
	if !areEqualTracesSkipDuration(s, sExpected) {
		t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", s, sExpected)
	}
}
// TestTracerMultiline verifies that multi-line Printf/Donef messages get
// continuation lines aligned with "| " prefixes in the plaintext output.
func TestTracerMultiline(t *testing.T) {
	qt := New(true)
	qt.Printf("line3\nline4\n")
	qt.Donef("line1\nline2")
	s := qt.String()
	sExpected := `- 0ms: line1
| line2
| - 0ms: line3
| | line4
`
	if !areEqualTracesSkipDuration(s, sExpected) {
		t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", s, sExpected)
	}
}
// TestTracerToJSON verifies the JSON rendering of an enabled tracer tree;
// durations are ignored by the comparison helper.
func TestTracerToJSON(t *testing.T) {
	qt := New(true)
	if !qt.Enabled() {
		t.Fatalf("query tracer must be enabled")
	}
	qtChild := qt.NewChild()
	if !qtChild.Enabled() {
		t.Fatalf("child query tracer must be enabled")
	}
	qtChild.Printf("foo %d", 123)
	qtChild.Donef("child done %d", 456)
	qt.Printf("parent %d", 789)
	qt.Donef("test")
	s := qt.ToJSON()
	sExpected := `{"duration_msec":0,"message":"test","children":[` +
		`{"duration_msec":0,"message":"child done 456","children":[` +
		`{"duration_msec":0,"message":"foo 123"}]},` +
		`{"duration_msec":0,"message":"parent 789"}]}`
	if !areEqualJSONTracesSkipDuration(s, sExpected) {
		t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", s, sExpected)
	}
}
// TestTraceAddJSON verifies that a JSON-encoded trace can be embedded into
// another trace via AddJSON, and that both the text and the JSON renderings
// contain the embedded subtree in the expected position.
func TestTraceAddJSON(t *testing.T) {
	child := New(true)
	child.Printf("foo")
	child.Donef("child")
	childJSON := child.ToJSON()

	parent := New(true)
	parent.Printf("first_line")
	if err := parent.AddJSON([]byte(childJSON)); err != nil {
		t.Fatalf("unexpected error in AddJSON: %s", err)
	}
	parent.Printf("last_line")
	// Adding nil JSON must be a no-op and must not return an error.
	if err := parent.AddJSON(nil); err != nil {
		t.Fatalf("unexpected error in AddJSON(nil): %s", err)
	}
	parent.Donef("parent")

	wantText := `- 0ms: parent
| - 0ms: first_line
| - 0ms: child
| | - 0ms: foo
| - 0ms: last_line
`
	if gotText := parent.String(); !areEqualTracesSkipDuration(gotText, wantText) {
		t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", gotText, wantText)
	}

	wantJSON := `{"duration_msec":0,"message":"parent","children":[` +
		`{"duration_msec":0,"message":"first_line"},` +
		`{"duration_msec":0,"message":"child","children":[` +
		`{"duration_msec":0,"message":"foo"}]},` +
		`{"duration_msec":0,"message":"last_line"}]}`
	if gotJSON := parent.ToJSON(); !areEqualJSONTracesSkipDuration(gotJSON, wantJSON) {
		t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", gotJSON, wantJSON)
	}
}
// TestTraceMissingDonef verifies that tracers which were never finished with
// Donef render an explicit "missing Tracer.Donef() call" marker in the output.
func TestTraceMissingDonef(t *testing.T) {
	tracer := New(true)
	tracer.Printf("parent printf")
	child := tracer.NewChild()
	child.Printf("child printf")
	tracer.Printf("another parent printf")
	// Neither tracer nor child has Donef called on it.
	want := `- 0ms: missing Tracer.Donef() call
| - 0ms: parent printf
| - 0ms: missing Tracer.Donef() call
| | - 0ms: child printf
| - 0ms: another parent printf
`
	got := tracer.String()
	if !areEqualTracesSkipDuration(got, want) {
		t.Fatalf("unexpected trace\ngot\n%s\nwant\n%s", got, want)
	}
}
// TestZeroDurationInTrace verifies that zeroDurationsInTrace rewrites every
// "<number>ms" duration marker in a plain-text trace to "0ms".
func TestZeroDurationInTrace(t *testing.T) {
	input := `- 123ms: missing Tracer.Donef() call
| - 0ms: parent printf
| - 54ms: missing Tracer.Donef() call
| | - 45ms: child printf
| - 0ms: another parent printf
`
	want := `- 0ms: missing Tracer.Donef() call
| - 0ms: parent printf
| - 0ms: missing Tracer.Donef() call
| | - 0ms: child printf
| - 0ms: another parent printf
`
	if got := zeroDurationsInTrace(input); got != want {
		t.Fatalf("unexpected result\ngot\n%s\nwant\n%s", got, want)
	}
}
// TestZeroJSONDurationInTrace verifies that zeroJSONDurationsInTrace rewrites
// every "duration_msec" value in a JSON trace to zero.
func TestZeroJSONDurationInTrace(t *testing.T) {
	input := `{"duration_msec":123,"message":"parent","children":[` +
		`{"duration_msec":0,"message":"first_line"},` +
		`{"duration_msec":434,"message":"child","children":[` +
		`{"duration_msec":343,"message":"foo"}]},` +
		`{"duration_msec":0,"message":"last_line"}]}`
	want := `{"duration_msec":0,"message":"parent","children":[` +
		`{"duration_msec":0,"message":"first_line"},` +
		`{"duration_msec":0,"message":"child","children":[` +
		`{"duration_msec":0,"message":"foo"}]},` +
		`{"duration_msec":0,"message":"last_line"}]}`
	if got := zeroJSONDurationsInTrace(input); got != want {
		t.Fatalf("unexpected result\ngot\n%s\nwant\n%s", got, want)
	}
}
// areEqualTracesSkipDuration reports whether the two plain-text traces are
// identical once every duration value is normalized to zero.
func areEqualTracesSkipDuration(a, b string) bool {
	return zeroDurationsInTrace(a) == zeroDurationsInTrace(b)
}

// zeroDurationsInTrace replaces every " <number>ms: " duration marker in s
// with " 0ms: ", so traces from different runs compare deterministically.
func zeroDurationsInTrace(s string) string {
	return skipDurationRe.ReplaceAllString(s, " 0ms: ")
}

// skipDurationRe matches duration markers such as " 12.5ms: " in plain-text traces.
var skipDurationRe = regexp.MustCompile(" [0-9.]+ms: ")
// areEqualJSONTracesSkipDuration reports whether the two JSON traces are
// identical once every duration_msec value is normalized to zero.
func areEqualJSONTracesSkipDuration(a, b string) bool {
	return zeroJSONDurationsInTrace(a) == zeroJSONDurationsInTrace(b)
}

// zeroJSONDurationsInTrace replaces every `"duration_msec":<number>` field in s
// with `"duration_msec":0`, so JSON traces compare deterministically.
func zeroJSONDurationsInTrace(s string) string {
	return skipJSONDurationRe.ReplaceAllString(s, `"duration_msec":0`)
}

// skipJSONDurationRe matches duration fields such as `"duration_msec":12.5` in JSON traces.
var skipJSONDurationRe = regexp.MustCompile(`"duration_msec":[0-9.]+`)

View file

@ -21,6 +21,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
"github.com/VictoriaMetrics/fastcache"
@ -1645,7 +1646,7 @@ func (db *indexDB) DeleteTSIDs(tfss []*TagFilters) (int, error) {
MaxTimestamp: (1 << 63) - 1,
}
is := db.getIndexSearch(tfss[0].accountID, tfss[0].projectID, noDeadline)
metricIDs, err := is.searchMetricIDs(tfss, tr, 2e9)
metricIDs, err := is.searchMetricIDs(nil, tfss, tr, 2e9)
db.putIndexSearch(is)
if err != nil {
return 0, err
@ -1736,7 +1737,7 @@ func (is *indexSearch) loadDeletedMetricIDs() (*uint64set.Set, error) {
}
// searchTSIDs returns sorted tsids matching the given tfss over the given tr.
func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) {
func (db *indexDB) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) {
if len(tfss) == 0 {
return nil, nil
}
@ -1750,7 +1751,8 @@ func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
tfKeyBuf.B = marshalTagFiltersKey(tfKeyBuf.B[:0], tfss, tr, true)
tsids, ok := db.getFromTagFiltersCache(tfKeyBuf.B)
if ok {
// Fast path - tsids found in the cache.
// Fast path - tsids found in the cache
qt.Printf("found %d matching series ids in the cache; they occupy %d bytes of memory", len(tsids), memorySizeForTSIDs(tsids))
return tsids, nil
}
@ -1758,7 +1760,7 @@ func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
accountID := tfss[0].accountID
projectID := tfss[0].projectID
is := db.getIndexSearch(accountID, projectID, deadline)
localTSIDs, err := is.searchTSIDs(tfss, tr, maxMetrics)
localTSIDs, err := is.searchTSIDs(qt, tfss, tr, maxMetrics)
db.putIndexSearch(is)
if err != nil {
return nil, err
@ -1777,7 +1779,7 @@ func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
return
}
is := extDB.getIndexSearch(accountID, projectID, deadline)
extTSIDs, err = is.searchTSIDs(tfss, tr, maxMetrics)
extTSIDs, err = is.searchTSIDs(qt, tfss, tr, maxMetrics)
extDB.putIndexSearch(is)
sort.Slice(extTSIDs, func(i, j int) bool { return extTSIDs[i].Less(&extTSIDs[j]) })
@ -1794,13 +1796,19 @@ func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
// Sort the found tsids, since they must be passed to TSID search
// in the sorted order.
sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) })
qt.Printf("sort the found %d series ids", len(tsids))
// Store TSIDs in the cache.
db.putToTagFiltersCache(tsids, tfKeyBuf.B)
qt.Printf("store the found %d series ids in cache; they occupy %d bytes of memory", len(tsids), memorySizeForTSIDs(tsids))
return tsids, err
}
func memorySizeForTSIDs(tsids []TSID) int {
return len(tsids) * int(unsafe.Sizeof(TSID{}))
}
var tagFiltersKeyBufPool bytesutil.ByteBufferPool
func (is *indexSearch) getTSIDByMetricName(dst *TSID, metricName []byte) error {
@ -1925,7 +1933,7 @@ func (is *indexSearch) containsTimeRange(tr TimeRange) (bool, error) {
return true, nil
}
func (is *indexSearch) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]TSID, error) {
func (is *indexSearch) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]TSID, error) {
ok, err := is.containsTimeRange(tr)
if err != nil {
return nil, err
@ -1934,7 +1942,7 @@ func (is *indexSearch) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics
// Fast path - the index doesn't contain data for the given tr.
return nil, nil
}
metricIDs, err := is.searchMetricIDs(tfss, tr, maxMetrics)
metricIDs, err := is.searchMetricIDs(qt, tfss, tr, maxMetrics)
if err != nil {
return nil, err
}
@ -1979,6 +1987,7 @@ func (is *indexSearch) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics
i++
}
tsids = tsids[:i]
qt.Printf("load %d series ids from %d metric ids", len(tsids), len(metricIDs))
// Do not sort the found tsids, since they will be sorted later.
return tsids, nil
@ -2211,17 +2220,19 @@ func matchTagFilters(mn *MetricName, tfs []*tagFilter, kb *bytesutil.ByteBuffer)
return true, nil
}
func (is *indexSearch) searchMetricIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]uint64, error) {
func (is *indexSearch) searchMetricIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]uint64, error) {
metricIDs, err := is.searchMetricIDsInternal(tfss, tr, maxMetrics)
if err != nil {
return nil, err
}
qt.Printf("found %d matching metric ids", metricIDs.Len())
if metricIDs.Len() == 0 {
// Nothing found
return nil, nil
}
sortedMetricIDs := metricIDs.AppendTo(nil)
qt.Printf("sort %d matching metric ids", len(sortedMetricIDs))
// Filter out deleted metricIDs.
dmis := is.db.s.getDeletedMetricIDs()
@ -2232,6 +2243,7 @@ func (is *indexSearch) searchMetricIDs(tfss []*TagFilters, tr TimeRange, maxMetr
metricIDsFiltered = append(metricIDsFiltered, metricID)
}
}
qt.Printf("%d metric ids after removing deleted metric ids", len(metricIDsFiltered))
sortedMetricIDs = metricIDsFiltered
}

View file

@ -874,7 +874,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
if err := tfs.Add(nil, nil, true, false); err != nil {
return fmt.Errorf("cannot add no-op negative filter: %w", err)
}
tsidsFound, err := db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsFound, err := db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search by exact tag filter: %w", err)
}
@ -883,7 +883,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
}
// Verify tag cache.
tsidsCached, err := db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsCached, err := db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search by exact tag filter: %w", err)
}
@ -895,7 +895,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
if err := tfs.Add(nil, mn.MetricGroup, true, false); err != nil {
return fmt.Errorf("cannot add negative filter for zeroing search results: %w", err)
}
tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search by exact tag filter with full negative: %w", err)
}
@ -913,7 +913,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
if err := tfs.Add(nil, []byte(re), false, true); err != nil {
return fmt.Errorf("cannot create regexp tag filter for Graphite wildcard")
}
tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search by regexp tag filter for Graphite wildcard: %w", err)
}
@ -930,7 +930,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
if err := tfs.Add([]byte("non-existent-tag"), []byte("foo|"), false, true); err != nil {
return fmt.Errorf("cannot create regexp tag filter for non-existing tag: %w", err)
}
tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search with a filter matching empty tag: %w", err)
}
@ -950,7 +950,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
if err := tfs.Add([]byte("non-existent-tag2"), []byte("bar|"), false, true); err != nil {
return fmt.Errorf("cannot create regexp tag filter for non-existing tag2: %w", err)
}
tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search with multipel filters matching empty tags: %w", err)
}
@ -978,7 +978,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
if err := tfs.Add(nil, nil, true, true); err != nil {
return fmt.Errorf("cannot add no-op negative filter with regexp: %w", err)
}
tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search by regexp tag filter: %w", err)
}
@ -988,7 +988,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
if err := tfs.Add(nil, mn.MetricGroup, true, true); err != nil {
return fmt.Errorf("cannot add negative filter for zeroing search results: %w", err)
}
tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search by regexp tag filter with full negative: %w", err)
}
@ -1004,7 +1004,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
if err := tfs.Add(nil, mn.MetricGroup, false, true); err != nil {
return fmt.Errorf("cannot create tag filter for MetricGroup matching zero results: %w", err)
}
tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search by non-existing tag filter: %w", err)
}
@ -1020,7 +1020,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
// Search with empty filter. It should match all the results for (accountID, projectID).
tfs.Reset(mn.AccountID, mn.ProjectID)
tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search for common prefix: %w", err)
}
@ -1033,7 +1033,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
if err := tfs.Add(nil, nil, false, false); err != nil {
return fmt.Errorf("cannot create tag filter for empty metricGroup: %w", err)
}
tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search for empty metricGroup: %w", err)
}
@ -1050,7 +1050,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
if err := tfs2.Add(nil, mn.MetricGroup, false, false); err != nil {
return fmt.Errorf("cannot create tag filter for MetricGroup: %w", err)
}
tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs1, tfs2}, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, []*TagFilters{tfs1, tfs2}, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search for empty metricGroup: %w", err)
}
@ -1059,7 +1059,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
}
// Verify empty tfss
tsidsFound, err = db.searchTSIDs(nil, tr, 1e5, noDeadline)
tsidsFound, err = db.searchTSIDs(nil, nil, tr, 1e5, noDeadline)
if err != nil {
return fmt.Errorf("cannot search for nil tfss: %w", err)
}
@ -1823,7 +1823,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
MinTimestamp: int64(now - 2*msecPerHour - 1),
MaxTimestamp: int64(now),
}
matchedTSIDs, err := db.searchTSIDs([]*TagFilters{tfs}, tr, 10000, noDeadline)
matchedTSIDs, err := db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 10000, noDeadline)
if err != nil {
t.Fatalf("error searching tsids: %v", err)
}
@ -1837,7 +1837,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
MaxTimestamp: int64(now),
}
matchedTSIDs, err = db.searchTSIDs([]*TagFilters{tfs}, tr, 10000, noDeadline)
matchedTSIDs, err = db.searchTSIDs(nil, []*TagFilters{tfs}, tr, 10000, noDeadline)
if err != nil {
t.Fatalf("error searching tsids: %v", err)
}

View file

@ -165,7 +165,7 @@ func BenchmarkHeadPostingForMatchers(b *testing.B) {
MaxTimestamp: timestampFromTime(time.Now()),
}
for i := 0; i < b.N; i++ {
metricIDs, err := is.searchMetricIDs(tfss, tr, 2e9)
metricIDs, err := is.searchMetricIDs(nil, tfss, tr, 2e9)
if err != nil {
b.Fatalf("unexpected error in searchMetricIDs: %s", err)
}

View file

@ -3,11 +3,13 @@ package storage
import (
"fmt"
"io"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter"
)
@ -168,7 +170,9 @@ func (s *Search) reset() {
// MustClose must be called when the search is done.
//
// Init returns the upper bound on the number of found time series.
func (s *Search) Init(storage *Storage, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) int {
func (s *Search) Init(qt *querytracer.Tracer, storage *Storage, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) int {
qt = qt.NewChild()
defer qt.Donef("init series search: filters=%s, timeRange=%s", tfss, &tr)
if s.needClosing {
logger.Panicf("BUG: missing MustClose call before the next call to Init")
}
@ -179,15 +183,15 @@ func (s *Search) Init(storage *Storage, tfss []*TagFilters, tr TimeRange, maxMet
s.deadline = deadline
s.needClosing = true
tsids, err := storage.searchTSIDs(tfss, tr, maxMetrics, deadline)
tsids, err := storage.searchTSIDs(qt, tfss, tr, maxMetrics, deadline)
if err == nil {
err = storage.prefetchMetricNames(tsids, deadline)
err = storage.prefetchMetricNames(qt, tsids, deadline)
}
// It is ok to call Init on error from storage.searchTSIDs.
// Init must be called before returning because it will fail
// on Seach.MustClose otherwise.
s.ts.Init(storage.tb, tsids, tr)
qt.Printf("search for parts with data for %d series", len(tsids))
if err != nil {
s.err = err
return 0
@ -295,9 +299,24 @@ type TagFilter struct {
// String returns string representation of tf.
func (tf *TagFilter) String() string {
var bb bytesutil.ByteBuffer
fmt.Fprintf(&bb, "{Key=%q, Value=%q, IsNegative: %v, IsRegexp: %v}", tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp)
return string(bb.B)
op := tf.getOp()
if len(tf.Key) == 0 {
return fmt.Sprintf("__name__%s%q", op, tf.Value)
}
return fmt.Sprintf("%s%s%q", tf.Key, op, tf.Value)
}
func (tf *TagFilter) getOp() string {
if tf.IsNegative {
if tf.IsRegexp {
return "!~"
}
return "!="
}
if tf.IsRegexp {
return "=~"
}
return "="
}
// Marshal appends marshaled tf to dst and returns the result.
@ -360,17 +379,19 @@ func (tf *TagFilter) Unmarshal(src []byte) ([]byte, error) {
// String returns string representation of the search query.
func (sq *SearchQuery) String() string {
var bb bytesutil.ByteBuffer
fmt.Fprintf(&bb, "AccountID=%d, ProjectID=%d, MinTimestamp=%s, MaxTimestamp=%s, TagFilters=[\n",
sq.AccountID, sq.ProjectID, timestampToTime(sq.MinTimestamp), timestampToTime(sq.MaxTimestamp))
for _, tagFilters := range sq.TagFilterss {
for _, tf := range tagFilters {
fmt.Fprintf(&bb, "%s", tf.String())
}
fmt.Fprintf(&bb, "\n")
a := make([]string, len(sq.TagFilterss))
for i, tfs := range sq.TagFilterss {
a[i] = tagFiltersToString(tfs)
}
fmt.Fprintf(&bb, "]")
return string(bb.B)
return fmt.Sprintf("accountID=%d, projectID=%d, filters=%s, timeRange=[%d..%d]", sq.AccountID, sq.ProjectID, a, sq.MinTimestamp, sq.MaxTimestamp)
}
func tagFiltersToString(tfs []TagFilter) string {
a := make([]string, len(tfs))
for i, tf := range tfs {
a[i] = tf.String()
}
return "{" + strings.Join(a, ",") + "}"
}
// Marshal appends marshaled sq to dst and returns the result.

View file

@ -209,7 +209,7 @@ func testSearchInternal(st *Storage, tr TimeRange, mrs []MetricRow, accountsCoun
}
// Search
s.Init(st, []*TagFilters{tfs}, tr, 1e5, noDeadline)
s.Init(nil, st, []*TagFilters{tfs}, tr, 1e5, noDeadline)
var mbs []metricBlock
for s.NextMetricBlock() {
var b Block

View file

@ -24,6 +24,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/snapshot"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
@ -1122,15 +1123,17 @@ func nextRetentionDuration(retentionMsecs int64) time.Duration {
}
// SearchMetricNames returns metric names matching the given tfss on the given tr.
func (s *Storage) SearchMetricNames(tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]MetricName, error) {
tsids, err := s.searchTSIDs(tfss, tr, maxMetrics, deadline)
func (s *Storage) SearchMetricNames(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]MetricName, error) {
qt = qt.NewChild()
defer qt.Donef("search for matching metric names")
tsids, err := s.searchTSIDs(qt, tfss, tr, maxMetrics, deadline)
if err != nil {
return nil, err
}
if len(tsids) == 0 {
return nil, nil
}
if err = s.prefetchMetricNames(tsids, deadline); err != nil {
if err = s.prefetchMetricNames(qt, tsids, deadline); err != nil {
return nil, err
}
accountID := tsids[0].AccountID
@ -1165,7 +1168,9 @@ func (s *Storage) SearchMetricNames(tfss []*TagFilters, tr TimeRange, maxMetrics
}
// searchTSIDs returns sorted TSIDs for the given tfss and the given tr.
func (s *Storage) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) {
func (s *Storage) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) {
qt = qt.NewChild()
defer qt.Donef("search for matching series ids")
// Do not cache tfss -> tsids here, since the caching is performed
// on idb level.
@ -1186,6 +1191,7 @@ func (s *Storage) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
t := timerpool.Get(timeout)
select {
case searchTSIDsConcurrencyCh <- struct{}{}:
qt.Printf("wait in the queue because %d concurrent search requests are already performed", cap(searchTSIDsConcurrencyCh))
timerpool.Put(t)
case <-t.C:
timerpool.Put(t)
@ -1194,7 +1200,7 @@ func (s *Storage) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
cap(searchTSIDsConcurrencyCh), timeout.Seconds())
}
}
tsids, err := s.idb().searchTSIDs(tfss, tr, maxMetrics, deadline)
tsids, err := s.idb().searchTSIDs(qt, tfss, tr, maxMetrics, deadline)
<-searchTSIDsConcurrencyCh
if err != nil {
return nil, fmt.Errorf("error when searching tsids: %w", err)
@ -1214,8 +1220,11 @@ var (
// It is expected that all the tsdis have the same (accountID, projectID)
//
// This should speed-up further searchMetricNameWithCache calls for metricIDs from tsids.
func (s *Storage) prefetchMetricNames(tsids []TSID, deadline uint64) error {
func (s *Storage) prefetchMetricNames(qt *querytracer.Tracer, tsids []TSID, deadline uint64) error {
qt = qt.NewChild()
defer qt.Donef("prefetch metric names for %d series ids", len(tsids))
if len(tsids) == 0 {
qt.Printf("nothing to prefetch")
return nil
}
accountID := tsids[0].AccountID
@ -1233,8 +1242,10 @@ func (s *Storage) prefetchMetricNames(tsids []TSID, deadline uint64) error {
}
metricIDs = append(metricIDs, metricID)
}
qt.Printf("%d out of %d metric names must be pre-fetched", len(metricIDs), len(tsids))
if len(metricIDs) < 500 {
// It is cheaper to skip pre-fetching and obtain metricNames inline.
qt.Printf("skip pre-fetching metric names for low number of metrid ids=%d", len(metricIDs))
return nil
}
atomic.AddUint64(&s.slowMetricNameLoads, uint64(len(metricIDs)))
@ -1281,6 +1292,7 @@ func (s *Storage) prefetchMetricNames(tsids []TSID, deadline uint64) error {
if err != nil {
return err
}
qt.Printf("pre-fetch metric names for %d metric ids", len(metricIDs))
// Store the pre-fetched metricIDs, so they aren't pre-fetched next time.
s.prefetchedMetricIDsLock.Lock()
@ -1299,6 +1311,7 @@ func (s *Storage) prefetchMetricNames(tsids []TSID, deadline uint64) error {
}
s.prefetchedMetricIDs.Store(prefetchedMetricIDsNew)
s.prefetchedMetricIDsLock.Unlock()
qt.Printf("cache metric ids for pre-fetched metric names")
return nil
}

View file

@ -675,7 +675,7 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error {
metricBlocksCount := func(tfs *TagFilters) int {
// Verify the number of blocks
n := 0
sr.Init(s, []*TagFilters{tfs}, tr, 1e5, noDeadline)
sr.Init(nil, s, []*TagFilters{tfs}, tr, 1e5, noDeadline)
for sr.NextMetricBlock() {
n++
}
@ -934,7 +934,7 @@ func testStorageRegisterMetricNames(s *Storage) error {
if err := tfs.Add([]byte("add_id"), []byte("0"), false, false); err != nil {
return fmt.Errorf("unexpected error in TagFilters.Add: %w", err)
}
mns, err := s.SearchMetricNames([]*TagFilters{tfs}, tr, metricsPerAdd*addsCount*100+100, noDeadline)
mns, err := s.SearchMetricNames(nil, []*TagFilters{tfs}, tr, metricsPerAdd*addsCount*100+100, noDeadline)
if err != nil {
return fmt.Errorf("error in SearchMetricNames: %w", err)
}
@ -957,7 +957,7 @@ func testStorageRegisterMetricNames(s *Storage) error {
if err := tfs.Add([]byte("add_id"), []byte("0"), false, false); err != nil {
return fmt.Errorf("unexpected error in TagFilters.Add: %w", err)
}
mns, err = s.SearchMetricNames([]*TagFilters{tfs}, tr, metricsPerAdd*addsCount*100+100, noDeadline)
mns, err = s.SearchMetricNames(nil, []*TagFilters{tfs}, tr, metricsPerAdd*addsCount*100+100, noDeadline)
if err != nil {
return fmt.Errorf("error in SearchMetricNames for incorrect accountID, projectID: %w", err)
}

View file

@ -26,9 +26,7 @@ type TimeRange struct {
}
func (tr *TimeRange) String() string {
minTime := timestampToTime(tr.MinTimestamp)
maxTime := timestampToTime(tr.MaxTimestamp)
return fmt.Sprintf("[%s - %s]", minTime, maxTime)
return fmt.Sprintf("[%d..%d]", tr.MinTimestamp, tr.MaxTimestamp)
}
// timestampToPartitionName returns partition name for the given timestamp.