all: improve query tracing coverage for indexdb search

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1403
This commit is contained in:
Aliaksandr Valialkin 2022-06-09 19:46:26 +03:00
parent ad44eadd94
commit cb39eada77
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
11 changed files with 255 additions and 153 deletions

View file

@ -378,7 +378,7 @@ func selectHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseW
case "prometheus/api/v1/status/tsdb": case "prometheus/api/v1/status/tsdb":
statusTSDBRequests.Inc() statusTSDBRequests.Inc()
httpserver.EnableCORS(w, r) httpserver.EnableCORS(w, r)
if err := prometheus.TSDBStatusHandler(startTime, at, w, r); err != nil { if err := prometheus.TSDBStatusHandler(qt, startTime, at, w, r); err != nil {
statusTSDBErrors.Inc() statusTSDBErrors.Inc()
sendPrometheusError(w, r, err) sendPrometheusError(w, r, err)
return true return true

View file

@ -690,7 +690,7 @@ const secsPerDay = 3600 * 24
// See https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats // See https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
// //
// It can accept `match[]` filters in order to narrow down the search. // It can accept `match[]` filters in order to narrow down the search.
func TSDBStatusHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error { func TSDBStatusHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
defer tsdbStatusDuration.UpdateDuration(startTime) defer tsdbStatusDuration.UpdateDuration(startTime)
deadline := searchutils.GetDeadlineForStatusRequest(r, startTime) deadline := searchutils.GetDeadlineForStatusRequest(r, startTime)
@ -728,12 +728,12 @@ func TSDBStatusHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
var status *storage.TSDBStatus var status *storage.TSDBStatus
var isPartial bool var isPartial bool
if len(matches) == 0 && len(etfs) == 0 { if len(matches) == 0 && len(etfs) == 0 {
status, isPartial, err = netstorage.GetTSDBStatusForDate(nil, at, denyPartialResponse, deadline, date, topN, *maxTSDBStatusSeries) status, isPartial, err = netstorage.GetTSDBStatusForDate(qt, at, denyPartialResponse, deadline, date, topN, *maxTSDBStatusSeries)
if err != nil { if err != nil {
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err) return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
} }
} else { } else {
status, isPartial, err = tsdbStatusWithMatches(at, denyPartialResponse, matches, etfs, date, topN, *maxTSDBStatusSeries, deadline) status, isPartial, err = tsdbStatusWithMatches(qt, at, denyPartialResponse, matches, etfs, date, topN, *maxTSDBStatusSeries, deadline)
if err != nil { if err != nil {
return fmt.Errorf("cannot obtain tsdb status with matches for date=%d, topN=%d: %w", date, topN, err) return fmt.Errorf("cannot obtain tsdb status with matches for date=%d, topN=%d: %w", date, topN, err)
} }
@ -742,14 +742,14 @@ func TSDBStatusHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
bw := bufferedwriter.Get(w) bw := bufferedwriter.Get(w)
defer bufferedwriter.Put(bw) defer bufferedwriter.Put(bw)
WriteTSDBStatusResponse(bw, isPartial, status) WriteTSDBStatusResponse(bw, isPartial, status, qt)
if err := bw.Flush(); err != nil { if err := bw.Flush(); err != nil {
return fmt.Errorf("cannot send tsdb status response to remote client: %w", err) return fmt.Errorf("cannot send tsdb status response to remote client: %w", err)
} }
return nil return nil
} }
func tsdbStatusWithMatches(at *auth.Token, denyPartialResponse bool, matches []string, etfs [][]storage.TagFilter, date uint64, topN, maxMetrics int, deadline searchutils.Deadline) (*storage.TSDBStatus, bool, error) { func tsdbStatusWithMatches(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, matches []string, etfs [][]storage.TagFilter, date uint64, topN, maxMetrics int, deadline searchutils.Deadline) (*storage.TSDBStatus, bool, error) {
tagFilterss, err := getTagFilterssFromMatches(matches) tagFilterss, err := getTagFilterssFromMatches(matches)
if err != nil { if err != nil {
return nil, false, err return nil, false, err
@ -761,7 +761,7 @@ func tsdbStatusWithMatches(at *auth.Token, denyPartialResponse bool, matches []s
start := int64(date*secsPerDay) * 1000 start := int64(date*secsPerDay) * 1000
end := int64(date*secsPerDay+secsPerDay) * 1000 end := int64(date*secsPerDay+secsPerDay) * 1000
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, start, end, tagFilterss, maxMetrics) sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, start, end, tagFilterss, maxMetrics)
status, isPartial, err := netstorage.GetTSDBStatusWithFilters(nil, at, denyPartialResponse, deadline, sq, topN) status, isPartial, err := netstorage.GetTSDBStatusWithFilters(qt, at, denyPartialResponse, deadline, sq, topN)
if err != nil { if err != nil {
return nil, false, err return nil, false, err
} }

View file

@ -1,8 +1,11 @@
{% import "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" %} {% import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
) %}
{% stripspace %} {% stripspace %}
TSDBStatusResponse generates response for /api/v1/status/tsdb . TSDBStatusResponse generates response for /api/v1/status/tsdb .
{% func TSDBStatusResponse(isPartial bool, status *storage.TSDBStatus) %} {% func TSDBStatusResponse(isPartial bool, status *storage.TSDBStatus, qt *querytracer.Tracer) %}
{ {
"status":"success", "status":"success",
"isPartial":{% if isPartial %}true{% else %}false{% endif %}, "isPartial":{% if isPartial %}true{% else %}false{% endif %},
@ -13,6 +16,8 @@ TSDBStatusResponse generates response for /api/v1/status/tsdb .
"seriesCountByLabelValuePair":{%= tsdbStatusEntries(status.SeriesCountByLabelValuePair) %}, "seriesCountByLabelValuePair":{%= tsdbStatusEntries(status.SeriesCountByLabelValuePair) %},
"labelValueCountByLabelName":{%= tsdbStatusEntries(status.LabelValueCountByLabelName) %} "labelValueCountByLabelName":{%= tsdbStatusEntries(status.LabelValueCountByLabelName) %}
} }
{% code qt.Done() %}
{%= dumpQueryTrace(qt) %}
} }
{% endfunc %} {% endfunc %}

View file

@ -5,139 +5,149 @@
package prometheus package prometheus
//line app/vmselect/prometheus/tsdb_status_response.qtpl:1 //line app/vmselect/prometheus/tsdb_status_response.qtpl:1
import "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)
// TSDBStatusResponse generates response for /api/v1/status/tsdb . // TSDBStatusResponse generates response for /api/v1/status/tsdb .
//line app/vmselect/prometheus/tsdb_status_response.qtpl:5 //line app/vmselect/prometheus/tsdb_status_response.qtpl:8
import ( import (
qtio422016 "io" qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate" qt422016 "github.com/valyala/quicktemplate"
) )
//line app/vmselect/prometheus/tsdb_status_response.qtpl:5 //line app/vmselect/prometheus/tsdb_status_response.qtpl:8
var ( var (
_ = qtio422016.Copy _ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer _ = qt422016.AcquireByteBuffer
) )
//line app/vmselect/prometheus/tsdb_status_response.qtpl:5 //line app/vmselect/prometheus/tsdb_status_response.qtpl:8
func StreamTSDBStatusResponse(qw422016 *qt422016.Writer, isPartial bool, status *storage.TSDBStatus) { func StreamTSDBStatusResponse(qw422016 *qt422016.Writer, isPartial bool, status *storage.TSDBStatus, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:5 //line app/vmselect/prometheus/tsdb_status_response.qtpl:8
qw422016.N().S(`{"status":"success","isPartial":`) qw422016.N().S(`{"status":"success","isPartial":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:8 //line app/vmselect/prometheus/tsdb_status_response.qtpl:11
if isPartial { if isPartial {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:8 //line app/vmselect/prometheus/tsdb_status_response.qtpl:11
qw422016.N().S(`true`) qw422016.N().S(`true`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:8 //line app/vmselect/prometheus/tsdb_status_response.qtpl:11
} else { } else {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:8 //line app/vmselect/prometheus/tsdb_status_response.qtpl:11
qw422016.N().S(`false`) qw422016.N().S(`false`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:8 //line app/vmselect/prometheus/tsdb_status_response.qtpl:11
} }
//line app/vmselect/prometheus/tsdb_status_response.qtpl:8 //line app/vmselect/prometheus/tsdb_status_response.qtpl:11
qw422016.N().S(`,"data":{"totalSeries":`) qw422016.N().S(`,"data":{"totalSeries":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:10 //line app/vmselect/prometheus/tsdb_status_response.qtpl:13
qw422016.N().DUL(status.TotalSeries) qw422016.N().DUL(status.TotalSeries)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:10 //line app/vmselect/prometheus/tsdb_status_response.qtpl:13
qw422016.N().S(`,"totalLabelValuePairs":`) qw422016.N().S(`,"totalLabelValuePairs":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:11 //line app/vmselect/prometheus/tsdb_status_response.qtpl:14
qw422016.N().DUL(status.TotalLabelValuePairs) qw422016.N().DUL(status.TotalLabelValuePairs)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:11 //line app/vmselect/prometheus/tsdb_status_response.qtpl:14
qw422016.N().S(`,"seriesCountByMetricName":`) qw422016.N().S(`,"seriesCountByMetricName":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:12 //line app/vmselect/prometheus/tsdb_status_response.qtpl:15
streamtsdbStatusEntries(qw422016, status.SeriesCountByMetricName) streamtsdbStatusEntries(qw422016, status.SeriesCountByMetricName)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:12 //line app/vmselect/prometheus/tsdb_status_response.qtpl:15
qw422016.N().S(`,"seriesCountByLabelValuePair":`) qw422016.N().S(`,"seriesCountByLabelValuePair":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:13 //line app/vmselect/prometheus/tsdb_status_response.qtpl:16
streamtsdbStatusEntries(qw422016, status.SeriesCountByLabelValuePair) streamtsdbStatusEntries(qw422016, status.SeriesCountByLabelValuePair)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:13 //line app/vmselect/prometheus/tsdb_status_response.qtpl:16
qw422016.N().S(`,"labelValueCountByLabelName":`) qw422016.N().S(`,"labelValueCountByLabelName":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:14 //line app/vmselect/prometheus/tsdb_status_response.qtpl:17
streamtsdbStatusEntries(qw422016, status.LabelValueCountByLabelName) streamtsdbStatusEntries(qw422016, status.LabelValueCountByLabelName)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:14
qw422016.N().S(`}}`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17 //line app/vmselect/prometheus/tsdb_status_response.qtpl:17
}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
func WriteTSDBStatusResponse(qq422016 qtio422016.Writer, isPartial bool, status *storage.TSDBStatus) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
StreamTSDBStatusResponse(qw422016, isPartial, status)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
func TSDBStatusResponse(isPartial bool, status *storage.TSDBStatus) string {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
WriteTSDBStatusResponse(qb422016, isPartial, status)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
return qs422016
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:19
func streamtsdbStatusEntries(qw422016 *qt422016.Writer, a []storage.TopHeapEntry) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:19
qw422016.N().S(`[`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:21
for i, e := range a {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:21
qw422016.N().S(`{"name":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
qw422016.N().Q(e.Name)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
qw422016.N().S(`,"value":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
qw422016.N().D(int(e.Count))
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
qw422016.N().S(`}`) qw422016.N().S(`}`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:26 //line app/vmselect/prometheus/tsdb_status_response.qtpl:19
if i+1 < len(a) { qt.Done()
//line app/vmselect/prometheus/tsdb_status_response.qtpl:26
qw422016.N().S(`,`) //line app/vmselect/prometheus/tsdb_status_response.qtpl:20
//line app/vmselect/prometheus/tsdb_status_response.qtpl:26 streamdumpQueryTrace(qw422016, qt)
} //line app/vmselect/prometheus/tsdb_status_response.qtpl:20
//line app/vmselect/prometheus/tsdb_status_response.qtpl:27 qw422016.N().S(`}`)
} //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
//line app/vmselect/prometheus/tsdb_status_response.qtpl:27
qw422016.N().S(`]`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29
} }
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
func writetsdbStatusEntries(qq422016 qtio422016.Writer, a []storage.TopHeapEntry) { func WriteTSDBStatusResponse(qq422016 qtio422016.Writer, isPartial bool, status *storage.TSDBStatus, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
qw422016 := qt422016.AcquireWriter(qq422016) qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
streamtsdbStatusEntries(qw422016, a) StreamTSDBStatusResponse(qw422016, isPartial, status, qt)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
qt422016.ReleaseWriter(qw422016) qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
} }
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
func tsdbStatusEntries(a []storage.TopHeapEntry) string { func TSDBStatusResponse(isPartial bool, status *storage.TSDBStatus, qt *querytracer.Tracer) string {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
qb422016 := qt422016.AcquireByteBuffer() qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
writetsdbStatusEntries(qb422016, a) WriteTSDBStatusResponse(qb422016, isPartial, status, qt)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
qs422016 := string(qb422016.B) qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
qt422016.ReleaseByteBuffer(qb422016) qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
return qs422016 return qs422016
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29 //line app/vmselect/prometheus/tsdb_status_response.qtpl:22
}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
func streamtsdbStatusEntries(qw422016 *qt422016.Writer, a []storage.TopHeapEntry) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
qw422016.N().S(`[`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:26
for i, e := range a {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:26
qw422016.N().S(`{"name":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:28
qw422016.N().Q(e.Name)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:28
qw422016.N().S(`,"value":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29
qw422016.N().D(int(e.Count))
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29
qw422016.N().S(`}`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:31
if i+1 < len(a) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:31
qw422016.N().S(`,`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:31
}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:32
}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:32
qw422016.N().S(`]`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
func writetsdbStatusEntries(qq422016 qtio422016.Writer, a []storage.TopHeapEntry) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
streamtsdbStatusEntries(qw422016, a)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
func tsdbStatusEntries(a []storage.TopHeapEntry) string {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
writetsdbStatusEntries(qb422016, a)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
return qs422016
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
} }

View file

@ -223,12 +223,16 @@ func GetExtraTagFilters(r *http.Request) ([][]storage.TagFilter, error) {
if len(tmp) != 2 { if len(tmp) != 2 {
return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match) return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match)
} }
if tmp[0] == "__name__" {
// This is required for storage.Search.
tmp[0] = ""
}
tagFilters = append(tagFilters, storage.TagFilter{ tagFilters = append(tagFilters, storage.TagFilter{
Key: []byte(tmp[0]), Key: []byte(tmp[0]),
Value: []byte(tmp[1]), Value: []byte(tmp[1]),
}) })
} }
extraFilters := r.Form["extra_filters"] extraFilters := append([]string{}, r.Form["extra_filters"]...)
extraFilters = append(extraFilters, r.Form["extra_filters[]"]...) extraFilters = append(extraFilters, r.Form["extra_filters[]"]...)
if len(extraFilters) == 0 { if len(extraFilters) == 0 {
if len(tagFilters) == 0 { if len(tagFilters) == 0 {

View file

@ -939,7 +939,7 @@ func (s *Server) processVMSelectTSDBStatus(ctx *vmselectRequestCtx) error {
} }
// Execute the request // Execute the request
status, err := s.storage.GetTSDBStatusWithFiltersForDate(accountID, projectID, nil, uint64(date), int(topN), maxMetrics, ctx.deadline) status, err := s.storage.GetTSDBStatusWithFiltersForDate(ctx.qt, accountID, projectID, nil, uint64(date), int(topN), maxMetrics, ctx.deadline)
if err != nil { if err != nil {
return ctx.writeErrorMessage(err) return ctx.writeErrorMessage(err)
} }
@ -994,7 +994,7 @@ func (s *Server) processVMSelectTSDBStatusWithFilters(ctx *vmselectRequestCtx) e
} }
maxMetrics := ctx.getMaxMetrics() maxMetrics := ctx.getMaxMetrics()
date := uint64(ctx.sq.MinTimestamp) / (24 * 3600 * 1000) date := uint64(ctx.sq.MinTimestamp) / (24 * 3600 * 1000)
status, err := s.storage.GetTSDBStatusWithFiltersForDate(ctx.sq.AccountID, ctx.sq.ProjectID, ctx.tfss, date, int(topN), maxMetrics, ctx.deadline) status, err := s.storage.GetTSDBStatusWithFiltersForDate(ctx.qt, ctx.sq.AccountID, ctx.sq.ProjectID, ctx.tfss, date, int(topN), maxMetrics, ctx.deadline)
if err != nil { if err != nil {
return ctx.writeErrorMessage(err) return ctx.writeErrorMessage(err)
} }

View file

@ -312,13 +312,17 @@ func (db *indexDB) decRef() {
logger.Infof("indexDB %q has been dropped", tbPath) logger.Infof("indexDB %q has been dropped", tbPath)
} }
func (db *indexDB) getFromTagFiltersCache(key []byte) ([]TSID, bool) { func (db *indexDB) getFromTagFiltersCache(qt *querytracer.Tracer, key []byte) ([]TSID, bool) {
qt = qt.NewChild("search for tsids in tag filters cache")
defer qt.Done()
compressedBuf := tagBufPool.Get() compressedBuf := tagBufPool.Get()
defer tagBufPool.Put(compressedBuf) defer tagBufPool.Put(compressedBuf)
compressedBuf.B = db.tagFiltersCache.GetBig(compressedBuf.B[:0], key) compressedBuf.B = db.tagFiltersCache.GetBig(compressedBuf.B[:0], key)
if len(compressedBuf.B) == 0 { if len(compressedBuf.B) == 0 {
qt.Printf("cache miss")
return nil, false return nil, false
} }
qt.Printf("found tsids with compressed size: %d bytes", len(compressedBuf.B))
buf := tagBufPool.Get() buf := tagBufPool.Get()
defer tagBufPool.Put(buf) defer tagBufPool.Put(buf)
var err error var err error
@ -326,22 +330,29 @@ func (db *indexDB) getFromTagFiltersCache(key []byte) ([]TSID, bool) {
if err != nil { if err != nil {
logger.Panicf("FATAL: cannot decompress tsids from tagFiltersCache: %s", err) logger.Panicf("FATAL: cannot decompress tsids from tagFiltersCache: %s", err)
} }
qt.Printf("decompressed tsids to %d bytes", len(buf.B))
tsids, err := unmarshalTSIDs(nil, buf.B) tsids, err := unmarshalTSIDs(nil, buf.B)
if err != nil { if err != nil {
logger.Panicf("FATAL: cannot unmarshal tsids from tagFiltersCache: %s", err) logger.Panicf("FATAL: cannot unmarshal tsids from tagFiltersCache: %s", err)
} }
qt.Printf("unmarshaled %d tsids", len(tsids))
return tsids, true return tsids, true
} }
var tagBufPool bytesutil.ByteBufferPool var tagBufPool bytesutil.ByteBufferPool
func (db *indexDB) putToTagFiltersCache(tsids []TSID, key []byte) { func (db *indexDB) putToTagFiltersCache(qt *querytracer.Tracer, tsids []TSID, key []byte) {
qt = qt.NewChild("put %d tsids in cache", len(tsids))
defer qt.Done()
buf := tagBufPool.Get() buf := tagBufPool.Get()
buf.B = marshalTSIDs(buf.B[:0], tsids) buf.B = marshalTSIDs(buf.B[:0], tsids)
qt.Printf("marshaled %d tsids into %d bytes", len(tsids), len(buf.B))
compressedBuf := tagBufPool.Get() compressedBuf := tagBufPool.Get()
compressedBuf.B = encoding.CompressZSTDLevel(compressedBuf.B[:0], buf.B, 1) compressedBuf.B = encoding.CompressZSTDLevel(compressedBuf.B[:0], buf.B, 1)
qt.Printf("compressed %d tsids into %d bytes", len(tsids), len(compressedBuf.B))
tagBufPool.Put(buf) tagBufPool.Put(buf)
db.tagFiltersCache.SetBig(key, compressedBuf.B) db.tagFiltersCache.SetBig(key, compressedBuf.B)
qt.Printf("store %d compressed tsids into cache", len(tsids))
tagBufPool.Put(compressedBuf) tagBufPool.Put(compressedBuf)
} }
@ -1356,9 +1367,11 @@ func (is *indexSearch) getSeriesCount() (uint64, error) {
} }
// GetTSDBStatusWithFiltersForDate returns topN entries for tsdb status for the given tfss, date, accountID and projectID. // GetTSDBStatusWithFiltersForDate returns topN entries for tsdb status for the given tfss, date, accountID and projectID.
func (db *indexDB) GetTSDBStatusWithFiltersForDate(accountID, projectID uint32, tfss []*TagFilters, date uint64, topN, maxMetrics int, deadline uint64) (*TSDBStatus, error) { func (db *indexDB) GetTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, accountID, projectID uint32, tfss []*TagFilters, date uint64, topN, maxMetrics int, deadline uint64) (*TSDBStatus, error) {
qtChild := qt.NewChild("collect tsdb stats in the current indexdb")
is := db.getIndexSearch(accountID, projectID, deadline) is := db.getIndexSearch(accountID, projectID, deadline)
status, err := is.getTSDBStatusWithFiltersForDate(tfss, date, topN, maxMetrics) status, err := is.getTSDBStatusWithFiltersForDate(qtChild, tfss, date, topN, maxMetrics)
qtChild.Done()
db.putIndexSearch(is) db.putIndexSearch(is)
if err != nil { if err != nil {
return nil, err return nil, err
@ -1367,8 +1380,10 @@ func (db *indexDB) GetTSDBStatusWithFiltersForDate(accountID, projectID uint32,
return status, nil return status, nil
} }
ok := db.doExtDB(func(extDB *indexDB) { ok := db.doExtDB(func(extDB *indexDB) {
qtChild := qt.NewChild("collect tsdb stats in the previous indexdb")
is := extDB.getIndexSearch(accountID, projectID, deadline) is := extDB.getIndexSearch(accountID, projectID, deadline)
status, err = is.getTSDBStatusWithFiltersForDate(tfss, date, topN, maxMetrics) status, err = is.getTSDBStatusWithFiltersForDate(qtChild, tfss, date, topN, maxMetrics)
qtChild.Done()
extDB.putIndexSearch(is) extDB.putIndexSearch(is)
}) })
if ok && err != nil { if ok && err != nil {
@ -1378,14 +1393,14 @@ func (db *indexDB) GetTSDBStatusWithFiltersForDate(accountID, projectID uint32,
} }
// getTSDBStatusWithFiltersForDate returns topN entries for tsdb status for the given tfss and the given date. // getTSDBStatusWithFiltersForDate returns topN entries for tsdb status for the given tfss and the given date.
func (is *indexSearch) getTSDBStatusWithFiltersForDate(tfss []*TagFilters, date uint64, topN, maxMetrics int) (*TSDBStatus, error) { func (is *indexSearch) getTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, tfss []*TagFilters, date uint64, topN, maxMetrics int) (*TSDBStatus, error) {
var filter *uint64set.Set var filter *uint64set.Set
if len(tfss) > 0 { if len(tfss) > 0 {
tr := TimeRange{ tr := TimeRange{
MinTimestamp: int64(date) * msecPerDay, MinTimestamp: int64(date) * msecPerDay,
MaxTimestamp: int64(date+1)*msecPerDay - 1, MaxTimestamp: int64(date+1)*msecPerDay - 1,
} }
metricIDs, err := is.searchMetricIDsInternal(tfss, tr, maxMetrics) metricIDs, err := is.searchMetricIDsInternal(qt, tfss, tr, maxMetrics)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1771,14 +1786,16 @@ func (db *indexDB) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr Ti
tfss = convertToCompositeTagFilterss(tfss) tfss = convertToCompositeTagFilterss(tfss)
} }
qtChild := qt.NewChild("search for tsids in the current indexdb")
tfKeyBuf := tagFiltersKeyBufPool.Get() tfKeyBuf := tagFiltersKeyBufPool.Get()
defer tagFiltersKeyBufPool.Put(tfKeyBuf) defer tagFiltersKeyBufPool.Put(tfKeyBuf)
tfKeyBuf.B = marshalTagFiltersKey(tfKeyBuf.B[:0], tfss, tr, true) tfKeyBuf.B = marshalTagFiltersKey(tfKeyBuf.B[:0], tfss, tr, true)
tsids, ok := db.getFromTagFiltersCache(tfKeyBuf.B) tsids, ok := db.getFromTagFiltersCache(qtChild, tfKeyBuf.B)
if ok { if ok {
// Fast path - tsids found in the cache // Fast path - tsids found in the cache
qt.Printf("found %d matching series ids in the cache; they occupy %d bytes of memory", len(tsids), memorySizeForTSIDs(tsids)) qtChild.Done()
return tsids, nil return tsids, nil
} }
@ -1786,30 +1803,34 @@ func (db *indexDB) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr Ti
accountID := tfss[0].accountID accountID := tfss[0].accountID
projectID := tfss[0].projectID projectID := tfss[0].projectID
is := db.getIndexSearch(accountID, projectID, deadline) is := db.getIndexSearch(accountID, projectID, deadline)
localTSIDs, err := is.searchTSIDs(qt, tfss, tr, maxMetrics) localTSIDs, err := is.searchTSIDs(qtChild, tfss, tr, maxMetrics)
db.putIndexSearch(is) db.putIndexSearch(is)
if err != nil { if err != nil {
return nil, err return nil, err
} }
qtChild.Done()
var extTSIDs []TSID var extTSIDs []TSID
if db.doExtDB(func(extDB *indexDB) { if db.doExtDB(func(extDB *indexDB) {
qtChild := qt.NewChild("search for tsids in the previous indexdb")
defer qtChild.Done()
tfKeyExtBuf := tagFiltersKeyBufPool.Get() tfKeyExtBuf := tagFiltersKeyBufPool.Get()
defer tagFiltersKeyBufPool.Put(tfKeyExtBuf) defer tagFiltersKeyBufPool.Put(tfKeyExtBuf)
// Data in extDB cannot be changed, so use unversioned keys for tag cache. // Data in extDB cannot be changed, so use unversioned keys for tag cache.
tfKeyExtBuf.B = marshalTagFiltersKey(tfKeyExtBuf.B[:0], tfss, tr, false) tfKeyExtBuf.B = marshalTagFiltersKey(tfKeyExtBuf.B[:0], tfss, tr, false)
tsids, ok := extDB.getFromTagFiltersCache(tfKeyExtBuf.B) tsids, ok := extDB.getFromTagFiltersCache(qtChild, tfKeyExtBuf.B)
if ok { if ok {
extTSIDs = tsids extTSIDs = tsids
return return
} }
is := extDB.getIndexSearch(accountID, projectID, deadline) is := extDB.getIndexSearch(accountID, projectID, deadline)
extTSIDs, err = is.searchTSIDs(qt, tfss, tr, maxMetrics) extTSIDs, err = is.searchTSIDs(qtChild, tfss, tr, maxMetrics)
extDB.putIndexSearch(is) extDB.putIndexSearch(is)
sort.Slice(extTSIDs, func(i, j int) bool { return extTSIDs[i].Less(&extTSIDs[j]) }) sort.Slice(extTSIDs, func(i, j int) bool { return extTSIDs[i].Less(&extTSIDs[j]) })
extDB.putToTagFiltersCache(extTSIDs, tfKeyExtBuf.B) extDB.putToTagFiltersCache(qtChild, extTSIDs, tfKeyExtBuf.B)
}) { }) {
if err != nil { if err != nil {
return nil, err return nil, err
@ -1818,23 +1839,19 @@ func (db *indexDB) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr Ti
// Merge localTSIDs with extTSIDs. // Merge localTSIDs with extTSIDs.
tsids = mergeTSIDs(localTSIDs, extTSIDs) tsids = mergeTSIDs(localTSIDs, extTSIDs)
qt.Printf("merge %d tsids from the current indexdb with %d tsids from the previous indexdb; result: %d tsids", len(localTSIDs), len(extTSIDs), len(tsids))
// Sort the found tsids, since they must be passed to TSID search // Sort the found tsids, since they must be passed to TSID search
// in the sorted order. // in the sorted order.
sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) }) sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) })
qt.Printf("sort the found %d series ids", len(tsids)) qt.Printf("sort %d tsids", len(tsids))
// Store TSIDs in the cache. // Store TSIDs in the cache.
db.putToTagFiltersCache(tsids, tfKeyBuf.B) db.putToTagFiltersCache(qt, tsids, tfKeyBuf.B)
qt.Printf("store the found %d series ids in cache; they occupy %d bytes of memory", len(tsids), memorySizeForTSIDs(tsids))
return tsids, err return tsids, err
} }
func memorySizeForTSIDs(tsids []TSID) int {
return len(tsids) * int(unsafe.Sizeof(TSID{}))
}
var tagFiltersKeyBufPool bytesutil.ByteBufferPool var tagFiltersKeyBufPool bytesutil.ByteBufferPool
func (is *indexSearch) getTSIDByMetricName(dst *TSID, metricName []byte) error { func (is *indexSearch) getTSIDByMetricName(dst *TSID, metricName []byte) error {
@ -2013,7 +2030,7 @@ func (is *indexSearch) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, t
i++ i++
} }
tsids = tsids[:i] tsids = tsids[:i]
qt.Printf("load %d series ids from %d metric ids", len(tsids), len(metricIDs)) qt.Printf("load %d tsids from %d metric ids", len(tsids), len(metricIDs))
// Do not sort the found tsids, since they will be sorted later. // Do not sort the found tsids, since they will be sorted later.
return tsids, nil return tsids, nil
@ -2045,9 +2062,13 @@ func (is *indexSearch) getTSIDByMetricID(dst *TSID, metricID uint64) error {
// updateMetricIDsByMetricNameMatch matches metricName values for the given srcMetricIDs against tfs // updateMetricIDsByMetricNameMatch matches metricName values for the given srcMetricIDs against tfs
// and adds matching metrics to metricIDs. // and adds matching metrics to metricIDs.
func (is *indexSearch) updateMetricIDsByMetricNameMatch(metricIDs, srcMetricIDs *uint64set.Set, tfs []*tagFilter) error { func (is *indexSearch) updateMetricIDsByMetricNameMatch(qt *querytracer.Tracer, metricIDs, srcMetricIDs *uint64set.Set, tfs []*tagFilter) error {
qt = qt.NewChild("filter out %d metric ids with filters=%s", srcMetricIDs.Len(), tfs)
defer qt.Done()
// sort srcMetricIDs in order to speed up Seek below. // sort srcMetricIDs in order to speed up Seek below.
sortedMetricIDs := srcMetricIDs.AppendTo(nil) sortedMetricIDs := srcMetricIDs.AppendTo(nil)
qt.Printf("sort %d metric ids", len(sortedMetricIDs))
kb := &is.kb kb := &is.kb
kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs) kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
@ -2087,6 +2108,7 @@ func (is *indexSearch) updateMetricIDsByMetricNameMatch(metricIDs, srcMetricIDs
} }
metricIDs.Add(metricID) metricIDs.Add(metricID)
} }
qt.Printf("apply filters %s; resulting metric ids: %d", tfs, metricIDs.Len())
return nil return nil
} }
@ -2247,11 +2269,10 @@ func matchTagFilters(mn *MetricName, tfs []*tagFilter, kb *bytesutil.ByteBuffer)
} }
func (is *indexSearch) searchMetricIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]uint64, error) { func (is *indexSearch) searchMetricIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]uint64, error) {
metricIDs, err := is.searchMetricIDsInternal(tfss, tr, maxMetrics) metricIDs, err := is.searchMetricIDsInternal(qt, tfss, tr, maxMetrics)
if err != nil { if err != nil {
return nil, err return nil, err
} }
qt.Printf("found %d matching metric ids", metricIDs.Len())
if metricIDs.Len() == 0 { if metricIDs.Len() == 0 {
// Nothing found // Nothing found
return nil, nil return nil, nil
@ -2269,14 +2290,16 @@ func (is *indexSearch) searchMetricIDs(qt *querytracer.Tracer, tfss []*TagFilter
metricIDsFiltered = append(metricIDsFiltered, metricID) metricIDsFiltered = append(metricIDsFiltered, metricID)
} }
} }
qt.Printf("%d metric ids after removing deleted metric ids", len(metricIDsFiltered)) qt.Printf("left %d metric ids after removing deleted metric ids", len(metricIDsFiltered))
sortedMetricIDs = metricIDsFiltered sortedMetricIDs = metricIDsFiltered
} }
return sortedMetricIDs, nil return sortedMetricIDs, nil
} }
func (is *indexSearch) searchMetricIDsInternal(tfss []*TagFilters, tr TimeRange, maxMetrics int) (*uint64set.Set, error) { func (is *indexSearch) searchMetricIDsInternal(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int) (*uint64set.Set, error) {
qt = qt.NewChild("search for metric ids: filters=%s, timeRange=%s, maxMetrics=%d", tfss, &tr, maxMetrics)
defer qt.Done()
metricIDs := &uint64set.Set{} metricIDs := &uint64set.Set{}
for _, tfs := range tfss { for _, tfs := range tfss {
if len(tfs.tfs) == 0 { if len(tfs.tfs) == 0 {
@ -2286,7 +2309,11 @@ func (is *indexSearch) searchMetricIDsInternal(tfss []*TagFilters, tr TimeRange,
logger.Panicf(`BUG: cannot add {__name__!=""} filter: %s`, err) logger.Panicf(`BUG: cannot add {__name__!=""} filter: %s`, err)
} }
} }
if err := is.updateMetricIDsForTagFilters(metricIDs, tfs, tr, maxMetrics+1); err != nil { qtChild := qt.NewChild("update metric ids: filters=%s, timeRange=%s", tfs, &tr)
prevMetricIDsLen := metricIDs.Len()
err := is.updateMetricIDsForTagFilters(qtChild, metricIDs, tfs, tr, maxMetrics+1)
qtChild.Donef("updated %d metric ids", metricIDs.Len()-prevMetricIDsLen)
if err != nil {
return nil, err return nil, err
} }
if metricIDs.Len() > maxMetrics { if metricIDs.Len() > maxMetrics {
@ -2297,8 +2324,8 @@ func (is *indexSearch) searchMetricIDsInternal(tfss []*TagFilters, tr TimeRange,
return metricIDs, nil return metricIDs, nil
} }
func (is *indexSearch) updateMetricIDsForTagFilters(metricIDs *uint64set.Set, tfs *TagFilters, tr TimeRange, maxMetrics int) error { func (is *indexSearch) updateMetricIDsForTagFilters(qt *querytracer.Tracer, metricIDs *uint64set.Set, tfs *TagFilters, tr TimeRange, maxMetrics int) error {
err := is.tryUpdatingMetricIDsForDateRange(metricIDs, tfs, tr, maxMetrics) err := is.tryUpdatingMetricIDsForDateRange(qt, metricIDs, tfs, tr, maxMetrics)
if err == nil { if err == nil {
// Fast path: found metricIDs by date range. // Fast path: found metricIDs by date range.
return nil return nil
@ -2308,8 +2335,9 @@ func (is *indexSearch) updateMetricIDsForTagFilters(metricIDs *uint64set.Set, tf
} }
// Slow path - fall back to search in the global inverted index. // Slow path - fall back to search in the global inverted index.
qt.Printf("cannot find metric ids in per-day index; fall back to global index")
atomic.AddUint64(&is.db.globalSearchCalls, 1) atomic.AddUint64(&is.db.globalSearchCalls, 1)
m, err := is.getMetricIDsForDateAndFilters(0, tfs, maxMetrics) m, err := is.getMetricIDsForDateAndFilters(qt, 0, tfs, maxMetrics)
if err != nil { if err != nil {
if errors.Is(err, errFallbackToGlobalSearch) { if errors.Is(err, errFallbackToGlobalSearch) {
return fmt.Errorf("the number of matching timeseries exceeds %d; either narrow down the search "+ return fmt.Errorf("the number of matching timeseries exceeds %d; either narrow down the search "+
@ -2321,7 +2349,7 @@ func (is *indexSearch) updateMetricIDsForTagFilters(metricIDs *uint64set.Set, tf
return nil return nil
} }
func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, maxMetrics int, maxLoopsCount int64) (*uint64set.Set, int64, error) { func (is *indexSearch) getMetricIDsForTagFilter(qt *querytracer.Tracer, tf *tagFilter, maxMetrics int, maxLoopsCount int64) (*uint64set.Set, int64, error) {
if tf.isNegative { if tf.isNegative {
logger.Panicf("BUG: isNegative must be false") logger.Panicf("BUG: isNegative must be false")
} }
@ -2329,6 +2357,7 @@ func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, maxMetrics int, m
if len(tf.orSuffixes) > 0 { if len(tf.orSuffixes) > 0 {
// Fast path for orSuffixes - seek for rows for each value from orSuffixes. // Fast path for orSuffixes - seek for rows for each value from orSuffixes.
loopsCount, err := is.updateMetricIDsForOrSuffixes(tf, metricIDs, maxMetrics, maxLoopsCount) loopsCount, err := is.updateMetricIDsForOrSuffixes(tf, metricIDs, maxMetrics, maxLoopsCount)
qt.Printf("found %d metric ids for filter={%s} using exact search; spent %d loops", metricIDs.Len(), tf, loopsCount)
if err != nil { if err != nil {
return nil, loopsCount, fmt.Errorf("error when searching for metricIDs for tagFilter in fast path: %w; tagFilter=%s", err, tf) return nil, loopsCount, fmt.Errorf("error when searching for metricIDs for tagFilter in fast path: %w; tagFilter=%s", err, tf)
} }
@ -2337,6 +2366,7 @@ func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, maxMetrics int, m
// Slow path - scan for all the rows with the given prefix. // Slow path - scan for all the rows with the given prefix.
loopsCount, err := is.getMetricIDsForTagFilterSlow(tf, metricIDs.Add, maxLoopsCount) loopsCount, err := is.getMetricIDsForTagFilterSlow(tf, metricIDs.Add, maxLoopsCount)
qt.Printf("found %d metric ids for filter={%s} using prefix search; spent %d loops", metricIDs.Len(), tf, loopsCount)
if err != nil { if err != nil {
return nil, loopsCount, fmt.Errorf("error when searching for metricIDs for tagFilter in slow path: %w; tagFilter=%s", err, tf) return nil, loopsCount, fmt.Errorf("error when searching for metricIDs for tagFilter in slow path: %w; tagFilter=%s", err, tf)
} }
@ -2497,7 +2527,7 @@ var errFallbackToGlobalSearch = errors.New("fall back from per-day index search
const maxDaysForPerDaySearch = 40 const maxDaysForPerDaySearch = 40
func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set, tfs *TagFilters, tr TimeRange, maxMetrics int) error { func (is *indexSearch) tryUpdatingMetricIDsForDateRange(qt *querytracer.Tracer, metricIDs *uint64set.Set, tfs *TagFilters, tr TimeRange, maxMetrics int) error {
atomic.AddUint64(&is.db.dateRangeSearchCalls, 1) atomic.AddUint64(&is.db.dateRangeSearchCalls, 1)
minDate := uint64(tr.MinTimestamp) / msecPerDay minDate := uint64(tr.MinTimestamp) / msecPerDay
maxDate := uint64(tr.MaxTimestamp) / msecPerDay maxDate := uint64(tr.MaxTimestamp) / msecPerDay
@ -2507,7 +2537,7 @@ func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set
} }
if minDate == maxDate { if minDate == maxDate {
// Fast path - query only a single date. // Fast path - query only a single date.
m, err := is.getMetricIDsForDateAndFilters(minDate, tfs, maxMetrics) m, err := is.getMetricIDsForDateAndFilters(qt, minDate, tfs, maxMetrics)
if err != nil { if err != nil {
return err return err
} }
@ -2517,15 +2547,21 @@ func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set
} }
// Slower path - search for metricIDs for each day in parallel. // Slower path - search for metricIDs for each day in parallel.
qt = qt.NewChild("parallel search for metric ids in per-day index: filters=%s, dayRange=[%d..%d]", tfs, minDate, maxDate)
defer qt.Done()
wg := getWaitGroup() wg := getWaitGroup()
var errGlobal error var errGlobal error
var mu sync.Mutex // protects metricIDs + errGlobal vars from concurrent access below var mu sync.Mutex // protects metricIDs + errGlobal vars from concurrent access below
for minDate <= maxDate { for minDate <= maxDate {
qtChild := qt.NewChild("parallel thread for date=%d", minDate)
wg.Add(1) wg.Add(1)
go func(date uint64) { go func(date uint64) {
defer wg.Done() defer func() {
qtChild.Done()
wg.Done()
}()
isLocal := is.db.getIndexSearch(is.accountID, is.projectID, is.deadline) isLocal := is.db.getIndexSearch(is.accountID, is.projectID, is.deadline)
m, err := isLocal.getMetricIDsForDateAndFilters(date, tfs, maxMetrics) m, err := isLocal.getMetricIDsForDateAndFilters(qtChild, date, tfs, maxMetrics)
is.db.putIndexSearch(isLocal) is.db.putIndexSearch(isLocal)
mu.Lock() mu.Lock()
defer mu.Unlock() defer mu.Unlock()
@ -2552,7 +2588,9 @@ func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set
return nil return nil
} }
func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilters, maxMetrics int) (*uint64set.Set, error) { func (is *indexSearch) getMetricIDsForDateAndFilters(qt *querytracer.Tracer, date uint64, tfs *TagFilters, maxMetrics int) (*uint64set.Set, error) {
qt = qt.NewChild("search for metric ids on a particular day: filters=%s, date=%d, maxMetrics=%d", tfs, date, maxMetrics)
defer qt.Done()
// Sort tfs by loopsCount needed for performing each filter. // Sort tfs by loopsCount needed for performing each filter.
// This stats is usually collected from the previous queries. // This stats is usually collected from the previous queries.
// This way we limit the amount of work below by applying fast filters at first. // This way we limit the amount of work below by applying fast filters at first.
@ -2604,7 +2642,8 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
} }
} }
// Populate metricIDs for the first non-negative filter with the cost smaller than maxLoopsCount. // Populate metricIDs for the first non-negative filter with the smallest cost.
qtChild := qt.NewChild("search for the first non-negative filter with the smallest cost")
var metricIDs *uint64set.Set var metricIDs *uint64set.Set
tfwsRemaining := tfws[:0] tfwsRemaining := tfws[:0]
maxDateMetrics := intMax maxDateMetrics := intMax
@ -2618,10 +2657,11 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
continue continue
} }
maxLoopsCount := getFirstPositiveLoopsCount(tfws[i+1:]) maxLoopsCount := getFirstPositiveLoopsCount(tfws[i+1:])
m, loopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, tfs.commonPrefix, maxDateMetrics, maxLoopsCount) m, loopsCount, err := is.getMetricIDsForDateTagFilter(qtChild, tf, date, tfs.commonPrefix, maxDateMetrics, maxLoopsCount)
if err != nil { if err != nil {
if errors.Is(err, errTooManyLoops) { if errors.Is(err, errTooManyLoops) {
// The tf took too many loops compared to the next filter. Postpone applying this filter. // The tf took too many loops compared to the next filter. Postpone applying this filter.
qtChild.Printf("the filter={%s} took more than %d loops; postpone it", tf, maxLoopsCount)
storeLoopsCount(&tfw, 2*loopsCount) storeLoopsCount(&tfw, 2*loopsCount)
tfwsRemaining = append(tfwsRemaining, tfw) tfwsRemaining = append(tfwsRemaining, tfw)
continue continue
@ -2632,6 +2672,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
} }
if m.Len() >= maxDateMetrics { if m.Len() >= maxDateMetrics {
// Too many time series found by a single tag filter. Move the filter to the end of list. // Too many time series found by a single tag filter. Move the filter to the end of list.
qtChild.Printf("the filter={%s} matches at least %d series; postpone it", tf, maxDateMetrics)
storeLoopsCount(&tfw, int64Max-1) storeLoopsCount(&tfw, int64Max-1)
tfwsRemaining = append(tfwsRemaining, tfw) tfwsRemaining = append(tfwsRemaining, tfw)
continue continue
@ -2639,14 +2680,17 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
storeLoopsCount(&tfw, loopsCount) storeLoopsCount(&tfw, loopsCount)
metricIDs = m metricIDs = m
tfwsRemaining = append(tfwsRemaining, tfws[i+1:]...) tfwsRemaining = append(tfwsRemaining, tfws[i+1:]...)
qtChild.Printf("the filter={%s} matches less than %d series (actually %d series); use it", tf, maxDateMetrics, metricIDs.Len())
break break
} }
qtChild.Done()
tfws = tfwsRemaining tfws = tfwsRemaining
if metricIDs == nil { if metricIDs == nil {
// All the filters in tfs are negative or match too many time series. // All the filters in tfs are negative or match too many time series.
// Populate all the metricIDs for the given (date), // Populate all the metricIDs for the given (date),
// so later they can be filtered out with negative filters. // so later they can be filtered out with negative filters.
qt.Printf("all the filters are negative or match more than %d time series; fall back to searching for all the metric ids", maxDateMetrics)
m, err := is.getMetricIDsForDate(date, maxDateMetrics) m, err := is.getMetricIDsForDate(date, maxDateMetrics)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot obtain all the metricIDs: %w", err) return nil, fmt.Errorf("cannot obtain all the metricIDs: %w", err)
@ -2656,6 +2700,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
return nil, errFallbackToGlobalSearch return nil, errFallbackToGlobalSearch
} }
metricIDs = m metricIDs = m
qt.Printf("found %d metric ids", metricIDs.Len())
} }
sort.Slice(tfws, func(i, j int) bool { sort.Slice(tfws, func(i, j int) bool {
@ -2685,6 +2730,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
// when the initial tag filters significantly reduce the number of found metricIDs, // when the initial tag filters significantly reduce the number of found metricIDs,
// so the remaining filters could be performed via much faster metricName matching instead // so the remaining filters could be performed via much faster metricName matching instead
// of slow selecting of matching metricIDs. // of slow selecting of matching metricIDs.
qtChild = qt.NewChild("intersect the remaining %d filters with the found %d metric ids", len(tfws), metricIDs.Len())
var tfsPostponed []*tagFilter var tfsPostponed []*tagFilter
for i, tfw := range tfws { for i, tfw := range tfws {
tf := tfw.tf tf := tfw.tf
@ -2705,10 +2751,11 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
if maxLoopsCount == int64Max { if maxLoopsCount == int64Max {
maxLoopsCount = int64(metricIDsLen) * loopsCountPerMetricNameMatch maxLoopsCount = int64(metricIDsLen) * loopsCountPerMetricNameMatch
} }
m, filterLoopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, tfs.commonPrefix, intMax, maxLoopsCount) m, filterLoopsCount, err := is.getMetricIDsForDateTagFilter(qtChild, tf, date, tfs.commonPrefix, intMax, maxLoopsCount)
if err != nil { if err != nil {
if errors.Is(err, errTooManyLoops) { if errors.Is(err, errTooManyLoops) {
// Postpone tf, since it took more loops than the next filter may need. // Postpone tf, since it took more loops than the next filter may need.
qtChild.Printf("postpone filter={%s}, since it took more than %d loops", tf, maxLoopsCount)
storeFilterLoopsCount(&tfw, 2*filterLoopsCount) storeFilterLoopsCount(&tfw, 2*filterLoopsCount)
tfsPostponed = append(tfsPostponed, tf) tfsPostponed = append(tfsPostponed, tf)
continue continue
@ -2720,22 +2767,28 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
storeFilterLoopsCount(&tfw, filterLoopsCount) storeFilterLoopsCount(&tfw, filterLoopsCount)
if tf.isNegative || tf.isEmptyMatch { if tf.isNegative || tf.isEmptyMatch {
metricIDs.Subtract(m) metricIDs.Subtract(m)
qtChild.Printf("subtract %d metric ids from the found %d metric ids for filter={%s}; resulting metric ids: %d", m.Len(), metricIDsLen, tf, metricIDs.Len())
} else { } else {
metricIDs.Intersect(m) metricIDs.Intersect(m)
qtChild.Printf("intersect %d metric ids with the found %d metric ids for filter={%s}; resulting metric ids: %d", m.Len(), metricIDsLen, tf, metricIDs.Len())
} }
} }
qtChild.Done()
if metricIDs.Len() == 0 { if metricIDs.Len() == 0 {
// There is no need in applying tfsPostponed, since the result is empty. // There is no need in applying tfsPostponed, since the result is empty.
qt.Printf("found zero metric ids")
return nil, nil return nil, nil
} }
if len(tfsPostponed) > 0 { if len(tfsPostponed) > 0 {
// Apply the postponed filters via metricName match. // Apply the postponed filters via metricName match.
qt.Printf("apply postponed filters=%s to %d metrics ids", tfsPostponed, metricIDs.Len())
var m uint64set.Set var m uint64set.Set
if err := is.updateMetricIDsByMetricNameMatch(&m, metricIDs, tfsPostponed); err != nil { if err := is.updateMetricIDsByMetricNameMatch(qt, &m, metricIDs, tfsPostponed); err != nil {
return nil, err return nil, err
} }
return &m, nil return &m, nil
} }
qt.Printf("found %d metric ids", metricIDs.Len())
return metricIDs, nil return metricIDs, nil
} }
@ -2844,6 +2897,27 @@ func marshalCompositeTagKey(dst, name, key []byte) []byte {
return dst return dst
} }
// unmarshalCompositeTagKey decodes a composite tag key produced by
// marshalCompositeTagKey and returns the (metric name, tag key) pair.
//
// The expected layout is: compositeTagKeyPrefix byte, varint-encoded
// metric name length, metric name bytes, then the remaining tag key bytes.
func unmarshalCompositeTagKey(src []byte) ([]byte, []byte, error) {
	if len(src) == 0 {
		return nil, nil, fmt.Errorf("composite tag key cannot be empty")
	}
	if src[0] != compositeTagKeyPrefix {
		return nil, nil, fmt.Errorf("missing composite tag key prefix in %q", src)
	}
	// Skip the prefix byte and decode the metric name length.
	tail, nameLen, err := encoding.UnmarshalVarUint64(src[1:])
	if err != nil {
		return nil, nil, fmt.Errorf("cannot unmarshal metric name length from composite tag key: %w", err)
	}
	if uint64(len(tail)) < nameLen {
		return nil, nil, fmt.Errorf("missing metric name with length %d in composite tag key %q", nameLen, tail)
	}
	return tail[:nameLen], tail[nameLen:], nil
}
func reverseBytes(dst, src []byte) []byte { func reverseBytes(dst, src []byte) []byte {
for i := len(src) - 1; i >= 0; i-- { for i := len(src) - 1; i >= 0; i-- {
dst = append(dst, src[i]) dst = append(dst, src[i])
@ -2869,7 +2943,10 @@ func (is *indexSearch) hasDateMetricID(date, metricID uint64) (bool, error) {
return true, nil return true, nil
} }
func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64, commonPrefix []byte, maxMetrics int, maxLoopsCount int64) (*uint64set.Set, int64, error) { func (is *indexSearch) getMetricIDsForDateTagFilter(qt *querytracer.Tracer, tf *tagFilter, date uint64, commonPrefix []byte,
maxMetrics int, maxLoopsCount int64) (*uint64set.Set, int64, error) {
qt = qt.NewChild("get metric ids for filter and date: filter={%s}, date=%d, maxMetrics=%d, maxLoopsCount=%d", tf, date, maxMetrics, maxLoopsCount)
defer qt.Done()
if !bytes.HasPrefix(tf.prefix, commonPrefix) { if !bytes.HasPrefix(tf.prefix, commonPrefix) {
logger.Panicf("BUG: unexpected tf.prefix %q; must start with commonPrefix %q", tf.prefix, commonPrefix) logger.Panicf("BUG: unexpected tf.prefix %q; must start with commonPrefix %q", tf.prefix, commonPrefix)
} }
@ -2888,7 +2965,7 @@ func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64,
tfNew := *tf tfNew := *tf
tfNew.isNegative = false // isNegative for the original tf is handled by the caller. tfNew.isNegative = false // isNegative for the original tf is handled by the caller.
tfNew.prefix = kb.B tfNew.prefix = kb.B
metricIDs, loopsCount, err := is.getMetricIDsForTagFilter(&tfNew, maxMetrics, maxLoopsCount) metricIDs, loopsCount, err := is.getMetricIDsForTagFilter(qt, &tfNew, maxMetrics, maxLoopsCount)
if err != nil { if err != nil {
return nil, loopsCount, err return nil, loopsCount, err
} }
@ -2900,16 +2977,19 @@ func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64,
// This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601 // This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601
// See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/395 // See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/395
maxLoopsCount -= loopsCount maxLoopsCount -= loopsCount
tfNew = tagFilter{} var tfGross tagFilter
if err := tfNew.Init(prefix, tf.key, []byte(".+"), false, true); err != nil { if err := tfGross.Init(prefix, tf.key, []byte(".+"), false, true); err != nil {
logger.Panicf(`BUG: cannot init tag filter: {%q=~".+"}: %s`, tf.key, err) logger.Panicf(`BUG: cannot init tag filter: {%q=~".+"}: %s`, tf.key, err)
} }
m, lc, err := is.getMetricIDsForTagFilter(&tfNew, maxMetrics, maxLoopsCount) m, lc, err := is.getMetricIDsForTagFilter(qt, &tfGross, maxMetrics, maxLoopsCount)
loopsCount += lc loopsCount += lc
if err != nil { if err != nil {
return nil, loopsCount, err return nil, loopsCount, err
} }
mLen := m.Len()
m.Subtract(metricIDs) m.Subtract(metricIDs)
qt.Printf("subtract %d metric ids for filter={%s} from %d metric ids for filter={%s}", metricIDs.Len(), &tfNew, mLen, &tfGross)
qt.Printf("found %d metric ids, spent %d loops", m.Len(), loopsCount)
return m, loopsCount, nil return m, loopsCount, nil
} }

View file

@ -1850,7 +1850,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
} }
// Check GetTSDBStatusWithFiltersForDate with nil filters. // Check GetTSDBStatusWithFiltersForDate with nil filters.
status, err := db.GetTSDBStatusWithFiltersForDate(accountID, projectID, nil, baseDate, 5, 1e6, noDeadline) status, err := db.GetTSDBStatusWithFiltersForDate(nil, accountID, projectID, nil, baseDate, 5, 1e6, noDeadline)
if err != nil { if err != nil {
t.Fatalf("error in GetTSDBStatusWithFiltersForDate with nil filters: %s", err) t.Fatalf("error in GetTSDBStatusWithFiltersForDate with nil filters: %s", err)
} }
@ -1926,7 +1926,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
if err := tfs.Add([]byte("day"), []byte("0"), false, false); err != nil { if err := tfs.Add([]byte("day"), []byte("0"), false, false); err != nil {
t.Fatalf("cannot add filter: %s", err) t.Fatalf("cannot add filter: %s", err)
} }
status, err = db.GetTSDBStatusWithFiltersForDate(accountID, projectID, []*TagFilters{tfs}, baseDate, 5, 1e6, noDeadline) status, err = db.GetTSDBStatusWithFiltersForDate(nil, accountID, projectID, []*TagFilters{tfs}, baseDate, 5, 1e6, noDeadline)
if err != nil { if err != nil {
t.Fatalf("error in GetTSDBStatusWithFiltersForDate: %s", err) t.Fatalf("error in GetTSDBStatusWithFiltersForDate: %s", err)
} }
@ -1955,7 +1955,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
if err := tfs.Add([]byte("uniqueid"), []byte("0|1|3"), false, true); err != nil { if err := tfs.Add([]byte("uniqueid"), []byte("0|1|3"), false, true); err != nil {
t.Fatalf("cannot add filter: %s", err) t.Fatalf("cannot add filter: %s", err)
} }
status, err = db.GetTSDBStatusWithFiltersForDate(accountID, projectID, []*TagFilters{tfs}, baseDate, 5, 1e6, noDeadline) status, err = db.GetTSDBStatusWithFiltersForDate(nil, accountID, projectID, []*TagFilters{tfs}, baseDate, 5, 1e6, noDeadline)
if err != nil { if err != nil {
t.Fatalf("error in GetTSDBStatusWithFiltersForDate: %s", err) t.Fatalf("error in GetTSDBStatusWithFiltersForDate: %s", err)
} }

View file

@ -1124,7 +1124,7 @@ func nextRetentionDuration(retentionMsecs int64) time.Duration {
// SearchMetricNames returns metric names matching the given tfss on the given tr. // SearchMetricNames returns metric names matching the given tfss on the given tr.
func (s *Storage) SearchMetricNames(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]MetricName, error) { func (s *Storage) SearchMetricNames(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]MetricName, error) {
qt = qt.NewChild("search for matching metric names") qt = qt.NewChild("search for matching metric names: filters=%s, timeRange=%s", tfss, &tr)
defer qt.Done() defer qt.Done()
tsids, err := s.searchTSIDs(qt, tfss, tr, maxMetrics, deadline) tsids, err := s.searchTSIDs(qt, tfss, tr, maxMetrics, deadline)
if err != nil { if err != nil {
@ -1169,7 +1169,7 @@ func (s *Storage) SearchMetricNames(qt *querytracer.Tracer, tfss []*TagFilters,
// searchTSIDs returns sorted TSIDs for the given tfss and the given tr. // searchTSIDs returns sorted TSIDs for the given tfss and the given tr.
func (s *Storage) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) { func (s *Storage) searchTSIDs(qt *querytracer.Tracer, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) {
qt = qt.NewChild("search for matching series ids") qt = qt.NewChild("search for matching tsids: filters=%s, timeRange=%s", tfss, &tr)
defer qt.Done() defer qt.Done()
// Do not cache tfss -> tsids here, since the caching is performed // Do not cache tfss -> tsids here, since the caching is performed
// on idb level. // on idb level.
@ -1221,7 +1221,7 @@ var (
// //
// This should speed-up further searchMetricNameWithCache calls for metricIDs from tsids. // This should speed-up further searchMetricNameWithCache calls for metricIDs from tsids.
func (s *Storage) prefetchMetricNames(qt *querytracer.Tracer, tsids []TSID, deadline uint64) error { func (s *Storage) prefetchMetricNames(qt *querytracer.Tracer, tsids []TSID, deadline uint64) error {
qt = qt.NewChild("prefetch metric names for %d series ids", len(tsids)) qt = qt.NewChild("prefetch metric names for %d tsids", len(tsids))
defer qt.Done() defer qt.Done()
if len(tsids) == 0 { if len(tsids) == 0 {
qt.Printf("nothing to prefetch") qt.Printf("nothing to prefetch")
@ -1583,8 +1583,8 @@ func (s *Storage) GetSeriesCount(accountID, projectID uint32, deadline uint64) (
} }
// GetTSDBStatusWithFiltersForDate returns TSDB status data for /api/v1/status/tsdb with match[] filters and the given (accountID, projectID). // GetTSDBStatusWithFiltersForDate returns TSDB status data for /api/v1/status/tsdb with match[] filters and the given (accountID, projectID).
func (s *Storage) GetTSDBStatusWithFiltersForDate(accountID, projectID uint32, tfss []*TagFilters, date uint64, topN, maxMetrics int, deadline uint64) (*TSDBStatus, error) { func (s *Storage) GetTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, accountID, projectID uint32, tfss []*TagFilters, date uint64, topN, maxMetrics int, deadline uint64) (*TSDBStatus, error) {
return s.idb().GetTSDBStatusWithFiltersForDate(accountID, projectID, tfss, date, topN, maxMetrics, deadline) return s.idb().GetTSDBStatusWithFiltersForDate(qt, accountID, projectID, tfss, date, topN, maxMetrics, deadline)
} }
// MetricRow is a metric to insert into storage. // MetricRow is a metric to insert into storage.

View file

@ -217,18 +217,11 @@ func (tfs *TagFilters) addTagFilter() *tagFilter {
// String returns human-readable value for tfs. // String returns human-readable value for tfs.
func (tfs *TagFilters) String() string { func (tfs *TagFilters) String() string {
var bb bytes.Buffer a := make([]string, 0, len(tfs.tfs))
fmt.Fprintf(&bb, "AccountID=%d, ProjectID=%d ", tfs.accountID, tfs.projectID) for _, tf := range tfs.tfs {
if len(tfs.tfs) == 0 { a = append(a, tf.String())
fmt.Fprintf(&bb, "{}")
return bb.String()
} }
fmt.Fprintf(&bb, "{%s", tfs.tfs[0].String()) return fmt.Sprintf("{AccountID=%d,ProjectID=%d,%s}", tfs.accountID, tfs.projectID, strings.Join(a, ","))
for i := range tfs.tfs[1:] {
fmt.Fprintf(&bb, ", %s", tfs.tfs[i+1].String())
}
fmt.Fprintf(&bb, "}")
return bb.String()
} }
// Reset resets the tf for the given accountID and projectID // Reset resets the tf for the given accountID and projectID
@ -315,6 +308,16 @@ func (tf *tagFilter) String() string {
} else if tf.isRegexp { } else if tf.isRegexp {
op = "=~" op = "=~"
} }
if bytes.Equal(tf.key, graphiteReverseTagKey) {
return fmt.Sprintf("__graphite_reverse__%s%q", op, tf.value)
}
if tf.isComposite() {
metricName, key, err := unmarshalCompositeTagKey(tf.key)
if err != nil {
logger.Panicf("BUG: cannot unmarshal composite tag key: %s", err)
}
return fmt.Sprintf("composite(%s,%s)%s%q", metricName, key, op, tf.value)
}
key := tf.key key := tf.key
if len(key) == 0 { if len(key) == 0 {
key = []byte("__name__") key = []byte("__name__")

View file

@ -1271,7 +1271,7 @@ func TestTagFiltersString(t *testing.T) {
mustAdd("tag_n", "n_value", true, false) mustAdd("tag_n", "n_value", true, false)
mustAdd("tag_re_graphite", "foo\\.bar", false, true) mustAdd("tag_re_graphite", "foo\\.bar", false, true)
s := tfs.String() s := tfs.String()
sExpected := `AccountID=12, ProjectID=34 {__name__="metric_name", tag_re=~"re.value", tag_nre!~"nre.value", tag_n!="n_value", tag_re_graphite="foo.bar"}` sExpected := `{AccountID=12,ProjectID=34,__name__="metric_name",tag_re=~"re.value",tag_nre!~"nre.value",tag_n!="n_value",tag_re_graphite="foo.bar"}`
if s != sExpected { if s != sExpected {
t.Fatalf("unexpected TagFilters.String(); got %q; want %q", s, sExpected) t.Fatalf("unexpected TagFilters.String(); got %q; want %q", s, sExpected)
} }