app/vmselect: accept optional extra_filters[] query args for all the supported Prometheus querying APIs
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1863
This commit is contained in:
parent 9aa9b081a4
commit 54d4a1c959
13 changed files with 400 additions and 260 deletions
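For context on the new query args (this illustration is not part of the commit): an auth proxy such as vmauth or vmgateway can append `extra_label` and `extra_filters[]` to the requests it forwards to VictoriaMetrics. A minimal sketch using only the standard library; the target URL and filter values are made up for illustration:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Original request from the tenant (single-node VictoriaMetrics endpoint, illustrative address).
	u, err := url.Parse("http://victoriametrics:8428/api/v1/query_range?query=up&start=-30m")
	if err != nil {
		panic(err)
	}

	// Filters the proxy wants to enforce for this tenant.
	q := u.Query()
	q.Add("extra_label", "user_id=123")
	q.Add("extra_filters[]", `{env=~"prod|staging"}`)
	u.RawQuery = q.Encode()

	// The rewritten URL now carries both enforced filters;
	// vmselect combines them with the original query.
	fmt.Println(u.String())
}
```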
@@ -32,7 +32,7 @@ func TagsDelSeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWr
var row graphiteparser.Row
var tagsPool []graphiteparser.Tag
ct := startTime.UnixNano() / 1e6
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return fmt.Errorf("cannot setup tag filters: %w", err)
}
@@ -53,8 +53,8 @@ func TagsDelSeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWr
Value: []byte(tag.Value),
})
}
tfs = append(tfs, etfs...)
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, 0, ct, [][]storage.TagFilter{tfs})
tfss := joinTagFilterss(tfs, etfs)
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, 0, ct, tfss)
n, err := netstorage.DeleteSeries(at, sq, deadline)
if err != nil {
return fmt.Errorf("cannot delete series for %q: %w", sq, err)
@@ -183,7 +183,7 @@ func TagsAutoCompleteValuesHandler(startTime time.Time, at *auth.Token, w http.R
exprs := r.Form["expr"]
var tagValues []string
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return fmt.Errorf("cannot setup tag filters: %w", err)
}
@@ -270,7 +270,7 @@ func TagsAutoCompleteTagsHandler(startTime time.Time, at *auth.Token, w http.Res
tagPrefix := r.FormValue("tagPrefix")
exprs := r.Form["expr"]
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return fmt.Errorf("cannot setup tag filters: %w", err)
}
@@ -352,7 +352,7 @@ func TagsFindSeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseW
if len(exprs) == 0 {
return fmt.Errorf("expecting at least one `expr` query arg")
}
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return fmt.Errorf("cannot setup tag filters: %w", err)
}
@@ -484,14 +484,14 @@ func getInt(r *http.Request, argName string) (int, error) {
return n, nil
}

func getSearchQueryForExprs(startTime time.Time, at *auth.Token, etfs []storage.TagFilter, exprs []string) (*storage.SearchQuery, error) {
func getSearchQueryForExprs(startTime time.Time, at *auth.Token, etfs [][]storage.TagFilter, exprs []string) (*storage.SearchQuery, error) {
tfs, err := exprsToTagFilters(exprs)
if err != nil {
return nil, err
}
ct := startTime.UnixNano() / 1e6
tfs = append(tfs, etfs...)
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, 0, ct, [][]storage.TagFilter{tfs})
tfss := joinTagFilterss(tfs, etfs)
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, 0, ct, tfss)
return sq, nil
}

@@ -534,3 +534,7 @@ func parseFilterExpr(s string) (*storage.TagFilter, error) {
IsRegexp: isRegexp,
}, nil
}

func joinTagFilterss(tfs []storage.TagFilter, extraFilters [][]storage.TagFilter) [][]storage.TagFilter {
return searchutils.JoinTagFilterss([][]storage.TagFilter{tfs}, extraFilters)
}
@@ -290,11 +290,11 @@ func ExportHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
if start >= end {
end = start + defaultStep
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
if err := exportHandler(at, w, r, matches, etf, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil {
if err := exportHandler(at, w, r, matches, etfs, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil {
return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err)
}
return nil
@@ -302,7 +302,7 @@ func ExportHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r

var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)

func exportHandler(at *auth.Token, w http.ResponseWriter, r *http.Request, matches []string, etf []storage.TagFilter, start, end int64,
func exportHandler(at *auth.Token, w http.ResponseWriter, r *http.Request, matches []string, etfs [][]storage.TagFilter, start, end int64,
format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error {
writeResponseFunc := WriteExportStdResponse
writeLineFunc := func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
@@ -360,7 +360,7 @@ func exportHandler(at *auth.Token, w http.ResponseWriter, r *http.Request, match
if err != nil {
return err
}
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, start, end, tagFilterss)
w.Header().Set("Content-Type", contentType)
bw := bufferedwriter.Get(w)
@@ -533,7 +533,7 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse form values: %w", err)
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
@@ -541,7 +541,7 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
var labelValues []string
var isPartial bool
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
if len(matches) == 0 && len(etf) == 0 {
if len(matches) == 0 && len(etfs) == 0 {
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
var err error
labelValues, isPartial, err = netstorage.GetLabelValues(at, denyPartialResponse, labelName, deadline)
@@ -584,7 +584,7 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
if err != nil {
return err
}
labelValues, isPartial, err = labelValuesWithMatches(at, denyPartialResponse, labelName, matches, etf, start, end, deadline)
labelValues, isPartial, err = labelValuesWithMatches(at, denyPartialResponse, labelName, matches, etfs, start, end, deadline)
if err != nil {
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
}
@@ -600,7 +600,7 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
return nil
}

func labelValuesWithMatches(at *auth.Token, denyPartialResponse bool, labelName string, matches []string, etf []storage.TagFilter,
func labelValuesWithMatches(at *auth.Token, denyPartialResponse bool, labelName string, matches []string, etfs [][]storage.TagFilter,
start, end int64, deadline searchutils.Deadline) ([]string, bool, error) {
tagFilterss, err := getTagFilterssFromMatches(matches)
if err != nil {
@@ -622,7 +622,7 @@ func labelValuesWithMatches(at *auth.Token, denyPartialResponse bool, labelName
if start >= end {
end = start + defaultStep
}
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
if len(tagFilterss) == 0 {
logger.Panicf("BUG: tagFilterss must be non-empty")
}
@@ -711,7 +711,7 @@ func TSDBStatusHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse form values: %w", err)
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
@@ -744,13 +744,13 @@ func TSDBStatusHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
var status *storage.TSDBStatus
var isPartial bool
if len(matches) == 0 && len(etf) == 0 {
if len(matches) == 0 && len(etfs) == 0 {
status, isPartial, err = netstorage.GetTSDBStatusForDate(at, denyPartialResponse, deadline, date, topN)
if err != nil {
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
}
} else {
status, isPartial, err = tsdbStatusWithMatches(at, denyPartialResponse, matches, etf, date, topN, deadline)
status, isPartial, err = tsdbStatusWithMatches(at, denyPartialResponse, matches, etfs, date, topN, deadline)
if err != nil {
return fmt.Errorf("cannot obtain tsdb status with matches for date=%d, topN=%d: %w", date, topN, err)
}
@@ -766,12 +766,12 @@ func TSDBStatusHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
return nil
}

func tsdbStatusWithMatches(at *auth.Token, denyPartialResponse bool, matches []string, etf []storage.TagFilter, date uint64, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, bool, error) {
func tsdbStatusWithMatches(at *auth.Token, denyPartialResponse bool, matches []string, etfs [][]storage.TagFilter, date uint64, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, bool, error) {
tagFilterss, err := getTagFilterssFromMatches(matches)
if err != nil {
return nil, false, err
}
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
if len(tagFilterss) == 0 {
logger.Panicf("BUG: tagFilterss must be non-empty")
}
@@ -797,7 +797,7 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse form values: %w", err)
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
@@ -805,7 +805,7 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
var labels []string
var isPartial bool
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
if len(matches) == 0 && len(etf) == 0 {
if len(matches) == 0 && len(etfs) == 0 {
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
var err error
labels, isPartial, err = netstorage.GetLabels(at, denyPartialResponse, deadline)
@@ -846,7 +846,7 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
if err != nil {
return err
}
labels, isPartial, err = labelsWithMatches(at, denyPartialResponse, matches, etf, start, end, deadline)
labels, isPartial, err = labelsWithMatches(at, denyPartialResponse, matches, etfs, start, end, deadline)
if err != nil {
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
}
@@ -862,7 +862,7 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
return nil
}

func labelsWithMatches(at *auth.Token, denyPartialResponse bool, matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, bool, error) {
func labelsWithMatches(at *auth.Token, denyPartialResponse bool, matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, bool, error) {
tagFilterss, err := getTagFilterssFromMatches(matches)
if err != nil {
return nil, false, err
@@ -870,7 +870,7 @@ func labelsWithMatches(at *auth.Token, denyPartialResponse bool, matches []strin
if start >= end {
end = start + defaultStep
}
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
if len(tagFilterss) == 0 {
logger.Panicf("BUG: tagFilterss must be non-empty")
}
@@ -1073,7 +1073,7 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
if len(query) > maxQueryLen.N {
return fmt.Errorf("too long query; got %d bytes; mustn't exceed `-search.maxQueryLen=%d` bytes", len(query), maxQueryLen.N)
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
@@ -1088,7 +1088,7 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
if end < start {
end = start
}
if err := exportHandler(at, w, r, []string{childQuery}, etf, start, end, "promapi", 0, false, deadline); err != nil {
if err := exportHandler(at, w, r, []string{childQuery}, etfs, start, end, "promapi", 0, false, deadline); err != nil {
return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
}
queryDuration.UpdateDuration(startTime)
@@ -1104,7 +1104,7 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
start -= offset
end := start
start = end - window
if err := queryRangeHandler(startTime, at, w, childQuery, start, end, step, r, ct, etf); err != nil {
if err := queryRangeHandler(startTime, at, w, childQuery, start, end, step, r, ct, etfs); err != nil {
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
}
queryDuration.UpdateDuration(startTime)
@@ -1122,15 +1122,15 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
queryOffset = 0
}
ec := promql.EvalConfig{
AuthToken: at,
Start: start,
End: start,
Step: step,
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
Deadline: deadline,
LookbackDelta: lookbackDelta,
RoundDigits: getRoundDigits(r),
EnforcedTagFilters: etf,
AuthToken: at,
Start: start,
End: start,
Step: step,
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
Deadline: deadline,
LookbackDelta: lookbackDelta,
RoundDigits: getRoundDigits(r),
EnforcedTagFilterss: etfs,

DenyPartialResponse: searchutils.GetDenyPartialResponse(r),
}
@@ -1182,17 +1182,17 @@ func QueryRangeHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
if err != nil {
return err
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
if err := queryRangeHandler(startTime, at, w, query, start, end, step, r, ct, etf); err != nil {
if err := queryRangeHandler(startTime, at, w, query, start, end, step, r, ct, etfs); err != nil {
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
}
return nil
}

func queryRangeHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etf []storage.TagFilter) error {
func queryRangeHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etfs [][]storage.TagFilter) error {
deadline := searchutils.GetDeadlineForQuery(r, startTime)
mayCache := !searchutils.GetBool(r, "nocache")
lookbackDelta, err := getMaxLookback(r)
@@ -1215,16 +1215,16 @@ func queryRangeHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
}

ec := promql.EvalConfig{
AuthToken: at,
Start: start,
End: end,
Step: step,
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
Deadline: deadline,
MayCache: mayCache,
LookbackDelta: lookbackDelta,
RoundDigits: getRoundDigits(r),
EnforcedTagFilters: etf,
AuthToken: at,
Start: start,
End: end,
Step: step,
QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
Deadline: deadline,
MayCache: mayCache,
LookbackDelta: lookbackDelta,
RoundDigits: getRoundDigits(r),
EnforcedTagFilterss: etfs,

DenyPartialResponse: searchutils.GetDenyPartialResponse(r),
}
@@ -1334,24 +1334,12 @@ func getMaxLookback(r *http.Request) (int64, error) {
return searchutils.GetDuration(r, "max_lookback", d)
}

func addEnforcedFiltersToTagFilterss(dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter) [][]storage.TagFilter {
if len(dstTfss) == 0 {
return [][]storage.TagFilter{
enforcedFilters,
}
}
for i := range dstTfss {
dstTfss[i] = append(dstTfss[i], enforcedFilters...)
}
return dstTfss
}

func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error) {
tagFilterss := make([][]storage.TagFilter, 0, len(matches))
for _, match := range matches {
tagFilters, err := promql.ParseMetricSelector(match)
tagFilters, err := searchutils.ParseMetricSelector(match)
if err != nil {
return nil, fmt.Errorf("cannot parse %q: %w", match, err)
return nil, fmt.Errorf("cannot parse matches[]=%s: %w", match, err)
}
tagFilterss = append(tagFilterss, tagFilters)
}
@@ -1367,11 +1355,11 @@ func getTagFilterssFromRequest(r *http.Request) ([][]storage.TagFilter, error) {
if err != nil {
return nil, err
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return nil, err
}
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
return tagFilterss, nil
}
@@ -6,7 +6,6 @@ import (
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func TestRemoveEmptyValuesAndTimeseries(t *testing.T) {
@@ -196,38 +195,3 @@ func TestAdjustLastPoints(t *testing.T) {
},
})
}

// helper for tests
func tfFromKV(k, v string) storage.TagFilter {
return storage.TagFilter{
Key: []byte(k),
Value: []byte(v),
}
}

func Test_addEnforcedFiltersToTagFilterss(t *testing.T) {
f := func(t *testing.T, dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter, want [][]storage.TagFilter) {
t.Helper()
got := addEnforcedFiltersToTagFilterss(dstTfss, enforcedFilters)
if !reflect.DeepEqual(got, want) {
t.Fatalf("unxpected result for addEnforcedFiltersToTagFilterss, \ngot: %v,\n want: %v", want, got)
}
}
f(t, [][]storage.TagFilter{{tfFromKV("label", "value")}},
nil,
[][]storage.TagFilter{{tfFromKV("label", "value")}})

f(t, nil,
[]storage.TagFilter{tfFromKV("ext-label", "ext-value")},
[][]storage.TagFilter{{tfFromKV("ext-label", "ext-value")}})

f(t, [][]storage.TagFilter{
{tfFromKV("l1", "v1")},
{tfFromKV("l2", "v2")},
},
[]storage.TagFilter{tfFromKV("ext-l1", "v2")},
[][]storage.TagFilter{
{tfFromKV("l1", "v1"), tfFromKV("ext-l1", "v2")},
{tfFromKV("l2", "v2"), tfFromKV("ext-l1", "v2")},
})
}
@@ -106,8 +106,8 @@ type EvalConfig struct {
// How many decimal digits after the point to leave in response.
RoundDigits int

// EnforcedTagFilters used for apply additional label filters to query.
EnforcedTagFilters []storage.TagFilter
// EnforcedTagFilterss may contain additional label filters to use in the query.
EnforcedTagFilterss [][]storage.TagFilter

// Whether to deny partial response.
DenyPartialResponse bool
@@ -130,7 +130,7 @@ func newEvalConfig(src *EvalConfig) *EvalConfig {
ec.MayCache = src.MayCache
ec.LookbackDelta = src.LookbackDelta
ec.RoundDigits = src.RoundDigits
ec.EnforcedTagFilters = src.EnforcedTagFilters
ec.EnforcedTagFilterss = src.EnforcedTagFilterss
ec.DenyPartialResponse = src.DenyPartialResponse
ec.IsPartialResponse = src.IsPartialResponse

@@ -691,16 +691,15 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
}

// Fetch the remaining part of the result.
tfs := toTagFilters(me.LabelFilters)
// append external filters.
tfs = append(tfs, ec.EnforcedTagFilters...)
tfs := searchutils.ToTagFilters(me.LabelFilters)
tfss := searchutils.JoinTagFilterss([][]storage.TagFilter{tfs}, ec.EnforcedTagFilterss)
minTimestamp := start - maxSilenceInterval
if window > ec.Step {
minTimestamp -= window
} else {
minTimestamp -= ec.Step
}
sq := storage.NewSearchQuery(ec.AuthToken.AccountID, ec.AuthToken.ProjectID, minTimestamp, ec.End, [][]storage.TagFilter{tfs})
sq := storage.NewSearchQuery(ec.AuthToken.AccountID, ec.AuthToken.ProjectID, minTimestamp, ec.End, tfss)
rss, isPartial, err := netstorage.ProcessSearchQuery(ec.AuthToken, ec.DenyPartialResponse, sq, true, ec.Deadline)
if err != nil {
return nil, err
@@ -901,26 +900,6 @@ func mulNoOverflow(a, b int64) int64 {
return a * b
}

func toTagFilters(lfs []metricsql.LabelFilter) []storage.TagFilter {
tfs := make([]storage.TagFilter, len(lfs))
for i := range lfs {
toTagFilter(&tfs[i], &lfs[i])
}
return tfs
}

func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
if src.Label != "__name__" {
dst.Key = []byte(src.Label)
} else {
// This is required for storage.Search.
dst.Key = nil
}
dst.Value = []byte(src.Value)
dst.IsRegexp = src.IsRegexp
dst.IsNegative = src.IsNegative
}

func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]float64, []int64) {
if *noStaleMarkers || funcName == "default_rollup" {
// Do not drop Prometheus staleness marks (aka stale NaNs) for default_rollup() function,
@@ -1,9 +1,6 @@
package promql

import (
"fmt"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metricsql"
)

@@ -43,21 +40,3 @@ func IsMetricSelectorWithRollup(s string) (childQuery string, window, offset *me
wrappedQuery := me.AppendString(nil)
return string(wrappedQuery), re.Window, re.Offset
}

// ParseMetricSelector parses s containing PromQL metric selector
// and returns the corresponding LabelFilters.
func ParseMetricSelector(s string) ([]storage.TagFilter, error) {
expr, err := parsePromQLWithCache(s)
if err != nil {
return nil, err
}
me, ok := expr.(*metricsql.MetricExpr)
if !ok {
return nil, fmt.Errorf("expecting metricSelector; got %q", expr.AppendString(nil))
}
if len(me.LabelFilters) == 0 {
return nil, fmt.Errorf("labelFilters cannot be empty")
}
tfs := toTagFilters(me.LabelFilters)
return tfs, nil
}
@@ -1,50 +0,0 @@
package promql

import (
"testing"
)

func TestParseMetricSelectorSuccess(t *testing.T) {
f := func(s string) {
t.Helper()
tfs, err := ParseMetricSelector(s)
if err != nil {
t.Fatalf("unexpected error when parsing %q: %s", s, err)
}
if tfs == nil {
t.Fatalf("expecting non-nil tfs when parsing %q", s)
}
}
f("foo")
f(":foo")
f(" :fo:bar.baz")
f(`a{}`)
f(`{foo="bar"}`)
f(`{:f:oo=~"bar.+"}`)
f(`foo {bar != "baz"}`)
f(` foo { bar !~ "^ddd(x+)$", a="ss", __name__="sffd"} `)
f(`(foo)`)
f(`\п\р\и\в\е\т{\ы="111"}`)
}

func TestParseMetricSelectorError(t *testing.T) {
f := func(s string) {
t.Helper()
tfs, err := ParseMetricSelector(s)
if err == nil {
t.Fatalf("expecting non-nil error when parsing %q", s)
}
if tfs != nil {
t.Fatalf("expecting nil tfs when parsing %q", s)
}
}
f("")
f(`{}`)
f(`foo bar`)
f(`foo+bar`)
f(`sum(bar)`)
f(`x{y}`)
f(`x{y+z}`)
f(`foo[5m]`)
f(`foo offset 5m`)
}
@@ -150,7 +150,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
bb := bbPool.Get()
defer bbPool.Put(bb)

bb.B = marshalRollupResultCacheKey(bb.B[:0], ec.AuthToken, expr, window, ec.Step, ec.EnforcedTagFilters)
bb.B = marshalRollupResultCacheKey(bb.B[:0], ec.AuthToken, expr, window, ec.Step, ec.EnforcedTagFilterss)
metainfoBuf := rrc.c.Get(nil, bb.B)
if len(metainfoBuf) == 0 {
return nil, ec.Start
@@ -170,7 +170,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
if len(compressedResultBuf.B) == 0 {
mi.RemoveKey(key)
metainfoBuf = mi.Marshal(metainfoBuf[:0])
bb.B = marshalRollupResultCacheKey(bb.B[:0], ec.AuthToken, expr, window, ec.Step, ec.EnforcedTagFilters)
bb.B = marshalRollupResultCacheKey(bb.B[:0], ec.AuthToken, expr, window, ec.Step, ec.EnforcedTagFilterss)
rrc.c.Set(bb.B, metainfoBuf)
return nil, ec.Start
}
@@ -273,7 +273,7 @@ func (rrc *rollupResultCache) Put(ec *EvalConfig, expr metricsql.Expr, window in
bb.B = key.Marshal(bb.B[:0])
rrc.c.SetBig(bb.B, compressedResultBuf.B)

bb.B = marshalRollupResultCacheKey(bb.B[:0], ec.AuthToken, expr, window, ec.Step, ec.EnforcedTagFilters)
bb.B = marshalRollupResultCacheKey(bb.B[:0], ec.AuthToken, expr, window, ec.Step, ec.EnforcedTagFilterss)
metainfoBuf := rrc.c.Get(nil, bb.B)
var mi rollupResultCacheMetainfo
if len(metainfoBuf) > 0 {
@@ -303,7 +303,7 @@ var tooBigRollupResults = metrics.NewCounter("vm_too_big_rollup_results_total")
// Increment this value every time the format of the cache changes.
const rollupResultCacheVersion = 8

func marshalRollupResultCacheKey(dst []byte, at *auth.Token, expr metricsql.Expr, window, step int64, filters []storage.TagFilter) []byte {
func marshalRollupResultCacheKey(dst []byte, at *auth.Token, expr metricsql.Expr, window, step int64, etfs [][]storage.TagFilter) []byte {
dst = append(dst, rollupResultCacheVersion)
dst = encoding.MarshalUint64(dst, rollupResultCacheKeyPrefix)
dst = encoding.MarshalUint32(dst, at.AccountID)
@@ -311,8 +311,13 @@ func marshalRollupResultCacheKey(dst []byte, at *auth.Token, expr metricsql.Expr
dst = encoding.MarshalInt64(dst, window)
dst = encoding.MarshalInt64(dst, step)
dst = expr.AppendString(dst)
for _, f := range filters {
dst = f.Marshal(dst)
for i, etf := range etfs {
for _, f := range etf {
dst = f.Marshal(dst)
}
if i+1 < len(etfs) {
dst = append(dst, '|')
}
}
return dst
}
@@ -11,6 +11,7 @@ import (
"strings"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
@@ -218,7 +219,7 @@ func getAbsentTimeseries(ec *EvalConfig, arg metricsql.Expr) []*timeseries {
if !ok {
return rvs
}
tfs := toTagFilters(me.LabelFilters)
tfs := searchutils.ToTagFilters(me.LabelFilters)
for i := range tfs {
tf := &tfs[i]
if len(tf.Key) == 0 {
@@ -9,9 +9,8 @@ import (
"strings"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metricsql"
)

@@ -208,15 +207,17 @@ func (d *Deadline) String() string {
return fmt.Sprintf("%.3f seconds (elapsed %.3f seconds); the timeout can be adjusted with `%s` command-line flag", d.timeout.Seconds(), elapsed.Seconds(), d.flagHint)
}

// GetEnforcedTagFiltersFromRequest returns additional filters from request.
func GetEnforcedTagFiltersFromRequest(r *http.Request) ([]storage.TagFilter, error) {
// fast path.
extraLabels := r.Form["extra_label"]
if len(extraLabels) == 0 {
return nil, nil
}
tagFilters := make([]storage.TagFilter, 0, len(extraLabels))
for _, match := range extraLabels {
// GetExtraTagFilters returns additional label filters from request.
//
// Label filters can be present in extra_label and extra_filters[] query args.
// They are combined. For example, the following query args:
// extra_label=t1=v1&extra_label=t2=v2&extra_filters[]={env="prod",team="devops"}&extra_filters={env=~"dev|staging",team!="devops"}
// should be translated to the following filters joined with "or":
// {env="prod",team="devops",t1="v1",t2="v2"}
// {env=~"dev|staging",team!="devops",t1="v1",t2="v2"}
func GetExtraTagFilters(r *http.Request) ([][]storage.TagFilter, error) {
var tagFilters []storage.TagFilter
for _, match := range r.Form["extra_label"] {
tmp := strings.SplitN(match, "=", 2)
if len(tmp) != 2 {
return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match)
@@ -226,5 +227,79 @@ func GetEnforcedTagFiltersFromRequest(r *http.Request) ([]storage.TagFilter, err
Value: []byte(tmp[1]),
})
}
return tagFilters, nil
extraFilters := r.Form["extra_filters"]
extraFilters = append(extraFilters, r.Form["extra_filters[]"]...)
if len(extraFilters) == 0 {
if len(tagFilters) == 0 {
return nil, nil
}
return [][]storage.TagFilter{tagFilters}, nil
}
var etfs [][]storage.TagFilter
for _, extraFilter := range extraFilters {
tfs, err := ParseMetricSelector(extraFilter)
if err != nil {
return nil, fmt.Errorf("cannot parse extra_filters=%s: %w", extraFilter, err)
}
tfs = append(tfs, tagFilters...)
etfs = append(etfs, tfs)
}
return etfs, nil
}

// JoinTagFilterss adds etfs to every src filter and returns the result.
func JoinTagFilterss(src, etfs [][]storage.TagFilter) [][]storage.TagFilter {
if len(src) == 0 {
return etfs
}
if len(etfs) == 0 {
return src
}
var dst [][]storage.TagFilter
for _, tf := range src {
for _, etf := range etfs {
tfs := append([]storage.TagFilter{}, tf...)
tfs = append(tfs, etf...)
dst = append(dst, tfs)
}
}
return dst
}

// ParseMetricSelector parses s containing PromQL metric selector and returns the corresponding LabelFilters.
func ParseMetricSelector(s string) ([]storage.TagFilter, error) {
expr, err := metricsql.Parse(s)
if err != nil {
return nil, err
}
me, ok := expr.(*metricsql.MetricExpr)
if !ok {
return nil, fmt.Errorf("expecting metricSelector; got %q", expr.AppendString(nil))
}
if len(me.LabelFilters) == 0 {
return nil, fmt.Errorf("labelFilters cannot be empty")
}
tfs := ToTagFilters(me.LabelFilters)
return tfs, nil
}

// ToTagFilters converts lfs to a slice of storage.TagFilter
func ToTagFilters(lfs []metricsql.LabelFilter) []storage.TagFilter {
tfs := make([]storage.TagFilter, len(lfs))
for i := range lfs {
toTagFilter(&tfs[i], &lfs[i])
}
return tfs
}

func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
if src.Label != "__name__" {
dst.Key = []byte(src.Label)
} else {
// This is required for storage.Search.
dst.Key = nil
}
dst.Value = []byte(src.Value)
dst.IsRegexp = src.IsRegexp
dst.IsNegative = src.IsNegative
}
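A hedged usage sketch (not part of the commit) of the helpers added above; it mirrors the behavior covered by TestGetExtraTagFilters and TestJoinTagFilterss in the test file below. The query-arg values are made up for illustration:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func main() {
	// Build a request carrying both extra_label and extra_filters[] query args,
	// the same way the new unit tests construct it.
	q, err := url.ParseQuery(`extra_label=tenant=42&extra_filters[]={env=~"prod|staging"}&extra_filters[]={team="devops"}`)
	if err != nil {
		panic(err)
	}
	etfs, err := searchutils.GetExtraTagFilters(&http.Request{Form: q})
	if err != nil {
		panic(err)
	}
	// etfs holds one filter list per extra_filters[] entry, each with the
	// extra_label filter appended; the lists are later joined with "or".
	fmt.Printf("extra filters: %+v\n", etfs)

	// JoinTagFilterss produces the cross product of the selectors coming from
	// the query and the enforced extra filters.
	match, err := searchutils.ParseMetricSelector(`{job="vmagent"}`)
	if err != nil {
		panic(err)
	}
	joined := searchutils.JoinTagFilterss([][]storage.TagFilter{match}, etfs)
	fmt.Printf("joined: %d filter lists\n", len(joined)) // 1 selector * 2 extra filters = 2 lists
}
```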
@@ -5,6 +5,7 @@ import (
"net/http"
"net/url"
"reflect"
"strconv"
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
@@ -80,47 +81,238 @@ func TestGetTimeError(t *testing.T) {
f("292277025-08-18T07:12:54.999999998Z")
}

// helper for tests
func tfFromKV(k, v string) storage.TagFilter {
return storage.TagFilter{
Key: []byte(k),
Value: []byte(v),
}
}

func TestGetEnforcedTagFiltersFromRequest(t *testing.T) {
httpReqWithForm := func(tfs []string) *http.Request {
func TestGetExtraTagFilters(t *testing.T) {
httpReqWithForm := func(qs string) *http.Request {
q, err := url.ParseQuery(qs)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
return &http.Request{
Form: map[string][]string{
"extra_label": tfs,
},
Form: q,
}
}
f := func(t *testing.T, r *http.Request, want []storage.TagFilter, wantErr bool) {
f := func(t *testing.T, r *http.Request, want []string, wantErr bool) {
t.Helper()
got, err := GetEnforcedTagFiltersFromRequest(r)
result, err := GetExtraTagFilters(r)
if (err != nil) != wantErr {
t.Fatalf("unexpected error: %v", err)
}
got := tagFilterssToStrings(result)
if !reflect.DeepEqual(got, want) {
t.Fatalf("unxpected result for getEnforcedTagFiltersFromRequest, \ngot: %v,\n want: %v", want, got)
t.Fatalf("unxpected result for GetExtraTagFilters\ngot: %s\nwant: %s", got, want)
}
}

f(t, httpReqWithForm([]string{"label=value"}),
[]storage.TagFilter{
tfFromKV("label", "value"),
},
false)

f(t, httpReqWithForm([]string{"job=vmagent", "dc=gce"}),
[]storage.TagFilter{tfFromKV("job", "vmagent"), tfFromKV("dc", "gce")},
f(t, httpReqWithForm("extra_label=label=value"),
[]string{`{label="value"}`},
false,
)
f(t, httpReqWithForm([]string{"bad_filter"}),
f(t, httpReqWithForm("extra_label=job=vmagent&extra_label=dc=gce"),
[]string{`{job="vmagent",dc="gce"}`},
false,
)
f(t, httpReqWithForm(`extra_filters={foo="bar"}`),
[]string{`{foo="bar"}`},
false,
)
f(t, httpReqWithForm(`extra_filters={foo="bar"}&extra_filters[]={baz!~"aa",x=~"y"}`),
[]string{
`{foo="bar"}`,
`{baz!~"aa",x=~"y"}`,
},
false,
)
f(t, httpReqWithForm(`extra_label=job=vmagent&extra_label=dc=gce&extra_filters={foo="bar"}`),
[]string{`{foo="bar",job="vmagent",dc="gce"}`},
false,
)
f(t, httpReqWithForm(`extra_label=job=vmagent&extra_label=dc=gce&extra_filters[]={foo="bar"}&extra_filters[]={x=~"y|z",a="b"}`),
[]string{
`{foo="bar",job="vmagent",dc="gce"}`,
`{x=~"y|z",a="b",job="vmagent",dc="gce"}`,
},
false,
)
f(t, httpReqWithForm("extra_label=bad_filter"),
nil,
true,
)
f(t, &http.Request{},
nil, false)
f(t, httpReqWithForm(`extra_filters={bad_filter}`),
nil,
true,
)
f(t, httpReqWithForm(`extra_filters[]={bad_filter}`),
nil,
true,
)
f(t, httpReqWithForm(""),
nil,
false,
)
}

func TestParseMetricSelectorSuccess(t *testing.T) {
f := func(s string) {
t.Helper()
tfs, err := ParseMetricSelector(s)
if err != nil {
t.Fatalf("unexpected error when parsing %q: %s", s, err)
}
if tfs == nil {
t.Fatalf("expecting non-nil tfs when parsing %q", s)
}
}
f("foo")
f(":foo")
f(" :fo:bar.baz")
f(`a{}`)
f(`{foo="bar"}`)
f(`{:f:oo=~"bar.+"}`)
f(`foo {bar != "baz"}`)
f(` foo { bar !~ "^ddd(x+)$", a="ss", __name__="sffd"} `)
f(`(foo)`)
f(`\п\р\и\в\е\т{\ы="111"}`)
}

func TestParseMetricSelectorError(t *testing.T) {
f := func(s string) {
t.Helper()
tfs, err := ParseMetricSelector(s)
if err == nil {
t.Fatalf("expecting non-nil error when parsing %q", s)
}
if tfs != nil {
t.Fatalf("expecting nil tfs when parsing %q", s)
}
}
f("")
f(`{}`)
f(`foo bar`)
f(`foo+bar`)
f(`sum(bar)`)
f(`x{y}`)
f(`x{y+z}`)
f(`foo[5m]`)
f(`foo offset 5m`)
}

func TestJoinTagFilterss(t *testing.T) {
f := func(t *testing.T, src, etfs [][]storage.TagFilter, want []string) {
t.Helper()
result := JoinTagFilterss(src, etfs)
got := tagFilterssToStrings(result)
if !reflect.DeepEqual(got, want) {
t.Fatalf("unxpected result for JoinTagFilterss\ngot: %s\nwant: %v", got, want)
}
}
// Single tag filter
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
}, nil, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
})
// Miltiple tag filters
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
mustParseMetricSelector(`{k5=~"v5"}`),
}, nil, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
`{k5=~"v5"}`,
})
// Single extra filter
f(t, nil, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
})
// Multiple extra filters
f(t, nil, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
mustParseMetricSelector(`{k5=~"v5"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
`{k5=~"v5"}`,
})
// Single tag filter and a single extra filter
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
}, [][]storage.TagFilter{
mustParseMetricSelector(`{k5=~"v5"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k5=~"v5"}`,
})
// Multiple tag filters and a single extra filter
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
mustParseMetricSelector(`{k5=~"v5"}`),
}, [][]storage.TagFilter{
mustParseMetricSelector(`{k6=~"v6"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
`{k5=~"v5",k6=~"v6"}`,
})
// Single tag filter and multiple extra filters
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
}, [][]storage.TagFilter{
mustParseMetricSelector(`{k5=~"v5"}`),
mustParseMetricSelector(`{k6=~"v6"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k5=~"v5"}`,
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
})
// Multiple tag filters and multiple extra filters
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
mustParseMetricSelector(`{k5=~"v5"}`),
}, [][]storage.TagFilter{
mustParseMetricSelector(`{k6=~"v6"}`),
mustParseMetricSelector(`{k7=~"v7"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k7=~"v7"}`,
`{k5=~"v5",k6=~"v6"}`,
`{k5=~"v5",k7=~"v7"}`,
})
}

func mustParseMetricSelector(s string) []storage.TagFilter {
tf, err := ParseMetricSelector(s)
if err != nil {
panic(fmt.Errorf("cannot parse %q: %w", s, err))
}
return tf
}

func tagFilterssToStrings(tfss [][]storage.TagFilter) []string {
var a []string
for _, tfs := range tfss {
a = append(a, tagFiltersToString(tfs))
}
return a
}

func tagFiltersToString(tfs []storage.TagFilter) string {
b := []byte("{")
for i, tf := range tfs {
b = append(b, tf.Key...)
if tf.IsNegative {
if tf.IsRegexp {
b = append(b, "!~"...)
} else {
b = append(b, "!="...)
}
} else {
if tf.IsRegexp {
b = append(b, "=~"...)
} else {
b = append(b, "="...)
}
}
b = strconv.AppendQuote(b, string(tf.Value))
if i+1 < len(tfs) {
b = append(b, ',')
}
}
b = append(b, '}')
return string(b)
}
@@ -6,6 +6,7 @@ sort: 15

## tip

* FEATURE: accept optional `extra_filters[]=series_selector` query args at Prometheus query APIs additionally to `extra_label` query args. This allows enforcing additional filters for all the Prometheus query APIs by using [vmgateway](https://docs.victoriametrics.com/vmgateway.html) or [vmauth](https://docs.victoriametrics.com/vmauth.html). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1863).
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): allow specifying `http` and `https` urls in `-auth.config` command-line flag. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1898). Thanks for @TFM93 .
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow specifying `http` and `https` urls in the following command-line flags: `-promscrape.config`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`.
* FEATURE: vminsert: allow specifying `http` and `https` urls in `-relabelConfig` command-line flag.
@@ -517,9 +517,10 @@ All the Prometheus querying API handlers can be prepended with `/prometheus` pre
### Prometheus querying API enhancements

VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
`/api/v1/query_range?extra_label=user_id=123&extra_label=group_id=456&query=<query>` would automatically add `{user_id="123",group_id="456"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.

VictoriaMetrics accepts optional `extra_filters[]=series_selector` query arg, which can be used for enforcing arbitrary label filters for queries. For example,
`/api/v1/query_range?extra_filters[]={env=~"prod|staging",user="xyz"}&query=<query>` would automatically add `{env=~"prod|staging",user="xyz"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_filters[]` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.

VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
@@ -556,8 +557,8 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap

All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.

VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg for all the Graphite APIs. This arg can be used for limiting the scope of time series
visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics.
VictoriaMetrics accepts optional query args: `extra_label=<label_name>=<label_value>` and `extra_filters[]=series_selector` query args for all the Graphite APIs. These args can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.

[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.

VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
@@ -521,9 +521,10 @@ All the Prometheus querying API handlers can be prepended with `/prometheus` pre
### Prometheus querying API enhancements

VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
`/api/v1/query_range?extra_label=user_id=123&extra_label=group_id=456&query=<query>` would automatically add `{user_id="123",group_id="456"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.

VictoriaMetrics accepts optional `extra_filters[]=series_selector` query arg, which can be used for enforcing arbitrary label filters for queries. For example,
`/api/v1/query_range?extra_filters[]={env=~"prod|staging",user="xyz"}&query=<query>` would automatically add `{env=~"prod|staging",user="xyz"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_filters[]` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.

VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
@@ -560,8 +561,8 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap

All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.

VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg for all the Graphite APIs. This arg can be used for limiting the scope of time series
visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics.
VictoriaMetrics accepts optional query args: `extra_label=<label_name>=<label_value>` and `extra_filters[]=series_selector` query args for all the Graphite APIs. These args can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.

[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.

VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
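As a complement to the documentation changes above, a hypothetical reverse-proxy sketch (not part of the commit; the handler, addresses and filter values are illustrative) showing where an auth proxy would typically inject these query args before forwarding requests to VictoriaMetrics:

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Hypothetical upstream; in practice this would be vmselect or single-node VictoriaMetrics.
	target, err := url.Parse("http://victoriametrics:8428")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)
	director := proxy.Director
	proxy.Director = func(r *http.Request) {
		director(r)
		// Enforce tenant scoping on every forwarded query, as the docs describe
		// for vmauth/vmgateway-style proxies.
		q := r.URL.Query()
		q.Add("extra_label", "user_id=123")
		q.Add("extra_filters[]", `{env=~"prod|staging"}`)
		r.URL.RawQuery = q.Encode()
	}
	log.Fatal(http.ListenAndServe(":8427", proxy))
}
```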