app/vmselect: add optional limit query arg to /api/v1/labels and /api/v1/label_values endpoints

This arg allows limiting the number of entries (label names or label values) returned from these APIs
This commit is contained in:
Aliaksandr Valialkin 2022-06-10 09:50:30 +03:00
parent 1335698ba7
commit 4a94cd81ce
9 changed files with 149 additions and 53 deletions
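
For a quick sense of the feature, the new arg can be exercised with a plain HTTP GET against a running instance. The sketch below is illustrative only and not part of the commit: the cluster URL layout (`/select/<accountID>/prometheus/...`), the `:8481` listen address and tenant `0` are deployment-specific assumptions; single-node VictoriaMetrics exposes the same endpoint at `/api/v1/labels`.

```go
// Illustrative only: query /api/v1/labels with the new `limit` arg.
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Ask for at most 5 label names. If the value exceeds -search.maxTagKeys,
	// the server-side flag value is used instead.
	resp, err := http.Get("http://localhost:8481/select/0/prometheus/api/v1/labels?limit=5")
	if err != nil {
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("cannot read response body: %v", err)
	}
	fmt.Printf("%s\n", body)
}
```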


@ -205,7 +205,7 @@ func MetricsIndexHandler(startTime time.Time, at *auth.Token, w http.ResponseWri
deadline := searchutils.GetDeadlineForQuery(r, startTime)
jsonp := r.FormValue("jsonp")
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
metricNames, isPartial, err := netstorage.GetLabelValues(nil, at, denyPartialResponse, "__name__", deadline)
metricNames, isPartial, err := netstorage.GetLabelValues(nil, at, denyPartialResponse, "__name__", 0, deadline)
if err != nil {
return fmt.Errorf(`cannot obtain metric names: %w`, err)
}


@ -5,7 +5,6 @@ import (
"net/http"
"regexp"
"sort"
"strconv"
"strings"
"time"
@ -158,7 +157,7 @@ var (
// See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support
func TagsAutoCompleteValuesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
deadline := searchutils.GetDeadlineForQuery(r, startTime)
limit, err := getInt(r, "limit")
limit, err := searchutils.GetInt(r, "limit")
if err != nil {
return err
}
@ -247,7 +246,7 @@ var tagsAutoCompleteValuesDuration = metrics.NewSummary(`vm_request_duration_sec
// See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support
func TagsAutoCompleteTagsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
deadline := searchutils.GetDeadlineForQuery(r, startTime)
limit, err := getInt(r, "limit")
limit, err := searchutils.GetInt(r, "limit")
if err != nil {
return err
}
@ -329,7 +328,7 @@ var tagsAutoCompleteTagsDuration = metrics.NewSummary(`vm_request_duration_secon
// See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags
func TagsFindSeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
deadline := searchutils.GetDeadlineForQuery(r, startTime)
limit, err := getInt(r, "limit")
limit, err := searchutils.GetInt(r, "limit")
if err != nil {
return err
}
@ -398,7 +397,7 @@ var tagsFindSeriesDuration = metrics.NewSummary(`vm_request_duration_seconds{pat
// See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags
func TagValuesHandler(startTime time.Time, at *auth.Token, tagName string, w http.ResponseWriter, r *http.Request) error {
deadline := searchutils.GetDeadlineForQuery(r, startTime)
limit, err := getInt(r, "limit")
limit, err := searchutils.GetInt(r, "limit")
if err != nil {
return err
}
@ -427,7 +426,7 @@ var tagValuesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/t
// See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags
func TagsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
deadline := searchutils.GetDeadlineForQuery(r, startTime)
limit, err := getInt(r, "limit")
limit, err := searchutils.GetInt(r, "limit")
if err != nil {
return err
}
@ -451,18 +450,6 @@ func TagsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *
var tagsDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags"}`)
func getInt(r *http.Request, argName string) (int, error) {
argValue := r.FormValue(argName)
if len(argValue) == 0 {
return 0, nil
}
n, err := strconv.Atoi(argValue)
if err != nil {
return 0, fmt.Errorf("cannot parse %q=%q: %w", argName, argValue, err)
}
return n, nil
}
func getSearchQueryForExprs(startTime time.Time, at *auth.Token, etfs [][]storage.TagFilter, exprs []string, maxMetrics int) (*storage.SearchQuery, error) {
tfs, err := exprsToTagFilters(exprs)
if err != nil {


@ -684,7 +684,7 @@ func DeleteSeries(qt *querytracer.Tracer, at *auth.Token, sq *storage.SearchQuer
}
// GetLabelsOnTimeRange returns labels for the given tr until the given deadline.
func GetLabelsOnTimeRange(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, bool, error) {
func GetLabelsOnTimeRange(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, tr storage.TimeRange, limit int, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild("get labels on timeRange=%s", &tr)
defer qt.Done()
if deadline.Exceeded() {
@ -697,7 +697,7 @@ func GetLabelsOnTimeRange(qt *querytracer.Tracer, at *auth.Token, denyPartialRes
}
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.labelsOnTimeRangeRequests.Inc()
labels, err := sn.getLabelsOnTimeRange(qt, at.AccountID, at.ProjectID, tr, deadline)
labels, err := sn.getLabelsOnTimeRange(qt, at.AccountID, at.ProjectID, tr, limit, deadline)
if err != nil {
sn.labelsOnTimeRangeErrors.Inc()
err = fmt.Errorf("cannot get labels on time range from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -732,6 +732,9 @@ func GetLabelsOnTimeRange(qt *querytracer.Tracer, at *auth.Token, denyPartialRes
labels[i] = "__name__"
}
}
if limit > 0 && limit < len(labels) {
labels = labels[:limit]
}
// Sort labels like Prometheus does
sort.Strings(labels)
qt.Printf("sort %d labels", len(labels))
@ -745,7 +748,7 @@ func GetGraphiteTags(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse
if deadline.Exceeded() {
return nil, false, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
}
labels, isPartial, err := GetLabels(qt, at, denyPartialResponse, deadline)
labels, isPartial, err := GetLabels(qt, at, denyPartialResponse, 0, deadline)
if err != nil {
return nil, false, err
}
@ -786,7 +789,7 @@ func hasString(a []string, s string) bool {
}
// GetLabels returns labels until the given deadline.
func GetLabels(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, deadline searchutils.Deadline) ([]string, bool, error) {
func GetLabels(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, limit int, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild("get labels")
defer qt.Done()
if deadline.Exceeded() {
@ -799,7 +802,7 @@ func GetLabels(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool,
}
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.labelsRequests.Inc()
labels, err := sn.getLabels(qt, at.AccountID, at.ProjectID, deadline)
labels, err := sn.getLabels(qt, at.AccountID, at.ProjectID, limit, deadline)
if err != nil {
sn.labelsErrors.Inc()
err = fmt.Errorf("cannot get labels from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -835,6 +838,9 @@ func GetLabels(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool,
}
}
// Sort labels like Prometheus does
if limit > 0 && limit < len(labels) {
labels = labels[:limit]
}
sort.Strings(labels)
qt.Printf("sort %d labels", len(labels))
return labels, isPartial, nil
@ -843,7 +849,7 @@ func GetLabels(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool,
// GetLabelValuesOnTimeRange returns label values for the given labelName on the given tr
// until the given deadline.
func GetLabelValuesOnTimeRange(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, labelName string,
tr storage.TimeRange, deadline searchutils.Deadline) ([]string, bool, error) {
tr storage.TimeRange, limit int, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild("get values for label %s on a timeRange %s", labelName, &tr)
defer qt.Done()
if deadline.Exceeded() {
@ -860,7 +866,7 @@ func GetLabelValuesOnTimeRange(qt *querytracer.Tracer, at *auth.Token, denyParti
}
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.labelValuesOnTimeRangeRequests.Inc()
labelValues, err := sn.getLabelValuesOnTimeRange(qt, at.AccountID, at.ProjectID, labelName, tr, deadline)
labelValues, err := sn.getLabelValuesOnTimeRange(qt, at.AccountID, at.ProjectID, labelName, tr, limit, deadline)
if err != nil {
sn.labelValuesOnTimeRangeErrors.Inc()
err = fmt.Errorf("cannot get label values on time range from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -890,6 +896,9 @@ func GetLabelValuesOnTimeRange(qt *querytracer.Tracer, at *auth.Token, denyParti
labelValues = deduplicateStrings(labelValues)
qt.Printf("get %d unique label values after de-duplication", len(labelValues))
// Sort labelValues like Prometheus does
if limit > 0 && limit < len(labelValues) {
labelValues = labelValues[:limit]
}
sort.Strings(labelValues)
qt.Printf("sort %d label values", len(labelValues))
return labelValues, isPartial, nil
@ -905,7 +914,7 @@ func GetGraphiteTagValues(qt *querytracer.Tracer, at *auth.Token, denyPartialRes
if tagName == "name" {
tagName = ""
}
tagValues, isPartial, err := GetLabelValues(qt, at, denyPartialResponse, tagName, deadline)
tagValues, isPartial, err := GetLabelValues(qt, at, denyPartialResponse, tagName, 0, deadline)
if err != nil {
return nil, false, err
}
@ -923,7 +932,7 @@ func GetGraphiteTagValues(qt *querytracer.Tracer, at *auth.Token, denyPartialRes
// GetLabelValues returns label values for the given labelName
// until the given deadline.
func GetLabelValues(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, labelName string, deadline searchutils.Deadline) ([]string, bool, error) {
func GetLabelValues(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, labelName string, limit int, deadline searchutils.Deadline) ([]string, bool, error) {
qt = qt.NewChild("get values for label %s", labelName)
defer qt.Done()
if deadline.Exceeded() {
@ -940,7 +949,7 @@ func GetLabelValues(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse
}
snr := startStorageNodesRequest(qt, denyPartialResponse, func(qt *querytracer.Tracer, idx int, sn *storageNode) interface{} {
sn.labelValuesRequests.Inc()
labelValues, err := sn.getLabelValues(qt, at.AccountID, at.ProjectID, labelName, deadline)
labelValues, err := sn.getLabelValues(qt, at.AccountID, at.ProjectID, labelName, limit, deadline)
if err != nil {
sn.labelValuesErrors.Inc()
err = fmt.Errorf("cannot get label values from vmstorage %s: %w", sn.connPool.Addr(), err)
@ -970,6 +979,9 @@ func GetLabelValues(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse
labelValues = deduplicateStrings(labelValues)
qt.Printf("get %d unique label values after de-duplication", len(labelValues))
// Sort labelValues like Prometheus does
if limit > 0 && limit < len(labelValues) {
labelValues = labelValues[:limit]
}
sort.Strings(labelValues)
qt.Printf("sort %d label values", len(labelValues))
return labelValues, isPartial, nil
@ -1766,10 +1778,10 @@ func (sn *storageNode) deleteMetrics(qt *querytracer.Tracer, requestData []byte,
return deletedCount, nil
}
func (sn *storageNode) getLabelsOnTimeRange(qt *querytracer.Tracer, accountID, projectID uint32, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
func (sn *storageNode) getLabelsOnTimeRange(qt *querytracer.Tracer, accountID, projectID uint32, tr storage.TimeRange, limit int, deadline searchutils.Deadline) ([]string, error) {
var labels []string
f := func(bc *handshake.BufferedConn) error {
ls, err := sn.getLabelsOnTimeRangeOnConn(bc, accountID, projectID, tr)
ls, err := sn.getLabelsOnTimeRangeOnConn(bc, accountID, projectID, tr, limit)
if err != nil {
return err
}
@ -1782,10 +1794,10 @@ func (sn *storageNode) getLabelsOnTimeRange(qt *querytracer.Tracer, accountID, p
return labels, nil
}
func (sn *storageNode) getLabels(qt *querytracer.Tracer, accountID, projectID uint32, deadline searchutils.Deadline) ([]string, error) {
func (sn *storageNode) getLabels(qt *querytracer.Tracer, accountID, projectID uint32, limit int, deadline searchutils.Deadline) ([]string, error) {
var labels []string
f := func(bc *handshake.BufferedConn) error {
ls, err := sn.getLabelsOnConn(bc, accountID, projectID)
ls, err := sn.getLabelsOnConn(bc, accountID, projectID, limit)
if err != nil {
return err
}
@ -1799,10 +1811,10 @@ func (sn *storageNode) getLabels(qt *querytracer.Tracer, accountID, projectID ui
}
func (sn *storageNode) getLabelValuesOnTimeRange(qt *querytracer.Tracer, accountID, projectID uint32, labelName string,
tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
tr storage.TimeRange, limit int, deadline searchutils.Deadline) ([]string, error) {
var labelValues []string
f := func(bc *handshake.BufferedConn) error {
lvs, err := sn.getLabelValuesOnTimeRangeOnConn(bc, accountID, projectID, labelName, tr)
lvs, err := sn.getLabelValuesOnTimeRangeOnConn(bc, accountID, projectID, labelName, tr, limit)
if err != nil {
return err
}
@ -1815,10 +1827,10 @@ func (sn *storageNode) getLabelValuesOnTimeRange(qt *querytracer.Tracer, account
return labelValues, nil
}
func (sn *storageNode) getLabelValues(qt *querytracer.Tracer, accountID, projectID uint32, labelName string, deadline searchutils.Deadline) ([]string, error) {
func (sn *storageNode) getLabelValues(qt *querytracer.Tracer, accountID, projectID uint32, labelName string, limit int, deadline searchutils.Deadline) ([]string, error) {
var labelValues []string
f := func(bc *handshake.BufferedConn) error {
lvs, err := sn.getLabelValuesOnConn(bc, accountID, projectID, labelName)
lvs, err := sn.getLabelValuesOnConn(bc, accountID, projectID, labelName, limit)
if err != nil {
return err
}
@ -2132,7 +2144,7 @@ func (sn *storageNode) deleteMetricsOnConn(bc *handshake.BufferedConn, requestDa
const maxLabelSize = 16 * 1024 * 1024
func (sn *storageNode) getLabelsOnTimeRangeOnConn(bc *handshake.BufferedConn, accountID, projectID uint32, tr storage.TimeRange) ([]string, error) {
func (sn *storageNode) getLabelsOnTimeRangeOnConn(bc *handshake.BufferedConn, accountID, projectID uint32, tr storage.TimeRange, limit int) ([]string, error) {
// Send the request to sn.
if err := sendAccountIDProjectID(bc, accountID, projectID); err != nil {
return nil, err
@ -2140,6 +2152,9 @@ func (sn *storageNode) getLabelsOnTimeRangeOnConn(bc *handshake.BufferedConn, ac
if err := writeTimeRange(bc, tr); err != nil {
return nil, err
}
if err := writeLimit(bc, limit); err != nil {
return nil, err
}
if err := bc.Flush(); err != nil {
return nil, fmt.Errorf("cannot flush request to conn: %w", err)
}
@ -2168,11 +2183,14 @@ func (sn *storageNode) getLabelsOnTimeRangeOnConn(bc *handshake.BufferedConn, ac
}
}
func (sn *storageNode) getLabelsOnConn(bc *handshake.BufferedConn, accountID, projectID uint32) ([]string, error) {
func (sn *storageNode) getLabelsOnConn(bc *handshake.BufferedConn, accountID, projectID uint32, limit int) ([]string, error) {
// Send the request to sn.
if err := sendAccountIDProjectID(bc, accountID, projectID); err != nil {
return nil, err
}
if err := writeLimit(bc, limit); err != nil {
return nil, err
}
if err := bc.Flush(); err != nil {
return nil, fmt.Errorf("cannot flush request to conn: %w", err)
}
@ -2203,7 +2221,7 @@ func (sn *storageNode) getLabelsOnConn(bc *handshake.BufferedConn, accountID, pr
const maxLabelValueSize = 16 * 1024 * 1024
func (sn *storageNode) getLabelValuesOnTimeRangeOnConn(bc *handshake.BufferedConn, accountID, projectID uint32, labelName string, tr storage.TimeRange) ([]string, error) {
func (sn *storageNode) getLabelValuesOnTimeRangeOnConn(bc *handshake.BufferedConn, accountID, projectID uint32, labelName string, tr storage.TimeRange, limit int) ([]string, error) {
// Send the request to sn.
if err := sendAccountIDProjectID(bc, accountID, projectID); err != nil {
return nil, err
@ -2214,6 +2232,9 @@ func (sn *storageNode) getLabelValuesOnTimeRangeOnConn(bc *handshake.BufferedCon
if err := writeTimeRange(bc, tr); err != nil {
return nil, err
}
if err := writeLimit(bc, limit); err != nil {
return nil, err
}
if err := bc.Flush(); err != nil {
return nil, fmt.Errorf("cannot flush labelName to conn: %w", err)
}
@ -2235,7 +2256,7 @@ func (sn *storageNode) getLabelValuesOnTimeRangeOnConn(bc *handshake.BufferedCon
return labelValues, nil
}
func (sn *storageNode) getLabelValuesOnConn(bc *handshake.BufferedConn, accountID, projectID uint32, labelName string) ([]string, error) {
func (sn *storageNode) getLabelValuesOnConn(bc *handshake.BufferedConn, accountID, projectID uint32, labelName string, limit int) ([]string, error) {
// Send the request to sn.
if err := sendAccountIDProjectID(bc, accountID, projectID); err != nil {
return nil, err
@ -2243,6 +2264,9 @@ func (sn *storageNode) getLabelValuesOnConn(bc *handshake.BufferedConn, accountI
if err := writeBytes(bc, []byte(labelName)); err != nil {
return nil, fmt.Errorf("cannot send labelName=%q to conn: %w", labelName, err)
}
if err := writeLimit(bc, limit); err != nil {
return nil, err
}
if err := bc.Flush(); err != nil {
return nil, fmt.Errorf("cannot flush labelName to conn: %w", err)
}
@ -2613,6 +2637,20 @@ func writeTimeRange(bc *handshake.BufferedConn, tr storage.TimeRange) error {
return nil
}
func writeLimit(bc *handshake.BufferedConn, limit int) error {
if limit < 0 {
limit = 0
}
if limit > 1<<31-1 {
limit = 1<<31 - 1
}
limitU32 := uint32(limit)
if err := writeUint32(bc, limitU32); err != nil {
return fmt.Errorf("cannot write limit=%d to conn: %w", limitU32, err)
}
return nil
}
func writeBytes(bc *handshake.BufferedConn, buf []byte) error {
sizeBuf := encoding.MarshalUint64(nil, uint64(len(buf)))
if _, err := bc.Write(sizeBuf); err != nil {


@ -509,12 +509,16 @@ func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.To
if err != nil {
return err
}
limit, err := searchutils.GetInt(r, "limit")
if err != nil {
return err
}
var labelValues []string
var isPartial bool
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
if len(cp.filterss) == 0 {
if cp.IsDefaultTimeRange() {
labelValues, isPartial, err = netstorage.GetLabelValues(qt, at, denyPartialResponse, labelName, cp.deadline)
labelValues, isPartial, err = netstorage.GetLabelValues(qt, at, denyPartialResponse, labelName, limit, cp.deadline)
if err != nil {
return fmt.Errorf(`cannot obtain label values for %q: %w`, labelName, err)
}
@ -526,7 +530,7 @@ func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.To
MinTimestamp: cp.start,
MaxTimestamp: cp.end,
}
labelValues, isPartial, err = netstorage.GetLabelValuesOnTimeRange(qt, at, denyPartialResponse, labelName, tr, cp.deadline)
labelValues, isPartial, err = netstorage.GetLabelValuesOnTimeRange(qt, at, denyPartialResponse, labelName, tr, limit, cp.deadline)
if err != nil {
return fmt.Errorf(`cannot obtain label values on time range for %q: %w`, labelName, err)
}
@ -539,7 +543,7 @@ func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.To
if cp.start == 0 {
cp.start = cp.end - defaultStep
}
labelValues, isPartial, err = labelValuesWithMatches(qt, at, denyPartialResponse, labelName, cp)
labelValues, isPartial, err = labelValuesWithMatches(qt, at, denyPartialResponse, labelName, cp, limit)
if err != nil {
return fmt.Errorf("cannot obtain label values for %q on time range [%d...%d]: %w", labelName, cp.start, cp.end, err)
}
@ -555,7 +559,7 @@ func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.To
return nil
}
func labelValuesWithMatches(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, labelName string, cp *commonParams) ([]string, bool, error) {
func labelValuesWithMatches(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, labelName string, cp *commonParams, limit int) ([]string, bool, error) {
// Add `labelName!=''` tag filter in order to filter out series without the labelName.
// There is no need in adding `__name__!=''` filter, since all the time series should
// already have non-empty name.
@ -610,6 +614,9 @@ func labelValuesWithMatches(qt *querytracer.Tracer, at *auth.Token, denyPartialR
for labelValue := range m {
labelValues = append(labelValues, labelValue)
}
if limit > 0 && len(labelValues) > limit {
labelValues = labelValues[:limit]
}
sort.Strings(labelValues)
qt.Printf("sort %d label values", len(labelValues))
return labelValues, isPartial, nil
@ -728,12 +735,16 @@ func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token,
if err != nil {
return err
}
limit, err := searchutils.GetInt(r, "limit")
if err != nil {
return err
}
var labels []string
var isPartial bool
denyPartialResponse := searchutils.GetDenyPartialResponse(r)
if len(cp.filterss) == 0 {
if cp.IsDefaultTimeRange() {
labels, isPartial, err = netstorage.GetLabels(qt, at, denyPartialResponse, cp.deadline)
labels, isPartial, err = netstorage.GetLabels(qt, at, denyPartialResponse, limit, cp.deadline)
if err != nil {
return fmt.Errorf("cannot obtain labels: %w", err)
}
@ -745,7 +756,7 @@ func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token,
MinTimestamp: cp.start,
MaxTimestamp: cp.end,
}
labels, isPartial, err = netstorage.GetLabelsOnTimeRange(qt, at, denyPartialResponse, tr, cp.deadline)
labels, isPartial, err = netstorage.GetLabelsOnTimeRange(qt, at, denyPartialResponse, tr, limit, cp.deadline)
if err != nil {
return fmt.Errorf("cannot obtain labels on time range: %w", err)
}
@ -756,7 +767,7 @@ func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token,
if cp.start == 0 {
cp.start = cp.end - defaultStep
}
labels, isPartial, err = labelsWithMatches(qt, at, denyPartialResponse, cp)
labels, isPartial, err = labelsWithMatches(qt, at, denyPartialResponse, cp, limit)
if err != nil {
return fmt.Errorf("cannot obtain labels for timeRange=[%d..%d]: %w", cp.start, cp.end, err)
}
@ -772,7 +783,7 @@ func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token,
return nil
}
func labelsWithMatches(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, cp *commonParams) ([]string, bool, error) {
func labelsWithMatches(qt *querytracer.Tracer, at *auth.Token, denyPartialResponse bool, cp *commonParams, limit int) ([]string, bool, error) {
sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, cp.start, cp.end, cp.filterss, *maxSeriesLimit)
m := make(map[string]struct{})
isPartial := false
@ -815,6 +826,9 @@ func labelsWithMatches(qt *querytracer.Tracer, at *auth.Token, denyPartialRespon
for label := range m {
labels = append(labels, label)
}
if limit > 0 && limit < len(labels) {
labels = labels[:limit]
}
sort.Strings(labels)
qt.Printf("sort %d labels", len(labels))
return labels, isPartial, nil


@ -27,6 +27,19 @@ func roundToSeconds(ms int64) int64 {
return ms - ms%1000
}
// GetInt returns integer value from the given argKey.
func GetInt(r *http.Request, argKey string) (int, error) {
argValue := r.FormValue(argKey)
if len(argValue) == 0 {
return 0, nil
}
n, err := strconv.Atoi(argValue)
if err != nil {
return 0, fmt.Errorf("cannot parse integer %q=%q: %w", argKey, argValue, err)
}
return n, nil
}
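
To illustrate the parsing rules of GetInt, the self-contained sketch below feeds it a few synthetic requests (a local copy of the helper is used only so the snippet compiles without importing the searchutils package): a missing arg yields 0, a valid integer is returned as-is, and a non-numeric value produces a descriptive error.

```go
// Sketch: exercising the GetInt parsing rules with synthetic requests.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
)

// getInt mirrors searchutils.GetInt for illustration purposes.
func getInt(r *http.Request, argKey string) (int, error) {
	argValue := r.FormValue(argKey)
	if len(argValue) == 0 {
		return 0, nil
	}
	n, err := strconv.Atoi(argValue)
	if err != nil {
		return 0, fmt.Errorf("cannot parse integer %q=%q: %w", argKey, argValue, err)
	}
	return n, nil
}

func main() {
	for _, url := range []string{
		"/api/v1/labels",           // missing limit -> 0, i.e. no limit
		"/api/v1/labels?limit=5",   // valid limit -> 5
		"/api/v1/labels?limit=abc", // invalid limit -> error
	} {
		r := httptest.NewRequest(http.MethodGet, url, nil)
		n, err := getInt(r, "limit")
		fmt.Printf("%-28s limit=%d err=%v\n", url, n, err)
	}
}
```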
// GetTime returns time from the given argKey query arg.
//
// If argKey is missing in r, then defaultMs rounded to seconds is returned.


@ -322,6 +322,17 @@ func (ctx *vmselectRequestCtx) readTimeRange() (storage.TimeRange, error) {
return tr, nil
}
func (ctx *vmselectRequestCtx) readLimit() (int, error) {
n, err := ctx.readUint32()
if err != nil {
return 0, fmt.Errorf("cannot read limit: %w", err)
}
if n > 1<<31-1 {
n = 1<<31 - 1
}
return int(n), nil
}
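
writeLimit on the vmselect side and readLimit above form a small round trip: the limit is clamped into the non-negative int32 range, written as a fixed-width uint32, and clamped again when read back. The sketch below reproduces that round trip over an in-memory buffer; the big-endian encoding and the bytes.Buffer transport are assumptions standing in for writeUint32/readUint32 over the handshake connection.

```go
// Sketch of the limit round trip between vmselect and vmstorage; not part of the commit.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"math"
)

// writeLimit clamps the limit and writes it as a fixed-width uint32.
func writeLimit(w io.Writer, limit int) error {
	if limit < 0 {
		limit = 0 // negative limits are sent as 0, i.e. "no limit"
	}
	if limit > math.MaxInt32 {
		limit = math.MaxInt32
	}
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], uint32(limit))
	_, err := w.Write(buf[:])
	return err
}

// readLimit reads the uint32 back and caps it so the result always fits into int32.
func readLimit(r io.Reader) (int, error) {
	var buf [4]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return 0, fmt.Errorf("cannot read limit: %w", err)
	}
	n := binary.BigEndian.Uint32(buf[:])
	if n > math.MaxInt32 {
		n = math.MaxInt32
	}
	return int(n), nil
}

func main() {
	var conn bytes.Buffer
	_ = writeLimit(&conn, -7)
	_ = writeLimit(&conn, 42)
	for i := 0; i < 2; i++ {
		limit, err := readLimit(&conn)
		fmt.Println(limit, err) // prints "0 <nil>" then "42 <nil>"
	}
}
```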
func (ctx *vmselectRequestCtx) readUint32() (uint32, error) {
ctx.sizeBuf = bytesutil.ResizeNoCopyMayOverallocate(ctx.sizeBuf, 4)
if _, err := io.ReadFull(ctx.bc, ctx.sizeBuf); err != nil {
@ -642,9 +653,16 @@ func (s *Server) processVMSelectLabelsOnTimeRange(ctx *vmselectRequestCtx) error
if err != nil {
return err
}
limit, err := ctx.readLimit()
if err != nil {
return err
}
if limit <= 0 || limit > *maxTagKeysPerSearch {
limit = *maxTagKeysPerSearch
}
// Search for tag keys
labels, err := s.storage.SearchTagKeysOnTimeRange(accountID, projectID, tr, *maxTagKeysPerSearch, ctx.deadline)
labels, err := s.storage.SearchTagKeysOnTimeRange(accountID, projectID, tr, limit, ctx.deadline)
if err != nil {
return ctx.writeErrorMessage(err)
}
@ -680,9 +698,16 @@ func (s *Server) processVMSelectLabels(ctx *vmselectRequestCtx) error {
if err != nil {
return err
}
limit, err := ctx.readLimit()
if err != nil {
return err
}
if limit <= 0 || limit > *maxTagKeysPerSearch {
limit = *maxTagKeysPerSearch
}
// Search for tag keys
labels, err := s.storage.SearchTagKeys(accountID, projectID, *maxTagKeysPerSearch, ctx.deadline)
labels, err := s.storage.SearchTagKeys(accountID, projectID, limit, ctx.deadline)
if err != nil {
return ctx.writeErrorMessage(err)
}
@ -728,9 +753,16 @@ func (s *Server) processVMSelectLabelValuesOnTimeRange(ctx *vmselectRequestCtx)
if err != nil {
return err
}
limit, err := ctx.readLimit()
if err != nil {
return err
}
if limit <= 0 || limit > *maxTagValuesPerSearch {
limit = *maxTagValuesPerSearch
}
// Search for tag values
labelValues, err := s.storage.SearchTagValuesOnTimeRange(accountID, projectID, []byte(labelName), tr, *maxTagValuesPerSearch, ctx.deadline)
labelValues, err := s.storage.SearchTagValuesOnTimeRange(accountID, projectID, []byte(labelName), tr, limit, ctx.deadline)
if err != nil {
return ctx.writeErrorMessage(err)
}
@ -755,9 +787,16 @@ func (s *Server) processVMSelectLabelValues(ctx *vmselectRequestCtx) error {
return fmt.Errorf("cannot read labelName: %w", err)
}
labelName := ctx.dataBuf
limit, err := ctx.readLimit()
if err != nil {
return err
}
if limit <= 0 || limit > *maxTagValuesPerSearch {
limit = *maxTagValuesPerSearch
}
// Search for tag values
labelValues, err := s.storage.SearchTagValues(accountID, projectID, labelName, *maxTagValuesPerSearch, ctx.deadline)
labelValues, err := s.storage.SearchTagValues(accountID, projectID, labelName, limit, ctx.deadline)
if err != nil {
return ctx.writeErrorMessage(err)
}

View file

@ -23,6 +23,7 @@ The following tip changes can be tested by building VictoriaMetrics components f
* FEATURE: add support of `lowercase` and `uppercase` relabeling actions in the same way as [Prometheus 2.36.0 does](https://github.com/prometheus/prometheus/releases/tag/v2.36.0). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2664).
* FEATURE: add ability to change the `indexdb` rotation timezone offset via `-retentionTimezoneOffset` command-line flag. Previously it was performed at 4am UTC time. This could lead to performance degradation in the middle of the day when VictoriaMetrics runs in time zones located too far from UTC. Thanks to @cnych for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2574).
* FEATURE: limit the number of background merge threads on systems with a big number of CPU cores by default. This increases the max size of parts, which can be created during background merge when the `-storageDataPath` directory has limited free disk space. This may improve on-disk data compression efficiency and query performance. The limits can be tuned if needed with `-smallMergeConcurrency` and `-bigMergeConcurrency` command-line flags. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2673).
* FEATURE: accept optional `limit` query arg at [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names) and [/api/v1/label_values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values) for limiting the number of sample entries returned from these endpoints. See [these docs](https://docs.victoriametrics.com/#prometheus-querying-api-enhancements).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): support `limit` param per-group for limiting number of produced samples per each rule. Thanks to @Howie59 for [implementation](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2676).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): remove dependency on Internet access at [web API pages](https://docs.victoriametrics.com/vmalert.html#web). Previously the functionality and the layout of these pages were broken without Internet access. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2594).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): implement the `http://vmagent:8429/service-discovery` page in the same way as Prometheus does. This page shows the original labels for all the discovered targets alongside the resulting labels after the relabeling. This simplifies service discovery debugging.


@ -607,6 +607,8 @@ For example, the following query would return data for the last 30 minutes: `/ap
VictoriaMetrics accepts `round_digits` query arg for `/api/v1/query` and `/api/v1/query_range` handlers. It can be used for rounding response values to the given number of digits after the decimal point. For example, `/api/v1/query?query=avg_over_time(temperature[1h])&round_digits=2` would round response values to up to two digits after the decimal point.
VictoriaMetrics accepts `limit` query arg for `/api/v1/labels` and `/api/v1/label/<labelName>/values` handlers for limiting the number of returned entries. For example, the query to `/api/v1/labels?limit=5` returns a sample of up to 5 unique labels, while ignoring the rest of the labels. If the provided `limit` value exceeds the corresponding `-search.maxTagKeys` / `-search.maxTagValues` command-line flag values, then the limits specified in the command-line flags are used.
By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
Additionally, VictoriaMetrics provides the following handlers:


@ -611,6 +611,8 @@ For example, the following query would return data for the last 30 minutes: `/ap
VictoriaMetrics accepts `round_digits` query arg for `/api/v1/query` and `/api/v1/query_range` handlers. It can be used for rounding response values to the given number of digits after the decimal point. For example, `/api/v1/query?query=avg_over_time(temperature[1h])&round_digits=2` would round response values to up to two digits after the decimal point.
VictoriaMetrics accepts `limit` query arg for `/api/v1/labels` and `/api/v1/label/<labelName>/values` handlers for limiting the number of returned entries. For example, the query to `/api/v1/labels?limit=5` returns a sample of up to 5 unique labels, while ignoring the rest of the labels. If the provided `limit` value exceeds the corresponding `-search.maxTagKeys` / `-search.maxTagValues` command-line flag values, then the limits specified in the command-line flags are used.
By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
Additionally, VictoriaMetrics provides the following handlers: