Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2024-11-21 14:44:00 +00:00
app/vmselect: move common http functionality from app/vmselect/searchutils to lib/httputils
While at it, move app/vmselect/bufferedwriter to lib/bufferedwriter, since it is going to be used in VictoriaLogs
Parent: 46210c4d5e
Commit: 427ce69426
16 changed files with 380 additions and 325 deletions
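
For orientation, the sketch below shows how a vmselect request handler consumes the relocated helpers after this change. The handler itself is hypothetical; the httputils function names and signatures (GetBool, GetInt, GetTime, GetDuration, GetDenyPartialResponse) are the ones exercised by the hunks that follow.

package example

import (
	"fmt"
	"net/http"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)

// exampleHandler is a hypothetical handler illustrating the relocated helpers.
func exampleHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
	// Boolean query args: "", "0", "f", "false" and "no" are treated as false.
	nocache := httputils.GetBool(r, "nocache")

	// Optional integer arg; a missing value yields 0 without an error.
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		return err
	}

	// Timestamps in milliseconds; a missing arg falls back to the default,
	// rounded down to whole seconds.
	ct := startTime.UnixNano() / 1e6
	start, err := httputils.GetTime(r, "start", 0)
	if err != nil {
		return err
	}
	end, err := httputils.GetTime(r, "end", ct)
	if err != nil {
		return err
	}

	// Durations in milliseconds; accepts both "1.5" (seconds) and "90s" forms.
	step, err := httputils.GetDuration(r, "step", 5*60*1000)
	if err != nil {
		return err
	}

	// True when either -search.denyPartialResponse is set or the request
	// carries deny_partial_response.
	denyPartialResponse := httputils.GetDenyPartialResponse(r)

	fmt.Fprintf(w, "nocache=%v limit=%d start=%d end=%d step=%d denyPartialResponse=%v\n",
		nocache, limit, start, end, step, denyPartialResponse)
	return nil
}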
@@ -8,6 +8,7 @@ import (
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/vmselectapi"
@@ -47,7 +48,7 @@ func NewVMSelectServer(addr string) (*vmselectapi.Server, error) {
 type vmstorageAPI struct{}
 
 func (api *vmstorageAPI) InitSearch(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline uint64) (vmselectapi.BlockIterator, error) {
-denyPartialResponse := searchutils.GetDenyPartialResponse(nil)
+denyPartialResponse := httputils.GetDenyPartialResponse(nil)
 dl := searchutils.DeadlineFromTimestamp(deadline)
 bi := newBlockIterator(qt, denyPartialResponse, sq, dl)
 return bi, nil
@@ -59,14 +60,14 @@ func (api *vmstorageAPI) Tenants(qt *querytracer.Tracer, tr storage.TimeRange, d
 }
 
 func (api *vmstorageAPI) SearchMetricNames(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline uint64) ([]string, error) {
-denyPartialResponse := searchutils.GetDenyPartialResponse(nil)
+denyPartialResponse := httputils.GetDenyPartialResponse(nil)
 dl := searchutils.DeadlineFromTimestamp(deadline)
 metricNames, _, err := netstorage.SearchMetricNames(qt, denyPartialResponse, sq, dl)
 return metricNames, err
 }
 
 func (api *vmstorageAPI) LabelValues(qt *querytracer.Tracer, sq *storage.SearchQuery, labelName string, maxLabelValues int, deadline uint64) ([]string, error) {
-denyPartialResponse := searchutils.GetDenyPartialResponse(nil)
+denyPartialResponse := httputils.GetDenyPartialResponse(nil)
 dl := searchutils.DeadlineFromTimestamp(deadline)
 labelValues, _, err := netstorage.LabelValues(qt, denyPartialResponse, labelName, sq, maxLabelValues, dl)
 return labelValues, err
@@ -74,28 +75,28 @@ func (api *vmstorageAPI) LabelValues(qt *querytracer.Tracer, sq *storage.SearchQ
 
 func (api *vmstorageAPI) TagValueSuffixes(qt *querytracer.Tracer, accountID, projectID uint32, tr storage.TimeRange, tagKey, tagValuePrefix string, delimiter byte,
 maxSuffixes int, deadline uint64) ([]string, error) {
-denyPartialResponse := searchutils.GetDenyPartialResponse(nil)
+denyPartialResponse := httputils.GetDenyPartialResponse(nil)
 dl := searchutils.DeadlineFromTimestamp(deadline)
 suffixes, _, err := netstorage.TagValueSuffixes(qt, accountID, projectID, denyPartialResponse, tr, tagKey, tagValuePrefix, delimiter, maxSuffixes, dl)
 return suffixes, err
 }
 
 func (api *vmstorageAPI) LabelNames(qt *querytracer.Tracer, sq *storage.SearchQuery, maxLabelNames int, deadline uint64) ([]string, error) {
-denyPartialResponse := searchutils.GetDenyPartialResponse(nil)
+denyPartialResponse := httputils.GetDenyPartialResponse(nil)
 dl := searchutils.DeadlineFromTimestamp(deadline)
 labelNames, _, err := netstorage.LabelNames(qt, denyPartialResponse, sq, maxLabelNames, dl)
 return labelNames, err
 }
 
 func (api *vmstorageAPI) SeriesCount(qt *querytracer.Tracer, accountID, projectID uint32, deadline uint64) (uint64, error) {
-denyPartialResponse := searchutils.GetDenyPartialResponse(nil)
+denyPartialResponse := httputils.GetDenyPartialResponse(nil)
 dl := searchutils.DeadlineFromTimestamp(deadline)
 seriesCount, _, err := netstorage.SeriesCount(qt, accountID, projectID, denyPartialResponse, dl)
 return seriesCount, err
 }
 
 func (api *vmstorageAPI) TSDBStatus(qt *querytracer.Tracer, sq *storage.SearchQuery, focusLabel string, topN int, deadline uint64) (*storage.TSDBStatus, error) {
-denyPartialResponse := searchutils.GetDenyPartialResponse(nil)
+denyPartialResponse := httputils.GetDenyPartialResponse(nil)
 dl := searchutils.DeadlineFromTimestamp(deadline)
 tsdbStatus, _, err := netstorage.TSDBStatus(qt, denyPartialResponse, sq, focusLabel, topN, dl)
 return tsdbStatus, err
@@ -8,14 +8,14 @@ import (
 "net/http"
 "time"
 
-"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 )
 
 // FunctionsHandler implements /functions handler.
 //
 // See https://graphite.readthedocs.io/en/latest/functions.html#function-api
 func FunctionsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
-grouped := searchutils.GetBool(r, "grouped")
+grouped := httputils.GetBool(r, "grouped")
 group := r.FormValue("group")
 result := make(map[string]interface{})
 for funcName, fi := range funcs {
@@ -10,10 +10,11 @@ import (
 "sync"
 "time"
 
-"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 "github.com/VictoriaMetrics/metrics"
@@ -47,7 +48,7 @@ func MetricsFindHandler(startTime time.Time, at *auth.Token, w http.ResponseWrit
 if len(delimiter) > 1 {
 return fmt.Errorf("`delimiter` query arg must contain only a single char")
 }
-if searchutils.GetBool(r, "automatic_variants") {
+if httputils.GetBool(r, "automatic_variants") {
 // See https://github.com/graphite-project/graphite-web/blob/bb9feb0e6815faa73f538af6ed35adea0fb273fd/webapp/graphite/metrics/views.py#L152
 query = addAutomaticVariants(query, delimiter)
 }
@@ -58,19 +59,19 @@ func MetricsFindHandler(startTime time.Time, at *auth.Token, w http.ResponseWrit
 query += "*"
 }
 }
-leavesOnly := searchutils.GetBool(r, "leavesOnly")
+leavesOnly := httputils.GetBool(r, "leavesOnly")
-wildcards := searchutils.GetBool(r, "wildcards")
+wildcards := httputils.GetBool(r, "wildcards")
 label := r.FormValue("label")
 if label == "__name__" {
 label = ""
 }
 jsonp := r.FormValue("jsonp")
-from, err := searchutils.GetTime(r, "from", 0)
+from, err := httputils.GetTime(r, "from", 0)
 if err != nil {
 return err
 }
 ct := startTime.UnixNano() / 1e6
-until, err := searchutils.GetTime(r, "until", ct)
+until, err := httputils.GetTime(r, "until", ct)
 if err != nil {
 return err
 }
@@ -78,7 +79,7 @@ func MetricsFindHandler(startTime time.Time, at *auth.Token, w http.ResponseWrit
 MinTimestamp: from,
 MaxTimestamp: until,
 }
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 paths, isPartial, err := metricsFind(at, denyPartialResponse, tr, label, "", query, delimiter[0], false, deadline)
 if err != nil {
 return err
@@ -126,8 +127,8 @@ func MetricsExpandHandler(startTime time.Time, at *auth.Token, w http.ResponseWr
 if len(queries) == 0 {
 return fmt.Errorf("missing `query` arg")
 }
-groupByExpr := searchutils.GetBool(r, "groupByExpr")
+groupByExpr := httputils.GetBool(r, "groupByExpr")
-leavesOnly := searchutils.GetBool(r, "leavesOnly")
+leavesOnly := httputils.GetBool(r, "leavesOnly")
 label := r.FormValue("label")
 if label == "__name__" {
 label = ""
@@ -140,12 +141,12 @@ func MetricsExpandHandler(startTime time.Time, at *auth.Token, w http.ResponseWr
 return fmt.Errorf("`delimiter` query arg must contain only a single char")
 }
 jsonp := r.FormValue("jsonp")
-from, err := searchutils.GetTime(r, "from", 0)
+from, err := httputils.GetTime(r, "from", 0)
 if err != nil {
 return err
 }
 ct := startTime.UnixNano() / 1e6
-until, err := searchutils.GetTime(r, "until", ct)
+until, err := httputils.GetTime(r, "until", ct)
 if err != nil {
 return err
 }
@@ -155,7 +156,7 @@ func MetricsExpandHandler(startTime time.Time, at *auth.Token, w http.ResponseWr
 }
 m := make(map[string][]string, len(queries))
 isPartialResponse := false
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 for _, query := range queries {
 paths, isPartial, err := metricsFind(at, denyPartialResponse, tr, label, "", query, delimiter[0], true, deadline)
 if err != nil {
@@ -208,7 +209,7 @@ func MetricsExpandHandler(startTime time.Time, at *auth.Token, w http.ResponseWr
 func MetricsIndexHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
 deadline := searchutils.GetDeadlineForQuery(r, startTime)
 jsonp := r.FormValue("jsonp")
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, 0, 0, nil, 0)
 metricNames, isPartial, err := netstorage.LabelValues(nil, denyPartialResponse, "__name__", sq, 0, deadline)
 if err != nil {
@@ -8,9 +8,9 @@ import (
 "strings"
 "time"
 
-"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
 "github.com/VictoriaMetrics/metrics"
 )
 
@@ -9,10 +9,11 @@ import (
 "strings"
 "time"
 
-"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
 graphiteparser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
@@ -165,7 +166,7 @@ var (
 // See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support
 func TagsAutoCompleteValuesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
 deadline := searchutils.GetDeadlineForQuery(r, startTime)
-limit, err := searchutils.GetInt(r, "limit")
+limit, err := httputils.GetInt(r, "limit")
 if err != nil {
 return err
 }
@@ -180,7 +181,7 @@ func TagsAutoCompleteValuesHandler(startTime time.Time, at *auth.Token, w http.R
 valuePrefix := r.FormValue("valuePrefix")
 exprs := r.Form["expr"]
 var tagValues []string
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 etfs, err := searchutils.GetExtraTagFilters(r)
 if err != nil {
 return fmt.Errorf("cannot setup tag filters: %w", err)
@@ -258,7 +259,7 @@ var tagsAutoCompleteValuesDuration = metrics.NewSummary(`vm_request_duration_sec
 // See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support
 func TagsAutoCompleteTagsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
 deadline := searchutils.GetDeadlineForQuery(r, startTime)
-limit, err := searchutils.GetInt(r, "limit")
+limit, err := httputils.GetInt(r, "limit")
 if err != nil {
 return err
 }
@@ -268,7 +269,7 @@ func TagsAutoCompleteTagsHandler(startTime time.Time, at *auth.Token, w http.Res
 }
 tagPrefix := r.FormValue("tagPrefix")
 exprs := r.Form["expr"]
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 etfs, err := searchutils.GetExtraTagFilters(r)
 if err != nil {
 return fmt.Errorf("cannot setup tag filters: %w", err)
@@ -344,7 +345,7 @@ var tagsAutoCompleteTagsDuration = metrics.NewSummary(`vm_request_duration_secon
 // See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags
 func TagsFindSeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
 deadline := searchutils.GetDeadlineForQuery(r, startTime)
-limit, err := searchutils.GetInt(r, "limit")
+limit, err := httputils.GetInt(r, "limit")
 if err != nil {
 return err
 }
@@ -360,7 +361,7 @@ func TagsFindSeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseW
 if err != nil {
 return err
 }
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 metricNames, isPartial, err := netstorage.SearchMetricNames(nil, denyPartialResponse, sq, deadline)
 if err != nil {
 return fmt.Errorf("cannot fetch metric names for %q: %w", sq, err)
@@ -420,12 +421,12 @@ var tagsFindSeriesDuration = metrics.NewSummary(`vm_request_duration_seconds{pat
 // See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags
 func TagValuesHandler(startTime time.Time, at *auth.Token, tagName string, w http.ResponseWriter, r *http.Request) error {
 deadline := searchutils.GetDeadlineForQuery(r, startTime)
-limit, err := searchutils.GetInt(r, "limit")
+limit, err := httputils.GetInt(r, "limit")
 if err != nil {
 return err
 }
 filter := r.FormValue("filter")
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 tagValues, isPartial, err := netstorage.GraphiteTagValues(nil, at.AccountID, at.ProjectID, denyPartialResponse, tagName, filter, *maxGraphiteTagValuesPerSearch, deadline)
 if err != nil {
 return err
@@ -452,12 +453,12 @@ var tagValuesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/t
 // See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags
 func TagsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
 deadline := searchutils.GetDeadlineForQuery(r, startTime)
-limit, err := searchutils.GetInt(r, "limit")
+limit, err := httputils.GetInt(r, "limit")
 if err != nil {
 return err
 }
 filter := r.FormValue("filter")
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 labels, isPartial, err := netstorage.GraphiteTags(nil, at.AccountID, at.ProjectID, denyPartialResponse, filter, *maxGraphiteTagKeysPerSearch, deadline)
 if err != nil {
 return err
@@ -25,6 +25,7 @@ import (
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
@@ -191,7 +192,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 
 startTime := time.Now()
 defer requestDuration.UpdateDuration(startTime)
-tracerEnabled := searchutils.GetBool(r, "trace")
+tracerEnabled := httputils.GetBool(r, "trace")
 qt := querytracer.New(tracerEnabled, r.URL.Path)
 
 // Limit the number of concurrent queries.
@@ -216,7 +217,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 remoteAddr := httpserver.GetQuotedRemoteAddr(r)
 requestURI := httpserver.GetRequestURI(r)
 logger.Infof("client has cancelled the request after %.3f seconds: remoteAddr=%s, requestURI: %q",
-d.Seconds(), remoteAddr, requestURI)
+time.Since(startTime).Seconds(), remoteAddr, requestURI)
 return true
 case <-t.C:
 timerpool.Put(t)
@@ -13,17 +13,18 @@ import (
 "sync/atomic"
 "time"
 
-"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/querystats"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
@@ -97,7 +98,7 @@ func FederateHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter,
 cp.start = cp.end - lookbackDelta
 }
 sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, cp.start, cp.end, cp.filterss, *maxFederateSeries)
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 rss, isPartial, err := netstorage.ProcessSearchQuery(nil, denyPartialResponse, sq, cp.deadline)
 if err != nil {
 return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
@@ -140,7 +141,7 @@ func ExportCSVHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter
 return fmt.Errorf("missing `format` arg; see https://docs.victoriametrics.com/#how-to-export-csv-data")
 }
 fieldNames := strings.Split(format, ",")
-reduceMemUsage := searchutils.GetBool(r, "reduce_mem_usage")
+reduceMemUsage := httputils.GetBool(r, "reduce_mem_usage")
 
 sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, cp.start, cp.end, cp.filterss, *maxExportSeries)
 w.Header().Set("Content-Type", "text/csv; charset=utf-8")
@@ -280,7 +281,7 @@ func ExportHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
 }
 format := r.FormValue("format")
 maxRowsPerLine := int(fastfloat.ParseInt64BestEffort(r.FormValue("max_rows_per_line")))
-reduceMemUsage := searchutils.GetBool(r, "reduce_mem_usage")
+reduceMemUsage := httputils.GetBool(r, "reduce_mem_usage")
 if err := exportHandler(nil, at, w, cp, format, maxRowsPerLine, reduceMemUsage); err != nil {
 return fmt.Errorf("error when exporting data on the time range (start=%d, end=%d): %w", cp.start, cp.end, err)
 }
@@ -525,12 +526,12 @@ var httpClient = &http.Client{
 // Tenants processes /admin/tenants request.
 func Tenants(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 deadline := searchutils.GetDeadlineForStatusRequest(r, startTime)
-start, err := searchutils.GetTime(r, "start", 0)
+start, err := httputils.GetTime(r, "start", 0)
 if err != nil {
 return err
 }
 ct := startTime.UnixNano() / 1e6
-end, err := searchutils.GetTime(r, "end", ct)
+end, err := httputils.GetTime(r, "end", ct)
 if err != nil {
 return err
 }
@@ -562,11 +563,11 @@ func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.To
 if err != nil {
 return err
 }
-limit, err := searchutils.GetInt(r, "limit")
+limit, err := httputils.GetInt(r, "limit")
 if err != nil {
 return err
 }
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, cp.start, cp.end, cp.filterss, *maxUniqueTimeseries)
 labelValues, isPartial, err := netstorage.LabelValues(qt, denyPartialResponse, labelName, sq, limit, cp.deadline)
 if err != nil {
@@ -630,7 +631,7 @@ func TSDBStatusHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Tok
 }
 topN = n
 }
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 start := int64(date*secsPerDay) * 1000
 end := int64((date+1)*secsPerDay)*1000 - 1
 sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, start, end, cp.filterss, *maxTSDBStatusSeries)
@@ -661,11 +662,11 @@ func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token,
 if err != nil {
 return err
 }
-limit, err := searchutils.GetInt(r, "limit")
+limit, err := httputils.GetInt(r, "limit")
 if err != nil {
 return err
 }
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, cp.start, cp.end, cp.filterss, *maxUniqueTimeseries)
 labels, isPartial, err := netstorage.LabelNames(qt, denyPartialResponse, sq, limit, cp.deadline)
 if err != nil {
@@ -689,7 +690,7 @@ func SeriesCountHandler(startTime time.Time, at *auth.Token, w http.ResponseWrit
 defer seriesCountDuration.UpdateDuration(startTime)
 
 deadline := searchutils.GetDeadlineForStatusRequest(r, startTime)
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 n, isPartial, err := netstorage.SeriesCount(nil, at.AccountID, at.ProjectID, denyPartialResponse, deadline)
 if err != nil {
 return fmt.Errorf("cannot obtain series count: %w", err)
@@ -722,13 +723,13 @@ func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token,
 if err != nil {
 return err
 }
-limit, err := searchutils.GetInt(r, "limit")
+limit, err := httputils.GetInt(r, "limit")
 if err != nil {
 return err
 }
 
 sq := storage.NewSearchQuery(at.AccountID, at.ProjectID, cp.start, cp.end, cp.filterss, *maxSeriesLimit)
-denyPartialResponse := searchutils.GetDenyPartialResponse(r)
+denyPartialResponse := httputils.GetDenyPartialResponse(r)
 metricNames, isPartial, err := netstorage.SearchMetricNames(qt, denyPartialResponse, sq, cp.deadline)
 if err != nil {
 return fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
@@ -759,12 +760,12 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w
 
 ct := startTime.UnixNano() / 1e6
 deadline := searchutils.GetDeadlineForQuery(r, startTime)
-mayCache := !searchutils.GetBool(r, "nocache")
+mayCache := !httputils.GetBool(r, "nocache")
 query := r.FormValue("query")
 if len(query) == 0 {
 return fmt.Errorf("missing `query` arg")
 }
-start, err := searchutils.GetTime(r, "time", ct)
+start, err := httputils.GetTime(r, "time", ct)
 if err != nil {
 return err
 }
@@ -772,7 +773,7 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w
 if err != nil {
 return err
 }
-step, err := searchutils.GetDuration(r, "step", lookbackDelta)
+step, err := httputils.GetDuration(r, "step", lookbackDelta)
 if err != nil {
 return err
 }
@@ -836,7 +837,7 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w
 if err != nil {
 return err
 }
-if !searchutils.GetBool(r, "nocache") && ct-start < queryOffset && start-ct < queryOffset {
+if !httputils.GetBool(r, "nocache") && ct-start < queryOffset && start-ct < queryOffset {
 // Adjust start time only if `nocache` arg isn't set.
 // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/241
 startPrev := start
@@ -863,7 +864,7 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w
 return httpserver.GetRequestURI(r)
 },
 
-DenyPartialResponse: searchutils.GetDenyPartialResponse(r),
+DenyPartialResponse: httputils.GetDenyPartialResponse(r),
 QueryStats: qs,
 }
 result, err := promql.Exec(qt, ec, query, true)
@@ -910,15 +911,15 @@ func QueryRangeHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Tok
 if len(query) == 0 {
 return fmt.Errorf("missing `query` arg")
 }
-start, err := searchutils.GetTime(r, "start", ct-defaultStep)
+start, err := httputils.GetTime(r, "start", ct-defaultStep)
 if err != nil {
 return err
 }
-end, err := searchutils.GetTime(r, "end", ct)
+end, err := httputils.GetTime(r, "end", ct)
 if err != nil {
 return err
 }
-step, err := searchutils.GetDuration(r, "step", defaultStep)
+step, err := httputils.GetDuration(r, "step", defaultStep)
 if err != nil {
 return err
 }
@@ -935,7 +936,7 @@ func QueryRangeHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Tok
 func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Token, w http.ResponseWriter, query string,
 start, end, step int64, r *http.Request, ct int64, etfs [][]storage.TagFilter) error {
 deadline := searchutils.GetDeadlineForQuery(r, startTime)
-mayCache := !searchutils.GetBool(r, "nocache")
+mayCache := !httputils.GetBool(r, "nocache")
 lookbackDelta, err := getMaxLookback(r)
 if err != nil {
 return err
@@ -973,7 +974,7 @@ func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, at *auth.Tok
 return httpserver.GetRequestURI(r)
 },
 
-DenyPartialResponse: searchutils.GetDenyPartialResponse(r),
+DenyPartialResponse: httputils.GetDenyPartialResponse(r),
 QueryStats: qs,
 }
 result, err := promql.Exec(qt, ec, query, false)
@@ -1087,13 +1088,13 @@ func getMaxLookback(r *http.Request) (int64, error) {
 if d == 0 {
 d = maxStalenessInterval.Milliseconds()
 }
-maxLookback, err := searchutils.GetDuration(r, "max_lookback", d)
+maxLookback, err := httputils.GetDuration(r, "max_lookback", d)
 if err != nil {
 return 0, err
 }
 d = maxLookback
 if *setLookbackToStep {
-step, err := searchutils.GetDuration(r, "step", d)
+step, err := httputils.GetDuration(r, "step", d)
 if err != nil {
 return 0, err
 }
@@ -1133,7 +1134,7 @@ func getLatencyOffsetMilliseconds(r *http.Request) (int64, error) {
 // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2061#issuecomment-1299109836
 d = 0
 }
-return searchutils.GetDuration(r, "latency_offset", d)
+return httputils.GetDuration(r, "latency_offset", d)
 }
 
 // QueryStatsHandler returns query stats at `/api/v1/status/top_queries`
@@ -1149,7 +1150,7 @@ func QueryStatsHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
 }
 topN = n
 }
-maxLifetimeMsecs, err := searchutils.GetDuration(r, "maxLifetime", 10*60*1000)
+maxLifetimeMsecs, err := httputils.GetDuration(r, "maxLifetime", 10*60*1000)
 if err != nil {
 return fmt.Errorf("cannot parse `maxLifetime` arg: %w", err)
 }
@@ -1223,12 +1224,12 @@ func getCommonParamsWithDefaultDuration(r *http.Request, startTime time.Time, re
 // - extra_filters[]
 func getCommonParams(r *http.Request, startTime time.Time, requireNonEmptyMatch bool) (*commonParams, error) {
 deadline := searchutils.GetDeadlineForQuery(r, startTime)
-start, err := searchutils.GetTime(r, "start", 0)
+start, err := httputils.GetTime(r, "start", 0)
 if err != nil {
 return nil, err
 }
 ct := startTime.UnixNano() / 1e6
-end, err := searchutils.GetTime(r, "end", ct)
+end, err := httputils.GetTime(r, "end", ct)
 if err != nil {
 return nil, err
 }
@@ -3,14 +3,12 @@ package searchutils
 import (
 "flag"
 "fmt"
-"math"
 "net/http"
-"strconv"
 "strings"
 "time"
 
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
-"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
+"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 "github.com/VictoriaMetrics/metricsql"
 )
@@ -19,100 +17,11 @@ var (
 maxExportDuration = flag.Duration("search.maxExportDuration", time.Hour*24*30, "The maximum duration for /api/v1/export call")
 maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution")
 maxStatusRequestDuration = flag.Duration("search.maxStatusRequestDuration", time.Minute*5, "The maximum duration for /api/v1/status/* requests")
-denyPartialResponse = flag.Bool("search.denyPartialResponse", false, "Whether to deny partial responses if a part of -storageNode instances fail to perform queries; "+
-"this trades availability over consistency; see also -search.maxQueryDuration")
 )
 
-func roundToSeconds(ms int64) int64 {
-return ms - ms%1000
-}
-
-// GetInt returns integer value from the given argKey.
-func GetInt(r *http.Request, argKey string) (int, error) {
-argValue := r.FormValue(argKey)
-if len(argValue) == 0 {
-return 0, nil
-}
-n, err := strconv.Atoi(argValue)
-if err != nil {
-return 0, fmt.Errorf("cannot parse integer %q=%q: %w", argKey, argValue, err)
-}
-return n, nil
-}
-
-// GetTime returns time from the given argKey query arg.
-//
-// If argKey is missing in r, then defaultMs rounded to seconds is returned.
-// The rounding is needed in order to align query results in Grafana
-// executed at different times. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/720
-func GetTime(r *http.Request, argKey string, defaultMs int64) (int64, error) {
-argValue := r.FormValue(argKey)
-if len(argValue) == 0 {
-return roundToSeconds(defaultMs), nil
-}
-// Handle Prometheus'-provided minTime and maxTime.
-// See https://github.com/prometheus/client_golang/issues/614
-switch argValue {
-case prometheusMinTimeFormatted:
-return minTimeMsecs, nil
-case prometheusMaxTimeFormatted:
-return maxTimeMsecs, nil
-}
-// Parse argValue
-secs, err := promutils.ParseTime(argValue)
-if err != nil {
-return 0, fmt.Errorf("cannot parse %s=%s: %w", argKey, argValue, err)
-}
-msecs := int64(secs * 1e3)
-if msecs < minTimeMsecs {
-msecs = 0
-}
-if msecs > maxTimeMsecs {
-msecs = maxTimeMsecs
-}
-return msecs, nil
-}
-
-var (
-// These constants were obtained from https://github.com/prometheus/prometheus/blob/91d7175eaac18b00e370965f3a8186cc40bf9f55/web/api/v1/api.go#L442
-// See https://github.com/prometheus/client_golang/issues/614 for details.
-prometheusMinTimeFormatted = time.Unix(math.MinInt64/1000+62135596801, 0).UTC().Format(time.RFC3339Nano)
-prometheusMaxTimeFormatted = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC().Format(time.RFC3339Nano)
-)
-
-const (
-// These values prevent from overflow when storing msec-precision time in int64.
-minTimeMsecs = 0 // use 0 instead of `int64(-1<<63) / 1e6` because the storage engine doesn't actually support negative time
-maxTimeMsecs = int64(1<<63-1) / 1e6
-)
-
-// GetDuration returns duration from the given argKey query arg.
-func GetDuration(r *http.Request, argKey string, defaultValue int64) (int64, error) {
-argValue := r.FormValue(argKey)
-if len(argValue) == 0 {
-return defaultValue, nil
-}
-secs, err := strconv.ParseFloat(argValue, 64)
-if err != nil {
-// Try parsing string format
-d, err := promutils.ParseDuration(argValue)
-if err != nil {
-return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
-}
-secs = d.Seconds()
-}
-msecs := int64(secs * 1e3)
-if msecs <= 0 || msecs > maxDurationMsecs {
-return 0, fmt.Errorf("%q=%dms is out of allowed range [%d ... %d]", argKey, msecs, 0, int64(maxDurationMsecs))
-}
-return msecs, nil
-}
-
-const maxDurationMsecs = 100 * 365 * 24 * 3600 * 1000
-
 // GetMaxQueryDuration returns the maximum duration for query from r.
 func GetMaxQueryDuration(r *http.Request) time.Duration {
-dms, err := GetDuration(r, "timeout", 0)
+dms, err := httputils.GetDuration(r, "timeout", 0)
 if err != nil {
 dms = 0
 }
@@ -142,7 +51,7 @@ func GetDeadlineForExport(r *http.Request, startTime time.Time) Deadline {
 }
 
 func getDeadlineWithMaxDuration(r *http.Request, startTime time.Time, dMax int64, flagHint string) Deadline {
-d, err := GetDuration(r, "timeout", 0)
+d, err := httputils.GetDuration(r, "timeout", 0)
 if err != nil {
 d = 0
 }
@@ -153,28 +62,6 @@ func getDeadlineWithMaxDuration(r *http.Request, startTime time.Time, dMax int64
 return NewDeadline(startTime, timeout, flagHint)
 }
 
-// GetBool returns boolean value from the given argKey query arg.
-func GetBool(r *http.Request, argKey string) bool {
-argValue := r.FormValue(argKey)
-switch strings.ToLower(argValue) {
-case "", "0", "f", "false", "no":
-return false
-default:
-return true
-}
-}
-
-// GetDenyPartialResponse returns whether partial responses are denied.
-func GetDenyPartialResponse(r *http.Request) bool {
-if *denyPartialResponse {
-return true
-}
-if r == nil {
-return false
-}
-return GetBool(r, "deny_partial_response")
-}
-
 // Deadline contains deadline with the corresponding timeout for pretty error messages.
 type Deadline struct {
 deadline uint64
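
The parsers removed above keep their observable behavior after the move; a small sketch using values from the removed tests (the test file follows below), assuming the functions are now exported from lib/httputils with the signatures shown in the hunks above:

package example

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)

func demoParsing() error {
	u := "http://foo.bar/baz?step=" + url.QueryEscape("5m3.5s") +
		"&start=" + url.QueryEscape("2019-02-02T01:01:01")
	r, err := http.NewRequest(http.MethodGet, u, nil)
	if err != nil {
		return err
	}
	stepMs, _ := httputils.GetDuration(r, "step", 123) // 303500: "5m3.5s" expressed in milliseconds
	startMs, _ := httputils.GetTime(r, "start", 0)     // 1549069261000: 2019-02-02T01:01:01 UTC in milliseconds
	defMs, _ := httputils.GetTime(r, "end", 123456)    // 123000: missing arg, so the default is rounded down to seconds
	fmt.Println(stepMs, startMs, defMs)
	return nil
}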
@ -11,149 +11,6 @@ import (
|
||||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestGetDurationSuccess(t *testing.T) {
    f := func(s string, dExpected int64) {
        t.Helper()
        urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
        r, err := http.NewRequest(http.MethodGet, urlStr, nil)
        if err != nil {
            t.Fatalf("unexpected error in NewRequest: %s", err)
        }

        // Verify defaultValue
        d, err := GetDuration(r, "foo", 123456)
        if err != nil {
            t.Fatalf("unexpected error when obtaining default time from GetDuration(%q): %s", s, err)
        }
        if d != 123456 {
            t.Fatalf("unexpected default value for GetDuration(%q); got %d; want %d", s, d, 123456)
        }

        // Verify dExpected
        d, err = GetDuration(r, "s", 123)
        if err != nil {
            t.Fatalf("unexpected error in GetDuration(%q): %s", s, err)
        }
        if d != dExpected {
            t.Fatalf("unexpected timestamp for GetDuration(%q); got %d; want %d", s, d, dExpected)
        }
    }

    f("1.234", 1234)
    f("1.23ms", 1)
    f("1.23s", 1230)
    f("2s56ms", 2056)
    f("2s-5ms", 1995)
    f("5m3.5s", 303500)
    f("2h", 7200000)
    f("1d", 24*3600*1000)
    f("7d5h4m3s534ms", 623043534)
}

func TestGetDurationError(t *testing.T) {
    f := func(s string) {
        t.Helper()
        urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
        r, err := http.NewRequest(http.MethodGet, urlStr, nil)
        if err != nil {
            t.Fatalf("unexpected error in NewRequest: %s", err)
        }

        if _, err := GetDuration(r, "s", 123); err == nil {
            t.Fatalf("expecting non-nil error in GetDuration(%q)", s)
        }
    }

    // Negative durations aren't supported
    f("-1.234")

    // Invalid duration
    f("foo")

    // Invalid suffix
    f("1md")
}

func TestGetTimeSuccess(t *testing.T) {
    f := func(s string, timestampExpected int64) {
        t.Helper()
        urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
        r, err := http.NewRequest(http.MethodGet, urlStr, nil)
        if err != nil {
            t.Fatalf("unexpected error in NewRequest: %s", err)
        }

        // Verify defaultValue
        ts, err := GetTime(r, "foo", 123456)
        if err != nil {
            t.Fatalf("unexpected error when obtaining default time from GetTime(%q): %s", s, err)
        }
        if ts != 123000 {
            t.Fatalf("unexpected default value for GetTime(%q); got %d; want %d", s, ts, 123000)
        }

        // Verify timestampExpected
        ts, err = GetTime(r, "s", 123)
        if err != nil {
            t.Fatalf("unexpected error in GetTime(%q): %s", s, err)
        }
        if ts != timestampExpected {
            t.Fatalf("unexpected timestamp for GetTime(%q); got %d; want %d", s, ts, timestampExpected)
        }
    }

    f("2019", 1546300800000)
    f("2019-01", 1546300800000)
    f("2019-02", 1548979200000)
    f("2019-02-01", 1548979200000)
    f("2019-02-02", 1549065600000)
    f("2019-02-02T00", 1549065600000)
    f("2019-02-02T01", 1549069200000)
    f("2019-02-02T01:00", 1549069200000)
    f("2019-02-02T01:01", 1549069260000)
    f("2019-02-02T01:01:00", 1549069260000)
    f("2019-02-02T01:01:01", 1549069261000)
    f("2019-07-07T20:01:02Z", 1562529662000)
    f("2019-07-07T20:47:40+03:00", 1562521660000)
    f("-292273086-05-16T16:47:06Z", minTimeMsecs)
    f("292277025-08-18T07:12:54.999999999Z", maxTimeMsecs)
    f("1562529662.324", 1562529662324)
    f("-9223372036.854", minTimeMsecs)
    f("-9223372036.855", minTimeMsecs)
    f("9223372036.855", maxTimeMsecs)
}

func TestGetTimeError(t *testing.T) {
    f := func(s string) {
        t.Helper()
        urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
        r, err := http.NewRequest(http.MethodGet, urlStr, nil)
        if err != nil {
            t.Fatalf("unexpected error in NewRequest: %s", err)
        }

        if _, err := GetTime(r, "s", 123); err == nil {
            t.Fatalf("expecting non-nil error in GetTime(%q)", s)
        }
    }

    f("foo")
    f("foo1")
    f("1245-5")
    f("2022-x7")
    f("2022-02-x7")
    f("2022-02-02Tx7")
    f("2022-02-02T00:x7")
    f("2022-02-02T00:00:x7")
    f("2022-02-02T00:00:00a")
    f("2019-07-07T20:01:02Zisdf")
    f("2019-07-07T20:47:40+03:00123")
    f("-292273086-05-16T16:47:07Z")
    f("292277025-08-18T07:12:54.999999998Z")
    f("123md")
    f("-12.3md")
}

func TestGetExtraTagFilters(t *testing.T) {
    httpReqWithForm := func(qs string) *http.Request {
        q, err := url.ParseQuery(qs)

34  lib/httputils/bool.go  Normal file
@ -0,0 +1,34 @@
package httputils

import (
    "flag"
    "net/http"
    "strings"
)

var (
    denyPartialResponse = flag.Bool("search.denyPartialResponse", false, "Whether to deny partial responses if a part of -storageNode instances fail to perform queries; "+
        "this trades availability over consistency; see also -search.maxQueryDuration")
)

// GetBool returns boolean value from the given argKey query arg.
func GetBool(r *http.Request, argKey string) bool {
    argValue := r.FormValue(argKey)
    switch strings.ToLower(argValue) {
    case "", "0", "f", "false", "no":
        return false
    default:
        return true
    }
}

// GetDenyPartialResponse returns whether partial responses are denied.
func GetDenyPartialResponse(r *http.Request) bool {
    if *denyPartialResponse {
        return true
    }
    if r == nil {
        return false
    }
    return GetBool(r, "deny_partial_response")
}
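For reference, a minimal usage sketch (not part of this commit) of how GetBool and GetDenyPartialResponse from lib/httputils could be consumed by an HTTP handler. The /select/query route, the verbose arg and the response format are hypothetical, and the sketch assumes the VictoriaMetrics module is available on the import path.

package main

// Hypothetical handler sketch: reads the per-request deny_partial_response
// override and an arbitrary boolean query arg via lib/httputils.
import (
    "fmt"
    "log"
    "net/http"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)

func queryHandler(w http.ResponseWriter, r *http.Request) {
    // True if either ?deny_partial_response=1 is set on the request or the
    // global -search.denyPartialResponse flag is enabled.
    denyPartial := httputils.GetDenyPartialResponse(r)
    // Any other boolean arg reads the same way: "", "0", "f", "false", "no"
    // are false, everything else is true. "verbose" is illustrative only.
    verbose := httputils.GetBool(r, "verbose")
    fmt.Fprintf(w, "deny_partial_response=%v verbose=%v\n", denyPartial, verbose)
}

func main() {
    http.HandleFunc("/select/query", queryHandler)
    log.Fatal(http.ListenAndServe(":8481", nil))
}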
33  lib/httputils/duration.go  Normal file
@ -0,0 +1,33 @@
package httputils

import (
    "fmt"
    "net/http"
    "strconv"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

// GetDuration returns duration in milliseconds from the given argKey query arg.
func GetDuration(r *http.Request, argKey string, defaultValue int64) (int64, error) {
    argValue := r.FormValue(argKey)
    if len(argValue) == 0 {
        return defaultValue, nil
    }
    secs, err := strconv.ParseFloat(argValue, 64)
    if err != nil {
        // Try parsing string format
        d, err := promutils.ParseDuration(argValue)
        if err != nil {
            return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
        }
        secs = d.Seconds()
    }
    msecs := int64(secs * 1e3)
    if msecs <= 0 || msecs > maxDurationMsecs {
        return 0, fmt.Errorf("%q=%dms is out of allowed range [%d ... %d]", argKey, msecs, 0, int64(maxDurationMsecs))
    }
    return msecs, nil
}

const maxDurationMsecs = 100 * 365 * 24 * 3600 * 1000
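A minimal sketch (not part of this commit) of GetDuration semantics: a bare number is interpreted as seconds, while duration strings such as 5m go through promutils.ParseDuration. The query URL below is illustrative, and the sketch assumes the VictoriaMetrics module is importable.

package main

// Demonstrates GetDuration on a synthetic request with a "step" arg.
import (
    "fmt"
    "net/http"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)

func main() {
    r, err := http.NewRequest(http.MethodGet, "http://localhost/api/v1/query_range?step=5m", nil)
    if err != nil {
        panic(err)
    }
    // Default of 1 minute is returned only when the arg is missing.
    ms, err := httputils.GetDuration(r, "step", 60*1000)
    if err != nil {
        panic(err)
    }
    fmt.Println(ms) // 300000
}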
71  lib/httputils/duration_test.go  Normal file
@ -0,0 +1,71 @@
package httputils

import (
    "fmt"
    "net/http"
    "net/url"
    "testing"
)

func TestGetDurationSuccess(t *testing.T) {
    f := func(s string, dExpected int64) {
        t.Helper()
        urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
        r, err := http.NewRequest(http.MethodGet, urlStr, nil)
        if err != nil {
            t.Fatalf("unexpected error in NewRequest: %s", err)
        }

        // Verify defaultValue
        d, err := GetDuration(r, "foo", 123456)
        if err != nil {
            t.Fatalf("unexpected error when obtaining default time from GetDuration(%q): %s", s, err)
        }
        if d != 123456 {
            t.Fatalf("unexpected default value for GetDuration(%q); got %d; want %d", s, d, 123456)
        }

        // Verify dExpected
        d, err = GetDuration(r, "s", 123)
        if err != nil {
            t.Fatalf("unexpected error in GetDuration(%q): %s", s, err)
        }
        if d != dExpected {
            t.Fatalf("unexpected timestamp for GetDuration(%q); got %d; want %d", s, d, dExpected)
        }
    }

    f("1.234", 1234)
    f("1.23ms", 1)
    f("1.23s", 1230)
    f("2s56ms", 2056)
    f("2s-5ms", 1995)
    f("5m3.5s", 303500)
    f("2h", 7200000)
    f("1d", 24*3600*1000)
    f("7d5h4m3s534ms", 623043534)
}

func TestGetDurationError(t *testing.T) {
    f := func(s string) {
        t.Helper()
        urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
        r, err := http.NewRequest(http.MethodGet, urlStr, nil)
        if err != nil {
            t.Fatalf("unexpected error in NewRequest: %s", err)
        }

        if _, err := GetDuration(r, "s", 123); err == nil {
            t.Fatalf("expecting non-nil error in GetDuration(%q)", s)
        }
    }

    // Negative durations aren't supported
    f("-1.234")

    // Invalid duration
    f("foo")

    // Invalid suffix
    f("1md")
}
20  lib/httputils/int.go  Normal file
@ -0,0 +1,20 @@
package httputils

import (
    "fmt"
    "net/http"
    "strconv"
)

// GetInt returns integer value from the given argKey.
func GetInt(r *http.Request, argKey string) (int, error) {
    argValue := r.FormValue(argKey)
    if len(argValue) == 0 {
        return 0, nil
    }
    n, err := strconv.Atoi(argValue)
    if err != nil {
        return 0, fmt.Errorf("cannot parse integer %q=%q: %w", argKey, argValue, err)
    }
    return n, nil
}
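An illustrative sketch (not part of this commit) of GetInt: a missing arg yields 0 without an error, while a malformed value returns a descriptive error. The "limit" arg and URL below are only examples.

package main

// Demonstrates GetInt on a synthetic request with a "limit" arg.
import (
    "fmt"
    "net/http"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)

func main() {
    r, err := http.NewRequest(http.MethodGet, "http://localhost/api/v1/labels?limit=100", nil)
    if err != nil {
        panic(err)
    }
    limit, err := httputils.GetInt(r, "limit")
    if err != nil {
        panic(err)
    }
    fmt.Println(limit) // 100; an absent "limit" would print 0
}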
60  lib/httputils/time.go  Normal file
@ -0,0 +1,60 @@
package httputils

import (
    "fmt"
    "math"
    "net/http"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

// GetTime returns time in milliseconds from the given argKey query arg.
//
// If argKey is missing in r, then defaultMs rounded to seconds is returned.
// The rounding is needed in order to align query results in Grafana
// executed at different times. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/720
func GetTime(r *http.Request, argKey string, defaultMs int64) (int64, error) {
    argValue := r.FormValue(argKey)
    if len(argValue) == 0 {
        return roundToSeconds(defaultMs), nil
    }
    // Handle Prometheus'-provided minTime and maxTime.
    // See https://github.com/prometheus/client_golang/issues/614
    switch argValue {
    case prometheusMinTimeFormatted:
        return minTimeMsecs, nil
    case prometheusMaxTimeFormatted:
        return maxTimeMsecs, nil
    }
    // Parse argValue
    secs, err := promutils.ParseTime(argValue)
    if err != nil {
        return 0, fmt.Errorf("cannot parse %s=%s: %w", argKey, argValue, err)
    }
    msecs := int64(secs * 1e3)
    if msecs < minTimeMsecs {
        msecs = 0
    }
    if msecs > maxTimeMsecs {
        msecs = maxTimeMsecs
    }
    return msecs, nil
}

var (
    // These constants were obtained from https://github.com/prometheus/prometheus/blob/91d7175eaac18b00e370965f3a8186cc40bf9f55/web/api/v1/api.go#L442
    // See https://github.com/prometheus/client_golang/issues/614 for details.
    prometheusMinTimeFormatted = time.Unix(math.MinInt64/1000+62135596801, 0).UTC().Format(time.RFC3339Nano)
    prometheusMaxTimeFormatted = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC().Format(time.RFC3339Nano)
)

const (
    // These values prevent from overflow when storing msec-precision time in int64.
    minTimeMsecs = 0 // use 0 instead of `int64(-1<<63) / 1e6` because the storage engine doesn't actually support negative time
    maxTimeMsecs = int64(1<<63-1) / 1e6
)

func roundToSeconds(ms int64) int64 {
    return ms - ms%1000
}
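A hypothetical sketch (not part of this commit) of GetTime: RFC3339 timestamps and fractional unix seconds are both accepted, out-of-range values are clamped to [minTimeMsecs ... maxTimeMsecs], and a missing arg falls back to the default rounded down to whole seconds. The start/end args and URL below are illustrative, assuming the VictoriaMetrics module is importable.

package main

// Demonstrates GetTime on a synthetic request carrying only a "start" arg.
import (
    "fmt"
    "net/http"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)

func main() {
    r, err := http.NewRequest(http.MethodGet,
        "http://localhost/api/v1/query_range?start=2019-07-07T20:01:02Z", nil)
    if err != nil {
        panic(err)
    }
    startMs, err := httputils.GetTime(r, "start", time.Now().UnixMilli())
    if err != nil {
        panic(err)
    }
    // "end" is absent, so the default (now, rounded down to whole seconds) is used.
    endMs, err := httputils.GetTime(r, "end", time.Now().UnixMilli())
    if err != nil {
        panic(err)
    }
    fmt.Println(startMs, endMs) // 1562529662000 <current time in ms, second-aligned>
}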
88  lib/httputils/time_test.go  Normal file
@ -0,0 +1,88 @@
package httputils

import (
    "fmt"
    "net/http"
    "net/url"
    "testing"
)

func TestGetTimeSuccess(t *testing.T) {
    f := func(s string, timestampExpected int64) {
        t.Helper()
        urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
        r, err := http.NewRequest(http.MethodGet, urlStr, nil)
        if err != nil {
            t.Fatalf("unexpected error in NewRequest: %s", err)
        }

        // Verify defaultValue
        ts, err := GetTime(r, "foo", 123456)
        if err != nil {
            t.Fatalf("unexpected error when obtaining default time from GetTime(%q): %s", s, err)
        }
        if ts != 123000 {
            t.Fatalf("unexpected default value for GetTime(%q); got %d; want %d", s, ts, 123000)
        }

        // Verify timestampExpected
        ts, err = GetTime(r, "s", 123)
        if err != nil {
            t.Fatalf("unexpected error in GetTime(%q): %s", s, err)
        }
        if ts != timestampExpected {
            t.Fatalf("unexpected timestamp for GetTime(%q); got %d; want %d", s, ts, timestampExpected)
        }
    }

    f("2019", 1546300800000)
    f("2019-01", 1546300800000)
    f("2019-02", 1548979200000)
    f("2019-02-01", 1548979200000)
    f("2019-02-02", 1549065600000)
    f("2019-02-02T00", 1549065600000)
    f("2019-02-02T01", 1549069200000)
    f("2019-02-02T01:00", 1549069200000)
    f("2019-02-02T01:01", 1549069260000)
    f("2019-02-02T01:01:00", 1549069260000)
    f("2019-02-02T01:01:01", 1549069261000)
    f("2019-07-07T20:01:02Z", 1562529662000)
    f("2019-07-07T20:47:40+03:00", 1562521660000)
    f("-292273086-05-16T16:47:06Z", minTimeMsecs)
    f("292277025-08-18T07:12:54.999999999Z", maxTimeMsecs)
    f("1562529662.324", 1562529662324)
    f("-9223372036.854", minTimeMsecs)
    f("-9223372036.855", minTimeMsecs)
    f("9223372036.855", maxTimeMsecs)
}

func TestGetTimeError(t *testing.T) {
    f := func(s string) {
        t.Helper()
        urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
        r, err := http.NewRequest(http.MethodGet, urlStr, nil)
        if err != nil {
            t.Fatalf("unexpected error in NewRequest: %s", err)
        }

        if _, err := GetTime(r, "s", 123); err == nil {
            t.Fatalf("expecting non-nil error in GetTime(%q)", s)
        }
    }

    f("foo")
    f("foo1")
    f("1245-5")
    f("2022-x7")
    f("2022-02-x7")
    f("2022-02-02Tx7")
    f("2022-02-02T00:x7")
    f("2022-02-02T00:00:x7")
    f("2022-02-02T00:00:00a")
    f("2019-07-07T20:01:02Zisdf")
    f("2019-07-07T20:47:40+03:00123")
    f("-292273086-05-16T16:47:07Z")
    f("292277025-08-18T07:12:54.999999998Z")
    f("123md")
    f("-12.3md")
}