package promql

import (
	"flag"
	"fmt"
	"math"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/querystats"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/metrics"
	"github.com/VictoriaMetrics/metricsql"
)

var (
	logSlowQueryDuration   = flag.Duration("search.logSlowQueryDuration", 5*time.Second, "Log queries with execution time exceeding this value. Zero disables slow query logging")
	treatDotsAsIsInRegexps = flag.Bool("search.treatDotsAsIsInRegexps", false, "Whether to treat dots as is in regexp label filters used in queries. "+
		`For example, foo{bar=~"a.b.c"} will be automatically converted to foo{bar=~"a\\.b\\.c"}, i.e. all the dots in regexp filters will be automatically escaped `+
		`in order to match only dot char instead of matching any char. Dots in ".+", ".*" and ".{n}" regexps aren't escaped. `+
		`This option is DEPRECATED in favor of {__graphite__="a.*.c"} syntax for selecting metrics matching the given Graphite metrics filter`)
)

var slowQueries = metrics.NewCounter(`vm_slow_queries_total`)

// Exec executes q for the given ec.
func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result, error) {
	if *logSlowQueryDuration > 0 {
		startTime := time.Now()
		defer func() {
			d := time.Since(startTime)
			if d >= *logSlowQueryDuration {
				logger.Warnf("slow query according to -search.logSlowQueryDuration=%s: remoteAddr=%s, duration=%.3f seconds, start=%d, end=%d, step=%d, accountID=%d, projectID=%d, query=%q",
					*logSlowQueryDuration, ec.QuotedRemoteAddr, d.Seconds(), ec.Start/1000, ec.End/1000, ec.Step/1000, ec.AuthToken.AccountID, ec.AuthToken.ProjectID, q)
				slowQueries.Inc()
			}
		}()
	}
	if querystats.Enabled() {
		startTime := time.Now()
		ac := ec.AuthToken
		defer querystats.RegisterQuery(ac.AccountID, ac.ProjectID, q, ec.End-ec.Start, startTime)
	}

	ec.validate()

	e, err := parsePromQLWithCache(q)
	if err != nil {
		return nil, err
	}
	qid := activeQueriesV.Add(ec, q)
	rv, err := evalExpr(ec, e)
	activeQueriesV.Remove(qid)
	if err != nil {
		return nil, err
	}
	if isFirstPointOnly {
		// Remove all the points except the first one from every time series.
		for _, ts := range rv {
			ts.Values = ts.Values[:1]
			ts.Timestamps = ts.Timestamps[:1]
		}
	}
	maySort := maySortResults(e, rv)
	result, err := timeseriesToResult(rv, maySort)
	if err != nil {
		return nil, err
	}
	return result, err
}

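// maySortResults reports whether the results of e may be sorted by metric name.
// Sorting is skipped for responses with more than 100 series and for sort* functions,
// which define their own ordering.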
func maySortResults(e metricsql.Expr, tss []*timeseries) bool {
	if len(tss) > 100 {
		// There is no sense in sorting a lot of results
		return false
	}
	fe, ok := e.(*metricsql.FuncExpr)
	if !ok {
		return true
	}
	switch fe.Name {
	case "sort", "sort_desc",
		"sort_by_label", "sort_by_label_desc":
		return false
	default:
		return true
	}
}

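// timeseriesToResult converts tss to netstorage.Result entries, optionally sorting them
// by marshaled metric name. It returns an error on duplicate output time series.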
func timeseriesToResult(tss []*timeseries, maySort bool) ([]netstorage.Result, error) {
	tss = removeNaNs(tss)
	result := make([]netstorage.Result, len(tss))
	m := make(map[string]struct{}, len(tss))
	bb := bbPool.Get()
	for i, ts := range tss {
		bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
		if _, ok := m[string(bb.B)]; ok {
			return nil, fmt.Errorf(`duplicate output timeseries: %s`, stringMetricName(&ts.MetricName))
		}
		m[string(bb.B)] = struct{}{}
		rs := &result[i]
		rs.MetricNameMarshaled = append(rs.MetricNameMarshaled[:0], bb.B...)
		rs.MetricName.CopyFrom(&ts.MetricName)
		rs.Values = append(rs.Values[:0], ts.Values...)
		rs.Timestamps = append(rs.Timestamps[:0], ts.Timestamps...)
	}
	bbPool.Put(bb)
	if maySort {
		sort.Slice(result, func(i, j int) bool {
			return string(result[i].MetricNameMarshaled) < string(result[j].MetricNameMarshaled)
		})
	}
	return result, nil
}

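// removeNaNs drops time series consisting of NaN values only from tss.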
func removeNaNs(tss []*timeseries) []*timeseries {
	rvs := tss[:0]
	for _, ts := range tss {
		allNans := true
		for _, v := range ts.Values {
			if !math.IsNaN(v) {
				allNans = false
				break
			}
		}
		if allNans {
			// Skip timeseries with all NaNs.
			continue
		}
		rvs = append(rvs, ts)
	}
	for i := len(rvs); i < len(tss); i++ {
		// Zero unused time series, so GC could reclaim them.
		tss[i] = nil
	}
	return rvs
}

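// adjustCmpOps rewrites comparisons with a scalar on the left-hand side, e.g. `0.5 < foo`,
// into the equivalent form with the scalar on the right-hand side, e.g. `foo > 0.5`.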
func adjustCmpOps(e metricsql.Expr) metricsql.Expr {
	metricsql.VisitAll(e, func(expr metricsql.Expr) {
		be, ok := expr.(*metricsql.BinaryOpExpr)
		if !ok {
			return
		}
		if !metricsql.IsBinaryOpCmp(be.Op) {
			return
		}
		if isNumberExpr(be.Right) || !isScalarExpr(be.Left) {
			return
		}
		// Convert 'num cmpOp query' expression to `query reverseCmpOp num` expression
		// like Prometheus does. For instance, `0.5 < foo` must be converted to `foo > 0.5`
		// in order to return valid values for `foo` that are bigger than 0.5.
		be.Right, be.Left = be.Left, be.Right
		be.Op = getReverseCmpOp(be.Op)
	})
	return e
}

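// isNumberExpr reports whether e is a number literal.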
func isNumberExpr(e metricsql.Expr) bool {
	_, ok := e.(*metricsql.NumberExpr)
	return ok
}

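// isScalarExpr reports whether e evaluates to a scalar, i.e. it is either a number literal or a time() call.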
func isScalarExpr(e metricsql.Expr) bool {
	if isNumberExpr(e) {
		return true
	}
	if fe, ok := e.(*metricsql.FuncExpr); ok {
		// time() returns scalar in PromQL - see https://prometheus.io/docs/prometheus/latest/querying/functions/#time
		return strings.ToLower(fe.Name) == "time"
	}
	return false
}

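// getReverseCmpOp returns the comparison operator with swapped operands for op.
// For example, getReverseCmpOp(">=") returns "<=".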
func getReverseCmpOp(op string) string {
	switch op {
	case ">":
		return "<"
	case "<":
		return ">"
	case ">=":
		return "<="
	case "<=":
		return ">="
	default:
		// There is no need to change `==` and `!=`.
		return op
	}
}

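// parsePromQLWithCache parses q into an expression tree, applies optimizations and comparison
// adjustments, and caches the outcome (including parse errors) in parseCacheV.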
func parsePromQLWithCache(q string) (metricsql.Expr, error) {
	pcv := parseCacheV.Get(q)
	if pcv == nil {
		e, err := metricsql.Parse(q)
		if err == nil {
			e = metricsql.Optimize(e)
			e = adjustCmpOps(e)
			if *treatDotsAsIsInRegexps {
				e = escapeDotsInRegexpLabelFilters(e)
			}
		}
		pcv = &parseCacheValue{
			e:   e,
			err: err,
		}
		parseCacheV.Put(q, pcv)
	}
	if pcv.err != nil {
		return nil, pcv.err
	}
	return pcv.e, nil
}

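// escapeDotsInRegexpLabelFilters escapes dots in all the regexp label filters found in e.
// It is applied only when -search.treatDotsAsIsInRegexps is set.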
func escapeDotsInRegexpLabelFilters(e metricsql.Expr) metricsql.Expr {
	metricsql.VisitAll(e, func(expr metricsql.Expr) {
		me, ok := expr.(*metricsql.MetricExpr)
		if !ok {
			return
		}
		for i := range me.LabelFilters {
			f := &me.LabelFilters[i]
			if f.IsRegexp {
				f.Value = escapeDots(f.Value)
			}
		}
	})
	return e
}

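// escapeDots escapes dots in the regexp s unless they are already escaped
// or followed by a regexp modifier ('*', '+' or '{').
// For example, escapeDots("foo.bar.*") returns `foo\.bar.*`.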
func escapeDots(s string) string {
	dotsCount := strings.Count(s, ".")
	if dotsCount <= 0 {
		return s
	}
	result := make([]byte, 0, len(s)+2*dotsCount)
	for i := 0; i < len(s); i++ {
		if s[i] == '.' && (i == 0 || s[i-1] != '\\') && (i+1 == len(s) || i+1 < len(s) && s[i+1] != '*' && s[i+1] != '+' && s[i+1] != '{') {
			// Escape a dot if the following conditions are met:
			// - it isn't escaped already, i.e. there is no `\` char before the dot.
			// - there are no regexp modifiers such as '+', '*' or '{' after the dot.
			result = append(result, '\\', '.')
		} else {
			result = append(result, s[i])
		}
	}
	return string(result)
}

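// parseCacheV caches parsed expressions and exposes its stats via vm_cache_* metrics.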
var parseCacheV = func() *parseCache {
	pc := &parseCache{
		m: make(map[string]*parseCacheValue),
	}
	metrics.NewGauge(`vm_cache_requests_total{type="promql/parse"}`, func() float64 {
		return float64(pc.Requests())
	})
	metrics.NewGauge(`vm_cache_misses_total{type="promql/parse"}`, func() float64 {
		return float64(pc.Misses())
	})
	metrics.NewGauge(`vm_cache_entries{type="promql/parse"}`, func() float64 {
		return float64(pc.Len())
	})
	return pc
}()

const parseCacheMaxLen = 10e3

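// parseCacheValue holds the outcome of parsing a single query - either the expression or the parse error.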
type parseCacheValue struct {
	e   metricsql.Expr
	err error
}

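// parseCache is a size-limited cache mapping query strings to their parse results.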
type parseCache struct {
	// Move atomic counters to the top of struct for 8-byte alignment on 32-bit arch.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212
	requests uint64
	misses   uint64

	m  map[string]*parseCacheValue
	mu sync.RWMutex
}

func (pc *parseCache) Requests() uint64 {
	return atomic.LoadUint64(&pc.requests)
}

func (pc *parseCache) Misses() uint64 {
	return atomic.LoadUint64(&pc.misses)
}

func (pc *parseCache) Len() uint64 {
	pc.mu.RLock()
	n := len(pc.m)
	pc.mu.RUnlock()
	return uint64(n)
}

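// Get returns the cached parse result for q or nil on a cache miss.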
func (pc *parseCache) Get(q string) *parseCacheValue {
	atomic.AddUint64(&pc.requests, 1)
	pc.mu.RLock()
	pcv := pc.m[q]
	pc.mu.RUnlock()
	if pcv == nil {
		atomic.AddUint64(&pc.misses, 1)
	}
	return pcv
}

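// Put stores pcv for q, evicting about 10% of entries once the cache exceeds parseCacheMaxLen.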
func (pc *parseCache) Put(q string, pcv *parseCacheValue) {
	pc.mu.Lock()
	overflow := len(pc.m) - parseCacheMaxLen
	if overflow > 0 {
		// Remove 10% of items from the cache.
		overflow = int(float64(len(pc.m)) * 0.1)
		for k := range pc.m {
			delete(pc.m, k)
			overflow--
			if overflow <= 0 {
				break
			}
		}
	}
	pc.m[q] = pcv
	pc.mu.Unlock()
}